module COMPLEX11 where
import Data.Complex
import Numeric
x = 0 :+ 3
y = 1 + x
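-- after numeric defaulting both values are Complex Double; y evaluates to 1.0 :+ 3.0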
|
import incidence_world.level05 --hide
open IncidencePlane --hide
/-
# Incidence World
## Level 6: almost there...
Congratulations! You're almost there! To solve this level, we provide you with the mathematical proof on paper. Read it carefully and make
an effort to understand every bit of it. Then, try to type the mathematical proof in Lean on your own. If needed, you can go back to the previous
levels to recall how to use some tactics. By the way, don't forget your list of theorem statements! Just as a final hint, you may want to start
your proof by typing `rcases (point_existence_postulate P) with ⟨Q, R, ⟨hPQ, hPR, hQR,H⟩⟩,`... Good luck!
## The mathematical proof on paper...
**Claim:** There are at least two different lines passing through a given point.
**Proof:** Let P be the given point.
By the lemma `point_existence_postulate`, there exist two points Q and R such that the points P, Q and R are non-collinear.
By the first axiom of incidence, let `r` be the line that passes through (= is incident with) the points P and Q.
By the first axiom of incidence, let `s` be the line that passes through the points P and R.
By the lemma `ne_of_not_share_point`, since the point R is incident with the line through P and R but not with the
line through P and Q, the lines `r` and `s` are not equal.
Hence, we have shown that there are at least two different lines passing through a given point.
-/
/- Hint : Click here for a hint, in case you get stuck.
This is not a proof by cases. The `use` tactic may get you going with the second and third lines of the proof. To close the last goal,
go back to Level 3 of this world and see how we used the `point_in_line_not_point` lemma. Still bewildered? Click on "View source"
(located in the top right corner of the game screen) to see the solution.
-/
variables {Ω : Type} [IncidencePlane Ω] --hide
/- Lemma :
There are at least two different lines passing through a given point.
-/
lemma point_exists_two_lines (P : Ω) : ∃ (r s: Line Ω), P ∈ s ∧ P ∈ r ∧ s ≠ r :=
begin
rcases (point_existence_postulate P) with ⟨Q, R, ⟨hPQ, hPR, hQR,H⟩⟩,
use line_through P Q,
use line_through P R,
split,
{
exact line_through_left P R,
},
split,
{
exact line_through_left P Q,
},
{
exact ne_of_not_share_point (line_through_right P R) H,
},
end
|
import relatively_prime set_lemmas linear_algebra.projection ring_theory.coprime linear_algebra.basis linear_algebra.direct_sum_module new_file tactic.abel
open_locale classical
/-- Given prime `p : R`, for all `n : ℕ`, `p ^ (n + 1)` isn't a unit.-/
lemma not_unit_of_prime_pow {R : Type*} [comm_ring R] {p : R} (hp : prime p) {n : ℕ} :
¬(is_unit (p ^ n.succ)) :=
begin
intro hu,
rw is_unit_iff_dvd_one at hu,
exact hp.2.1 (is_unit_iff_dvd_one.2 $ dvd_trans (⟨p ^ n, by {rw ←pow_succ}⟩) hu)
end
variables (R : Type*) [integral_domain R] [is_principal_ideal_ring R]
[decidable_eq R] [decidable_eq (associates R)]
namespace coprimes
variable (r : R)
def span := submodule.span R (↑(coprimes' r) : set R)
variables {R} (hr : r ≠ 0) (hu : ¬is_unit r)
include hr hu
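/-- For `r ≠ 0` that is not a unit, the ideal generated by `coprimes' r` is the whole ring. -/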
lemma span_eq_top : span R r = ⊤ :=
begin
cases submodule.is_principal.principal (span R r) with x hx,
erw [hx, ideal.span_singleton_eq_top],
exact relatively_prime' r hr hu
(λ y H, by {rw ←ideal.mem_span_singleton, change _ = ideal.span {x} at hx, rw ←hx,
exact submodule.subset_span H})
end
end coprimes
variables (R) (M : Type*) [add_comm_group M] [module R M] (A : submodule R M)
def Tp := submodule.span R $ ⋃ (p : {p : R // prime p}), (prime_pow_ord A p).1
variables {R} {α : Type*} {β : Type*} [ring α] [add_comm_group β] [module α β]
set_option class.instance_max_depth 50
lemma ord_eq (y : tors R A) : ord R A (y : A) = ord R (tors R A) y :=
submodule.ext $ λ x, ⟨λ (h : _ = _), show _ = _, from subtype.ext_iff.2 $
by erw ←submodule.coe_smul at h; erw h; refl,
λ (h : _ = _), show _ = _, from subtype.ext_iff.1 h⟩
variables (R)
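/-- The prime-power-order submodules `prime_pow_ord A p`, taken over all primes `p`, span everything: `Tp R M A = ⊤`. -/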
lemma tors_gen : Tp R M A = ⊤ :=
begin
ext,
split,
tauto,
intro,
cases classical.em (x = 0),
rw [h, ←submodule.mem_coe], exact (Tp R M A).2,
cases submodule.is_principal.principal (ord R (tors R A) x) with r hr,
change _ = ideal.span {r} at hr,
have hr0 : r ≠ 0, by
{ assume h,
rw [h, ideal.span_singleton_eq_bot.2 rfl] at hr,
rcases x.2 with ⟨s, hs0, hs⟩,
change s ∈ ord R A (x : A) at hs,
rw ←ord_eq at hr,
rw hr at hs,
exact absurd ((submodule.mem_bot R).1 hs) hs0},
have hu : ¬is_unit r, by
{ assume hu,
rw (ideal.span_singleton_eq_top.2 hu) at hr,
have h1 : (1 : R) ∈ ord R (tors R A) x, from hr.symm ▸ submodule.mem_top,
exact absurd (show x = 0, from one_smul R x ▸ set.mem_def.1 h1) h},
cases exists_repr_of_mem_span (coprimes' r) 1
(show (1:R) ∈ (coprimes.span R r), by rw coprimes.span_eq_top r hr0 hu; simp) with ι h1,
rw ←one_smul R x,
rw h1, rw finset.sum_smul, rw Tp,
refine sum_mem_span _, intros q hq, rcases finset.mem_image.1 hq with ⟨p, hp, h⟩,
use p,
exact (unique_factorization_domain.prime_factors hr0 p (multiset.mem_to_finset.1 hp)),
have hp0 : (multiset.filter (λ x, associated x p)
(unique_factorization_domain.factors r)).prod • (ι q * q) • x = 0,
begin
rw ←mul_smul, rw mul_comm, rw mul_assoc, rw ←h,
cases prod_of_dvd_eq' r hr0 p with u hu,
rw ←units.eq_mul_inv_iff_mul_eq at hu, erw hu,
rw mul_comm r, rw ←mul_assoc,
rw mul_smul, convert smul_zero _,
show r ∈ ord R (tors R A) x, rw hr, exact ideal.mem_span_singleton.2 ⟨1, (mul_one r).symm⟩,
end,
have H1 := @associated_pow' _ _ _ _ _ _ (unique_factorization_domain.factors r) rfl p,
have H2 := @associated_prod _ _ _ _ _ _ (multiset.filter (λ (x : R), associated x p)
(unique_factorization_domain.factors r))
(multiset.repeat p (multiset.filter (λ (x : R), associated x p)
(unique_factorization_domain.factors r)).card) rfl
(multiset.card_repeat _ _) H1,
cases associates.mk_eq_mk_iff_associated.1 H2 with v hv,
rw multiset.prod_repeat at hv,
cases prime_pow_le _ _ _ _ (by {rw ←hv, rw mul_comm, rw mul_smul, rw hp0, rw smul_zero}) with j hj,
use j, exact hj.2, exact (unique_factorization_domain.prime_factors
hr0 p (multiset.mem_to_finset.1 hp)),
end
instance huh {ι : Type*} (s : finset ι) : fintype (↑s : set ι) :=
fintype.of_finset s (λ x, iff.rfl)
local infix ` ~ᵤ ` : 50 := associated
variables {R}
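/-- In a PID, a prime `p` is coprime to any element it does not divide. -/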
lemma is_coprime_of_prime {p q : R} (hp : prime p) (h : ¬ p ∣ q) : is_coprime p q :=
begin
cases submodule.is_principal.principal (ideal.span ({p, q} : set R)) with w hw,
cases ideal.mem_span_singleton.1 (show p ∈ ideal.span {w}, by rw ideal.span;
rw ←hw; exact submodule.subset_span (show p ∈ {p, q}, by simp only
[set.mem_insert_iff, true_or, eq_self_iff_true])) with t ht,
cases ideal.mem_span_singleton.1 (show q ∈ ideal.span {w}, by rw ideal.span;
rw ←hw; exact submodule.subset_span (show q ∈ {p, q}, by simp only
[set.mem_insert_iff, set.mem_singleton, or_true])) with r hr,
have hwp : ¬ p ∣ w := λ hwp, h $ dvd_trans hwp ⟨r, hr⟩,
have hwu : is_unit w := by {
refine or.resolve_right ((irreducible_of_prime hp).2 w t ht) _,
rintro ⟨s, hs⟩, exact hwp ⟨(s⁻¹ : units R), by {rw ht, rw ←hs,
simp only [units.mul_inv_cancel_right],}⟩},
rcases ideal.mem_span_pair.1 (show w ∈ ideal.span {p, q}, by rw hw;
exact ideal.mem_span_singleton.2 (dvd_refl _)) with ⟨a, b, hab⟩,
cases hwu with u hu,
rw ←units.mul_right_inj u⁻¹ at hab,
rw ←hu at hab,
rw units.inv_mul at hab,
rw mul_add at hab,rw ←mul_assoc at hab, rw ←mul_assoc at hab,
use [↑u⁻¹ * a, ↑u⁻¹ * b],
exact hab,
end
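/-- If `q1 ^ m` and `q2 ^ n` generate the same ideal and `m ≠ 0`, then `q1` and `q2` are associated and `m = n`. -/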
lemma prime_pow_unique {p : {p : R // prime p}} {q1 q2 : @subtype R (λ q, prime q ∧ ¬ associated q ↑p)}
{m n : ℕ} (h0 : m ≠ 0) (h : ideal.span ({q1.1 ^ m} : set R) = ideal.span {q2.1 ^ n}) :
associated q1.1 q2.1 ∧ m = n :=
begin
have h1 := ideal.mem_span_singleton.1 (by rw ←h; exact ideal.mem_span_singleton.2 (dvd_refl _)),
have h2 := ideal.mem_span_singleton.1 (by rw h; exact ideal.mem_span_singleton.2 (dvd_refl _)),
have h12 := associated_of_dvd_dvd h1 h2,
cases m,
exfalso, exact h0 rfl,
rcases unique_factorization_domain.exists_mem_factors_of_dvd (ne_zero_of_prime_pow R n q2.2.1)
(irreducible_of_prime q1.2.1) (dvd_trans ⟨q1.1 ^ m, by rw pow_succ; refl⟩ h2) with ⟨r, hrm, hr⟩,
have hm2 := factors_prime_pow n q2.2.1,
have hra : associated r q2.1 := by {rw ←associates.mk_eq_mk_iff_associated,
suffices : associates.mk r ∈ multiset.map associates.mk
(unique_factorization_domain.factors (q2.val ^ n)), by
{rw hm2 at this, exact multiset.eq_of_mem_repeat this},
exact multiset.mem_map_of_mem _ hrm},
have := associated.trans hr hra,
split,
exact this,
have hunique := associates.rel_associated_iff_map_eq_map.1
(unique_factorization_domain.unique (unique_factorization_domain.irreducible_factors $
ne_zero_of_prime_pow R m.succ q1.2.1)
(unique_factorization_domain.irreducible_factors $ ne_zero_of_prime_pow R n q2.2.1)
(associated.trans (unique_factorization_domain.factors_prod (ne_zero_of_prime_pow R m.succ q1.2.1)) $
associated.trans h12.symm
(unique_factorization_domain.factors_prod (ne_zero_of_prime_pow R n q2.2.1)).symm)),
rw hm2 at hunique,
have hm1 := factors_prime_pow m.succ q1.2.1,
rw hunique at hm1, rw ←multiset.card_repeat (associates.mk q2.1) n,
rw ←multiset.card_repeat (associates.mk q1.1) m.succ,
rw hm1,
end
lemma not_dvd (p : {p : R // prime p})
{l : (tors R A) →₀ R}
(hS : ↑(l.support) ⊆ (⋃ (q : { q // prime q ∧ ¬associated q ↑p}),
(prime_pow_ord A ⟨(q.1 : R), q.2.1⟩).1))
(H : ∀ (x : (tors R A)), x ∈ l.support →
(∃ (q : {q // prime q ∧ ¬associated q ↑p}) (n : ℕ), ord R (tors R A) x = ideal.span {(q.1 : R) ^ n})) :
¬ p.1 ∣ @finset.prod (↑l.support : set _) _ _
(@finset.univ (↑l.support : set _) $ huh l.support)(λ x, (classical.some (H x x.2)).1 ^
(classical.some (classical.some_spec $ H x x.2))) :=
begin
set Q := @finset.prod (↑l.support : set _) _ _ (@finset.univ (↑l.support : set _) $ huh l.support)
(λ x, (classical.some (H x x.2)).1 ^ (classical.some (classical.some_spec $ H x x.2))),
intro hnot,
rcases exists_mem_multiset_dvd_of_prime p.2 hnot with ⟨a, hma, ha⟩,
cases multiset.mem_map.1 hma with y hy,
have h0 : classical.some (classical.some_spec (H y y.2)) ≠ 0 :=
λ h0, by {rw h0 at hy, rw pow_zero at hy, rw ←hy.2 at ha, exact p.2.2.1 (is_unit_iff_dvd_one.2 ha)},
cases set.mem_Union.1 (hS y.2) with r hr,
cases hr with k hk,
have hreq : associated (classical.some (H y y.2)).1 r.1 :=
(prime_pow_unique h0 ((classical.some_spec $ classical.some_spec $ H y.1 y.2).symm.trans hk)).1,
have hkeq : (classical.some (classical.some_spec (H y y.2))) = k :=
(prime_pow_unique h0 ((classical.some_spec $ classical.some_spec $ H y.1 y.2).symm.trans hk)).2,
cases hreq.symm with u hu,
rw hkeq at hy,
rw ←hu at hy,
rw ←hy.2 at ha,
rw ←multiset.prod_repeat at ha,
rcases exists_associated_mem_of_dvd_prod p.2 (λ b hb, by {have hb' : b = r * u :=
@multiset.eq_of_mem_repeat _ _ _ k hb, rw hb', exact (prime_iff_of_associated ⟨u, rfl⟩).1 r.2.1,})
ha with ⟨s, hms, hs⟩,
rw multiset.eq_of_mem_repeat hms at hs,
exact r.2.2 (associated.trans (⟨u, rfl⟩) hs.symm),
end
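/-- The `p`-primary submodule meets the span of the primary submodules of primes not associated to `p` trivially. -/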
lemma trivial_inter (p : {p : R // prime p}) :
prime_pow_ord A p ⊓ (submodule.span R $ ⋃ (q : {q : R // prime q ∧ ¬ associated q p}),
(prime_pow_ord A ⟨q.1, q.2.1⟩).1) = ⊥ :=
begin
rw eq_bot_iff,
intros x hx,
apply (submodule.mem_bot R).2,
cases hx.1 with m hm,
have hpm := eq_zero_of_ord_pow m p.2 hm,
rcases (@finsupp.mem_span_iff_total (tors R A) (tors R A) R _ _ _ (@id (tors R A))
(⋃ (q : {q : R // prime q ∧ ¬associated q ↑p}), (prime_pow_ord A ⟨q.1, q.2.1⟩).1) x).1
(by {rw set.image_id, exact hx.2}) with ⟨l, hl, hl'⟩,
have hS := (finsupp.mem_supported _ _).1 hl,
cases (classical.em (l.support = ∅)),
rw finsupp.support_eq_empty.1 h at hl',
rw ←hl',
rw finsupp.total_apply,
exact finset.sum_empty,
have H : ∀ x ∈ l.support, ∃ (q : {q : R // prime q ∧ ¬ associated q p}) (n : ℕ),
ord R (tors R A) x = ideal.span ({q.1 ^ n} : set R) := λ y hy, set.mem_Union.1
(hS (finset.mem_coe.2 hy)),
cases finset.nonempty_of_ne_empty h with z hz,
let Q := @finset.prod (↑l.support : set _) _ _ (@finset.univ (↑l.support : set _) $ huh l.support)
(λ x, (classical.some (H x x.2)).1 ^ (classical.some (classical.some_spec $ H x x.2))),
have HQ : Q • x = 0, by {rw ←hl', rw finsupp.total_apply, rw finsupp.smul_sum,
rw ←@finset.sum_const_zero _ _ l.support _,
apply finset.sum_congr rfl,
intros y hy, dsimp,
rw smul_comm,
let q := classical.some (H y hy),
let n := classical.some (classical.some_spec $ H y hy),
let hq := classical.some_spec (classical.some_spec $ H y hy),
suffices : q.1 ^ n ∣ Q, by {cases this with c hc, rw hc, rw mul_comm, rw mul_smul,
rw eq_zero_of_ord_pow n q.2.1 hq,
erw smul_zero, erw smul_zero },
refine @dvd_prod_finset (↑l.support : set _) _ _ ⟨y, hy⟩ (λ x, (classical.some (H x x.2)).1 ^
(classical.some (classical.some_spec $ H x x.2)))
(@finset.univ (↑l.support : set _) $ huh l.support) _,
exact finset.mem_univ _},
rcases (show is_coprime (p.1 ^ m) Q, by
{ apply is_coprime.pow_left,
apply is_coprime_of_prime p.2,
exact not_dvd _ _ p hS H
}) with ⟨a, b, hab⟩,
rw ←one_smul _ x,
rw ←hab,
rw add_smul,
rw mul_smul, rw mul_smul,
rw hpm, rw smul_zero, rw HQ, rw smul_zero, rw zero_add,
end
instance noe (Hfg : A.fg) : is_noetherian R (tors R A) :=
is_noetherian_of_submodule_of_noetherian R A (tors R A) (is_noetherian_of_fg_of_noetherian A Hfg)
noncomputable def prime_gen_set (Hfg : A.fg) (p : {p : R // prime p}) : finset (tors R A) :=
classical.some (@is_noetherian.noetherian _ _ _ _ _ (noe M A Hfg) (prime_pow_ord A p))
theorem prime_gen_set_gens (Hfg : A.fg) (p : {p : R // prime p}) :
submodule.span R (↑(prime_gen_set M A Hfg p) : set _) = prime_pow_ord A p :=
classical.some_spec (@is_noetherian.noetherian _ _ _ _ _ (noe M A Hfg) (prime_pow_ord A p))
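/-- If the infimum of the order ideals of `M` is generated by `x`, then `x` annihilates every element of `M`. -/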
lemma smul_zero_of_Inf {x : R} (Hn : Inf (set.range (ord R M)) = ideal.span ({x} : set R))
(y : M) : x • y = 0 :=
begin
show x ∈ ord R M y,
apply Inf_le (show ord R M y ∈ set.range (ord R M), from ⟨y, rfl⟩),
rw Hn,
exact ideal.mem_span_singleton.2 (dvd_refl _),
end
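/-- If `a` has maximal order `p ^ n` and does not generate all of `M`, there is a nonzero `b` whose span meets `span {a}` trivially. -/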
lemma exists_inter (n : ℕ) (h0 : n ≠ 0) (p : R) (hp : prime p)
(Hn : Inf (set.range (ord R M)) = ideal.span {p ^ n})
(a : M) (ha : p ^ (n - 1) • a ≠ 0) (hna : ∃ b : M, b ∉ submodule.span R ({a} : set M)) :
∃ b : M, b ≠ 0 ∧ ∀ (x : M), x ∈ ((submodule.span R {a}) ⊓ (submodule.span R ({b} : set M))) → x = 0 :=
begin
cases hna with c hc,
have hn : p ^ n • c = 0 := by
{have : p ^ n ∈ Inf (set.range (ord R M)), by rw Hn; exact ideal.mem_span_singleton.2 (dvd_refl _),
have huh : Inf (set.range (ord R M)) ≤ ord R M c := Inf_le (show ord R M c ∈ set.range (ord R M),
from ⟨c, rfl⟩),
exact huh this},
haveI := classical.dec_pred,
let j := @nat.find _ (_inst _) (show ∃ n, p ^ n • c ∈ submodule.span R ({a} : set M), from ⟨n,
by { rw hn, exact submodule.zero_mem (submodule.span R ({a} : set M))}⟩),
have hj0 : j ≠ 0 := λ hj, absurd (show c ∈ submodule.span R {a}, by
{rw [←one_smul _ c, ←pow_zero p, ←hj],
exact @nat.find_spec _ (_inst _) (show ∃ n, p ^ n • c ∈ submodule.span R ({a} : set M), from ⟨n,
by { rw hn, exact submodule.zero_mem (submodule.span R ({a} : set M)),
}⟩),
}) hc,
have hj : p ^ j • c ∈ submodule.span R {a}, from @nat.find_spec _ (_inst _)
(show ∃ n, p ^ n • c ∈ submodule.span R ({a} : set M), from ⟨n,
by { rw hn, exact submodule.zero_mem (submodule.span R ({a} : set M)),
}⟩),
cases submodule.mem_span_singleton.1 hj with r₁ hr₁,
have hjs : p ^ (j - 1) • c ∉ submodule.span R {a} := @nat.find_min _ (_inst _)
(show ∃ n, p ^ n • c ∈ submodule.span R ({a} : set M), from ⟨n,
by { rw hn, exact submodule.zero_mem (submodule.span R ({a} : set M)),
}⟩) (j - 1) (nat.pred_lt hj0),
cases (classical.em (r₁ = 0)),
rw h at hr₁, rw zero_smul at hr₁,
use p ^ (j - 1) • c,
split,
intro hpj0,
rw hpj0 at hjs,
exact hjs (submodule.zero_mem _),
intros x hx,
cases submodule.mem_span_singleton.1 hx.2 with t ht,
rw ← @classical.not_not (x = 0),
intro hn,
have hpt : ¬ p ∣ t := λ ⟨m, hm⟩, by {rw hm at ht, rw mul_comm at ht, rw ←mul_smul at ht,
rw mul_assoc at ht, rw ←pow_succ at ht, rw nat.sub_add_cancel
(nat.succ_le_of_lt $ nat.pos_iff_ne_zero.2 hj0) at ht, rw mul_smul at ht,
rw ←hr₁ at ht, rw smul_zero at ht, exact hn ht.symm
},
have hcop : is_coprime t (p ^ n) := is_coprime.symm (is_coprime.pow_left (is_coprime_of_prime hp hpt)),
rcases hcop with ⟨z, w, hwz⟩,
have hbeq : p ^ (j - 1) • c = (z * t) • (p ^ (j - 1) • c) + (w * (p ^ n)) • (p ^ (j - 1) • c) :=
by {rw ←add_smul, rw hwz, rw one_smul},
have hsb : (z * t) • (p ^ (j - 1) • c) ∈ submodule.span R ({a} : set M) :=
by {rw mul_smul, refine submodule.smul_mem (submodule.span R ({a} : set M)) z _, rw ht, exact hx.1},
cases submodule.mem_span_singleton.1 hsb with y hy,
have hjm : p ^ (j - 1) • c ∈ submodule.span R ({a} : set M) := by {rw ←mul_smul (w * _) at hbeq,
rw mul_assoc at hbeq, rw ←pow_add at hbeq,
cases nat.le.dest (nat.succ_le_of_lt (nat.pos_iff_ne_zero.2 h0)) with l hl,
rw add_comm at hl, rw ←hl at hbeq, rw add_assoc at hbeq, rw add_comm 1 at hbeq,
rw nat.sub_add_cancel (nat.succ_le_of_lt (nat.pos_iff_ne_zero.2 hj0)) at hbeq,
rw pow_add at hbeq, rw mul_smul at hbeq, rw mul_smul at hbeq, rw mul_smul at hbeq,
rw ←hr₁ at hbeq, rw smul_zero at hbeq, rw smul_zero at hbeq, rw add_zero at hbeq,
rw hbeq, rw ←mul_smul z t, exact hsb },
exact hjs hjm,
let k := @nat.find (λ k, ¬ (p ^ (k + 1) ∣ r₁)) (_inst _)
(by {use (multiset.filter (λ x, associated x p) (unique_factorization_domain.factors r₁)).card,
exact fin_pow_dvd h hp }),
have hk : p ^ k ∣ r₁ := by {
cases (classical.em (k = 0)),
rw h_1, rw pow_zero, exact one_dvd _,
apply classical.not_not.1,
intro huh,
rw ←nat.succ_pred_eq_of_pos (nat.pos_iff_ne_zero.2 h_1) at huh,
exact @nat.find_min _ (_inst _) (by {use (multiset.filter (λ x, associated x p)
(unique_factorization_domain.factors r₁)).card,
exact fin_pow_dvd h hp }) _ (nat.pred_lt h_1) huh},
cases hk with r hr,
have hpr : ¬ p ∣ r := λ hpr, by {cases hpr with m hm, rw hm at hr,
rw ←mul_assoc at hr, rw mul_comm _ p at hr, rw ←pow_succ at hr, exact @nat.find_spec
(λ k, ¬ (p ^ (k + 1) ∣ r₁)) (_inst _) (by {use (multiset.filter (λ x, associated x p)
(unique_factorization_domain.factors r₁)).card,
exact fin_pow_dvd h hp }) (show p ^ (k + 1) ∣ r₁, from ⟨m, by rw hr⟩)},
have hjn : j ≤ n := @nat.find_min' _ (_inst _) (show ∃ n, p ^ n • c ∈ submodule.span R ({a} : set M),
from ⟨n,
by { rw hn, exact submodule.zero_mem (submodule.span R ({a} : set M))}⟩) n
(hn.symm ▸ (submodule.span R ({a} : set M)).zero_mem),
have hnjk : n ≤ n - j + k := by {suffices : (p ^ (n - j + k) * r) • a = 0, by
{rw ←not_lt, intro hnot,
have hgh : ord R M a = ideal.span ({p ^ n} : set R) := by
rw ←nat.sub_add_cancel (nat.succ_le_of_lt (nat.pos_iff_ne_zero.2 h0)) at ⊢ Hn;
exact ord_eq_of_pow hp ha (smul_zero_of_Inf _ Hn a),
cases ideal.mem_span_singleton.1 (by rw ←hgh; exact this) with w hw,
cases nat.le.dest (nat.succ_le_of_lt hnot) with m hm,
conv at hw {to_rhs, rw ←hm},
rw nat.succ_eq_add_one at hw,
rw add_assoc at hw,
rw pow_add p (n - j + k) (1 + m) at hw,
rw mul_assoc at hw,
have hhh := mul_left_cancel' (ne_zero_of_prime_pow R (n - j + k) hp) hw,
exact hpr (by {rw hhh, rw pow_add, rw mul_assoc, rw pow_one, use p ^ m * w, }),
},
rw ←nat.sub_add_cancel hjn at hn,
rw pow_add at hn,
rw mul_smul at hn,
rw ←hr₁ at hn,
rw hr at hn,
rw ←mul_smul at hn,
rw ←mul_assoc at hn,
rw ←pow_add at hn,
exact hn},
have hjk : j ≤ k := by {rw ←nat.sub_le_sub_left_iff hjn, apply nat.sub_le_left_of_le_add,
rw add_comm, exact hnjk },
have h1j : 1 ≤ j := nat.succ_le_of_lt (nat.pos_iff_ne_zero.2 hj0),
let b := p ^ (j - 1) • c - (r * p ^ (k - 1)) • a,
have hpb : p • b = 0 := show p • (_ - _) = (0 : M), by
{rw smul_sub, rw ←mul_smul, rw ←pow_succ, rw ←mul_smul, rw mul_comm r, rw ←mul_assoc, rw ←pow_succ,
rw nat.sub_add_cancel h1j, rw nat.sub_add_cancel (le_trans h1j hjk),
rw ←hr₁, rw hr, rw sub_self },
use b, split,
show _ - _ ≠ (0 : M),
intro hb,
rw sub_eq_zero at hb,
apply hjs,
rw hb,
exact submodule.mem_span_singleton.2 ⟨r * p ^ (k - 1), rfl⟩,
intros x hx,
rw ← @classical.not_not (x = 0),
intro hn,
cases submodule.mem_span_singleton.1 hx.2 with s hs,
have hps : ¬ p ∣ s := λ ⟨q, hq⟩, by {rw hq at hs, rw mul_comm at hs, rw mul_smul at hs,
rw hpb at hs, rw smul_zero at hs, exact hn hs.symm},
have hcop : is_coprime s (p ^ n) := is_coprime.symm (is_coprime.pow_left (is_coprime_of_prime hp hps)),
rcases hcop with ⟨z, w, hwz⟩,
have hbeq : b = (z * s) • b + (w * (p ^ n)) • b := by {rw ←add_smul, rw hwz, rw one_smul},
have hsb : (z * s) • b ∈ submodule.span R ({a} : set M) := by {rw mul_smul,
refine submodule.smul_mem (submodule.span R ({a} : set M)) z _, rw hs, exact hx.1},
have hjm : p ^ (j - 1) • c ∈ submodule.span R ({a} : set M) := by
{cases nat.le.dest (le_trans h1j hjn) with l hl,
rw add_comm at hl, rw ←hl at hbeq, rw mul_smul at hbeq, rw pow_add at hbeq,
rw pow_one at hbeq, rw mul_smul at hbeq, rw mul_smul at hbeq, rw hpb at hbeq, rw smul_zero at hbeq,
rw smul_zero at hbeq, rw add_zero at hbeq,
rw ←mul_smul at hbeq, rw ←hbeq at hsb,
change _ - _ ∈ _ at hsb,
rw sub_eq_add_neg at hsb,
rw ←neg_smul at hsb,
exact (submodule.add_mem_iff_left _ (submodule.mem_span_singleton.2 ⟨-(r * p ^ (k - 1)), rfl⟩)).1 hsb,
},
exact hjs hjm,
end
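/-- `A` together with `a` generates `M` if and only if the class of `a` generates the quotient `A.quotient`. -/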
lemma span_quotient_iff (a : M) : A ⊔ submodule.span R {a} = ⊤ ↔
submodule.span R ({submodule.quotient.mk a} : set A.quotient) = ⊤ :=
begin
split,
intro h,
rw eq_top_iff,
intros x hx,
apply quotient.induction_on' x,
intro y,
rcases submodule.mem_sup.1 (show y ∈ A ⊔ submodule.span R ({a} : set M), by rw h;
exact submodule.mem_top) with ⟨b, hb, c, hc, hbc⟩,
cases submodule.mem_span_singleton.1 hc with d hd,
rw ←hd at hbc,
apply submodule.mem_span_singleton.2,
use d,
rw ←hbc,
apply (submodule.quotient.eq _).2,
convert (submodule.neg_mem _ hb),
abel,
intro h,
rw eq_top_iff,
intros x hx,
apply submodule.mem_sup.2,
cases submodule.mem_span_singleton.1 (show submodule.quotient.mk x ∈ submodule.span R
({submodule.quotient.mk a} : set A.quotient), by rw h; exact submodule.mem_top) with y hy,
rw ←submodule.quotient.mk_smul at hy,
have huh := (submodule.quotient.eq _).1 hy,
use (x - y • a),
split,
convert A.neg_mem huh, abel,
use y • a,
split,
exact submodule.mem_span_singleton.2 ⟨y, rfl⟩,
rw sub_add_cancel,
end
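/-- Under the hypotheses of `exists_inter`, with `A` finitely generated, the cyclic submodule generated by `a` has a complement in `A`. -/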
lemma exists_compl (Hfg : A.fg) (n : ℕ) (h0 : n ≠ 0) (p : R) (hp : prime p)
(Hn : Inf (set.range (ord R A)) = ideal.span {p ^ n})
(a : A) (ha : p ^ (n - 1) • a ≠ 0) (hna : ∃ b : A, b ∉ submodule.span R ({a} : set A)) :
∃ C : submodule R A, C ⊓ submodule.span R ({a} : set A) = ⊥
∧ C ⊔ submodule.span R ({a} : set A) = ⊤ :=
begin
cases (classical.em (submodule.span R ({a} : set A) = ⊤)),
use ⊥,
split,
simp,
rw h, simp,
let S := { B : submodule R A | submodule.span R ({a} : set A) ⊓ B = ⊥ },
have hS : set.nonempty S := by {cases exists_inter _ n h0 p hp Hn a ha hna with b hb,
use submodule.span R ({b} : set A),
apply eq_bot_iff.2, intros x hx, apply (submodule.mem_bot R).2, exact hb.2 x hx},
let C := @well_founded.min (submodule R A) (>)
(is_noetherian_iff_well_founded.1 $ is_noetherian_of_fg_of_noetherian A Hfg) S hS,
have hAC : ∀ x : C.quotient, p ^ n • x = 0 := λ x, by {apply quotient.induction_on' x,
intros y, erw ←submodule.quotient.mk_smul,
rw smul_zero_of_Inf _ Hn, rw submodule.quotient.mk_zero },
have haC : p ^ n • (@submodule.quotient.mk _ _ _ _ _ C a) = 0 := hAC _,
have hC : submodule.span R ({a} : set A) ⊓ C = ⊥ := well_founded.min_mem
(is_noetherian_iff_well_founded.1 $ is_noetherian_of_fg_of_noetherian A Hfg) S hS,
have hanC : p ^ (n - 1) • (@submodule.quotient.mk _ _ _ _ _ C a) ≠ 0 :=λ ha0,
by {rw ←submodule.quotient.mk_smul at ha0, rw submodule.quotient.mk_eq_zero at ha0,
apply ha,rw ←submodule.mem_bot R, rw ←hC, exact ⟨submodule.mem_span_singleton.2
⟨p ^ (n - 1), rfl⟩, ha0⟩,
},
have hACn : Inf (set.range (ord R C.quotient)) = ideal.span {p ^ n} :=
by { apply le_antisymm,
apply Inf_le _,
use (submodule.quotient.mk a),
rw ←nat.sub_add_cancel (nat.succ_le_of_lt $ nat.pos_iff_ne_zero.2 h0) at haC ⊢,
exact ord_eq_of_pow hp hanC haC,
apply le_Inf _,
intros b hb,
cases hb with c hc,
revert hc,
apply quotient.induction_on' c,
intros d hd,
rw ←hd,
intros r hr,
cases ideal.mem_span_singleton.1 hr with s hs,
rw hs, show _ • _ = _, rw mul_smul, exact hAC _,},
use C,
split,
rw inf_comm,
exact hC,
rw span_quotient_iff,
apply classical.not_not.1,
intro HN,
cases @exists_inter _ _ _ _ _ C.quotient _ _ n h0 p hp hACn (submodule.quotient.mk a) hanC
(by {rcases submodule.exists_of_lt (lt_top_iff_ne_top.2 HN) with ⟨x, _, hx⟩,
use x})
with b hb,
revert hb,
apply @quotient.induction_on _ C.quotient_rel _ b,
intros d hd,
have hda : submodule.span R {a} ⊓ (submodule.span R {d} ⊔ C) = ⊥ := by
{rw eq_bot_iff,
intros x hx, apply (submodule.mem_bot R).2,
rcases submodule.mem_sup.1 hx.2 with ⟨y, hy, z, hz, hyz⟩,
have hyx : submodule.quotient.mk x = submodule.quotient.mk y :=
(submodule.quotient.eq C).2 (by {rw add_comm at hyz, rw ←eq_sub_iff_add_eq at hyz,
rw ←hyz, exact hz}),
cases submodule.mem_span_singleton.1 hx.1 with k hk,
cases submodule.mem_span_singleton.1 hy with j hj,
have hd' := hd.2 (submodule.quotient.mk x) ⟨submodule.mem_span_singleton.2
⟨k, by {rw ←submodule.quotient.mk_smul, rw hk,}⟩, submodule.mem_span_singleton.2 ⟨j,
by {rw hyx, erw ←submodule.quotient.mk_smul, rw hj}⟩⟩,
apply (submodule.mem_bot R).1, rw ←hC,
exact ⟨hx.1, (submodule.quotient.mk_eq_zero _).1 hd'⟩,
},
have : ∀ B, B ∈ S → ¬ C < B := λ B, well_founded.not_lt_min (is_noetherian_iff_well_founded.1 $
is_noetherian_of_fg_of_noetherian A Hfg) S hS,
refine this (submodule.span R {d} ⊔ C) hda _,
rw submodule.lt_iff_le_and_exists,
split,
exact le_sup_right, use d,
split,
exact submodule.le_def.1 le_sup_left (submodule.subset_span $ set.mem_singleton d),
intro hdC,
rw ←submodule.quotient.mk_eq_zero at hdC,
exact hd.1 hdC,
end
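/-- If `N` is the largest exponent for which some generator of `A` has order `p ^ (N + 1)`, then the infimum of the order ideals of `A` is generated by `p ^ (N + 1)`. -/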
lemma Inf_eq (A : submodule R M) (s : finset M) (hs : submodule.span R (↑s : set _) = A)
(p : R) (hp : prime p)
(Hp : ∀ (a : M), a ∈ A ∧ a ≠ 0 → (∃! (n : ℕ), p ^ (n + 1) • a = 0 ∧ p ^ n • a ≠ 0))
(X : M) (N : ℕ) (hN : X ∈ finset.erase s 0 ∧
(p ^ (N + 1) • X = 0 ∧ ¬p ^ N • X = 0) ∧
∀ (b : M) (m : ℕ), b ∈ finset.erase s 0 ∧ p ^ (m + 1) • b = 0 ∧ ¬p ^ m • b = 0 → m ≤ N) :
Inf (set.range (ord R A)) = ideal.span {p ^ (N + 1)} :=
begin
have hmem : ∀ x ∈ s, x ∈ A := λ x H, hs ▸ (submodule.subset_span H),
apply le_antisymm,
apply Inf_le _,
use ⟨X, hmem X $ finset.mem_of_mem_erase hN.1⟩,
have hX : A.subtype ⟨X, hmem X $ finset.mem_of_mem_erase hN.1⟩ = X := rfl,
exact ord_eq_of_pow hp (λ h, hN.2.1.2 $
by {rw ←hX, rw ←linear_map.map_smul, rw h, rw linear_map.map_zero })
(by {
have h2 : p ^ (N + 1) • X = 0 := hN.2.1.1,
rw ←hX at h2, rw ←linear_map.map_smul at h2,
exact linear_map.ker_eq_bot'.1 A.ker_subtype _ h2,
}) ,
apply le_Inf _,
intros c hc,
cases hc with x hx,
cases classical.em (x = 0),
rw h at hx, rw ord_ideal_zero_eq_top at hx,
rw ←hx,
exact le_top,
rw ←hx,
intros y hy,
cases ideal.mem_span_singleton.1 hy with r hr,
cases Hp x ⟨x.2, λ h0, h $ subtype.ext_iff.2 $ h0⟩ with n hn,
rcases (finsupp.mem_span_iff_total _).1 (show (x : M) ∈ submodule.span R (id '' (↑s : set _)), by
rw set.image_id; rw hs; exact x.2)
with ⟨l, hlm, hl⟩,
rw hr,
show _ = (0 : A),
apply subtype.ext_iff.2, rw submodule.coe_smul, rw mul_comm,
rw mul_smul,
rw submodule.coe_zero,
rw ←hl,
rw finsupp.total_apply,
rw finsupp.smul_sum,
have hNs : ∀ x, x ∈ (↑s : set _) → p ^ (N + 1) • x = (0 : M) := λ z hz, by
{cases classical.em (z = 0),
rw h_1,
rw smul_zero,
cases Hp z ⟨hmem z $ finset.mem_coe.1 hz, h_1⟩ with m hm,
cases nat.le.dest (hN.2.2 z m ⟨finset.mem_erase.2 ⟨h_1, hz⟩, hm.1⟩) with k hk,
rw ←hk, rw add_comm m k,
rw add_assoc,rw pow_add, rw mul_smul, rw hm.1.1, rw smul_zero},
unfold finsupp.sum,
convert smul_zero _,
rw ←@finset.sum_const_zero M _ l.support _,
apply finset.sum_congr rfl,
intros z hz,
rw smul_comm, convert smul_zero _,
exact hNs z ((finsupp.mem_supported _ l).1 hlm hz),
end
lemma inf_aux {X : M} (hX : X ∈ A) {C : submodule R A}
(hC : C ⊓ submodule.span R {⟨X, hX⟩} = ⊥ ∧ C ⊔ submodule.span R {⟨X, hX⟩} = ⊤)
{S : finset M} (hS : submodule.span R (↑S : set M) = submodule.map A.subtype C ∧
∀ (a : M), a ∈ S → submodule.span R {a} ⊓ submodule.span R (↑(S.erase a) : set M) = ⊥) :
submodule.span R {X} ⊓ submodule.span R (↑S : set M) ≤ ⊥ :=
begin
have := map_inf' C X hX,
rw ←hS.1 at this,rw this,
rw hC.1,
rw submodule.map_bot, exact le_refl _,
end
lemma inf_aux' {X : M} (hX : X ∈ A) (hX0 : X ≠ 0) {C : submodule R A}
(hC : C ⊓ submodule.span R {⟨X, hX⟩} = ⊥ ∧ C ⊔ submodule.span R {⟨X, hX⟩} = ⊤)
{S : finset M} (hS : submodule.span R (↑S : set _) = submodule.map A.subtype C ∧
∀ (a : M), a ∈ S → submodule.span R {a} ⊓ submodule.span R (↑(S.erase a) : set M) = ⊥)
{b : M} (hb : b ∈ S) :
submodule.span R ({b} : set M) ⊓ submodule.span R (↑((insert X S).erase b : finset M) : set M) ≤ ⊥ :=
begin
have H := inf_aux _ _ hX hC hS,
have := hS.2 b hb,
rw erase_insert_eq,
intros z hz,
rw insert_to_set at hz,
rcases submodule.mem_span_insert.1 hz.2 with ⟨r, v, hvm, hv⟩,
cases submodule.mem_span_singleton.1 hz.1 with c hc,
have H := inf_aux _ _ hX hC hS,
rw ←hc at hv,
rw ←sub_eq_iff_eq_add at hv,
have h0 : r • X = 0 := by {rw ←submodule.mem_bot R,
apply H,
split,
exact submodule.mem_span_singleton.2 ⟨r, rfl⟩,
rw ←hv,
rw submodule.mem_coe,
refine submodule.sub_mem _ _ _,
apply submodule.span_mono (show {b} ⊆ S, from finset.singleton_subset_iff.2 hb),
show c • b ∈ submodule.span R (↑({b} : finset M) : set M),
rw finset.coe_singleton,
exact submodule.mem_span_singleton.2 ⟨c, rfl⟩,
exact submodule.span_mono (show S.erase b ⊆ S, from finset.erase_subset _ _) hvm,},
rw h0 at hv, rw sub_eq_zero at hv,
rw ←this, split,
rw ←hc, exact submodule.mem_span_singleton.2 ⟨c, rfl⟩,
rw ←hc, rw hv, exact hvm,
exact hb,
intro hXb,
have huh := inf_of_le_left (@submodule.span_mono R _ _ _ _ _ _ (show {X} ⊆ S, by rw hXb;
exact finset.singleton_subset_iff.2 hb)),
change (submodule.span R (↑({X} : finset M) : set M) ⊓ submodule.span R (↑S : set M) =
submodule.span R (↑({X} : finset M) : set M)) at huh,
rw finset.coe_singleton at huh,
rw huh at H,
rw ←eq_bot_iff at H,
erw submodule.span_singleton_eq_bot at H,
exact hX0 H,
end
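/-- If `C` is a complement of `span {x}`, then `C` is spanned by the projections of the remaining generators onto `C`. -/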
theorem gen_le (s : finset A) (hs : submodule.span R (↑s : set A) = ⊤)
(x : A) (hx : x ∈ s) (h0 : x ≠ 0)
(C : submodule R A) (h : is_compl C (submodule.span R {x})) :
C = submodule.span R ((C.subtype.comp (C.linear_proj_of_is_compl (submodule.span R {x}) h) : A → A)''
(↑(s.erase x) : set A)) :=
begin
apply le_antisymm,
intros y hy,
have : (⟨y, hy⟩ : C) ∈ (C.linear_proj_of_is_compl _ h).range := by
rw submodule.linear_proj_of_is_compl_range h; exact submodule.mem_top,
rcases this with ⟨a, hma, ha⟩,
have hsa : a ∈ submodule.span R (↑s : set _) := by rw hs; exact submodule.mem_top,
rw ←set.image_id (↑s : set _) at hsa,
rcases (finsupp.mem_span_iff_total R ).1 hsa with ⟨l, hlm, hl⟩,
rw ←hl at ha,
rw linear_map.map_finsupp_total at ha,
rw finsupp.total_apply at ha,
cases classical.em (x ∈ l.support) with hxl hxl,
unfold finsupp.sum at ha,
rw ←finset.insert_erase hxl at ha,
rw finset.sum_insert (finset.not_mem_erase x l.support) at ha,
dsimp at ha,
rw linear_map.mem_ker.1 (show x ∈ (C.linear_proj_of_is_compl _ h).ker,
by {rw submodule.linear_proj_of_is_compl_ker h, exact submodule.subset_span rfl}) at ha,
rw smul_zero at ha, rw zero_add at ha,
have huh : (⟨y, hy⟩ : C) ∈ submodule.span R ((C.linear_proj_of_is_compl (submodule.span R {x}) h) ''
(↑(s.erase x) : _)) :=
by {rw finsupp.mem_span_iff_total, use l.erase x, split,
rw finsupp.mem_supported,
rw finsupp.support_erase,
convert finset.coe_subset.2 (finset.erase_subset_erase x hlm),
rw finsupp.total_apply, rw ← ha, unfold finsupp.sum,
rw finsupp.support_erase,
refine finset.sum_congr (by {convert (eq.refl (l.support.erase x)), }) _,intros z hz,
rw finsupp.erase_ne (finset.ne_of_mem_erase hz)
},
rw submodule.span_image at ⊢ huh,
rcases huh with ⟨z, hzm, hz⟩,
use z, split,
exact hzm, rw subtype.ext_iff at hz, exact hz,
unfold finsupp.sum at ha,
rw submodule.span_image,
rw submodule.map_comp, use ⟨y, hy⟩,
split,
rw ←submodule.span_image,
apply (finsupp.mem_span_iff_total R).2, use l, split,
intros b hb, apply finset.mem_erase.2,
split,
intro hbx,
rw hbx at hb, exact hxl hb,
exact hlm hb,
rw finsupp.total_apply, exact ha, refl,
intros y hy,
rw submodule.span_image at hy,
rcases hy with ⟨z, hzm, hz⟩,
rw ←set.image_id (↑(s.erase x) : set A) at hzm,
rcases (finsupp.mem_span_iff_total R).1 hzm with ⟨l, hlm, hl⟩,
rw ←hl at hz,
rw linear_map.map_finsupp_total at hz,
rw finsupp.total_apply at hz, rw ←hz,
apply submodule.sum_mem,
intros c hc,
apply submodule.smul_mem,
simp only [submodule.subtype_apply, function.comp.right_id, submodule.coe_mem, linear_map.comp_apply],
end
variables {M} {R}
noncomputable def map_erase {C : submodule R A} {s : finset M} {X : M} (h : X ∈ s)
(hs : submodule.span R (↑s : set M) = A)
(hc : is_compl C (submodule.span R {⟨X, subset_span' hs h⟩})) :=
finset.image (A.subtype.comp (C.subtype.comp (C.linear_proj_of_is_compl
(submodule.span R {⟨X, subset_span' hs h⟩}) hc)))
((subtype_mk' s (A : set M) $ (subset_span' hs)).erase ⟨X, subset_span' hs h⟩)
lemma card_map_erase {C : submodule R A} {s : finset M} {X : M} (h : X ∈ s)
(hs : submodule.span R (↑s : set M) = A)
(hc : is_compl C (submodule.span R {⟨X, subset_span' hs h⟩})) :
finset.card (map_erase _ h hs hc) ≤ (s.erase X).card :=
begin
have H : finset.card (map_erase _ h hs hc) ≤ s.card.pred := by
{have : (map_erase _ h hs hc).card ≤
((subtype_mk' s ↑A (subset_span' hs)).erase ⟨X, subset_span' hs h⟩).card :=
finset.card_image_le,
have h' : (subtype_mk' s ↑A (subset_span' hs)).card = s.card :=
by {unfold subtype_mk', rw univ_card s,
refine finset.card_image_of_injective _ _,
intros x y hxy, simp only [subtype.mk_eq_mk] at hxy, exact subtype.ext_iff.2 hxy},
rw ←h',
have := finset.card_erase_of_mem (show (⟨X, subset_span' hs h⟩ : A) ∈
subtype_mk' s ↑A (subset_span' hs), from (mem_subtype_mk' (subset_span' hs)).2 $
by {rw subtype.coe_mk, exact h}),
rw ←this,
exact finset.card_image_le},
apply le_trans H,
rw finset.card_erase_of_mem h,
end
lemma gen_le2 {C : submodule R A} {s : finset M} {X : M} (h : X ∈ s) (h0 : X ≠ 0)
(hs : submodule.span R (↑s : set M) = A)
(hc : is_compl C (submodule.span R {⟨X, subset_span' hs h⟩})) :
C.map A.subtype = submodule.span R (↑(map_erase _ h hs hc) : set M) :=
begin
have := gen_le M A (subtype_mk' s (A : set M) (subset_span' hs)) (span_subtype hs)
⟨X, subset_span' hs h⟩ ((mem_subtype_mk' (subset_span' hs)).2 h)
(λ hn0, h0 $ subtype.ext_iff.1 hn0) C hc,
unfold map_erase,
rw finset.coe_image,
rw submodule.span_image,
rw submodule.span_image at this,
rw submodule.map_comp,
exact congr_arg (submodule.map A.subtype) this,
end
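/-- A module generated by at most `n` elements, in which every nonzero element is killed by a power of a single prime `p`, has a generating set `S` such that each `span {a}` meets the span of the remaining generators trivially. -/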
lemma woohoo (n : ℕ) (s : finset M) (hs : submodule.span R (↑s : set M) = A) (hn : s.card ≤ n) :
(∃ (p : R), prime p ∧ ∀ a : M, (a ∈ A ∧ a ≠ 0) → ∃ (n : ℕ), p ^ (n + 1) • a = 0 ∧ p ^ n • a ≠ 0) →
∃ (S : finset M), submodule.span R (↑S : set _) = A ∧
∀ a ∈ S, submodule.span R {a} ⊓ submodule.span R (↑(S.erase a) : set M) = ⊥ :=
begin
revert A s hs hn,
induction n with m Hm,
intros A s hs hn H,
rw finset.card_eq_zero.1 (nat.le_zero_iff.1 hn) at hs,
cases H with p hp,
use ∅,
split,
exact hs,
intros a ha,
exfalso,
exact finset.not_mem_empty _ ha,
intros A s hs hm htors,
cases htors with p hp,
have Hp : ∀ a : M, (a ∈ A ∧ a ≠ 0) → ∃! (n : ℕ), p ^ (n + 1) • a = 0 ∧ p ^ n • a ≠ 0 :=
λ a ha, exists_unique_of_exists_of_unique (hp.2 a ha) (λ y z hy hz, by
{rw ←(@classical.not_not (y = z)),
intro hnot,
wlog hyz : y < z, exact lt_or_gt_of_ne hnot,
cases nat.le.dest (nat.succ_le_of_lt hyz) with k hk,
apply hz.2,
rw ←hk, rw add_comm,
rw pow_add, rw mul_smul, rw hy.1, rw smul_zero,
}),
have hmem : ∀ x ∈ s, x ∈ A := λ x H, hs ▸ (submodule.subset_span H),
cases classical.em (m = 0),
use s,
split,
exact hs,
intros a ha,
rw h at hm,
have h01 : s.card = 0 ∨ s.card = 1 := by {revert hm, omega},
cases h01,
rw finset.card_eq_zero.1 h01, simp only [finset.coe_empty, submodule.span_empty,
finset.erase_empty, inf_bot_eq],
cases finset.card_eq_one.1 h01 with b hb,
rw hb at ha ⊢,
have hab : a = b := finset.mem_singleton.1 ha,
rw hab,
rw singleton_erase,
erw submodule.span_empty,
rw inf_bot_eq,
cases (classical.em (s.card ≤ m)) with hsm hsm,
cases Hm A s hs hsm ⟨p, hp⟩ with S hS,
use S, exact hS,
replace hsm : s.card = m.succ := by omega,
have hn0 : (s.erase 0).nonempty := by {cases classical.em ((0 : M) ∈ s) with hs0 hs0,
have hcard := finset.card_erase_of_mem hs0,
rw hsm at hcard, rw nat.pred_succ at hcard,
exact finset.card_pos.1 (by {rw hcard, exact nat.pos_iff_ne_zero.2 h}),
have heq := finset.erase_eq_of_not_mem hs0,
exact finset.card_pos.1 (by {rw heq, rw hsm, exact nat.succ_pos _}) },
rcases exists_max hn0 (λ a ha, Hp a ⟨hmem a $ finset.mem_of_mem_erase ha,
finset.ne_of_mem_erase ha⟩) with ⟨X, N, hN⟩,
dsimp at hN,
have hX0 : (⟨X, hmem X $ finset.mem_of_mem_erase hN.1⟩ : A) ≠ 0 := λ hX0,
finset.ne_of_mem_erase hN.1 $ subtype.ext_iff.1 hX0,
have hX : A.subtype ⟨X, hmem X $ finset.mem_of_mem_erase hN.1⟩ = X := rfl,
cases classical.em ((0 : M) ∈ s),
have he0 : submodule.span R (↑(s.erase 0) : set M) = submodule.span R (↑s : set _) :=
by {conv {to_rhs, rw ←finset.insert_erase h_1}, rw insert_to_set, rw span_insert_zero_eq, },
exact Hm A (s.erase 0) (he0.symm ▸ hs)
(by {rw finset.card_erase_of_mem h_1, rw hsm, rw nat.pred_succ}) ⟨p, hp⟩,
cases classical.em (∃ b, b ∉ submodule.span R ({⟨X, hmem X $ finset.mem_of_mem_erase hN.1⟩} : set A))
with hmA hmA,
cases exists_compl M A ⟨s, hs⟩ (N + 1) (nat.succ_ne_zero N)
p hp.1 (Inf_eq _ A s hs p hp.1 Hp X N hN)
⟨X, hmem X $ finset.mem_of_mem_erase hN.1⟩
(by rw nat.add_sub_cancel at *; exact (λ h, hN.2.1.2 $
by {rw ←hX, rw ←linear_map.map_smul, rw h, rw linear_map.map_zero }))
hmA with C hC,
have hXs : (s.erase X).card = m :=
by {rw finset.card_erase_of_mem (finset.erase_subset 0 s hN.1), rw hsm,
rw nat.pred_succ},
cases Hm (C.map A.subtype) (map_erase A (finset.erase_subset 0 s hN.1) hs
⟨eq_bot_iff.1 hC.1, eq_top_iff.1 hC.2⟩)
(gen_le2 A (finset.erase_subset 0 s hN.1) (finset.ne_of_mem_erase hN.1) hs
⟨eq_bot_iff.1 hC.1, eq_top_iff.1 hC.2⟩).symm
(by rw ←hXs; exact card_map_erase A (finset.erase_subset 0 s hN.1) hs
⟨eq_bot_iff.1 hC.1, eq_top_iff.1 hC.2⟩)
(⟨p, ⟨hp.1, λ a ha, by {rcases ha.1 with ⟨b, hmb, hb⟩, apply hp.2, split, rw ←hb,
exact b.2, exact ha.2}⟩⟩) with S hS,
use insert X S, split,
rw insert_to_set, rw span_insert', rw hS.1,
rw sup_comm,
rw map_sup' _ _ (hmem X $ finset.mem_of_mem_erase hN.1), rw hC.2, exact submodule.map_subtype_top _,
intros b hb,
rw finset.mem_insert at hb,
cases hb,
rw hb,
rw eq_bot_iff,
refine le_trans _ (inf_aux _ _ (hmem X $ finset.mem_of_mem_erase hN.1) hC hS),
exact (@inf_le_inf_left (submodule R M) _ _ _ _ (submodule.span_mono $
show (↑((insert X S).erase X : finset M) : set M) ⊆ (↑S : set _), from
finset.erase_insert_subset X S)),
rw eq_bot_iff,
exact inf_aux' _ _ (hmem X $ finset.mem_of_mem_erase hN.1) (finset.mem_erase.1 hN.1).1 hC hS hb,
use {X},
split,
apply le_antisymm,
exact submodule.span_le.2 (by {rw finset.coe_singleton, exact set.singleton_subset_iff.2
(hmem X $ finset.mem_of_mem_erase hN.1)}),
intros Y hY, rw ←@classical.not_not (Y ∈ submodule.span R (↑{X} : set M)),
intro hnot,
rw finset.coe_singleton at *,
exact hmA ⟨⟨Y, hY⟩, λ hmnot, hnot $ (map_singleton (hmem X $ finset.mem_of_mem_erase hN.1) hY).2 hmnot⟩,
intros a ha, erw finset.mem_singleton at ha,
rw ha,
erw singleton_erase,
show _ ⊓ submodule.span R ∅ = _,
rw submodule.span_empty,
rw inf_bot_eq,
end
instance quotient_module (I : ideal R) : module R I.quotient :=
submodule.quotient.semimodule I
variables (R) {M}
lemma ord_eq_ker (a : M) : ord R M a = (linear_map.id.smul_right a).ker :=
begin
ext,
rw linear_map.mem_ker,
refl,
end
variables {R}
def quotient_congr {A B : submodule R M} (H : A = B) : linear_equiv R A.quotient B.quotient :=
(@quotient.congr_right _ A.quotient_rel B.quotient_rel $ λ x y : M, H ▸ iff.rfl).to_linear_equiv
{ map_add := λ x y, quotient.induction_on₂' x y $ λ w z, rfl,
map_smul := λ x y, quotient.induction_on' y $ λ w, rfl }
lemma smul_range {a : M} : (submodule.span R ({a} : set M)) = (linear_map.id.smul_right a).range :=
begin
ext,
rw linear_map.mem_range,
rw submodule.mem_span_singleton,
refl,
end
def smul_range_equiv {a : M} : linear_equiv R (submodule.span R ({a} : set M))
((@linear_map.id R R _ _ _).smul_right a).range :=
(equiv.set_congr $ submodule.ext'_iff.1 smul_range).to_linear_equiv $
{ map_add := λ x y, rfl,
map_smul := λ c x, rfl }
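/-- The quotient of `R` by the order ideal of `a` is linearly equivalent to the cyclic submodule generated by `a`. -/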
noncomputable def ord_quotient (a : M) : linear_equiv R (ord R M a).quotient
(submodule.span R ({a} : set M)) :=
(quotient_congr $ ord_eq_ker R a).trans $ (linear_map.quot_ker_equiv_range
((@linear_map.id R R _ _ _).smul_right a)).trans
smul_range_equiv.symm |
DSPOSV Example Program Results
Solution
1.0000 -1.0000 2.0000 -3.0000
|
###############################################################################
#subtraction
@universal function sub(a::Ubound, b::Unum)
lb = resolve_lower(a.lower - b)
hb = resolve_upper(a.upper - b)
(is_ulp(lb) && is_ulp(hb)) ? resolve_as_utype!(lb, hb) : B(lb, hb)
end
@universal function sub(a::Unum, b::Ubound)
lb = resolve_lower(a - b.upper)
hb = resolve_upper(a - b.lower)
(is_ulp(lb) && is_ulp(hb)) ? resolve_as_utype!(lb, hb) : B(lb, hb)
end
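#interval subtraction: the new lower bound is a.lower - b.upper and the new upper bound is a.upper - b.lower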
@universal function sub(a::Ubound, b::Ubound)
lb = resolve_lower(a.lower - b.upper)
hb = resolve_upper(a.upper - b.lower)
is_sss(lb) && is_sss(hb) && (@signof(lb) == @signof(hb)) && return sss(U, @signof(lb))
is_mmr(lb) && is_mmr(hb) && (@signof(lb) == @signof(hb)) && return mmr(U, @signof(lb))
(is_ulp(lb) && is_ulp(hb)) ? resolve_as_utype!(lb, hb) : B(lb, hb)
end
@universal function additiveinverse!(a::Ubound)
hb = additiveinverse!(a.lower)
a.lower = additiveinverse!(a.upper)
a.upper = hb
return a
end
#unary subtraction copies the Ubound and negates it (negating and swapping its bounds).
@universal function -(x::Ubound)
additiveinverse!(copy(x))
end
|
State Before: d : ℕ
a : ℤ√↑d
n : ℕ
ha : Nonneg a
⊢ Nonneg (↑n * a) State After: d : ℕ
a : ℤ√↑d
n : ℕ
ha : Nonneg a
⊢ Nonneg (↑↑n * a) Tactic: rw [← Int.cast_ofNat n] State Before: d : ℕ
a : ℤ√↑d
n : ℕ
ha : Nonneg a
⊢ Nonneg (↑↑n * a) State After: no goals Tactic: exact
match a, nonneg_cases ha, ha with
| _, ⟨x, y, Or.inl rfl⟩, _ => by rw [smul_val]; trivial
| _, ⟨x, y, Or.inr <| Or.inl rfl⟩, ha => by
rw [smul_val]; simpa using nonnegg_pos_neg.2 (sqLe_smul n <| nonnegg_pos_neg.1 ha)
| _, ⟨x, y, Or.inr <| Or.inr rfl⟩, ha => by
rw [smul_val]; simpa using nonnegg_neg_pos.2 (sqLe_smul n <| nonnegg_neg_pos.1 ha) State Before: d : ℕ
a : ℤ√↑d
n : ℕ
ha : Nonneg a
x y : ℕ
x✝ : Nonneg { re := ↑x, im := ↑y }
⊢ Nonneg (↑↑n * { re := ↑x, im := ↑y }) State After: d : ℕ
a : ℤ√↑d
n : ℕ
ha : Nonneg a
x y : ℕ
x✝ : Nonneg { re := ↑x, im := ↑y }
⊢ Nonneg { re := ↑n * ↑x, im := ↑n * ↑y } Tactic: rw [smul_val] State Before: d : ℕ
a : ℤ√↑d
n : ℕ
ha : Nonneg a
x y : ℕ
x✝ : Nonneg { re := ↑x, im := ↑y }
⊢ Nonneg { re := ↑n * ↑x, im := ↑n * ↑y } State After: no goals Tactic: trivial State Before: d : ℕ
a : ℤ√↑d
n : ℕ
ha✝ : Nonneg a
x y : ℕ
ha : Nonneg { re := ↑x, im := -↑y }
⊢ Nonneg (↑↑n * { re := ↑x, im := -↑y }) State After: d : ℕ
a : ℤ√↑d
n : ℕ
ha✝ : Nonneg a
x y : ℕ
ha : Nonneg { re := ↑x, im := -↑y }
⊢ Nonneg { re := ↑n * ↑x, im := ↑n * -↑y } Tactic: rw [smul_val] State Before: d : ℕ
a : ℤ√↑d
n : ℕ
ha✝ : Nonneg a
x y : ℕ
ha : Nonneg { re := ↑x, im := -↑y }
⊢ Nonneg { re := ↑n * ↑x, im := ↑n * -↑y } State After: no goals Tactic: simpa using nonnegg_pos_neg.2 (sqLe_smul n <| nonnegg_pos_neg.1 ha) State Before: d : ℕ
a : ℤ√↑d
n : ℕ
ha✝ : Nonneg a
x y : ℕ
ha : Nonneg { re := -↑x, im := ↑y }
⊢ Nonneg (↑↑n * { re := -↑x, im := ↑y }) State After: d : ℕ
a : ℤ√↑d
n : ℕ
ha✝ : Nonneg a
x y : ℕ
ha : Nonneg { re := -↑x, im := ↑y }
⊢ Nonneg { re := ↑n * -↑x, im := ↑n * ↑y } Tactic: rw [smul_val] State Before: d : ℕ
a : ℤ√↑d
n : ℕ
ha✝ : Nonneg a
x y : ℕ
ha : Nonneg { re := -↑x, im := ↑y }
⊢ Nonneg { re := ↑n * -↑x, im := ↑n * ↑y } State After: no goals Tactic: simpa using nonnegg_neg_pos.2 (sqLe_smul n <| nonnegg_neg_pos.1 ha) |
Other audio recordings include a "Theatre Masterworks" version from 1953, directed and narrated by Margaret Webster, with a cast including Maurice Evans, Lucile Watson and Mildred Natwick; a 1989 version by California Artists Radio Theatre, featuring Dan O'Herlihy, Jeanette Nolan, Les Tremayne and Richard Erdman; and one by L.A. Theatre Works issued in 2009, featuring Charles Busch, James Marsters and Andrea Bowen.
|
module mod2
end module mod2
|
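% FEMInputWriter builds a simple 2D cantilever test case: it meshes a
% rectangular domain with triangular elements, clamps the nodes on the
% left edge (x = 0), applies point loads on the central portion of the
% right edge (second DOF of each loaded node), and writes the resulting
% FEM input data to a file. The boundary-condition helper assumes two
% degrees of freedom per node.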
classdef FEMInputWriter < handle
properties (Access = private)
fileName
xmax
ymax
Nx
Ny
P
xmesh
ymesh
zmesh
mesh
nDirichlet
nNeumann
DoF
dispPrescribed
pointLoads
edgeNodes
end
methods (Access = public)
function obj = FEMInputWriter(cParams)
obj.init(cParams);
end
function createTest(obj)
obj.computeMeshGrid();
obj.createMesh();
obj.computeBoundaryConditions();
obj.computePrescribedDisplacementsMatrix();
obj.computePrescribedPointLoads();
obj.computeEdgeNodes();
obj.writeFile();
end
end
methods (Access = private)
function init(obj,cParams)
obj.fileName = cParams.testName;
obj.xmax = cParams.x1;
obj.ymax = cParams.y1;
obj.Nx = cParams.N;
obj.Ny = cParams.M;
obj.P = cParams.P;
obj.DoF = cParams.DoF;
end
function computeMeshGrid(obj)
x1 = linspace(0,obj.xmax,obj.Nx);
x2 = linspace(0,obj.ymax,obj.Ny);
[X,Y] = meshgrid(x1,x2);
Z = zeros(size(X));
obj.xmesh = X;
obj.ymesh = Y;
obj.zmesh = Z;
end
function createMesh(obj)
[F,V] = mesh2tri(obj.xmesh,obj.ymesh,obj.zmesh,'f');
s.coord = V(:,1:2);
s.connec = F;
obj.mesh = Mesh(s);
obj.mesh.plot;
end
function computeBoundaryConditions(obj)
t = 0.3*obj.ymax;
m = obj.mesh;
root = m.coord(:,1) == 0;
tipLength = m.coord(:,1) == obj.xmax;
tipWidth = m.coord(:,2) < obj.ymax-t & m.coord(:,2) > t;
tip = tipLength & tipWidth;
obj.nDirichlet = find(root);
obj.nNeumann = find(tip);
end
function computePrescribedDisplacementsMatrix(obj)
obj.dispPrescribed = obj.computeBoundaryConditionMatrix(obj.DoF,obj.nDirichlet);
end
function computePrescribedPointLoads(obj)
Fmat = obj.computeBoundaryConditionMatrix(obj.DoF,obj.nNeumann);
nnode = size(Fmat,1)/obj.DoF;
Pnod = obj.P/nnode;
for i = 2:2:size(Fmat,1)
Fmat(i,3) = Pnod;
end
obj.pointLoads = Fmat;
end
function computeEdgeNodes(obj)
COOR = obj.mesh.coord;
x = COOR(:,1);
y = COOR(:,2);
edge = find(x==0 | x==max(x) | y==0 | y==max(y));
obj.edgeNodes = sort(edge);
end
function writeFile(obj)
fileID = fopen([obj.fileName],'w');
obj.writeProblemData(fileID);
obj.writeCoordinates(fileID);
obj.writeConnectivities(fileID);
obj.writeDirichletData(fileID);
obj.writeNeumannData(fileID);
obj.writeNodesSolid(fileID);
obj.writeExternalBorderNodes(fileID);
fclose(fileID);
end
function writeCoordinates(obj,fileID)
COOR = zeros(size(obj.mesh.coord,1),4);
COOR(:,2:3) = obj.mesh.coord;
COOR(:,1) = (1:1:size(COOR,1))';
fprintf(fileID,'%%%% Coordinates\n%% Node\n');
fprintf(fileID,'gidcoord = [\n');
for i=1:size(COOR,1)
fprintf(fileID,'%d %f %f %f;\n',COOR(i,:));
end
fprintf(fileID,'];\n');
end
function writeConnectivities(obj,fileID)
Tnod = zeros(size(obj.mesh.connec,1),5);
Tnod(:,2:4) = obj.mesh.connec;
Tnod(:,1) = (1:1:size(Tnod,1))';
fprintf(fileID,'%%%% Connectivities\n%% Node\n');
fprintf(fileID,'gidlnods = [\n');
for i=1:size(Tnod,1)
fprintf(fileID,'%d %d %d %d %d;\n',Tnod(i,:));
end
fprintf(fileID,'];\n');
end
function writeDirichletData(obj,fileID)
fprintf(fileID,'%%%% Variable prescribed\n%% Node\n');
fprintf(fileID,'lnodes = [\n');
for i=1:size(obj.dispPrescribed,1)
fprintf(fileID,'%d %d %f;\n',obj.dispPrescribed(i,:));
end
fprintf(fileID,'];\n');
end
function writeNeumannData(obj,fileID)
fprintf(fileID,'%%%% Point loads\n%% Node\n');
fprintf(fileID,'pointload_complete = [\n');
for i=1:size(obj.pointLoads,1)
fprintf(fileID,'%d %d %f;\n',obj.pointLoads(i,:));
end
fprintf(fileID,'];\n');
end
function writeExternalBorderNodes(obj,fileID)
fprintf(fileID,'%%%% External Border Nodes\n%% Node\n');
fprintf(fileID,'External_border_nodes = [\n');
for i=1:size(obj.edgeNodes,1)
fprintf(fileID,'%d;\n',obj.edgeNodes(i));
end
fprintf(fileID,'];\n');
end
end
methods (Access = private, Static)
function bc = computeBoundaryConditionMatrix(DoF,n)
bc = zeros(DoF*length(n),3);
for i = 1:length(n)
bc(2*i-1,1:2) = [n(i),1];
bc(2*i,1:2) = [n(i),2];
end
end
function writeProblemData(fileID)
fprintf(fileID,'%%%% Data\nData_prb = {\n');
fprintf(fileID,'''TRIANGLE'';\n''SI'';\n''2D'';\n''Plane_Stress'';\n');
fprintf(fileID,'''ELASTIC'';\n''MACRO'';\n};\n');
end
function writeNodesSolid(fileID)
fprintf(fileID,'%%%% Nodes solid\n%% Node\n');
fprintf(fileID,'nodesolid = unique(pointload_complete(:,1));\n');
end
end
end
|
import data.fin.basic
namespace fin
lemma lt_or_eq_nat {n : ℕ} (i : fin n.succ) : (i : ℕ) < n ∨ (i : ℕ) = n :=
begin
cases nat.decidable_lt i n with h,
{
right,
exact nat.eq_of_lt_succ_of_not_lt (fin.is_lt i) h,
},
{
left,
exact h,
}
end
lemma lt_coe_iff_val_lt {n m : ℕ} (i : fin n.succ) (hle : m < n.succ) :
(i : ℕ) < m ↔ i < (m : fin n.succ) :=
begin
rw fin.lt_def,
repeat {rw fin.val_eq_coe},
rw fin.coe_coe_of_lt hle,
end
lemma lt_or_eq_fin {n : ℕ} (i : fin n.succ) : i < (n : fin n.succ) ∨ i = (n : fin n.succ) :=
begin
cases fin.lt_or_eq_nat i with h,
{
left,
rw ← fin.lt_coe_iff_val_lt i (nat.lt_succ_self _),
exact h,
},
{
right,
rw ← fin.coe_coe_eq_self i,
have f := @congr_arg _ _ (i : ℕ) n fin.of_nat h,
simp only [fin.of_nat_eq_coe] at f,
exact f,
}
end
/-- converts an n-ary tuple to an n.succ-ary tuple -/
@[simp] def x_val {A : Type*} {n} (x : A) (val : fin n → A) :
fin n.succ → A :=
@fin.cases n (λ _, A) x (λ i, val i)
end fin
|
Benedict appeared with his fellow Tinker Tailor Soldier Spy castmates at the UK premiere. Images have been added to the gallery. Also, Benedict did not win ‘Best Actor’ at the TV Choice Awards – instead it went to David Tennant. |
/// based on https://mklimenko.github.io/english/2018/06/04/constexpr-random/
#pragma once
#include <array>
#include <atomic>
#include <cstdint>
#include <gsl/gsl>
#include <limits>
#include <memory>
#include <random>
#include "HyperionUtils/Concepts.h"
namespace hyperion::math {
using gsl::narrow_cast;
#ifndef _MSC_VER
using std::size_t;
#endif //_MSC_VER
// clang-format off
#ifndef _MSC_VER
// NOLINTNEXTLINE
#define IGNORE_DATETIME_START \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wdate-time\"")
#else
// NOLINTNEXTLINE
#define IGNORE_DATETIME_START
#endif
// clang-format on
// clang-format off
#ifndef _MSC_VER
// NOLINTNEXTLINE
#define IGNORE_DATETIME_STOP \
_Pragma("GCC diagnostic pop")
#else
// NOLINTNEXTLINE
#define IGNORE_DATETIME_STOP
#endif
// clang-format on
// clang-format off
#ifndef _MSC_VER
// NOLINTNEXTLINE
#define IGNORE_WEAK_VTABLES_START \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wweak-vtables\"")
#else
// NOLINTNEXTLINE
#define IGNORE_WEAK_VTABLES_START
#endif
// clang-format on
// clang-format off
#ifndef _MSC_VER
// NOLINTNEXTLINE
#define IGNORE_WEAK_VTABLES_STOP \
_Pragma("GCC diagnostic pop")
#else
// NOLINTNEXTLINE
#define IGNORE_WEAK_VTABLES_STOP
#endif
// clang-format on
IGNORE_WEAK_VTABLES_START
class Engine {
public:
constexpr Engine() noexcept = default;
constexpr Engine(const Engine& engine) noexcept = default;
constexpr Engine(Engine&& engine) noexcept = default;
virtual constexpr ~Engine() noexcept = default;
[[nodiscard]] virtual constexpr auto get_seed() const noexcept -> size_t = 0;
virtual constexpr auto seed(size_t seed) noexcept -> void = 0;
virtual constexpr auto generate() noexcept -> size_t = 0;
template<size_t size>
inline constexpr auto generate_array() noexcept -> std::array<size_t, size> {
auto array = std::array<size_t, size>();
for(auto& element : array) {
element = generate();
}
return array;
}
[[nodiscard]] virtual constexpr auto max_value() const noexcept -> size_t = 0;
constexpr auto operator=(const Engine& engine) noexcept -> Engine& = default;
constexpr auto operator=(Engine&& engine) noexcept -> Engine& = default;
virtual constexpr auto operator()() noexcept -> size_t = 0;
};
IGNORE_WEAK_VTABLES_STOP
template<size_t max = 714025>
class LinearCongruentialEngine final : public Engine {
public:
constexpr LinearCongruentialEngine() noexcept = default;
constexpr LinearCongruentialEngine(
const LinearCongruentialEngine& engine) noexcept = default;
constexpr LinearCongruentialEngine(LinearCongruentialEngine&& engine) noexcept = default;
constexpr ~LinearCongruentialEngine() noexcept final = default;
IGNORE_DATETIME_START
[[nodiscard]] inline constexpr auto get_seed() const noexcept -> size_t final {
const auto* t = __TIME__;
return time_from_string(t, 0) * 60 * 60 + time_from_string(t, 3) * 60
+ time_from_string(t, 6);
}
IGNORE_DATETIME_STOP
inline constexpr auto seed(size_t seed) noexcept -> void final {
m_previous = seed;
}
[[nodiscard]] inline constexpr auto generate() noexcept -> size_t final {
m_previous = (m_a * m_previous + m_c) % max;
return m_previous;
}
[[nodiscard]] inline constexpr auto max_value() const noexcept -> size_t final {
return max;
}
constexpr auto operator=(const LinearCongruentialEngine& engine) noexcept
-> LinearCongruentialEngine& = default;
constexpr auto operator=(LinearCongruentialEngine&& engine) noexcept
-> LinearCongruentialEngine& = default;
inline constexpr auto operator()() noexcept -> size_t final {
return generate();
}
private:
size_t m_a = 4096;
size_t m_c = 150889;
size_t m_previous = get_seed();
[[nodiscard]] static inline constexpr auto
time_from_string(const char* string, int offset) noexcept -> size_t {
return narrow_cast<size_t>(string[offset] - '0') * 10 // NOLINT
+ narrow_cast<size_t>(string[offset + 1] - '0'); // NOLINT
}
};
IGNORE_WEAK_VTABLES_START
template<typename EngineType, utils::concepts::Numeric T = int>
requires utils::concepts::Derived<EngineType, Engine>
class Distribution {
public:
constexpr Distribution() noexcept = default;
constexpr Distribution(const Distribution& distribution) noexcept = default;
constexpr Distribution(Distribution&& distribution) noexcept = default;
virtual constexpr ~Distribution() noexcept = default;
virtual constexpr auto normalized_random_value() noexcept -> double = 0;
virtual constexpr auto random_value() noexcept -> T = 0;
template<size_t size>
inline constexpr auto normalized_random_values() noexcept -> std::array<double, size> {
auto array = std::array<double, size>();
for(auto& element : array) {
element = normalized_random_value();
}
return array;
}
template<size_t size>
inline constexpr auto random_values() noexcept -> std::array<T, size> {
auto array = std::array<T, size>();
for(auto& element : array) {
element = random_value();
}
return array;
}
virtual constexpr auto seed(size_t seed) noexcept -> void = 0;
[[nodiscard]] virtual constexpr auto get_seed() noexcept -> size_t = 0;
[[nodiscard]] virtual constexpr auto get_min() const noexcept -> T = 0;
virtual constexpr auto set_min(T min) noexcept -> void = 0;
[[nodiscard]] virtual constexpr auto get_max() const noexcept -> T = 0;
virtual constexpr auto set_max(T max) noexcept -> void = 0;
constexpr auto
operator=(const Distribution& distribution) noexcept -> Distribution& = default;
constexpr auto operator=(Distribution&& distribution) noexcept -> Distribution& = default;
virtual constexpr auto operator()() noexcept -> T = 0;
};
IGNORE_WEAK_VTABLES_STOP
template<typename EngineType, utils::concepts::Numeric T = int>
requires utils::concepts::Derived<EngineType, Engine>
class UniformDistribution final : public Distribution<EngineType, T> {
public:
constexpr UniformDistribution() noexcept requires
utils::concepts::DefaultConstructible<EngineType>
: m_engine(std::make_unique<EngineType>()) {
m_engine->seed(m_engine->get_seed());
}
constexpr UniformDistribution(T min, T max) noexcept requires
utils::concepts::DefaultConstructible<EngineType>
: m_min(min), m_max(max), m_engine(std::make_unique<EngineType>()) {
m_engine->seed(m_engine->get_seed());
}
constexpr UniformDistribution(T min, T max, std::unique_ptr<EngineType>&& engine) noexcept
: m_min(min), m_max(max), m_engine(std::move(engine)) {
m_engine->seed(m_engine->get_seed());
}
explicit constexpr UniformDistribution(std::unique_ptr<EngineType>&& engine) noexcept
: m_engine(std::move(engine)) {
m_engine->seed(m_engine->get_seed());
}
template<typename... Args>
requires utils::concepts::Derived<EngineType, Engine> && utils::concepts::
ConstructibleFrom<EngineType, Args...>
explicit constexpr UniformDistribution(Args&&... args) noexcept
: m_engine(std::make_unique<EngineType>(args...)) {
m_engine->seed(m_engine->get_seed());
}
template<typename... Args>
requires utils::concepts::Derived<EngineType, Engine> && utils::concepts::
ConstructibleFrom<EngineType, Args...>
explicit constexpr UniformDistribution(T min, T max, Args&&... args) noexcept
: m_min(min), m_max(max), m_engine(std::make_unique<EngineType>(args...)) {
m_engine->seed(m_engine->get_seed());
}
constexpr UniformDistribution(const UniformDistribution& distribution) noexcept = default;
constexpr UniformDistribution(UniformDistribution&& distribution) noexcept = default;
constexpr ~UniformDistribution() noexcept final = default;
inline constexpr auto normalized_random_value() noexcept -> double final {
return narrow_cast<double>(m_engine->generate())
/ narrow_cast<double>(m_engine->max_value());
}
inline constexpr auto random_value() noexcept -> T final {
return narrow_cast<T>(this->normalized_random_value() * (m_max - m_min)) + m_min;
}
inline constexpr auto seed(size_t seed) noexcept -> void final {
m_engine->seed(seed);
}
[[nodiscard]] inline constexpr auto get_seed() noexcept -> size_t final {
return m_engine->get_seed();
}
inline constexpr auto set_min(T min) noexcept -> void final {
m_min = min;
}
[[nodiscard]] inline constexpr auto get_min() const noexcept -> T final {
return m_min;
}
inline constexpr auto set_max(T max) noexcept -> void final {
m_max = max;
}
[[nodiscard]] inline constexpr auto get_max() const noexcept -> T final {
return m_max;
}
constexpr auto operator=(const UniformDistribution& distribution) noexcept
-> UniformDistribution& = default;
constexpr auto
operator=(UniformDistribution&& distribution) noexcept -> UniformDistribution& = default;
inline constexpr auto operator()() noexcept -> T final {
return random_value();
}
private:
T m_min = narrow_cast<T>(0);
T m_max = narrow_cast<T>(1);
std::unique_ptr<EngineType> m_engine;
};
template<utils::concepts::Numeric T = float>
[[clang::no_destroy]] static math::UniformDistribution<math::LinearCongruentialEngine<>, T>
GLOBAL_UNIFORM_DISTRIBUTION;
template<utils::concepts::Numeric T = float>
static std::atomic_bool GLOBAL_UNIFORM_DISTRIBUTION_INITIALIZED;
template<utils::concepts::Numeric T = float>
inline static constexpr auto initialize_global_uniform_distribution() noexcept -> void {
bool initialized = false;
if(GLOBAL_UNIFORM_DISTRIBUTION_INITIALIZED<T>.compare_exchange_strong(
initialized,
true,
std::memory_order_seq_cst))
{
GLOBAL_UNIFORM_DISTRIBUTION<
T> = math::UniformDistribution<math::LinearCongruentialEngine<>, T>();
}
}
template<utils::concepts::FloatingPoint T = float>
inline auto random_value() noexcept -> T {
initialize_global_uniform_distribution<T>();
GLOBAL_UNIFORM_DISTRIBUTION<T>.set_min(narrow_cast<T>(0));
GLOBAL_UNIFORM_DISTRIBUTION<T>.set_max(narrow_cast<T>(1));
return GLOBAL_UNIFORM_DISTRIBUTION<T>();
}
template<utils::concepts::Numeric T = float>
inline auto random_value(T min, T max) noexcept -> T {
initialize_global_uniform_distribution<T>();
GLOBAL_UNIFORM_DISTRIBUTION<T>.set_min(min);
GLOBAL_UNIFORM_DISTRIBUTION<T>.set_max(max);
return GLOBAL_UNIFORM_DISTRIBUTION<T>();
}
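// A minimal usage sketch (illustrative only; the include path "hyperion/math/random.h"
// is hypothetical, and this assumes LinearCongruentialEngine<> is default-constructible,
// as the global distribution above already requires):
//
//     #include "hyperion/math/random.h"
//
//     auto x = hyperion::math::random_value<float>();       // uniform in [0, 1]
//     auto roll = hyperion::math::random_value<int>(1, 6);  // uniform in [1, 6]
//
//     auto dist = hyperion::math::UniformDistribution<
//         hyperion::math::LinearCongruentialEngine<>, double>(0.0, 10.0);
//     auto samples = dist.random_values<4>();               // std::array<double, 4>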
} // namespace hyperion::math
|
-- https://leanprover-community.github.io/mathlib_docs/category_theory/functor/hom.html
-- THE PLAN IS TO DEFINE AN AFFINE GROUP SCHEME AS A REPRESENTABLE FUNCTOR FROM K-ALGEBRAS TO GROUPS, AND PROVE THAT G_a := Hom(K[X], _) IS AN AFFINE GROUP SCHEME
import category_theory.types
import algebra.category.Algebra.basic
import algebra.category.Group.basic
import data.polynomial.basic
import data.polynomial.algebra_map
import category_theory.over
import tactic
open_locale polynomial
open polynomial
open category_theory
variables (K : Type*) [comm_ring K]
-- variables (K_alg : Type*) [category K_alg] [category_theory.under K] -- category of commutative K-algebras. But how to use it...
class affine_scheme :=
(scheme : Algebra K ⥤ Type*)
(corepresentable : scheme.corepresentable)
class affine_group_scheme :=
(scheme : Algebra K ⥤ Group)
(corepresentable : (scheme ⋙ forget Group).corepresentable)
example : ∃ (f : coyoneda.obj (opposite.op (Algebra.of K K[X])) ⟶ forget (Algebra K)), is_iso f :=
begin
  sorry -- placeholder so the file elaborates; the isomorphism is not constructed in the source
end
-- DEFINING G_a
instance G_a : affine_scheme K :=
{
scheme := forget (Algebra K),
corepresentable := begin
refine {has_corepresentation := _},
refine ⟨opposite.op (Algebra.of K K[X]), _⟩,
-- need to construct a natural transformation Hom(K[X], _) ⟶ forget (Algebra K)
-- see for an example of defining a natural transformation https://leanprover-community.github.io/mathlib_docs/category_theory/yoneda.html#category_theory.yoneda_lemma
sorry, -- placeholder: the corepresenting isomorphism still has to be constructed
end,
}
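-- One standard route to the required construction (a sketch, assuming the usual API of
-- `polynomial.aeval` from `data.polynomial.algebra_map`): a K-algebra homomorphism
-- K[X] →ₐ[K] A is determined by the image of X, giving a bijection
-- (K[X] →ₐ[K] A) ≃ A, namely φ ↦ φ X with inverse a ↦ polynomial.aeval a.
-- This bijection is natural in A, which packages into exactly the isomorphism
-- coyoneda.obj (opposite.op (Algebra.of K K[X])) ≅ forget (Algebra K)
-- that `corepresentable` asks for.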
#check Algebra.of K K[X]
#check polynomial K
|
From machine_program_logic.program_logic Require Import weakestpre.
From HypVeri Require Import lifting rules.rules_base machine_extra.
From HypVeri.algebra Require Import base mem reg pagetable mailbox base_extra.
From HypVeri.lang Require Import lang_extra reg_extra.
Section add.
Context `{hypparams:HypervisorParameters}.
Context `{vmG: !gen_VMG Σ}.
Lemma add {E i wi w1 w2 q p} ai ra rb s :
decode_instruction wi = Some(Add ra rb) ->
tpa ai ∈ s ->
tpa ai ≠ p ->
{SS{{ ▷ (PC @@ i ->r ai) ∗
▷ (ai ->a wi) ∗
▷ (ra @@ i ->r w1) ∗
▷ (rb @@ i ->r w2) ∗
▷ (i -@{ q }A> s) ∗
▷ (TX@ i := p)}}}
ExecI @ i; E
{{{ RET (false, ExecI);
PC @@ i ->r (ai ^+ 1)%f ∗
ai ->a wi ∗
ra @@ i ->r (w1 ^+ (finz.to_z w2))%f ∗
rb @@ i ->r w2 ∗
(i -@{ q }A> s) ∗
(TX@ i := p) }}}.
Proof.
iIntros (Hdecode Hin Hneq ϕ) "(>Hpc & >Hai & >Hra & >Hrb & >Hacc & >HTX) Hϕ".
iApply (sswp_lift_atomic_step ExecI);[done|].
iIntros (n σ1) "%Hsche Hσ".
rewrite /scheduled in Hsche.
simpl in Hsche.
rewrite /scheduler in Hsche.
apply bool_decide_unpack in Hsche as Hcur.
clear Hsche.
apply fin_to_nat_inj in Hcur.
iModIntro.
iDestruct "Hσ" as "(#Hneq & Hmem & Hreg & Hmb & Hrx & Hown & Haccess & Hrest)".
(* valid regs *)
iDestruct ((gen_reg_valid3 i PC ai ra w1 rb w2 Hcur) with "Hreg Hpc Hra Hrb") as "[%HPC [%Hra %Hrb]]";eauto.
(* valid pt *)
iDestruct (access_agree_check_true (tpa ai) i with "Haccess Hacc") as "%Hai"; first set_solver.
(* valid mem *)
iDestruct (gen_mem_valid ai wi with "Hmem Hai") as %Hmem.
iDestruct (mb_valid_tx i p with "Hmb HTX") as %Htx.
set (instr := Add ra rb).
iSplit.
- (* reducible *)
iPureIntro.
apply (reducible_normal i instr ai wi);eauto.
by rewrite Htx.
- (* step *)
iModIntro.
iIntros (m2 σ2) "[%P PAuth] %HstepP".
apply (step_ExecI_normal i instr ai wi ) in HstepP;eauto.
remember (exec instr σ1) as c2 eqn:Heqc2.
pose proof (decode_instruction_valid wi instr Hdecode) as Hvalidinstr.
inversion Hvalidinstr as [| | | | | | ra' rb' Hvalidrb Hvalidra | | | | |] .
subst ra' rb'.
inversion Hvalidra as [ HneqPCa HneqNZa ].
inversion Hvalidrb as [ HneqPCb HneqNZb ].
subst instr.
rewrite /exec (add_ExecI σ1 ra w1 rb w2) /update_incr_PC /update_reg in Heqc2;auto.
destruct HstepP;subst m2 σ2; subst c2; simpl.
rewrite /gen_vm_interp.
(* unchanged part *)
rewrite (preserve_get_mb_gmap σ1).
rewrite (preserve_get_rx_gmap σ1).
rewrite (preserve_get_own_gmap σ1).
rewrite (preserve_get_access_gmap σ1).
rewrite (preserve_get_excl_gmap σ1).
rewrite (preserve_get_trans_gmap σ1).
rewrite (preserve_get_hpool_gset σ1).
rewrite (preserve_get_retri_gmap σ1).
rewrite (preserve_inv_trans_pgt_consistent σ1).
rewrite (preserve_inv_trans_wellformed σ1).
rewrite (preserve_inv_trans_ps_disj σ1).
rewrite p_upd_pc_mem p_upd_reg_mem.
all: try rewrite p_upd_pc_pgt p_upd_reg_pgt //.
all: try rewrite p_upd_pc_trans p_upd_reg_trans //.
all: try rewrite p_upd_pc_mb p_upd_reg_mb //.
rewrite Hcur. iFrame.
(* updated part *)
rewrite -> (u_upd_pc_regs _ i ai 1);eauto.
rewrite u_upd_reg_regs.
+ iDestruct ((gen_reg_update2_global PC i ai (ai ^+ 1)%f ra i w1 (w1 ^+ (finz.to_z w2))%f)
with "Hreg Hpc Hra") as ">[Hreg [Hpc Hra]]";eauto.
iModIntro.
iSplitL "PAuth".
by iExists P.
rewrite /just_scheduled_vms.
rewrite /just_scheduled.
assert (filter
(λ id : vmid,
base.negb (scheduled σ1 id) && scheduled (update_offset_PC (update_reg_global σ1 i ra (w1 ^+ w2)%f) 1) id = true)
(seq 0 n) = []) as ->.
{
rewrite /scheduled /machine.scheduler //= /scheduler Hcur.
rewrite p_upd_pc_current_vm p_upd_reg_current_vm.
rewrite Hcur.
induction n.
- simpl.
rewrite filter_nil //=.
- rewrite seq_S.
rewrite filter_app.
rewrite IHn.
simpl.
rewrite filter_cons_False //=.
rewrite andb_negb_l.
done.
}
iSimpl.
iFrame.
iSplit; first done.
iSplit; first done.
assert ((scheduled (update_offset_PC (update_reg_global σ1 i ra (w1 ^+ w2)%f) 1) i) = true) as ->.
rewrite /scheduled.
simpl.
rewrite /scheduler.
rewrite p_upd_pc_current_vm p_upd_reg_current_vm Hcur.
rewrite bool_decide_eq_true.
reflexivity.
simpl.
iApply ("Hϕ" with "[Hpc Hai Hacc Hra Hrb HTX]").
iFrame.
+ rewrite u_upd_reg_regs.
apply (get_reg_gmap_get_reg_Some _ _ _ i) in HPC;eauto.
rewrite lookup_insert_ne.
by simplify_map_eq /=.
congruence.
+ by rewrite Htx.
Qed.
End add.
|
Between 4500 and 3800 BCE, all of the British Isles came to abandon their former Mesolithic hunter-gatherer lifestyle, to be replaced by the new agricultural subsistence of the Neolithic Age. Although a common material culture was shared throughout most of the British Isles in this period, there was great regional variation regarding the nature and distribution of settlement, architectural styles, and the use of natural resources. Throughout most of Britain, there is little evidence of cereal cultivation or permanent dwellings from this period, leading archaeologists to believe that the Early Neolithic economy on the island was largely pastoral, relying on herding cattle, with people living a nomadic or semi-nomadic way of life. Although witnessing some land clearance, Britain was largely forested in this period, and it is unclear what level of deforestation the area of Kent had experienced in the Early Neolithic; widespread forest clearance only took place on the <unk> of south-east Britain in the Late Bronze Age. Environmental data from the area around the White Horse Stone supports the idea that the area was still largely forested in the Early Neolithic, covered by a woodland of oak, ash, hazel/alder and <unk>.
|
/* Copyright (c) 2009-2010 Tyrell Corporation & Moyshe Ben Rabi.
The contents of this file are subject to the Mozilla Public License
Version 1.1 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
License for the specific language governing rights and limitations
under the License.
The Original Code is an implementation of the Metaverse eXchange Protocol.
The Initial Developer of the Original Code is Akos Maroy and Moyshe Ben Rabi.
All Rights Reserved.
Contributor(s): Akos Maroy and Moyshe Ben Rabi.
Alternatively, the contents of this file may be used under the terms
of the Affero General Public License (the "AGPL"), in which case the
provisions of the AGPL are applicable instead of those
above. If you wish to allow use of your version of this file only
under the terms of the AGPL and not to allow others to use
your version of this file under the MPL, indicate your decision by
deleting the provisions above and replace them with the notice and
other provisions required by the AGPL. If you do not delete
the provisions above, a recipient may use your version of this file
under either the MPL or the AGPL.
*/
#include <vector>
#include <boost/test/unit_test.hpp>
#include <mxp/packet/packet.h>
#include "test_helpers.h"
#include "test_packet.h"
namespace mxp {
namespace test {
namespace packet {
namespace packet {
using namespace boost;
mxp::packet::packet generate_test_packet() {
std::vector<mxp::packet::message_frame> frames;
frames.reserve(5);
for (unsigned int i = 0; i < 5; ++i) {
mxp::packet::message_frame frame;
uint8_t data[255];
for (unsigned int j = 0; j < 255; ++j) {
data[j] = (uint8_t) j;
}
frame.frame_data(data, 255);
frames.push_back(frame);
}
mxp::packet::packet p;
p.session_id = 1234;
p.packet_id = 4567;
p.first_send_time = posix_time::second_clock::universal_time();
p.guaranteed = 0;
p.resend_count = 0;
p.message_frames(frames.begin(), frames.end());
return p;
}
void test_too_much_data() {
std::vector<mxp::packet::message_frame> frames;
frames.reserve(6);
for (unsigned int i = 0; i < 6; ++i) {
mxp::packet::message_frame frame;
uint8_t data[255];
for (unsigned int j = 0; j < 255; ++j) {
data[j] = (uint8_t) j;
}
frame.frame_data(data, 255);
frames.push_back(frame);
}
mxp::packet::packet p;
// add 5 frames of 265 bytes each - this should work out fine, as the
// total packet payload will be 1325 bytes, which if below the maximum
// payload size
p.message_frames(frames.begin(), frames.begin() + 5);
BOOST_CHECK_EQUAL(p.message_frames_end() - p.message_frames_begin(), 5);
// now try to add 6 frames, which would exceed the maximum payload size
BOOST_CHECK_THROW(p.message_frames(frames.begin(), frames.end()),
std::invalid_argument);
}
void test_serialization() {
check_two_way_serialization(generate_test_packet());
}
}
}
}
}
|
SUBROUTINE zrenam (IFLTAB, CPATHOLD, NPATHOLD, CPATHNEW,
* NPATHNEW, LFOUND)
C
implicit none
C
C Rename a record: change its pathname from CPATHOLD to CPATHNEW.
C The routine dispatches on the DSS file version (6 or 7).
C LFOUND is returned .TRUE. if the record was found and renamed.
C
C
C Argument Dimensions
CHARACTER CPATHOLD*(*), CPATHNEW*(*)
INTEGER IFLTAB(*),zdssVersion
LOGICAL LFOUND
INTEGER NPATHOLD, NPATHNEW
C
IF (zdssVersion(IFLTAB).EQ.6) THEN
CALL zrenam6 (IFLTAB, CPATHOLD, NPATHOLD, CPATHNEW,
* NPATHNEW, LFOUND)
ELSE
CALL zrenam7 (IFLTAB, CPATHOLD, NPATHOLD, CPATHNEW,
* NPATHNEW, LFOUND)
ENDIF
C
RETURN
END
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
-/
import data.option.basic
import logic.nontrivial
import order.lattice
import order.order_dual
import tactic.pi_instances
/-!
# ⊤ and ⊥, bounded lattices and variants
This file defines top and bottom elements (greatest and least elements) of a type, the bounded
variants of different kinds of lattices, sets up the typeclass hierarchy between them and provides
instances for `Prop` and `fun`.
## Main declarations
* `has_<top/bot> α`: Typeclasses to declare the `⊤`/`⊥` notation.
* `order_<top/bot> α`: Order with a top/bottom element.
* `bounded_order α`: Order with a top and bottom element.
* `with_<top/bot> α`: Equips `option α` with the order on `α` plus `none` as the top/bottom element.
* `is_compl x y`: In a bounded lattice, predicate for "`x` is a complement of `y`". Note that in a
non distributive lattice, an element can have several complements.
* `is_complemented α`: Typeclass stating that any element of a lattice has a complement.
## Common lattices
* Distributive lattices with a bottom element. Notated by `[distrib_lattice α] [order_bot α]`.
It captures the properties of `disjoint` that are common to `generalized_boolean_algebra` and
to `distrib_lattice` when combined with `order_bot`.
* Bounded and distributive lattice. Notated by `[distrib_lattice α] [bounded_order α]`.
Typical examples include `Prop` and `set α`.
## Implementation notes
We didn't prove things about `[distrib_lattice α] [order_top α]` because the dual notion of
`disjoint` isn't really used anywhere.
-/
/-! ### Top, bottom element -/
set_option old_structure_cmd true
universes u v
variables {α : Type u} {β : Type v}
/-- Typeclass for the `⊤` (`\top`) notation -/
@[notation_class] class has_top (α : Type u) := (top : α)
/-- Typeclass for the `⊥` (`\bot`) notation -/
@[notation_class] class has_bot (α : Type u) := (bot : α)
notation `⊤` := has_top.top
notation `⊥` := has_bot.bot
@[priority 100] instance has_top_nonempty (α : Type u) [has_top α] : nonempty α := ⟨⊤⟩
@[priority 100] instance has_bot_nonempty (α : Type u) [has_bot α] : nonempty α := ⟨⊥⟩
attribute [pattern] has_bot.bot has_top.top
/-- An order is an `order_top` if it has a greatest element.
We state this using a data mixin, holding the value of `⊤` and the greatest element constraint. -/
@[ancestor has_top]
class order_top (α : Type u) [has_le α] extends has_top α :=
(le_top : ∀ a : α, a ≤ ⊤)
section order_top
variables [partial_order α] [order_top α] {a b : α}
@[simp] theorem le_top {α : Type u} [has_le α] [order_top α] {a : α} : a ≤ ⊤ :=
order_top.le_top a
@[simp] theorem not_top_lt {α : Type u} [preorder α] [order_top α] {a : α} : ¬ ⊤ < a :=
λ h, lt_irrefl a (lt_of_le_of_lt le_top h)
theorem top_unique (h : ⊤ ≤ a) : a = ⊤ :=
le_top.antisymm h
-- TODO: delete in favor of the next?
theorem eq_top_iff : a = ⊤ ↔ ⊤ ≤ a :=
⟨λ eq, eq.symm ▸ le_refl ⊤, top_unique⟩
@[simp] theorem top_le_iff : ⊤ ≤ a ↔ a = ⊤ :=
⟨top_unique, λ h, h.symm ▸ le_refl ⊤⟩
@[simp] theorem is_top_iff_eq_top : is_top a ↔ a = ⊤ :=
⟨λ h, h.unique le_top, λ h b, h.symm ▸ le_top⟩
theorem eq_top_mono (h : a ≤ b) (h₂ : a = ⊤) : b = ⊤ :=
top_le_iff.1 $ h₂ ▸ h
lemma lt_top_iff_ne_top : a < ⊤ ↔ a ≠ ⊤ := le_top.lt_iff_ne
lemma ne_top_of_lt (h : a < b) : a ≠ ⊤ :=
lt_top_iff_ne_top.1 $ lt_of_lt_of_le h le_top
alias ne_top_of_lt ← has_lt.lt.ne_top
theorem ne_top_of_le_ne_top {a b : α} (hb : b ≠ ⊤) (hab : a ≤ b) : a ≠ ⊤ :=
λ ha, hb $ top_unique $ ha ▸ hab
lemma eq_top_of_maximal (h : ∀ b, ¬ a < b) : a = ⊤ :=
or.elim (lt_or_eq_of_le le_top) (λ hlt, absurd hlt (h ⊤)) (λ he, he)
lemma ne.lt_top (h : a ≠ ⊤) : a < ⊤ := lt_top_iff_ne_top.mpr h
lemma ne.lt_top' (h : ⊤ ≠ a) : a < ⊤ := h.symm.lt_top
end order_top
lemma strict_mono.maximal_preimage_top [linear_order α] [preorder β] [order_top β]
{f : α → β} (H : strict_mono f) {a} (h_top : f a = ⊤) (x : α) :
x ≤ a :=
H.maximal_of_maximal_image (λ p, by { rw h_top, exact le_top }) x
theorem order_top.ext_top {α} {hA : partial_order α} (A : order_top α)
{hB : partial_order α} (B : order_top α)
(H : ∀ x y : α, (by haveI := hA; exact x ≤ y) ↔ x ≤ y) :
(by haveI := A; exact ⊤ : α) = ⊤ :=
top_unique $ by rw ← H; apply le_top
theorem order_top.ext {α} [partial_order α] {A B : order_top α} : A = B :=
begin
have tt := order_top.ext_top A B (λ _ _, iff.rfl),
casesI A with _ ha, casesI B with _ hb,
congr,
exact le_antisymm (hb _) (ha _)
end
/-- An order is an `order_bot` if it has a least element.
We state this using a data mixin, holding the value of `⊥` and the least element constraint. -/
@[ancestor has_bot]
class order_bot (α : Type u) [has_le α] extends has_bot α :=
(bot_le : ∀ a : α, ⊥ ≤ a)
section order_bot
variables [partial_order α] [order_bot α] {a b : α}
@[simp] theorem bot_le {α : Type u} [has_le α] [order_bot α] {a : α} : ⊥ ≤ a := order_bot.bot_le a
@[simp] theorem not_lt_bot {α : Type u} [preorder α] [order_bot α] {a : α} : ¬ a < ⊥ :=
λ h, lt_irrefl a (lt_of_lt_of_le h bot_le)
theorem bot_unique (h : a ≤ ⊥) : a = ⊥ :=
h.antisymm bot_le
-- TODO: delete?
theorem eq_bot_iff : a = ⊥ ↔ a ≤ ⊥ :=
⟨λ eq, eq.symm ▸ le_refl ⊥, bot_unique⟩
@[simp] theorem le_bot_iff : a ≤ ⊥ ↔ a = ⊥ :=
⟨bot_unique, λ h, h.symm ▸ le_refl ⊥⟩
@[simp] theorem is_bot_iff_eq_bot : is_bot a ↔ a = ⊥ :=
⟨λ h, h.unique bot_le, λ h b, h.symm ▸ bot_le⟩
theorem ne_bot_of_le_ne_bot {a b : α} (hb : b ≠ ⊥) (hab : b ≤ a) : a ≠ ⊥ :=
λ ha, hb $ bot_unique $ ha ▸ hab
theorem eq_bot_mono (h : a ≤ b) (h₂ : b = ⊥) : a = ⊥ :=
le_bot_iff.1 $ h₂ ▸ h
lemma bot_lt_iff_ne_bot : ⊥ < a ↔ a ≠ ⊥ :=
begin
haveI := classical.dec_eq α,
haveI : decidable (a ≤ ⊥) := decidable_of_iff' _ le_bot_iff,
simp only [lt_iff_le_not_le, not_iff_not.mpr le_bot_iff, true_and, bot_le],
end
lemma ne_bot_of_gt (h : a < b) : b ≠ ⊥ :=
bot_lt_iff_ne_bot.1 $ lt_of_le_of_lt bot_le h
alias ne_bot_of_gt ← has_lt.lt.ne_bot
lemma eq_bot_of_minimal (h : ∀ b, ¬ b < a) : a = ⊥ :=
or.elim (lt_or_eq_of_le bot_le) (λ hlt, absurd hlt (h ⊥)) (λ he, he.symm)
lemma ne.bot_lt (h : a ≠ ⊥) : ⊥ < a := bot_lt_iff_ne_bot.mpr h
lemma ne.bot_lt' (h : ⊥ ≠ a) : ⊥ < a := h.symm.bot_lt
end order_bot
lemma strict_mono.minimal_preimage_bot [linear_order α] [partial_order β] [order_bot β]
{f : α → β} (H : strict_mono f) {a} (h_bot : f a = ⊥) (x : α) :
a ≤ x :=
H.minimal_of_minimal_image (λ p, by { rw h_bot, exact bot_le }) x
theorem order_bot.ext_bot {α} {hA : partial_order α} (A : order_bot α)
{hB : partial_order α} (B : order_bot α)
(H : ∀ x y : α, (by haveI := hA; exact x ≤ y) ↔ x ≤ y) :
(by haveI := A; exact ⊥ : α) = ⊥ :=
bot_unique $ by rw ← H; apply bot_le
theorem order_bot.ext {α} [partial_order α] {A B : order_bot α} : A = B :=
begin
have tt := order_bot.ext_bot A B (λ _ _, iff.rfl),
casesI A with a ha, casesI B with b hb,
congr,
exact le_antisymm (ha _) (hb _)
end
section semilattice_sup_top
variables [semilattice_sup α] [order_top α] {a : α}
@[simp] theorem top_sup_eq : ⊤ ⊔ a = ⊤ :=
sup_of_le_left le_top
@[simp] theorem sup_top_eq : a ⊔ ⊤ = ⊤ :=
sup_of_le_right le_top
end semilattice_sup_top
section semilattice_sup_bot
variables [semilattice_sup α] [order_bot α] {a b : α}
@[simp] theorem bot_sup_eq : ⊥ ⊔ a = a :=
sup_of_le_right bot_le
@[simp] theorem sup_bot_eq : a ⊔ ⊥ = a :=
sup_of_le_left bot_le
@[simp] theorem sup_eq_bot_iff : a ⊔ b = ⊥ ↔ (a = ⊥ ∧ b = ⊥) :=
by rw [eq_bot_iff, sup_le_iff]; simp
end semilattice_sup_bot
section semilattice_inf_top
variables [semilattice_inf α] [order_top α] {a b : α}
@[simp] theorem top_inf_eq : ⊤ ⊓ a = a :=
inf_of_le_right le_top
@[simp] theorem inf_top_eq : a ⊓ ⊤ = a :=
inf_of_le_left le_top
@[simp] theorem inf_eq_top_iff : a ⊓ b = ⊤ ↔ (a = ⊤ ∧ b = ⊤) :=
by rw [eq_top_iff, le_inf_iff]; simp
end semilattice_inf_top
section semilattice_inf_bot
variables [semilattice_inf α] [order_bot α] {a : α}
@[simp] theorem bot_inf_eq : ⊥ ⊓ a = ⊥ :=
inf_of_le_left bot_le
@[simp] theorem inf_bot_eq : a ⊓ ⊥ = ⊥ :=
inf_of_le_right bot_le
end semilattice_inf_bot
/-! ### Bounded order -/
/-- A bounded order describes an order `(≤)` with a top and bottom element,
denoted `⊤` and `⊥` respectively. -/
@[ancestor order_top order_bot]
class bounded_order (α : Type u) [has_le α] extends order_top α, order_bot α.
theorem bounded_order.ext {α} [partial_order α] {A B : bounded_order α} : A = B :=
begin
have ht : @bounded_order.to_order_top α _ A = @bounded_order.to_order_top α _ B := order_top.ext,
have hb : @bounded_order.to_order_bot α _ A = @bounded_order.to_order_bot α _ B := order_bot.ext,
casesI A,
casesI B,
injection ht with h,
injection hb with h',
convert rfl,
{ exact h.symm },
{ exact h'.symm }
end
/-- Propositions form a distributive lattice. -/
instance Prop.distrib_lattice : distrib_lattice Prop :=
{ le := λ a b, a → b,
le_refl := λ _, id,
le_trans := λ a b c f g, g ∘ f,
le_antisymm := λ a b Hab Hba, propext ⟨Hab, Hba⟩,
sup := or,
le_sup_left := @or.inl,
le_sup_right := @or.inr,
sup_le := λ a b c, or.rec,
inf := and,
inf_le_left := @and.left,
inf_le_right := @and.right,
le_inf := λ a b c Hab Hac Ha, and.intro (Hab Ha) (Hac Ha),
le_sup_inf := λ a b c H, or_iff_not_imp_left.2 $
λ Ha, ⟨H.1.resolve_left Ha, H.2.resolve_left Ha⟩ }
/-- Propositions form a bounded order. -/
instance Prop.bounded_order : bounded_order Prop :=
{ top := true,
le_top := λ a Ha, true.intro,
bot := false,
bot_le := @false.elim }
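-- Concretely (a minimal sanity check, not part of the original file): on `Prop`,
-- `≤` is implication, `⊥` is `false` and `⊤` is `true`, so `bot_le` specializes to
-- "`false` implies anything".
example (p : Prop) : (⊥ : Prop) ≤ p := bot_le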
noncomputable instance Prop.linear_order : linear_order Prop :=
@lattice.to_linear_order Prop _ (classical.dec_eq _) (classical.dec_rel _) (classical.dec_rel _) $
λ p q, by { change (p → q) ∨ (q → p), tauto! }
@[simp] lemma le_Prop_eq : ((≤) : Prop → Prop → Prop) = (→) := rfl
@[simp] lemma sup_Prop_eq : (⊔) = (∨) := rfl
@[simp] lemma inf_Prop_eq : (⊓) = (∧) := rfl
section logic
variable [preorder α]
theorem monotone_and {p q : α → Prop} (m_p : monotone p) (m_q : monotone q) :
monotone (λ x, p x ∧ q x) :=
λ a b h, and.imp (m_p h) (m_q h)
-- Note: by finish [monotone] doesn't work
theorem monotone_or {p q : α → Prop} (m_p : monotone p) (m_q : monotone q) :
monotone (λ x, p x ∨ q x) :=
λ a b h, or.imp (m_p h) (m_q h)
end logic
/-! ### Function lattices -/
namespace pi
variables {ι : Type*} {α' : ι → Type*}
instance [Π i, has_bot (α' i)] : has_bot (Π i, α' i) := ⟨λ i, ⊥⟩
@[simp] lemma bot_apply [Π i, has_bot (α' i)] (i : ι) : (⊥ : Π i, α' i) i = ⊥ := rfl
lemma bot_def [Π i, has_bot (α' i)] : (⊥ : Π i, α' i) = λ i, ⊥ := rfl
instance [Π i, has_top (α' i)] : has_top (Π i, α' i) := ⟨λ i, ⊤⟩
@[simp] lemma top_apply [Π i, has_top (α' i)] (i : ι) : (⊤ : Π i, α' i) i = ⊤ := rfl
lemma top_def [Π i, has_top (α' i)] : (⊤ : Π i, α' i) = λ i, ⊤ := rfl
instance [Π i, has_le (α' i)] [Π i, order_top (α' i)] : order_top (Π i, α' i) :=
{ le_top := λ _ _, le_top, ..pi.has_top }
instance [Π i, has_le (α' i)] [Π i, order_bot (α' i)] : order_bot (Π i, α' i) :=
{ bot_le := λ _ _, bot_le, ..pi.has_bot }
instance [Π i, has_le (α' i)] [Π i, bounded_order (α' i)] :
bounded_order (Π i, α' i) :=
{ ..pi.order_top, ..pi.order_bot }
end pi
section subsingleton
variables [partial_order α] [bounded_order α]
lemma eq_bot_of_bot_eq_top (hα : (⊥ : α) = ⊤) (x : α) :
x = (⊥ : α) :=
eq_bot_mono le_top (eq.symm hα)
lemma eq_top_of_bot_eq_top (hα : (⊥ : α) = ⊤) (x : α) :
x = (⊤ : α) :=
eq_top_mono bot_le hα
lemma subsingleton_of_top_le_bot (h : (⊤ : α) ≤ (⊥ : α)) :
subsingleton α :=
⟨λ a b, le_antisymm (le_trans le_top $ le_trans h bot_le) (le_trans le_top $ le_trans h bot_le)⟩
lemma subsingleton_of_bot_eq_top (hα : (⊥ : α) = (⊤ : α)) :
subsingleton α :=
subsingleton_of_top_le_bot (ge_of_eq hα)
lemma subsingleton_iff_bot_eq_top :
(⊥ : α) = (⊤ : α) ↔ subsingleton α :=
⟨subsingleton_of_bot_eq_top, λ h, by exactI subsingleton.elim ⊥ ⊤⟩
end subsingleton
/-! ### `with_bot`, `with_top` -/
/-- Attach `⊥` to a type. -/
def with_bot (α : Type*) := option α
namespace with_bot
meta instance {α} [has_to_format α] : has_to_format (with_bot α) :=
{ to_format := λ x,
match x with
| none := "⊥"
| (some x) := to_fmt x
end }
instance : has_coe_t α (with_bot α) := ⟨some⟩
instance has_bot : has_bot (with_bot α) := ⟨none⟩
instance : inhabited (with_bot α) := ⟨⊥⟩
lemma none_eq_bot : (none : with_bot α) = (⊥ : with_bot α) := rfl
lemma some_eq_coe (a : α) : (some a : with_bot α) = (↑a : with_bot α) := rfl
@[simp] theorem bot_ne_coe (a : α) : ⊥ ≠ (a : with_bot α) .
@[simp] theorem coe_ne_bot (a : α) : (a : with_bot α) ≠ ⊥ .
/-- Recursor for `with_bot` using the preferred forms `⊥` and `↑a`. -/
@[elab_as_eliminator]
def rec_bot_coe {C : with_bot α → Sort*} (h₁ : C ⊥) (h₂ : Π (a : α), C a) :
Π (n : with_bot α), C n :=
option.rec h₁ h₂
@[norm_cast]
theorem coe_eq_coe {a b : α} : (a : with_bot α) = b ↔ a = b :=
by rw [← option.some.inj_eq a b]; refl
lemma ne_bot_iff_exists {x : with_bot α} : x ≠ ⊥ ↔ ∃ (a : α), ↑a = x :=
option.ne_none_iff_exists
/-- Deconstruct a `x : with_bot α` to the underlying value in `α`, given a proof that `x ≠ ⊥`. -/
def unbot : Π (x : with_bot α), x ≠ ⊥ → α
| ⊥ h := absurd rfl h
| (some x) h := x
@[simp] lemma coe_unbot {α : Type*} (x : with_bot α) (h : x ≠ ⊥) :
(x.unbot h : with_bot α) = x :=
by { cases x, simpa using h, refl, }
@[simp] lemma unbot_coe (x : α) (h : (x : with_bot α) ≠ ⊥ := coe_ne_bot _) :
(x : with_bot α).unbot h = x := rfl
@[priority 10]
instance has_le [has_le α] : has_le (with_bot α) :=
{ le := λ o₁ o₂ : option α, ∀ a ∈ o₁, ∃ b ∈ o₂, a ≤ b }
@[priority 10]
instance has_lt [has_lt α] : has_lt (with_bot α) :=
{ lt := λ o₁ o₂ : option α, ∃ b ∈ o₂, ∀ a ∈ o₁, a < b }
@[simp] theorem some_lt_some [has_lt α] {a b : α} :
@has_lt.lt (with_bot α) _ (some a) (some b) ↔ a < b :=
by simp [(<)]
lemma none_lt_some [has_lt α] (a : α) :
@has_lt.lt (with_bot α) _ none (some a) :=
⟨a, rfl, λ b hb, (option.not_mem_none _ hb).elim⟩
lemma bot_lt_coe [has_lt α] (a : α) : (⊥ : with_bot α) < a := none_lt_some a
instance : can_lift (with_bot α) α :=
{ coe := coe,
cond := λ r, r ≠ ⊥,
prf := λ x hx, ⟨option.get $ option.ne_none_iff_is_some.1 hx, option.some_get _⟩ }
instance [preorder α] : preorder (with_bot α) :=
{ le := (≤),
lt := (<),
lt_iff_le_not_le := by intros; cases a; cases b;
simp [lt_iff_le_not_le]; simp [(≤), (<)];
split; refl,
le_refl := λ o a ha, ⟨a, ha, le_refl _⟩,
le_trans := λ o₁ o₂ o₃ h₁ h₂ a ha,
let ⟨b, hb, ab⟩ := h₁ a ha, ⟨c, hc, bc⟩ := h₂ b hb in
⟨c, hc, le_trans ab bc⟩ }
instance partial_order [partial_order α] : partial_order (with_bot α) :=
{ le_antisymm := λ o₁ o₂ h₁ h₂, begin
cases o₁ with a,
{ cases o₂ with b, {refl},
rcases h₂ b rfl with ⟨_, ⟨⟩, _⟩ },
{ rcases h₁ a rfl with ⟨b, ⟨⟩, h₁'⟩,
rcases h₂ b rfl with ⟨_, ⟨⟩, h₂'⟩,
rw le_antisymm h₁' h₂' }
end,
.. with_bot.preorder }
instance order_bot [has_le α] : order_bot (with_bot α) :=
{ bot_le := λ a a' h, option.no_confusion h,
..with_bot.has_bot }
@[simp, norm_cast] theorem coe_le_coe [has_le α] {a b : α} :
(a : with_bot α) ≤ b ↔ a ≤ b :=
⟨λ h, by rcases h a rfl with ⟨_, ⟨⟩, h⟩; exact h,
λ h a' e, option.some_inj.1 e ▸ ⟨b, rfl, h⟩⟩
@[simp] theorem some_le_some [has_le α] {a b : α} :
@has_le.le (with_bot α) _ (some a) (some b) ↔ a ≤ b := coe_le_coe
theorem coe_le [has_le α] {a b : α} :
∀ {o : option α}, b ∈ o → ((a : with_bot α) ≤ o ↔ a ≤ b)
| _ rfl := coe_le_coe
@[norm_cast]
lemma coe_lt_coe [has_lt α] {a b : α} : (a : with_bot α) < b ↔ a < b := some_lt_some
lemma le_coe_get_or_else [preorder α] : ∀ (a : with_bot α) (b : α), a ≤ a.get_or_else b
| (some a) b := le_refl a
| none b := λ _ h, option.no_confusion h
@[simp] lemma get_or_else_bot (a : α) : option.get_or_else (⊥ : with_bot α) a = a := rfl
lemma get_or_else_bot_le_iff [has_le α] [order_bot α] {a : with_bot α} {b : α} :
a.get_or_else ⊥ ≤ b ↔ a ≤ b :=
by cases a; simp [none_eq_bot, some_eq_coe]
instance decidable_le [has_le α] [@decidable_rel α (≤)] : @decidable_rel (with_bot α) (≤)
| none x := is_true $ λ a h, option.no_confusion h
| (some x) (some y) :=
if h : x ≤ y
then is_true (some_le_some.2 h)
else is_false $ by simp *
| (some x) none := is_false $ λ h, by rcases h x rfl with ⟨y, ⟨_⟩, _⟩
instance decidable_lt [has_lt α] [@decidable_rel α (<)] : @decidable_rel (with_bot α) (<)
| none (some x) := is_true $ by existsi [x,rfl]; rintros _ ⟨⟩
| (some x) (some y) :=
if h : x < y
then is_true $ by simp *
else is_false $ by simp *
| x none := is_false $ by rintro ⟨a,⟨⟨⟩⟩⟩
instance [partial_order α] [is_total α (≤)] : is_total (with_bot α) (≤) :=
{ total := λ a b, match a, b with
| none , _ := or.inl bot_le
| _ , none := or.inr bot_le
| some x, some y := by simp only [some_le_some, total_of]
end }
instance semilattice_sup [semilattice_sup α] : semilattice_sup (with_bot α) :=
{ sup := option.lift_or_get (⊔),
le_sup_left := λ o₁ o₂ a ha,
by cases ha; cases o₂; simp [option.lift_or_get],
le_sup_right := λ o₁ o₂ a ha,
by cases ha; cases o₁; simp [option.lift_or_get],
sup_le := λ o₁ o₂ o₃ h₁ h₂ a ha, begin
cases o₁ with b; cases o₂ with c; cases ha,
{ exact h₂ a rfl },
{ exact h₁ a rfl },
{ rcases h₁ b rfl with ⟨d, ⟨⟩, h₁'⟩,
simp at h₂,
exact ⟨d, rfl, sup_le h₁' h₂⟩ }
end,
..with_bot.order_bot,
..with_bot.partial_order }
lemma coe_sup [semilattice_sup α] (a b : α) : ((a ⊔ b : α) : with_bot α) = a ⊔ b := rfl
instance semilattice_inf [semilattice_inf α] : semilattice_inf (with_bot α) :=
{ inf := λ o₁ o₂, o₁.bind (λ a, o₂.map (λ b, a ⊓ b)),
inf_le_left := λ o₁ o₂ a ha, begin
simp at ha, rcases ha with ⟨b, rfl, c, rfl, rfl⟩,
exact ⟨_, rfl, inf_le_left⟩
end,
inf_le_right := λ o₁ o₂ a ha, begin
simp at ha, rcases ha with ⟨b, rfl, c, rfl, rfl⟩,
exact ⟨_, rfl, inf_le_right⟩
end,
le_inf := λ o₁ o₂ o₃ h₁ h₂ a ha, begin
cases ha,
rcases h₁ a rfl with ⟨b, ⟨⟩, ab⟩,
rcases h₂ a rfl with ⟨c, ⟨⟩, ac⟩,
exact ⟨_, rfl, le_inf ab ac⟩
end,
..with_bot.order_bot,
..with_bot.partial_order }
lemma coe_inf [semilattice_inf α] (a b : α) : ((a ⊓ b : α) : with_bot α) = a ⊓ b := rfl
instance lattice [lattice α] : lattice (with_bot α) :=
{ ..with_bot.semilattice_sup, ..with_bot.semilattice_inf }
instance linear_order [linear_order α] : linear_order (with_bot α) :=
lattice.to_linear_order _ $ λ o₁ o₂,
begin
cases o₁ with a, {exact or.inl bot_le},
cases o₂ with b, {exact or.inr bot_le},
simp [le_total]
end
@[norm_cast] -- this is not marked simp because the corresponding with_top lemmas are used
lemma coe_min [linear_order α] (x y : α) : ((min x y : α) : with_bot α) = min x y := rfl
@[norm_cast] -- this is not marked simp because the corresponding with_top lemmas are used
lemma coe_max [linear_order α] (x y : α) : ((max x y : α) : with_bot α) = max x y := rfl
instance order_top [has_le α] [order_top α] : order_top (with_bot α) :=
{ top := some ⊤,
le_top := λ o a ha, by cases ha; exact ⟨_, rfl, le_top⟩ }
instance bounded_order [has_le α] [order_top α] : bounded_order (with_bot α) :=
{ ..with_bot.order_top, ..with_bot.order_bot }
lemma well_founded_lt [partial_order α] (h : well_founded ((<) : α → α → Prop)) :
well_founded ((<) : with_bot α → with_bot α → Prop) :=
have acc_bot : acc ((<) : with_bot α → with_bot α → Prop) ⊥ :=
acc.intro _ (λ a ha, (not_le_of_gt ha bot_le).elim),
⟨λ a, option.rec_on a acc_bot (λ a, acc.intro _ (λ b, option.rec_on b (λ _, acc_bot)
(λ b, well_founded.induction h b
(show ∀ b : α, (∀ c, c < b → (c : with_bot α) < a →
acc ((<) : with_bot α → with_bot α → Prop) c) → (b : with_bot α) < a →
acc ((<) : with_bot α → with_bot α → Prop) b,
from λ b ih hba, acc.intro _ (λ c, option.rec_on c (λ _, acc_bot)
(λ c hc, ih _ (some_lt_some.1 hc) (lt_trans hc hba)))))))⟩
instance densely_ordered [partial_order α] [densely_ordered α] [no_bot_order α] :
densely_ordered (with_bot α) :=
⟨ λ a b,
match a, b with
| a, none := λ h : a < ⊥, (not_lt_bot h).elim
| none, some b := λ h, let ⟨a, ha⟩ := no_bot b in ⟨a, bot_lt_coe a, coe_lt_coe.2 ha⟩
| some a, some b := λ h, let ⟨a, ha₁, ha₂⟩ := exists_between (coe_lt_coe.1 h) in
⟨a, coe_lt_coe.2 ha₁, coe_lt_coe.2 ha₂⟩
end⟩
instance {α : Type*} [preorder α] [no_top_order α] [nonempty α] : no_top_order (with_bot α) :=
⟨begin
apply with_bot.rec_bot_coe,
{ apply ‹nonempty α›.elim,
exact λ a, ⟨a, with_bot.bot_lt_coe a⟩, },
{ intro a,
obtain ⟨b, ha⟩ := no_top a,
exact ⟨b, with_bot.coe_lt_coe.mpr ha⟩, }
end⟩
end with_bot
--TODO(Mario): Construct using order dual on with_bot
/-- Attach `⊤` to a type. -/
def with_top (α : Type*) := option α
namespace with_top
meta instance {α} [has_to_format α] : has_to_format (with_top α) :=
{ to_format := λ x,
match x with
| none := "⊤"
| (some x) := to_fmt x
end }
instance : has_coe_t α (with_top α) := ⟨some⟩
instance has_top : has_top (with_top α) := ⟨none⟩
instance : inhabited (with_top α) := ⟨⊤⟩
lemma none_eq_top : (none : with_top α) = (⊤ : with_top α) := rfl
lemma some_eq_coe (a : α) : (some a : with_top α) = (↑a : with_top α) := rfl
/-- Recursor for `with_top` using the preferred forms `⊤` and `↑a`. -/
@[elab_as_eliminator]
def rec_top_coe {C : with_top α → Sort*} (h₁ : C ⊤) (h₂ : Π (a : α), C a) :
Π (n : with_top α), C n :=
option.rec h₁ h₂
@[norm_cast]
theorem coe_eq_coe {a b : α} : (a : with_top α) = b ↔ a = b :=
by rw [← option.some.inj_eq a b]; refl
@[simp] theorem top_ne_coe {a : α} : ⊤ ≠ (a : with_top α) .
@[simp] theorem coe_ne_top {a : α} : (a : with_top α) ≠ ⊤ .
lemma ne_top_iff_exists {x : with_top α} : x ≠ ⊤ ↔ ∃ (a : α), ↑a = x :=
option.ne_none_iff_exists
/-- Deconstruct a `x : with_top α` to the underlying value in `α`, given a proof that `x ≠ ⊤`. -/
def untop : Π (x : with_top α), x ≠ ⊤ → α :=
with_bot.unbot
@[simp] lemma coe_untop {α : Type*} (x : with_top α) (h : x ≠ ⊤) :
(x.untop h : with_top α) = x :=
by { cases x, simpa using h, refl, }
@[simp] lemma untop_coe (x : α) (h : (x : with_top α) ≠ ⊤ := coe_ne_top) :
(x : with_top α).untop h = x := rfl
@[priority 10]
instance has_lt [has_lt α] : has_lt (with_top α) :=
{ lt := λ o₁ o₂ : option α, ∃ b ∈ o₁, ∀ a ∈ o₂, b < a }
@[priority 10]
instance has_le [has_le α] : has_le (with_top α) :=
{ le := λ o₁ o₂ : option α, ∀ a ∈ o₂, ∃ b ∈ o₁, b ≤ a }
@[simp] theorem some_lt_some [has_lt α] {a b : α} :
@has_lt.lt (with_top α) _ (some a) (some b) ↔ a < b :=
by simp [(<)]
@[simp] theorem some_le_some [has_le α] {a b : α} :
@has_le.le (with_top α) _ (some a) (some b) ↔ a ≤ b :=
by simp [(≤)]
@[simp] theorem le_none [has_le α] {a : with_top α} :
@has_le.le (with_top α) _ a none :=
by simp [(≤)]
@[simp] theorem some_lt_none [has_lt α] (a : α) :
@has_lt.lt (with_top α) _ (some a) none :=
by simp [(<)]; existsi a; refl
instance : can_lift (with_top α) α :=
{ coe := coe,
cond := λ r, r ≠ ⊤,
prf := λ x hx, ⟨option.get $ option.ne_none_iff_is_some.1 hx, option.some_get _⟩ }
instance [preorder α] : preorder (with_top α) :=
{ le := λ o₁ o₂ : option α, ∀ a ∈ o₂, ∃ b ∈ o₁, b ≤ a,
lt := (<),
lt_iff_le_not_le := by { intros; cases a; cases b;
simp [lt_iff_le_not_le]; simp [(<),(≤)] },
le_refl := λ o a ha, ⟨a, ha, le_refl _⟩,
le_trans := λ o₁ o₂ o₃ h₁ h₂ c hc,
let ⟨b, hb, bc⟩ := h₂ c hc, ⟨a, ha, ab⟩ := h₁ b hb in
⟨a, ha, le_trans ab bc⟩, }
instance partial_order [partial_order α] : partial_order (with_top α) :=
{ le_antisymm := λ o₁ o₂ h₁ h₂, begin
cases o₂ with b,
{ cases o₁ with a, {refl},
rcases h₂ a rfl with ⟨_, ⟨⟩, _⟩ },
{ rcases h₁ b rfl with ⟨a, ⟨⟩, h₁'⟩,
rcases h₂ a rfl with ⟨_, ⟨⟩, h₂'⟩,
rw le_antisymm h₁' h₂' }
end,
.. with_top.preorder }
instance order_top [has_le α] : order_top (with_top α) :=
{ le_top := λ a a' h, option.no_confusion h,
.. with_top.has_top }
@[simp, norm_cast] theorem coe_le_coe [has_le α] {a b : α} :
(a : with_top α) ≤ b ↔ a ≤ b :=
⟨λ h, by rcases h b rfl with ⟨_, ⟨⟩, h⟩; exact h,
λ h a' e, option.some_inj.1 e ▸ ⟨a, rfl, h⟩⟩
theorem le_coe [has_le α] {a b : α} :
∀ {o : option α}, a ∈ o →
(@has_le.le (with_top α) _ o b ↔ a ≤ b)
| _ rfl := coe_le_coe
theorem le_coe_iff [partial_order α] {b : α} : ∀{x : with_top α}, x ≤ b ↔ (∃a:α, x = a ∧ a ≤ b)
| (some a) := by simp [some_eq_coe, coe_eq_coe]
| none := by simp [none_eq_top]
theorem coe_le_iff [partial_order α] {a : α} : ∀{x : with_top α}, ↑a ≤ x ↔ (∀b:α, x = ↑b → a ≤ b)
| (some b) := by simp [some_eq_coe, coe_eq_coe]
| none := by simp [none_eq_top]
theorem lt_iff_exists_coe [partial_order α] : ∀{a b : with_top α}, a < b ↔ (∃p:α, a = p ∧ ↑p < b)
| (some a) b := by simp [some_eq_coe, coe_eq_coe]
| none b := by simp [none_eq_top]
@[norm_cast]
lemma coe_lt_coe [has_lt α] {a b : α} : (a : with_top α) < b ↔ a < b := some_lt_some
lemma coe_lt_top [has_lt α] (a : α) : (a : with_top α) < ⊤ := some_lt_none a
theorem coe_lt_iff [preorder α] {a : α} : ∀{x : with_top α}, ↑a < x ↔ (∀b:α, x = ↑b → a < b)
| (some b) := by simp [some_eq_coe, coe_eq_coe, coe_lt_coe]
| none := by simp [none_eq_top, coe_lt_top]
lemma not_top_le_coe [preorder α] (a : α) : ¬ (⊤:with_top α) ≤ ↑a :=
λ h, (lt_irrefl ⊤ (lt_of_le_of_lt h (coe_lt_top a))).elim
instance decidable_le [has_le α] [@decidable_rel α (≤)] : @decidable_rel (with_top α) (≤) :=
λ x y, @with_bot.decidable_le (order_dual α) _ _ y x
instance decidable_lt [has_lt α] [@decidable_rel α (<)] : @decidable_rel (with_top α) (<) :=
λ x y, @with_bot.decidable_lt (order_dual α) _ _ y x
instance [partial_order α] [is_total α (≤)] : is_total (with_top α) (≤) :=
{ total := λ a b, match a, b with
| none , _ := or.inr le_top
| _ , none := or.inl le_top
| some x, some y := by simp only [some_le_some, total_of]
end }
instance semilattice_inf [semilattice_inf α] : semilattice_inf (with_top α) :=
{ inf := option.lift_or_get (⊓),
inf_le_left := λ o₁ o₂ a ha,
by cases ha; cases o₂; simp [option.lift_or_get],
inf_le_right := λ o₁ o₂ a ha,
by cases ha; cases o₁; simp [option.lift_or_get],
le_inf := λ o₁ o₂ o₃ h₁ h₂ a ha, begin
cases o₂ with b; cases o₃ with c; cases ha,
{ exact h₂ a rfl },
{ exact h₁ a rfl },
{ rcases h₁ b rfl with ⟨d, ⟨⟩, h₁'⟩,
simp at h₂,
exact ⟨d, rfl, le_inf h₁' h₂⟩ }
end,
..with_top.partial_order }
lemma coe_inf [semilattice_inf α] (a b : α) : ((a ⊓ b : α) : with_top α) = a ⊓ b := rfl
instance semilattice_sup [semilattice_sup α] : semilattice_sup (with_top α) :=
{ sup := λ o₁ o₂, o₁.bind (λ a, o₂.map (λ b, a ⊔ b)),
le_sup_left := λ o₁ o₂ a ha, begin
simp at ha, rcases ha with ⟨b, rfl, c, rfl, rfl⟩,
exact ⟨_, rfl, le_sup_left⟩
end,
le_sup_right := λ o₁ o₂ a ha, begin
simp at ha, rcases ha with ⟨b, rfl, c, rfl, rfl⟩,
exact ⟨_, rfl, le_sup_right⟩
end,
sup_le := λ o₁ o₂ o₃ h₁ h₂ a ha, begin
cases ha,
rcases h₁ a rfl with ⟨b, ⟨⟩, ab⟩,
rcases h₂ a rfl with ⟨c, ⟨⟩, ac⟩,
exact ⟨_, rfl, sup_le ab ac⟩
end,
..with_top.partial_order }
lemma coe_sup [semilattice_sup α] (a b : α) : ((a ⊔ b : α) : with_top α) = a ⊔ b := rfl
instance lattice [lattice α] : lattice (with_top α) :=
{ ..with_top.semilattice_sup, ..with_top.semilattice_inf }
instance linear_order [linear_order α] : linear_order (with_top α) :=
lattice.to_linear_order _ $ λ o₁ o₂,
begin
cases o₁ with a, {exact or.inr le_top},
cases o₂ with b, {exact or.inl le_top},
simp [le_total]
end
@[simp, norm_cast]
lemma coe_min [linear_order α] (x y : α) : ((min x y : α) : with_top α) = min x y := rfl
@[simp, norm_cast]
lemma coe_max [linear_order α] (x y : α) : ((max x y : α) : with_top α) = max x y := rfl
instance order_bot [has_le α] [order_bot α] : order_bot (with_top α) :=
{ bot := some ⊥,
bot_le := λ o a ha, by cases ha; exact ⟨_, rfl, bot_le⟩ }
instance bounded_order [has_le α] [order_bot α] : bounded_order (with_top α) :=
{ ..with_top.order_top, ..with_top.order_bot }
lemma well_founded_lt {α : Type*} [partial_order α] (h : well_founded ((<) : α → α → Prop)) :
well_founded ((<) : with_top α → with_top α → Prop) :=
have acc_some : ∀ a : α, acc ((<) : with_top α → with_top α → Prop) (some a) :=
λ a, acc.intro _ (well_founded.induction h a
(show ∀ b, (∀ c, c < b → ∀ d : with_top α, d < some c → acc (<) d) →
∀ y : with_top α, y < some b → acc (<) y,
from λ b ih c, option.rec_on c (λ hc, (not_lt_of_ge le_top hc).elim)
(λ c hc, acc.intro _ (ih _ (some_lt_some.1 hc))))),
⟨λ a, option.rec_on a (acc.intro _ (λ y, option.rec_on y (λ h, (lt_irrefl _ h).elim)
(λ _ _, acc_some _))) acc_some⟩
instance densely_ordered [partial_order α] [densely_ordered α] [no_top_order α] :
densely_ordered (with_top α) :=
⟨ λ a b,
match a, b with
| none, a := λ h : ⊤ < a, (not_top_lt h).elim
| some a, none := λ h, let ⟨b, hb⟩ := no_top a in ⟨b, coe_lt_coe.2 hb, coe_lt_top b⟩
| some a, some b := λ h, let ⟨a, ha₁, ha₂⟩ := exists_between (coe_lt_coe.1 h) in
⟨a, coe_lt_coe.2 ha₁, coe_lt_coe.2 ha₂⟩
end⟩
lemma lt_iff_exists_coe_btwn [partial_order α] [densely_ordered α] [no_top_order α]
{a b : with_top α} :
(a < b) ↔ (∃ x : α, a < ↑x ∧ ↑x < b) :=
⟨λ h, let ⟨y, hy⟩ := exists_between h, ⟨x, hx⟩ := lt_iff_exists_coe.1 hy.2 in ⟨x, hx.1 ▸ hy⟩,
λ ⟨x, hx⟩, lt_trans hx.1 hx.2⟩
instance {α : Type*} [preorder α] [no_bot_order α] [nonempty α] : no_bot_order (with_top α) :=
⟨begin
apply with_top.rec_top_coe,
{ apply ‹nonempty α›.elim,
exact λ a, ⟨a, with_top.coe_lt_top a⟩, },
{ intro a,
obtain ⟨b, ha⟩ := no_bot a,
exact ⟨b, with_top.coe_lt_coe.mpr ha⟩, }
end⟩
end with_top
/-! ### Subtype, order dual, product lattices -/
namespace subtype
/-- A subtype remains a `⊥`-order if the property holds at `⊥`.
See note [reducible non-instances]. -/
@[reducible]
protected def order_bot [preorder α] [order_bot α] {P : α → Prop} (Pbot : P ⊥) :
order_bot {x : α // P x} :=
{ bot := ⟨⊥, Pbot⟩,
bot_le := λ _, bot_le }
/-- A subtype remains a `⊤`-order if the property holds at `⊤`.
See note [reducible non-instances]. -/
@[reducible]
protected def order_top [preorder α] [order_top α] {P : α → Prop} (Ptop : P ⊤) :
order_top {x : α // P x} :=
{ top := ⟨⊤, Ptop⟩,
le_top := λ _, le_top }
end subtype
namespace order_dual
variable (α)
instance [has_bot α] : has_top (order_dual α) := ⟨(⊥ : α)⟩
instance [has_top α] : has_bot (order_dual α) := ⟨(⊤ : α)⟩
instance [has_le α] [order_bot α] : order_top (order_dual α) :=
{ le_top := @bot_le α _ _,
.. order_dual.has_top α }
instance [has_le α] [order_top α] : order_bot (order_dual α) :=
{ bot_le := @le_top α _ _,
.. order_dual.has_bot α }
instance [has_le α] [bounded_order α] : bounded_order (order_dual α) :=
{ .. order_dual.order_top α, .. order_dual.order_bot α }
end order_dual
namespace prod
variables (α β)
instance [has_top α] [has_top β] : has_top (α × β) := ⟨⟨⊤, ⊤⟩⟩
instance [has_bot α] [has_bot β] : has_bot (α × β) := ⟨⟨⊥, ⊥⟩⟩
instance [has_le α] [has_le β] [order_top α] [order_top β] : order_top (α × β) :=
{ le_top := λ a, ⟨le_top, le_top⟩,
.. prod.has_top α β }
instance [has_le α] [has_le β] [order_bot α] [order_bot β] : order_bot (α × β) :=
{ bot_le := λ a, ⟨bot_le, bot_le⟩,
.. prod.has_bot α β }
instance [has_le α] [has_le β] [bounded_order α] [bounded_order β] : bounded_order (α × β) :=
{ .. prod.order_top α β, .. prod.order_bot α β }
end prod
/-! ### Disjointness and complements -/
section disjoint
section semilattice_inf_bot
variables [semilattice_inf α] [order_bot α]
/-- Two elements of a lattice are disjoint if their inf is the bottom element.
(This generalizes disjoint sets, viewed as members of the subset lattice.) -/
def disjoint (a b : α) : Prop := a ⊓ b ≤ ⊥
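-- A quick sanity check (not part of the original file, a minimal sketch): since `disjoint`
-- is by definition `a ⊓ b ≤ ⊥`, an equality `a ⊓ b = ⊥` is already a proof of it.
example {a b : α} (h : a ⊓ b = ⊥) : disjoint a b := h.le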
theorem disjoint.eq_bot {a b : α} (h : disjoint a b) : a ⊓ b = ⊥ :=
eq_bot_iff.2 h
theorem disjoint_iff {a b : α} : disjoint a b ↔ a ⊓ b = ⊥ :=
eq_bot_iff.symm
theorem disjoint.comm {a b : α} : disjoint a b ↔ disjoint b a :=
by rw [disjoint, disjoint, inf_comm]
@[symm] theorem disjoint.symm ⦃a b : α⦄ : disjoint a b → disjoint b a :=
disjoint.comm.1
lemma symmetric_disjoint : symmetric (disjoint : α → α → Prop) := disjoint.symm
@[simp] theorem disjoint_bot_left {a : α} : disjoint ⊥ a := inf_le_left
@[simp] theorem disjoint_bot_right {a : α} : disjoint a ⊥ := inf_le_right
theorem disjoint.mono {a b c d : α} (h₁ : a ≤ b) (h₂ : c ≤ d) :
disjoint b d → disjoint a c := le_trans (inf_le_inf h₁ h₂)
theorem disjoint.mono_left {a b c : α} (h : a ≤ b) : disjoint b c → disjoint a c :=
disjoint.mono h (le_refl _)
theorem disjoint.mono_right {a b c : α} (h : b ≤ c) : disjoint a c → disjoint a b :=
disjoint.mono (le_refl _) h
@[simp] lemma disjoint_self {a : α} : disjoint a a ↔ a = ⊥ :=
by simp [disjoint]
lemma disjoint.ne {a b : α} (ha : a ≠ ⊥) (hab : disjoint a b) : a ≠ b :=
by { intro h, rw [←h, disjoint_self] at hab, exact ha hab }
lemma disjoint.eq_bot_of_le {a b : α} (hab : disjoint a b) (h : a ≤ b) : a = ⊥ :=
eq_bot_iff.2 (by rwa ←inf_eq_left.2 h)
lemma disjoint.of_disjoint_inf_of_le {a b c : α} (h : disjoint (a ⊓ b) c) (hle : a ≤ c) :
disjoint a b := by rw [disjoint_iff, h.eq_bot_of_le (inf_le_left.trans hle)]
lemma disjoint.of_disjoint_inf_of_le' {a b c : α} (h : disjoint (a ⊓ b) c) (hle : b ≤ c) :
disjoint a b := by rw [disjoint_iff, h.eq_bot_of_le (inf_le_right.trans hle)]
end semilattice_inf_bot
section bounded_order
variables [lattice α] [bounded_order α] {a : α}
@[simp] theorem disjoint_top : disjoint a ⊤ ↔ a = ⊥ := by simp [disjoint_iff]
@[simp] theorem top_disjoint : disjoint ⊤ a ↔ a = ⊥ := by simp [disjoint_iff]
lemma eq_bot_of_disjoint_absorbs
{a b : α} (w : disjoint a b) (h : a ⊔ b = a) : b = ⊥ :=
begin
rw disjoint_iff at w,
rw [←w, right_eq_inf],
rwa sup_eq_left at h,
end
end bounded_order
section linear_order
variables [linear_order α]
lemma min_top_left [order_top α] (a : α) : min (⊤ : α) a = a := min_eq_right le_top
lemma min_top_right [order_top α] (a : α) : min a ⊤ = a := min_eq_left le_top
lemma max_bot_left [order_bot α] (a : α) : max (⊥ : α) a = a := max_eq_right bot_le
lemma max_bot_right [order_bot α] (a : α) : max a ⊥ = a := max_eq_left bot_le
-- `simp` can prove these, so they shouldn't be simp-lemmas.
lemma min_bot_left [order_bot α] (a : α) : min ⊥ a = ⊥ := min_eq_left bot_le
lemma min_bot_right [order_bot α] (a : α) : min a ⊥ = ⊥ := min_eq_right bot_le
lemma max_top_left [order_top α] (a : α) : max ⊤ a = ⊤ := max_eq_left le_top
lemma max_top_right [order_top α] (a : α) : max a ⊤ = ⊤ := max_eq_right le_top
@[simp] lemma min_eq_bot [order_bot α] {a b : α} : min a b = ⊥ ↔ a = ⊥ ∨ b = ⊥ :=
by { symmetry, cases le_total a b; simpa [*, min_eq_left, min_eq_right] using eq_bot_mono h }
@[simp] lemma max_eq_top [order_top α] {a b : α} : max a b = ⊤ ↔ a = ⊤ ∨ b = ⊤ :=
@min_eq_bot (order_dual α) _ _ a b
@[simp] lemma max_eq_bot [order_bot α] {a b : α} : max a b = ⊥ ↔ a = ⊥ ∧ b = ⊥ := sup_eq_bot_iff
@[simp] lemma min_eq_top [order_top α] {a b : α} : min a b = ⊤ ↔ a = ⊤ ∧ b = ⊤ := inf_eq_top_iff
end linear_order
section distrib_lattice_bot
variables [distrib_lattice α] [order_bot α] {a b c : α}
@[simp] lemma disjoint_sup_left : disjoint (a ⊔ b) c ↔ disjoint a c ∧ disjoint b c :=
by simp only [disjoint_iff, inf_sup_right, sup_eq_bot_iff]
@[simp] lemma disjoint_sup_right : disjoint a (b ⊔ c) ↔ disjoint a b ∧ disjoint a c :=
by simp only [disjoint_iff, inf_sup_left, sup_eq_bot_iff]
lemma disjoint.sup_left (ha : disjoint a c) (hb : disjoint b c) : disjoint (a ⊔ b) c :=
disjoint_sup_left.2 ⟨ha, hb⟩
lemma disjoint.sup_right (hb : disjoint a b) (hc : disjoint a c) : disjoint a (b ⊔ c) :=
disjoint_sup_right.2 ⟨hb, hc⟩
lemma disjoint.left_le_of_le_sup_right {a b c : α} (h : a ≤ b ⊔ c) (hd : disjoint a c) : a ≤ b :=
(λ x, le_of_inf_le_sup_le x (sup_le h le_sup_right)) ((disjoint_iff.mp hd).symm ▸ bot_le)
lemma disjoint.left_le_of_le_sup_left {a b c : α} (h : a ≤ c ⊔ b) (hd : disjoint a c) : a ≤ b :=
@le_of_inf_le_sup_le _ _ a b c ((disjoint_iff.mp hd).symm ▸ bot_le)
((@sup_comm _ _ c b) ▸ (sup_le h le_sup_left))
end distrib_lattice_bot
section semilattice_inf_bot
variables [semilattice_inf α] [order_bot α] {a b : α} (c : α)
lemma disjoint.inf_left (h : disjoint a b) : disjoint (a ⊓ c) b :=
h.mono_left inf_le_left
lemma disjoint.inf_left' (h : disjoint a b) : disjoint (c ⊓ a) b :=
h.mono_left inf_le_right
lemma disjoint.inf_right (h : disjoint a b) : disjoint a (b ⊓ c) :=
h.mono_right inf_le_left
lemma disjoint.inf_right' (h : disjoint a b) : disjoint a (c ⊓ b) :=
h.mono_right inf_le_right
end semilattice_inf_bot
end disjoint
lemma inf_eq_bot_iff_le_compl [distrib_lattice α] [bounded_order α] {a b c : α}
(h₁ : b ⊔ c = ⊤) (h₂ : b ⊓ c = ⊥) : a ⊓ b = ⊥ ↔ a ≤ c :=
⟨λ h,
calc a ≤ a ⊓ (b ⊔ c) : by simp [h₁]
... = (a ⊓ b) ⊔ (a ⊓ c) : by simp [inf_sup_left]
... ≤ c : by simp [h, inf_le_right],
λ h,
bot_unique $
calc a ⊓ b ≤ b ⊓ c : by { rw inf_comm, exact inf_le_inf_left _ h }
... = ⊥ : h₂⟩
section is_compl
/-- Two elements `x` and `y` are complements of each other if `x ⊔ y = ⊤` and `x ⊓ y = ⊥`. -/
structure is_compl [lattice α] [bounded_order α] (x y : α) : Prop :=
(inf_le_bot : x ⊓ y ≤ ⊥)
(top_le_sup : ⊤ ≤ x ⊔ y)
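-- For instance (a minimal sanity check, not part of the original file): `⊥` and `⊤`
-- are complements of each other, directly from the two defining inequalities.
example [lattice α] [bounded_order α] : is_compl (⊥ : α) ⊤ := ⟨inf_le_left, le_sup_right⟩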
namespace is_compl
section bounded_order
variables [lattice α] [bounded_order α] {x y z : α}
protected lemma disjoint (h : is_compl x y) : disjoint x y := h.1
@[symm] protected lemma symm (h : is_compl x y) : is_compl y x :=
⟨by { rw inf_comm, exact h.1 }, by { rw sup_comm, exact h.2 }⟩
lemma of_eq (h₁ : x ⊓ y = ⊥) (h₂ : x ⊔ y = ⊤) : is_compl x y :=
⟨le_of_eq h₁, le_of_eq h₂.symm⟩
lemma inf_eq_bot (h : is_compl x y) : x ⊓ y = ⊥ := h.disjoint.eq_bot
lemma sup_eq_top (h : is_compl x y) : x ⊔ y = ⊤ := top_unique h.top_le_sup
open order_dual (to_dual)
lemma to_order_dual (h : is_compl x y) : is_compl (to_dual x) (to_dual y) := ⟨h.2, h.1⟩
end bounded_order
variables [distrib_lattice α] [bounded_order α] {a b x y z : α}
lemma inf_left_le_of_le_sup_right (h : is_compl x y) (hle : a ≤ b ⊔ y) : a ⊓ x ≤ b :=
calc a ⊓ x ≤ (b ⊔ y) ⊓ x : inf_le_inf hle le_rfl
... = (b ⊓ x) ⊔ (y ⊓ x) : inf_sup_right
... = b ⊓ x : by rw [h.symm.inf_eq_bot, sup_bot_eq]
... ≤ b : inf_le_left
lemma le_sup_right_iff_inf_left_le {a b} (h : is_compl x y) : a ≤ b ⊔ y ↔ a ⊓ x ≤ b :=
⟨h.inf_left_le_of_le_sup_right, h.symm.to_order_dual.inf_left_le_of_le_sup_right⟩
lemma inf_left_eq_bot_iff (h : is_compl y z) : x ⊓ y = ⊥ ↔ x ≤ z :=
by rw [← le_bot_iff, ← h.le_sup_right_iff_inf_left_le, bot_sup_eq]
lemma inf_right_eq_bot_iff (h : is_compl y z) : x ⊓ z = ⊥ ↔ x ≤ y :=
h.symm.inf_left_eq_bot_iff
lemma disjoint_left_iff (h : is_compl y z) : disjoint x y ↔ x ≤ z :=
by { rw disjoint_iff, exact h.inf_left_eq_bot_iff }
lemma disjoint_right_iff (h : is_compl y z) : disjoint x z ↔ x ≤ y :=
h.symm.disjoint_left_iff
lemma le_left_iff (h : is_compl x y) : z ≤ x ↔ disjoint z y :=
h.disjoint_right_iff.symm
lemma le_right_iff (h : is_compl x y) : z ≤ y ↔ disjoint z x :=
h.symm.le_left_iff
lemma left_le_iff (h : is_compl x y) : x ≤ z ↔ ⊤ ≤ z ⊔ y :=
h.to_order_dual.le_left_iff
lemma right_le_iff (h : is_compl x y) : y ≤ z ↔ ⊤ ≤ z ⊔ x :=
h.symm.left_le_iff
protected lemma antitone {x' y'} (h : is_compl x y) (h' : is_compl x' y') (hx : x ≤ x') :
y' ≤ y :=
h'.right_le_iff.2 $ le_trans h.symm.top_le_sup (sup_le_sup_left hx _)
lemma right_unique (hxy : is_compl x y) (hxz : is_compl x z) :
y = z :=
le_antisymm (hxz.antitone hxy $ le_refl x) (hxy.antitone hxz $ le_refl x)
lemma left_unique (hxz : is_compl x z) (hyz : is_compl y z) :
x = y :=
hxz.symm.right_unique hyz.symm
lemma sup_inf {x' y'} (h : is_compl x y) (h' : is_compl x' y') :
is_compl (x ⊔ x') (y ⊓ y') :=
of_eq
(by rw [inf_sup_right, ← inf_assoc, h.inf_eq_bot, bot_inf_eq, bot_sup_eq, inf_left_comm,
h'.inf_eq_bot, inf_bot_eq])
(by rw [sup_inf_left, @sup_comm _ _ x, sup_assoc, h.sup_eq_top, sup_top_eq, top_inf_eq,
sup_assoc, sup_left_comm, h'.sup_eq_top, sup_top_eq])
lemma inf_sup {x' y'} (h : is_compl x y) (h' : is_compl x' y') :
is_compl (x ⊓ x') (y ⊔ y') :=
(h.symm.sup_inf h'.symm).symm
end is_compl
lemma is_compl_bot_top [lattice α] [bounded_order α] : is_compl (⊥ : α) ⊤ :=
is_compl.of_eq bot_inf_eq sup_top_eq
lemma is_compl_top_bot [lattice α] [bounded_order α] : is_compl (⊤ : α) ⊥ :=
is_compl.of_eq inf_bot_eq top_sup_eq
section
variables [lattice α] [bounded_order α] {x : α}
lemma eq_top_of_is_compl_bot (h : is_compl x ⊥) : x = ⊤ :=
sup_bot_eq.symm.trans h.sup_eq_top
lemma eq_top_of_bot_is_compl (h : is_compl ⊥ x) : x = ⊤ :=
eq_top_of_is_compl_bot h.symm
lemma eq_bot_of_is_compl_top (h : is_compl x ⊤) : x = ⊥ :=
eq_top_of_is_compl_bot h.to_order_dual
lemma eq_bot_of_top_is_compl (h : is_compl ⊤ x) : x = ⊥ :=
eq_top_of_bot_is_compl h.to_order_dual
end
/-- A complemented bounded lattice is one where every element has a (not necessarily unique)
complement. -/
class is_complemented (α) [lattice α] [bounded_order α] : Prop :=
(exists_is_compl : ∀ (a : α), ∃ (b : α), is_compl a b)
export is_complemented (exists_is_compl)
namespace is_complemented
variables [lattice α] [bounded_order α] [is_complemented α]
instance : is_complemented (order_dual α) :=
⟨λ a, let ⟨b, hb⟩ := exists_is_compl (show α, from a) in ⟨b, hb.to_order_dual⟩⟩
end is_complemented
end is_compl
section nontrivial
variables [partial_order α] [bounded_order α] [nontrivial α]
lemma bot_ne_top : (⊥ : α) ≠ ⊤ :=
λ H, not_nontrivial_iff_subsingleton.mpr (subsingleton_of_bot_eq_top H) ‹_›
lemma top_ne_bot : (⊤ : α) ≠ ⊥ := ne.symm bot_ne_top
end nontrivial
namespace bool
-- TODO: is this comment relevant now that `bounded_order` is factored out?
-- Could be generalised to `bounded_distrib_lattice` and `is_complemented`
instance : bounded_order bool :=
{ top := tt,
le_top := λ x, le_tt,
bot := ff,
bot_le := λ x, ff_le }
end bool
section bool
@[simp] lemma top_eq_tt : ⊤ = tt := rfl
@[simp] lemma bot_eq_ff : ⊥ = ff := rfl
end bool
|
{-# OPTIONS --warning=error #-}
A : Set₁
A = Set
{-# POLARITY A #-}
|
module Oscar.Class.ThickAndThin where
open import Oscar.Data.Fin
open import Oscar.Data.Equality
open import Oscar.Data.Nat
open import Oscar.Data.Maybe
record ThickAndThin {a} (A : Nat → Set a) : Set a where
field
thin : ∀ {m} → Fin (suc m) → A m → A (suc m)
thin-injective : ∀ {m} (x : Fin (suc m)) {y₁ y₂ : A m} → thin x y₁ ≡ thin x y₂ → y₁ ≡ y₂
thick : ∀ {m} → A (suc m) → Fin m → A m
thick∘thin=id : ∀ {m} (x : Fin m) (y : A m) → thick (thin (suc x) y) x ≡ y
check : ∀ {m} → Fin (suc m) → A (suc m) → Maybe (A m)
thin-check-id : ∀ {m} (x : Fin (suc m)) y → ∀ y' → thin x y' ≡ y → check x y ≡ just y'
open ThickAndThin ⦃ … ⦄ public
-- open import Oscar.Level
-- record ThickAndThin' {a} {A : Set a} (f : A → A) {b} (B : A → Set b) (g : ∀ {x} → B x → B (f x)) {c} (C : A → Set c) : Set (a ⊔ b ⊔ c) where
-- field
-- thin : ∀ {n} → B (f n) → C n → C (f n)
-- thick : ∀ {n} → C (f n) → B n → C n
-- thin-injective : ∀ {n} (z : B (f n)) {x y : C n} → thin z x ≡ thin z y → x ≡ y
-- thick∘thin=id : ∀ {n} (x : B n) (y : C n) → thick (thin (g x) y) x ≡ y
-- check : ∀ {n} → B (f n) → C (f n) → Maybe (C n)
-- thin-check-id : ∀ {n} (x : B (f n)) y → ∀ y' → thin x y' ≡ y → check x y ≡ just y'
-- --open ThickAndThin' ⦃ … ⦄ public
|
[GOAL]
m n : ℤ
⊢ dist m n = ↑|m - n|
[PROOFSTEP]
rw [dist_eq]
[GOAL]
m n : ℤ
⊢ |↑m - ↑n| = ↑|m - n|
[PROOFSTEP]
norm_cast
[GOAL]
⊢ Pairwise fun m n => 1 ≤ dist m n
[PROOFSTEP]
intro m n hne
[GOAL]
m n : ℤ
hne : m ≠ n
⊢ 1 ≤ dist m n
[PROOFSTEP]
rw [dist_eq]
[GOAL]
m n : ℤ
hne : m ≠ n
⊢ 1 ≤ |↑m - ↑n|
[PROOFSTEP]
norm_cast
[GOAL]
m n : ℤ
hne : m ≠ n
⊢ 1 ≤ |m - n|
[PROOFSTEP]
rwa [← zero_add (1 : ℤ), Int.add_one_le_iff, abs_pos, sub_ne_zero]
[GOAL]
x : ℤ
r : ℝ
⊢ ball x r = Ioo ⌊↑x - r⌋ ⌈↑x + r⌉
[PROOFSTEP]
rw [← preimage_ball, Real.ball_eq_Ioo, preimage_Ioo]
[GOAL]
x : ℤ
r : ℝ
⊢ closedBall x r = Icc ⌈↑x - r⌉ ⌊↑x + r⌋
[PROOFSTEP]
rw [← preimage_closedBall, Real.closedBall_eq_Icc, preimage_Icc]
[GOAL]
x : ℤ
r : ℝ
⊢ IsCompact (closedBall x r)
[PROOFSTEP]
rw [closedBall_eq_Icc]
[GOAL]
x : ℤ
r : ℝ
⊢ IsCompact (Icc ⌈↑x - r⌉ ⌊↑x + r⌋)
[PROOFSTEP]
exact (Set.finite_Icc _ _).isCompact
[GOAL]
⊢ cocompact ℤ = atBot ⊔ atTop
[PROOFSTEP]
simp_rw [← comap_dist_right_atTop_eq_cocompact (0 : ℤ), dist_eq', sub_zero, ← comap_abs_atTop, ←
@Int.comap_cast_atTop ℝ, comap_comap]
[GOAL]
⊢ comap (fun y => ↑|y|) atTop = comap (Int.cast ∘ abs) atTop
[PROOFSTEP]
rfl
[GOAL]
⊢ cofinite = atBot ⊔ atTop
[PROOFSTEP]
rw [← cocompact_eq_cofinite, cocompact_eq]
|
\subsection{Field extensions}\label{subsec:field_extensions}
\begin{definition}\label{def:splitting_field}
A \term{splitting field} for a nonconstant polynomial \( f(X) \in \Bbbk[X] \) of degree \( n \) is the smallest \hyperref[def:field_extension]{field extension} \( \BbbK \) of \( \Bbbk \) in which \( f(X) \) has \( n \) \hyperref[def:polynomial_root]{roots}. That is,
\begin{equation*}
\BbbK \cong \Bbbk(a_1, \ldots, a_n),
\end{equation*}
where \( a_1, \ldots, a_n \) are roots of \( f(X) \).
By \fullref{thm:splitting_field_existence}, splitting fields exist and are unique up to an isomorphism.
\end{definition}
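A standard concrete case illustrates the definition.
\begin{example}\label{ex:splitting_field_of_x_squared_minus_two}
Consider \( f(X) = X^2 - 2 \) over \( \BbbQ \). Its roots are \( \sqrt 2 \) and \( -\sqrt 2 \), hence its splitting field is
\begin{equation*}
\BbbQ(\sqrt 2, -\sqrt 2) = \BbbQ(\sqrt 2),
\end{equation*}
a two-dimensional vector space over \( \BbbQ \) with basis \( 1, \sqrt 2 \). Over \( \BbbR \), in contrast, the splitting field of the same polynomial is \( \BbbR \) itself, because both roots already belong to \( \BbbR \).
\end{example}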
\begin{proposition}\label{thm:splitting_field_existence}
For every nonconstant polynomial in one indeterminate over a field, there exists a \hyperref[def:splitting_field]{splitting field}, unique up to a (possibly nonunique) isomorphism.
\end{proposition}
\begin{proof}
\SubProof{Proof of existence}\mcite[thm 9.10 \\ thm. 9.12]{Knapp2016BasicAlgebra} We use induction on the degree of the polynomial \( f(X) = \sum_{k=0}^n a_k X^k \) over \( \Bbbk \). In the base case \( n = 1 \), \( f(X) \) is already linear, and hence \( \Bbbk \) is itself a splitting field for \( f(X) \).
Suppose that there exist splitting fields for polynomials over \( \Bbbk \) of degree \( n - 1 \). By \fullref{thm:maximal_ideal_theorem}, the \hyperref[def:semiring_ideal]{principal ideal} \( \braket{ f(X) } \) is contained in some maximal ideal \( M \). By \fullref{thm:quotient_by_maximal_ideal}, the quotient of \( \Bbbk[X] \) by \( M \) is a field.
Define \( u_n \coloneqq X + M \). Since \( f(X) \in \braket{ f(X) } \subseteq M \), we have \( f(u_n) = f(X) + M = M \), hence \( u_n \) is a root of \( f(X) \) in the field \( \Bbbk[X] / M \). Then
\begin{equation*}
f(X) = (X - u_n) q(X)
\end{equation*}
for some polynomial \( q(X) \) of degree \( n - 1 \).
We can now apply the inductive hypothesis to obtain a splitting field of \( q(X) \). Let \( u_1, \ldots, u_{n-1} \) be the roots of \( q(X) \) in this field. We can then adjoin \( u_1, \ldots, u_n \) to the field \( \Bbbk \) to obtain a splitting field \( \Bbbk(u_1, \ldots, u_n) \) of \( f(X) \). Denote this field by \( \BbbK \).
\SubProof{Proof of uniqueness} Suppose that, given our previous construction, \( \BbbL \) is also a splitting field for \( f(X) \).
Again, we use induction on the degree \( n \) of \( f(X) \). The case \( n = 1 \) is again obvious.
Suppose that any two splitting fields for polynomials over \( \Bbbk \) of degree \( n - 1 \) are isomorphic. Let \( b_n \) be a root of \( f(X) \) in \( \BbbL \) and let
\begin{equation*}
f(X) = (X - b_n) r(X).
\end{equation*}
Let \( b_1, \ldots, b_{n-1} \) be the roots of \( r(X) \). Let \( \varphi \) be an isomorphism between the subfield \( \Bbbk(a_1, \ldots, a_{n-1}) \) of \( \BbbK \) and the corresponding subfield \( \Bbbk(b_1, \ldots, b_{n-1}) \) of \( \BbbL \). It follows that
\begin{equation*}
\underbrace{\prod_{k=1}^{n-1} (X - b_k)}_{r(X)} = \underbrace{\prod_{k=1}^{n-1} (X - \varphi(a_k))}_{q^\varphi(X)}.
\end{equation*}
Therefore, we can extend \( \varphi \) to an isomorphism \( \widehat{\varphi}: \BbbK \to \BbbL \) by putting \( \widehat{\varphi}(a_n) \coloneqq b_n \).
\end{proof}
\begin{theorem}[Classification of finite fields]\label{thm:finite_fields}
\hfill
\begin{thmenum}
\thmitem{thm:finite_fields/characteristic} The \hyperref[def:ring_characteristic]{characteristic} of a \hyperref[def:field]{field} with \( q \) elements is a \hyperref[def:prime_number]{prime number} \( p \), and \( q \) is a power of \( p \).
The fields of prime cardinality are sometimes called \term{prime fields}.
\thmitem{thm:finite_fields/prime_field} For a prime number \( p \), the ring \hyperref[thm:ring_of_integers_modulo]{\( \BbbZ_p \)} of integers modulo \( p \) is a field.
\thmitem{thm:finite_fields/splitting} All \hyperref[def:field]{fields} with \( q \) elements are \hyperref[def:field/homomorphism]{isomorphic} as \hyperref[def:splitting_field]{splitting fields} for the polynomial
\begin{equation*}
X^q - X \in \BbbZ_p[X].
\end{equation*}
Utilizing the general conventions of identifying isomorphic objects in algebra, we denote by \( \BbbF_q \) \enquote{the} finite field with \( q \) elements. Finite fields are also called \term{Galois fields}.
Every member of \( \BbbF_q \) is a root of \( X^q - X \).
\end{thmenum}
\end{theorem}
\begin{proof}
\SubProofOf{thm:finite_fields/characteristic} Let \( \BbbK \) be a field with \( q \) elements and let \( p \) be the \hyperref[def:ring_characteristic]{characteristic} of \( \BbbK \). Then \( \BbbZ_p \) is a subring of \( \BbbK \). By \fullref{thm:multiplicative_group_of_integers_modulo}, \( \BbbZ_p \) is a field.
By \fullref{thm:quotient_by_maximal_ideal}, \( \braket{ p } \) is a maximal ideal in \( \BbbZ \), and, by \fullref{thm:def:semiring_ideal/maximal_is_prime}, \( p \) is a prime number.
Since \( \BbbZ_p \) is a subfield of \( \BbbK \), the field \( \BbbK \) is a vector space over \( \BbbZ_p \). Because \( \BbbK \) is finite, this vector space is finite-dimensional; let \( n \) be its dimension. A choice of basis identifies \( \BbbK \), as a \( \BbbZ_p \)-vector space, with \( \BbbZ_p^n \), and counting elements gives
\begin{equation*}
q = p^n.
\end{equation*}
\SubProofOf{thm:finite_fields/prime_field} Follows from \fullref{thm:multiplicative_group_of_integers_modulo}.
\SubProofOf{thm:finite_fields/splitting} Let \( \BbbK \) be a field with \( q \) elements with characteristic \( p \). We will show that every element of \( \BbbK \) is a root of \( X^q - X \in \BbbZ_p[X] \).
The multiplicative group of \( \BbbK \) has order \( q - 1 \). The order of a nonzero element \( a \in \BbbK \) divides \( q - 1 \) by \fullref{thm:def:group_order/divides}, hence \( a^{q - 1} = 1 \). We also have \( 0^q = 0 \). Therefore, every element \( a \) of \( \BbbK \) satisfies \( a^q = a \).
It follows that the \( q \) distinct elements of \( \BbbK \) are roots of \( X^q - X \), and since this polynomial is monic of degree \( q \), \fullref{thm:def:integral_domain/root_limit} gives
\begin{equation*}
X^q - X = \prod_{u \in \BbbK} (X - u).
\end{equation*}
Hence \( \BbbK \) is a splitting field for \( X^q - X \) over \( \BbbZ_p \), and the statement follows from the uniqueness part of \fullref{thm:splitting_field_existence}.
\end{proof}
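The smallest field of non-prime order already illustrates the theorem.
\begin{example}\label{ex:finite_field_with_four_elements}
The polynomial \( X^2 + X + 1 \) is irreducible over \( \BbbF_2 \), so the quotient
\begin{equation*}
\BbbF_4 \cong \BbbF_2[X] / \braket{ X^2 + X + 1 }
\end{equation*}
is a field with four elements and characteristic \( 2 \). Writing \( \omega \) for the coset of \( X \), its elements are \( 0 \), \( 1 \), \( \omega \) and \( \omega + 1 \), and each of them is a root of
\begin{equation*}
X^4 - X = X (X - 1) (X - \omega) (X - \omega - 1),
\end{equation*}
in accordance with \fullref{thm:finite_fields/splitting}.
\end{example}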
\begin{proposition}\label{thm:functions_over_prime_fields}
For every \hyperref[thm:finite_fields]{finite field} \( \BbbF_q \) and every \hyperref[def:polynomial_algebra]{polynomial ring} \( \BbbF_q[X_1, \ldots, X_n] \) in finitely many indeterminates, there exists an \( \BbbF_q \)-\hyperref[def:algebra_over_ring]{algebra} isomorphism
\begin{equation*}
\frac {\BbbF_q[X_1, \ldots, X_n]} {\braket{ X_i^q - X_i \given i = 1, \ldots, n }} \cong \fun(\BbbF_q^n, \BbbF_q),
\end{equation*}
where \( \fun(\BbbF_q^n, \BbbF_q) \) is the \hyperref[thm:functions_over_algebra]{\( \BbbF_q \)-algebra of all functions} from \( \BbbF_q^n \) to \( \BbbF_q \).
Furthermore, every coset of polynomials has a unique representative given by \fullref{thm:finite_field_lagrange_interpolation}.
\end{proposition}
\begin{proof}
Consider the \hyperref[thm:polynomial_semiring_universal_property]{functional evaluation homomorphism}
\begin{equation*}
\Phi: \BbbF_q[X_1, \ldots, X_n] \to \fun(\BbbF_q^n, \BbbF_q).
\end{equation*}
By \fullref{thm:finite_field_lagrange_interpolation}, \( \Phi \) is surjective. Then, by \fullref{thm:quotient_algebra_universal_property},
\begin{equation*}
\BbbF_q[X_1, \ldots, X_n] / \ker \Phi \cong \fun(\BbbF_q^n, \BbbF_q).
\end{equation*}
We will now prove that \( \ker \Phi \) equals
\begin{equation*}
I \coloneqq \braket{ X_i^q - X_i \given i = 1, \ldots, n }.
\end{equation*}
First, fix arbitrary elements \( u_1, \ldots, u_n \) of \( \BbbF_q \) and let \( e: \mscrX \to \BbbF_q \) be the variable assignment that assigns \( u_1, \ldots, u_n \) to the corresponding indeterminates. By \fullref{thm:finite_fields/splitting}, every member of \( \BbbF_q \) is a root of \( X_i^q - X_i \). Then, for any indeterminate \( X_i \),
\begin{equation*}
\Phi_e(X_i^q - X_i) = u_i^q - u_i = 0.
\end{equation*}
Hence, the polynomial function \( \Phi(X_i^q - X_i) \) is the zero constant function. It follows that any linear combination of the polynomials \( X_i^q - X_i \) for \( i = 1, \ldots, n \) is also the zero function. Therefore, \( I \subseteq \ker \Phi \).
We will prove the converse inclusion via induction on \( n \).
In the case of a single indeterminate \( X \), for every polynomial \( f(X) \in \ker \Phi \), every element of \( \BbbF_q \) is a root of \( f(X) \). Hence, \( X - u \) divides \( f(X) \) for every \( u \in \BbbF_q \), and since these linear factors are pairwise coprime, their product also divides \( f(X) \). We have
\begin{equation*}
\underbrace{\prod_{u \in \BbbF_q} (X - u)}_{\mathclap{ X^q - X \T*{by \fullref{thm:finite_fields/splitting}}}} \mid f(X),
\end{equation*}
and hence \( f(X) \in \braket{ X^q - X } \).
We have, up until now, shown that the entire proposition holds for the case of one indeterminate. Suppose that the proposition holds for \( n - 1 \) indeterminates and let \( f \in \BbbF_q[X_1, \ldots, X_n] \) be a nonconstant polynomial such that \( \Phi(f) \) is the zero function. Due to \fullref{thm:def:polynomial_algebra/iterated}, we can regard \( f \) as a univariate polynomial in \( X_n \) over \( \BbbF_q[X_1, \ldots, X_{n-1}] \). Thus,
\begin{equation*}
f(X_1, \ldots, X_n) = \sum_{k=0}^\infty \underbrace{\parens*{ \sum_\gamma a_{(k,\gamma)} X_1^{\gamma_1} X_2^{\gamma_2} \cdots X_{n-1}^{\gamma_{n-1}} }}_{g_k(X_1, \ldots, X_{n-1})} {X_n}^k,
\end{equation*}
where \( \gamma \) is a multi-index over the first \( n - 1 \) indeterminates.
As a polynomial in \( X_n \), \( f \) has \( m \coloneqq (n-1)p \) roots \( s_1, \ldots, s_m \), which are themselves polynomials from \( \BbbF_q[X_1, \ldots, X_{n-1}] \). For some \( c \), we have
\begin{equation*}
f(X_1, \ldots, X_n) = c(X_1, \ldots, X_{n-1}) \prod_{j=1}^m (X_n - s_j(X_1, \ldots, X_{n-1}))
\end{equation*}
and
\begin{equation*}
0 = \Phi(f) = \Phi(c) \cdot \prod_{j=1}^m \parens[\Big]{ \Phi(X_n) - \Phi(s_j) }.
\end{equation*}
Since \( \BbbF_q[X_1, \ldots, X_{n-1}] \) is \hyperref[def:entire_semiring]{entire}, we conclude that either \( \Phi(c) \) is the zero function or \( \Phi(X_n) = \Phi(s_j) \) for at least one index \( 1 \leq j \leq m \). The latter is impossible, because \( \Phi(X_n) \) is linearly independent from polynomials in the first \( n - 1 \) variables.
The inductive hypothesis holds for the polynomial \( c \), and \( \Phi(c) \) being the zero function implies
\begin{equation*}
c \in \braket{ X_i^q - X_i \given i = 1, \ldots, n - 1 } \subsetneq I.
\end{equation*}
Therefore, \( f \in I \), since \( c \) divides \( f \) and \( c \in I \). We have chosen \( f \) to be an arbitrary member of \( \ker \Phi \), which implies \( \ker \Phi \subseteq I \).
We have already shown that \( I \subseteq \ker \Phi \). We thus conclude that \( I = \ker \Phi \) and
\begin{equation*}
\BbbF_q[X_1, \ldots, X_n] / I \cong \fun(\BbbF_q^n, \BbbF_q).
\end{equation*}
\end{proof}
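The case of a single indeterminate over \( \BbbF_2 \) makes the isomorphism explicit.
\begin{example}\label{ex:functions_over_the_field_with_two_elements}
Let \( q = 2 \) and \( n = 1 \). Modulo the ideal \( \braket{ X^2 - X } \) we have \( X^2 \equiv X \), so every coset is represented by a polynomial of degree at most one. The four representatives \( 0 \), \( 1 \), \( X \) and \( X + 1 \) correspond precisely to the four functions from \( \BbbF_2 \) to \( \BbbF_2 \): the two constant functions, the identity, and the function interchanging \( 0 \) and \( 1 \).
\end{example}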
\begin{definition}\label{def:transcendental_element}
We say that the element \( a \in \BbbK \) of the field extension \( \BbbK \) of \( \Bbbk \) is \term{transcendental} over \( \Bbbk \) if it is \hyperref[def:algebraic_dependence]{algebraically independent} over \( \Bbbk \).
If \( a \) is not transcendental, we say that it is \term{algebraic}. If every element of \( \BbbK \) is algebraic over \( \Bbbk \), we say that \( \BbbK \) is an \term{algebraic extension} of \( \Bbbk \).
\end{definition}
\begin{proposition}\label{thm:field_is_algebraic_over_itself}
Every field is an \hyperref[def:transcendental_element]{algebraic extension} of itself.
\end{proposition}
\begin{proof}
Every element \( a \in \BbbK \) is a root of the polynomial \( X - a \).
\end{proof}
\begin{theorem}[Euler's constant is transcendental]\label{thm:eulers_constant_is_transcendental}
\hyperref[def:exponential_function]{Euler's constant} \( e \) is \hyperref[def:transcendental_element]{transcendental} over \( \BbbQ \).
\end{theorem}
\begin{theorem}[Pi is transcendental]\label{thm:pi_is_transcendental}\mcite[454]{Knapp2016BasicAlgebra}
The number \hyperref[def:pi]{\( \pi \)} is \hyperref[def:transcendental_element]{transcendental} over \( \BbbQ \).
\end{theorem}
\begin{example}\label{ex:polynomials_over_pi}
\Fullref{thm:pi_is_transcendental} implies that the polynomial ring \( \BbbQ[X] \) can be embedded into \( \BbbR \) via the evaluation map \( \Phi_\pi: \BbbQ[X] \to \BbbR \). We can identify a polynomial
\begin{equation*}
p(X) = \sum_{k=0}^n a_k X^k
\end{equation*}
with rational coefficients with the number
\begin{equation*}
p(\pi) = \sum_{k=0}^n a_k \pi^k.
\end{equation*}
\end{example}
\begin{definition}\label{def:finite_field_extension}
If \( \BbbK \) is a \hyperref[thm:vector_space_dimension]{finite-dimensional vector space} over \( \Bbbk \), we say that \( \BbbK \) is a \term{finite extension} of \( \Bbbk \).
\end{definition}
\begin{lemma}\label{thm:finite_field_extensions_are_algebraic}
Every \hyperref[def:finite_field_extension]{finite field extension} is \hyperref[def:transcendental_element]{algebraic}.
\end{lemma}
\begin{proof}
Let \( \BbbK \) be a finite field extension of \( \Bbbk \) and let \( u \in \BbbK \). Consider the evaluation map \( \Phi_u: \Bbbk[X] \to \Bbbk[u] \).
The polynomials \( X^k \) for \( k = 0, 1, 2, \ldots \) form a basis for \( \Bbbk[X] \). If \( \Phi_u \) were injective, the images \( \Phi_u(X^k) = u^k \) would be linearly independent over \( \Bbbk \), which is impossible because \( \BbbK \) has finite dimension over \( \Bbbk \).
Hence \( \Phi_u \) is not injective, and any nonzero polynomial in its kernel has \( u \) as a root. Therefore, \( u \) is algebraic over \( \Bbbk \).
\end{proof}
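A familiar instance of a finite, and hence algebraic, extension is the following.
\begin{example}\label{ex:complex_numbers_are_algebraic_over_the_reals}
The field \( \BbbC \) is a two-dimensional vector space over \( \BbbR \), so it is a finite extension, and \fullref{thm:finite_field_extensions_are_algebraic} implies that it is algebraic. Indeed, every complex number \( a + bi \) is a root of the real polynomial
\begin{equation*}
X^2 - 2 a X + (a^2 + b^2) \in \BbbR[X].
\end{equation*}
\end{example}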
\begin{definition}\label{def:algebraically_closed_field}\mcite[prop. 9.20]{Knapp2016BasicAlgebra}
We say that the field \( \BbbK \) is \term{algebraically closed} if any of the following equivalent conditions is satisfied:
\begin{thmenum}
\thmitem{def:algebraically_closed_field/trivial_algebraic_extensions} \( \BbbK \) has no nontrivial algebraic \hyperref[def:transcendental_element]{extensions}.
\thmitem{def:algebraically_closed_field/linear_irreducible_polynomials} Every irreducible polynomial in \( \BbbK[X] \) is linear.
\thmitem{def:algebraically_closed_field/at_least_one_root} Every nonconstant polynomial in \( \BbbK[X] \) has at least one root in \( \BbbK \).
\thmitem{def:algebraically_closed_field/factorization} Every polynomial in \( \BbbK[X] \) \hyperref[def:irreducible_factorization]{factors} into a product of linear polynomials.
\thmitem{def:algebraically_closed_field/exactly_n_roots} Every polynomial in \( \BbbK[X] \) of degree \( n \) has exactly \( n \) roots in \( \BbbK \), counting the root multiplicities.
\end{thmenum}
\end{definition}
\begin{proof}
\ImplicationSubProof{def:algebraically_closed_field/trivial_algebraic_extensions}{def:algebraically_closed_field/linear_irreducible_polynomials} Let \( p(X) \) be an irreducible polynomial in \( \BbbK[X] \).
Since \( \BbbK[X] \) is a unique factorization domain, it satisfies \fullref{def:unique_factorization_domain/primes_and_ideals}, and hence \( p(X) \) is a prime element. Thus, \( \braket {p(X)} \) is a \hyperref[def:semiring_ideal/prime]{prime ideal} in \( \BbbK[X] \).
Since \( \BbbK[X] \) is a principal ideal domain, by \fullref{thm:def:principal_ideal_domain/prime_ideal_is_maximal}, \( \braket{ p(X) } \) is also a maximal ideal. By \fullref{def:semiring_ideal/maximal/quotient}, the quotient \( Q \coloneqq \BbbK[X] / \braket{ p(X) } \) is a field. The cosets of \( 1, X, X^2, \ldots, X^{n-1} \) form a basis of \( Q \) over \( \BbbK \), where \( n \) is the degree of \( p(X) \).
By \fullref{thm:finite_field_extensions_are_algebraic}, \( Q \) is an algebraic extension of \( \BbbK \). Since \( \BbbK \) has no nontrivial algebraic extensions, it follows that \( \BbbK = Q \). Thus, \( Q \) has dimension \( 1 \), and we have already discussed that \( \dim Q = \deg p \). Therefore, \( p \) is a linear polynomial.
\ImplicationSubProof{def:algebraically_closed_field/linear_irreducible_polynomials}{def:algebraically_closed_field/at_least_one_root} Suppose that every irreducible polynomial is linear.
By \fullref{thm:def:unique_factorization_domain/polynomial_ring}, \( \BbbK[X] \) is a unique factorization domain, and thus there exist irreducible polynomials \( q_1(X), \ldots, q_n(X) \) and a unit \( a \) such that
\begin{equation*}
p(X) = a q_1(X) \cdots q_n(X).
\end{equation*}
By assumption, the irreducible polynomials are linear, and hence have roots. Therefore, \( p(X) \) has at least one root.
\ImplicationSubProof{def:algebraically_closed_field/at_least_one_root}{def:algebraically_closed_field/factorization} Suppose that \( u_1 \) is a root of \( p(X) \). Then \( p(X) \) is divisible by \( (X - u_1) \). Using induction on the degree of \( p(X) \), we can factor \( p(X) \) into
\begin{equation*}
p(X) = a (X - u_1) (X - u_2) \cdots (X - u_n),
\end{equation*}
where \( a \) is a unit of \( \BbbK \). This is the desired factorization.
\ImplicationSubProof{def:algebraically_closed_field/factorization}{def:algebraically_closed_field/exactly_n_roots} Follows from the equivalence in \fullref{def:polynomial_root} by induction on the polynomial degree. By \fullref{thm:def:integral_domain/root_limit}, the number of roots is bounded by \( n \).
\ImplicationSubProof{def:algebraically_closed_field/exactly_n_roots}{def:algebraically_closed_field/trivial_algebraic_extensions} Suppose that every nonconstant polynomial in \( \BbbK[X] \) of degree \( n \) has exactly \( n \) roots in \( \BbbK \), and let \( \BbbL \) be an algebraic extension of \( \BbbK \).
Let \( a \in \BbbL \). Since \( \BbbL \) is algebraic over \( \BbbK \), the element \( a \) is a root of some nonconstant polynomial \( f(X) \in \BbbK[X] \). By assumption, \( f(X) \) has \( \deg f \) roots in \( \BbbK \), counted with multiplicity, so it factors into linear polynomials over \( \BbbK \); by \fullref{thm:def:integral_domain/root_limit}, it therefore has no roots outside \( \BbbK \). Hence \( a \in \BbbK \), and \( \BbbL = \BbbK \).
\end{proof}
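The fundamental theorem of algebra supplies the standard example and non-example.
\begin{example}\label{ex:algebraically_closed_and_non_closed_fields}
By the fundamental theorem of algebra, the field \( \BbbC \) of complex numbers is algebraically closed. The field \( \BbbR \) is not, since the polynomial \( X^2 + 1 \) is irreducible over \( \BbbR \) but not linear. No finite field \( \BbbF_q \) is algebraically closed either, because the polynomial
\begin{equation*}
1 + \prod_{u \in \BbbF_q} (X - u)
\end{equation*}
has no root in \( \BbbF_q \).
\end{example}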
\begin{proposition}\label{thm:no_finite_extensions_of_closed_fields}
An \hyperref[def:algebraically_closed_field]{algebraically closed field} has no nontrivial finite extension fields.
\end{proposition}
\begin{proof}
Follows from \fullref{thm:finite_field_extensions_are_algebraic} applied to \fullref{def:algebraically_closed_field/trivial_algebraic_extensions}.
\end{proof}
\begin{theorem}[{Weak \term[en=zero locus theorem]{nullstellensatz}}]\label{thm:weak_nullstellensatz}
Let \( \BbbK \) be an \hyperref[def:algebraically_closed_field]{algebraically closed field} and let \( \BbbK[X_1, \ldots, X_n] \) be its \hyperref[def:polynomial_algebra]{polynomial ring} in \( n \) indeterminates.
The ideal \( M \) of \( \BbbK[X_1, \ldots, X_n] \) is \hyperref[def:semiring_ideal/maximal]{maximal} if and only if there exist elements \( u_1, \ldots, u_n \) of \( \BbbK \) such that
\begin{equation*}
M = \braket{ X_1 - u_1, \ldots, X_n - u_n }.
\end{equation*}
\end{theorem}
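In a single indeterminate the statement specializes to a familiar fact.
\begin{example}\label{ex:weak_nullstellensatz_in_one_indeterminate}
The maximal ideals of \( \BbbC[X] \) are exactly the ideals \( \braket{ X - u } \) for \( u \in \BbbC \). Indeed, \( \BbbC[X] / \braket{ X - u } \cong \BbbC \) is a field, so \( \braket{ X - u } \) is maximal; conversely, every maximal ideal of the principal ideal domain \( \BbbC[X] \) is generated by an irreducible polynomial, which is linear by \fullref{def:algebraically_closed_field/linear_irreducible_polynomials}.
\end{example}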
|
** Copyright (c) 1989, NVIDIA CORPORATION. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
* Alternate returns.
program p
parameter(n = 13)
integer rslts(n), expect(n)
external s1, s5, s8
data rslts / n * 0 /
do 10 i = 1, 3
call s1(*20)
rslts(1) = rslts(1) + i
goto 10
20 rslts(2) = rslts(2) + i
10 continue
do 30 i = 1, 6
call s2(*31, *99999, *31, *99999)
rslts(3) = rslts(3) + i
goto 30
31 rslts(4) = rslts(4) + i
goto 30
99999 rslts(5) = rslts(5) + i
30 continue
call s4(0, *40)
rslts(6) = 1
40 rslts(7) = 1
call s4(1, *41)
rslts(8) = 1
41 rslts(9) = 1
x = -1.0
50 x = x + 1.0
call s5(*50, x)
rslts(10) = int(x + .1)
call s6(-2.3, *50, *51)
rslts(11) = 1
51 rslts(12) = rslts(12) + 1
call s6(x+20, *50, *51)
call s7
call s8(*60)
rslts(13) = 1
60 call check(rslts, expect, n)
data expect / 4, 2, 7, 8, 6,
+ 1, 1, 0, 1, 1, 0, 1, 1 /
end
cccccccccccccccccccccccccccccccccccccccccccccccccccc
integer function if(i)
if = i
end
subroutine s1(*)
common /x/ i
data i /-1/
i = i + 1
return i
end
subroutine s2(*,*,*,*)
common /coms2/ i
data i /0/
i = i + 1
return 6 - i
end
subroutine s4(i, *)
if (i .eq. 1) return 1
j = if(1)
return
end
subroutine s5(*, x)
entry s6(x, *, *)
if (x .eq. 0) return if(1)
if (x .gt. 0) return
return 2
end
subroutine s7
entry s8(*)
if (if(1) .eq. 1) return
return 1
end
|
def Vec (α : Type u) (n : Nat) : Type u := { a : List α // a.length = n }
def Vec.nil : Vec α 0 := ⟨[], rfl⟩
def Vec.cons (a : α) (as : Vec α n) : Vec α (n+1) := ⟨a :: as.val, by simp [as.property]⟩
set_option pp.analyze false
def Vec.casesOn
(motive : (n : Nat) → Vec α n → Sort v)
(n : Nat)
(as : Vec α n)
(nil : motive 0 Vec.nil)
(cons : (n : Nat) → (a : α) → (as : Vec α n) → (ih : motive n as) → motive (n+1) (Vec.cons a as))
: motive n as :=
let rec go (n : Nat) (as : List α) (h : as.length = n) : motive n ⟨as, h⟩ :=
match n, as, h with
| 0, [], _ => nil
| n+1, a::as, h =>
have : as.length = n := by injection h; assumption
have ih : motive n ⟨as, this⟩ := go n as this
cons n a ⟨as, this⟩ ih
match as with
| ⟨as, h⟩ => go n as h
example (n : Nat) (a : α) (as : Vec α n) : Vec.cons a (Vec.cons a as) = Vec.cons a (Vec.cons a as) := by
induction n+2, Vec.cons a (Vec.cons a as) using Vec.casesOn
case nil => constructor
case cons n a as ih =>
trace_state
constructor
#print "-----"
example (n : Nat) (a : α) (as : Vec α n) : Vec.cons a (Vec.cons a as) = Vec.cons a (Vec.cons a as) := by
cases n+2, Vec.cons a (Vec.cons a as) using Vec.casesOn
case nil => constructor
case cons n a as ih =>
trace_state
constructor
#print "-----"
example (n : Nat) (a : α) (as : Vec α n) : Vec.cons a (Vec.cons a as) = Vec.cons a (Vec.cons a as) := by
cases h₁ : n+2, h₂ : Vec.cons a (Vec.cons a as) using Vec.casesOn
case nil => constructor
case cons n' a' as' ih =>
trace_state
constructor
|
Program abdulmalik
IMPLICIT NONE
INTEGER::n
REAL::w(20),k,findmaxodd
print*,"enter n"
read*,n
call READ_REAL_VECROR(w,20,n)
K = findmaxodd(w,n,20)
print*,k
END Program abdulmalik
SUBROUTINE READ_REAL_VECROR(w,s,n)
IMPLICIT NONE
INTEGER,INTENT(IN)::n,s
REAL,INTENT(out)::w(s)
INTEGER::i
do i=1,n
print*,"enter w(i)"
read*,w(i)
enddo
END SUBROUTINE READ_REAL_VECROR
! findmaxodd returns the index (as a real) of the largest odd value in w(1:n),
! or -1.0 when no odd value is present.
real FUNCTION findmaxodd(w,n,s)
IMPLICIT NONE
INTEGER,INTENT(IN)::n,s
REAL,INTENT(in)::w(s)
INTEGER::i,a
REAL::y,max=-1
do i=1,n
y=w(i)
if( Mod(y,2.) == 1)then
if(w(i)>max)then
max=w(i)
a=i
endif
endif
enddo
if(max/=-1)then
max=a
endif
findmaxodd=max
end FUNCTION findmaxodd |
State Before: L : Language
L' : Language
⊢ card Language.empty = 0 State After: no goals Tactic: simp [card_eq_card_functions_add_card_relations] |
lemma pCons_eq_iff [simp]: "pCons a p = pCons b q \<longleftrightarrow> a = b \<and> p = q" |
### A Pluto.jl notebook ###
# v0.12.21
using Markdown
using InteractiveUtils
# ╔═╡ 22f805e8-7a7b-11eb-05e8-3dd4205997e1
begin
using Test
using LinearAlgebra
end
# ╔═╡ edb0b6e8-7a7a-11eb-0a88-990e1165fc1c
md"""
## \#2
"""
# ╔═╡ 357c1f92-7a7b-11eb-22bf-cd420daded2c
begin
A = [
1 -1 1 -1
0 1 2 3
0 0 2 6
0 0 0 6
]
inv(A) * [
0
-1//2
-1
-1
]
end
# ╔═╡ d1f578ee-7a82-11eb-1375-c314d8672dd4
md"""
## \#3
"""
# ╔═╡ d5880030-7a82-11eb-3b88-a90d14844ed0
begin
A₂ = [
1 1 1 1
-1 1 -1 1
3 2 1 0
3 -2 1 0
]
inv(A₂) * [
-1//6
0
-1//2
0
]
end
# ╔═╡ Cell order:
# ╠═22f805e8-7a7b-11eb-05e8-3dd4205997e1
# ╠═edb0b6e8-7a7a-11eb-0a88-990e1165fc1c
# ╠═357c1f92-7a7b-11eb-22bf-cd420daded2c
# ╠═d1f578ee-7a82-11eb-1375-c314d8672dd4
# ╠═d5880030-7a82-11eb-3b88-a90d14844ed0
|
program demo_iachar
implicit none
write(*,*)iachar(['a','z','A','Z'])
! create function to convert uppercase letters to lowercase
write(*,'(a)')lower('abcdefg ABCDEFG')
contains
!
elemental pure function lower(str) result (string)
! Changes a string to lowercase
character(*), intent(In) :: str
character(len(str)) :: string
integer :: i
string = str
! step thru each letter in the string in specified range
do i = 1, len(str)
select case (str(i:i))
case ('A':'Z') ! change letter to minuscule
string(i:i) = char(iachar(str(i:i))+32)
case default
end select
end do
end function lower
!
end program demo_iachar
|
import Aesop
import Mathlib.Data.Nat.Basic
import Mathlib.Data.Nat.Interval
import Mathlib.Data.Set.Basic
import Mathlib.Data.Fintype.Card
import Mathlib.Tactic.LibrarySearch
import Mathlib.Tactic.Ring
/-!
# International Mathematical Olympiad 1987, Problem 4
Prove that there is no function f : ℕ → ℕ such that f(f(n)) = n + 1987
for every n.
-/
namespace Imo1987Q4
lemma subset_finite {A B : Set ℕ} (h : A ⊆ B) (hab : Finite ↑B) : Finite ↑A := by
rw[Set.finite_coe_iff]
rw[Set.finite_coe_iff] at hab
exact Set.Finite.subset hab h
lemma subset_fintype {A B : Set ℕ} (h : A ⊆ B) (hab : Fintype ↑B) : Fintype ↑A := by
exact @Fintype.ofFinite A (subset_finite h (Finite.of_fintype ↑B))
/--
More general version of the problem.
-/
theorem imo1987_q4_generalized (m : ℕ) :
(¬∃ f : ℕ → ℕ, ∀ n, f (f n) = n + (2 * m + 1)) := by
-- Informal solution by Sawa Pavlov, listed at
-- https://artofproblemsolving.com/wiki/index.php/1987_IMO_Problems/Problem_4
intro hf
obtain ⟨f, hf⟩ := hf
-- Note that f is injective, because if f(n) = f(m),
-- then f(f(n)) = f(f(m)), so m = n.
have f_injective : f.Injective := by
intros n m hnm; have hfn := hf n; simp_all only [add_left_inj]
-- Let A := ℕ - f(ℕ) and B := f(A).
let NN : Set ℕ := Set.univ
let A : Set ℕ := NN \ (f '' NN)
let B : Set ℕ := f '' A
have hid := Set.image_diff f_injective NN (f '' NN)
rw[show f '' (NN \ f '' NN) = B by rfl] at hid
-- A and B are disjoint and have union ℕ - f(f(ℕ)).
have ab_disjoint : Disjoint A B := by
intros _C hca hcb c hc
have hcca := hca hc
have hccb := hcb hc
aesop
have ab_union : A ∪ B = NN \ (f '' (f '' NN)) := by
rw[hid]
apply Set.eq_of_subset_of_subset
· intros x hx
cases hx <;> aesop
· intros x hx
obtain ⟨_hx, hx'⟩ := hx
cases Classical.em (x ∈ A) <;> aesop
-- ... which is {0, 1, ... , 2 * m}.
have ab_range : A ∪ B = {n | n < 2*m + 1} := by
apply Set.eq_of_subset_of_subset
· rw[ab_union]
intros x hx
simp at hx
simp
by_contra H
push_neg at H
have hz: ∃ z, x = (2 * m + 1) + z := exists_add_of_le H
obtain ⟨z, hz⟩ := hz
rw[hz] at hx
have hzz := hx z
rw[hf z] at hzz
rw[add_comm] at hzz
exact (hzz rfl).elim
· rw[ab_union]
intros x hx
aesop
-- But since f is injective they have the
-- same number of elements, which is impossible since {0, 1, ... , 2 * m}
-- has an odd number of elements.
have ab_fintype : Fintype ↑(A ∪ B) := by rw[ab_range]; exact inferInstance
have h2 : Fintype.card ↑(A ∪ B) = 2 * m + 1 := by
have hc := @Fintype.card_congr' ↑(A ∪ B)
{x | x < 2 * m + 1} _ _ (by rw[ab_range])
simp only [hc, Fintype.card_ofFinset, Finset.card_range]
have a_fintype := subset_fintype (Set.subset_union_left A B) ab_fintype
have b_fintype := subset_fintype (Set.subset_union_right A B) ab_fintype
have h3 := @Set.toFinset_union ℕ A B _ a_fintype b_fintype ab_fintype
rw[←@Set.toFinset_card _ (A ∪ B) ab_fintype] at h2
rw[h3] at h2; clear h3
have ab_disjoint' :=
(@Set.disjoint_toFinset _ _ _ a_fintype b_fintype).mpr ab_disjoint
rw[Finset.card_disjoint_union ab_disjoint'] at h2
rw[Set.toFinset_card, Set.toFinset_card] at h2
rw[Set.card_image_of_injective A f_injective] at h2
ring_nf at h2
have h4 := congrFun (congrArg HMod.hMod (f_injective (congrArg f h2))) 2
norm_num at h4
theorem imo1987_q4 : (¬∃ f : ℕ → ℕ, ∀ n, f (f n) = n + 1987) := by
rw[show 1987 = (2 * 993 + 1) by norm_num]
exact imo1987_q4_generalized 993
|
theory Pls_assoc_enat
imports Main "~~/src/HOL/Library/BNF_Corec" "$HIPSTER_HOME/IsaHipster"
begin
setup Tactic_Data.set_coinduct_sledgehammer
codatatype (sset: 'a) Stream =
SCons (shd: 'a) (stl: "'a Stream")
codatatype ENat = is_zero: EZ | ESuc (epred: ENat)
primcorec eplus :: "ENat \<Rightarrow> ENat \<Rightarrow> ENat" where
"eplus m n = (if is_zero m then n else ESuc (eplus (epred m) n))"
primcorec pls :: "ENat Stream \<Rightarrow> ENat Stream \<Rightarrow> ENat Stream" where
"pls s t = SCons (eplus (shd s) (shd t)) (pls (stl s) (stl t))"
datatype 'a Lst =
Emp
| Cons "'a" "'a Lst"
fun obsStream :: "int \<Rightarrow> 'a Stream \<Rightarrow> 'a Lst" where
"obsStream n s = (if (n \<le> 0) then Emp else Cons (shd s) (obsStream (n - 1) (stl s)))"
(*hipster_obs Stream Lst obsStream pls*)
lemma lemma_a [thy_expl]: "eplus x EZ = x"
apply (coinduction arbitrary: x rule: Pls_assoc_enat.ENat.coinduct_strong)
by simp
lemma lemma_aa [thy_expl]: "eplus EZ x = x"
apply (coinduction arbitrary: x rule: Pls_assoc_enat.ENat.coinduct_strong)
by simp
lemma lemma_ab [thy_expl]: "eplus (ESuc x) y = eplus x (ESuc y)"
apply (coinduction arbitrary: x y rule: Pls_assoc_enat.ENat.coinduct_strong)
apply simp
by (metis ENat.collapse(2) eplus.code)
lemma lemma_ac [thy_expl]: "ESuc (eplus x y) = eplus x (ESuc y)"
apply (coinduction arbitrary: x y rule: Pls_assoc_enat.ENat.coinduct_strong)
apply simp
by (metis eplus.code)
lemma lemma_ad [thy_expl]: "eplus (eplus x y) z = eplus x (eplus y z)"
apply (coinduction arbitrary: x y z rule: Pls_assoc_enat.ENat.coinduct_strong)
apply simp
by auto
lemma lemma_ae [thy_expl]: "eplus y x = eplus x y"
apply (coinduction arbitrary: x y rule: Pls_assoc_enat.ENat.coinduct_strong)
apply simp
by (metis ENat.collapse(1) ENat.collapse(2) lemma_a lemma_ab)
theorem pls_assoc: "pls (pls s t) u = pls s (pls t u)"
by hipster_coinduct_sledgehammer
(*by hipster_coinduct_sledgehammer
Failed to apply initial proof method*)
end |
import tactic
import tactic.induction
import .base .game
noncomputable theory
open_locale classical
lemma induct_s_at {P : State → Prop} {pw n : ℕ} {g : Game pw}
(h₁ : P g.s)
(h₂ : ∀ {s : State} {ma},
P s → A_move_valid pw s.board ma → P (apply_A_move s ma))
(h₃ : ∀ {s : State} {md}, P s → D_move_valid s.board md → P (apply_D_move s md))
(hf : ∀ {s : State}, P s → P s.finish) :
P (g.play n).s :=
begin
induction n with n ih,
{ assumption },
{ rw [play_at_succ'], let g₁ := _, change g.play n with g₁ at ih ⊢,
rw Game.play_move, split_ifs with h₄, swap, { assumption },
let a := g₁.a, let d := g₁.d,
have h₅ : ∃ (s' : State), play_D_move_at g₁ h₄ = init_game a d s' ∧ P s',
{ let s' := apply_D_move g₁.s (d.f g₁.s h₄).m, use s',
exact ⟨rfl, h₃ ih (d.f _ _).h⟩ },
rcases h₅ with ⟨s', h₅, h₆⟩, rw h₅, clear h₅,
rw play_A_move_at, split_ifs with h₅,
{ exact h₂ h₆ (a.f s' _ _).h },
{ exact hf h₆ }},
end
lemma induct_s {P : State → Prop} {pw : ℕ} {a : A pw} {d : D}
(h₁ : P state₀)
(h₂ : ∀ {s : State} {ma},
P s → A_move_valid pw s.board ma → P (apply_A_move s ma))
(h₃ : ∀ {s : State} {md}, P s → D_move_valid s.board md → P (apply_D_move s md))
(hf : ∀ {s : State}, P s → P s.finish) :
all_s a d P :=
by { intro n, apply induct_s_at; assumption }
lemma induct_b {P : Board → Prop} {pw : ℕ} {a : A pw} {d : D}
(h₁ : P board₀)
(h₂ : ∀ {b : Board} {ma}, P b → A_move_valid pw b ma → P (apply_A_move_b b ma))
(h₃ : ∀ {b : Board} {md}, P b → D_move_valid b md → P (apply_D_move_b b md)) :
all_b a d P :=
begin
apply induct_s,
{ exact h₁ },
{ rintro s ma h₄ h₅, exact h₂ h₄ h₅ },
{ rintro s ma h₄ h₅, exact h₃ h₄ h₅ },
{ intro s, exact id },
end
lemma simulate_add {pw : ℕ} {a : A pw} {d : D} {n₁ n₂ : ℕ} :
simulate a d (n₁ + n₂) = (simulate a d n₁).play n₂ :=
by { rw add_comm, apply function.iterate_add_apply }
lemma not_play_act_of_not_act {pw n : ℕ} {g : Game pw}
(h : ¬g.act) : ¬(g.play n).act :=
by { apply @induct_s_at (λ s, ¬s.act); intros; assumption <|> exact not_false }
lemma act_of_act_play {pw n : ℕ} {g : Game pw}
(h : (g.play n).act) : g.act :=
by { contrapose h, exact not_play_act_of_not_act h }
lemma play_eq_iff_states_eq {pw n : ℕ} {g : Game pw}
(h : (g.play n).s = g.s) : g.play n = g :=
begin
ext,
{ exact play_at_players_eq.1 },
{ exact play_at_players_eq.2 },
{ exact h },
end
lemma play_eq_of_not_act' {pw n : ℕ} {g : Game pw}
(h : ¬g.act) : g.play n = g :=
begin
rw play_eq_iff_states_eq, induction n with n ih,
{ refl },
{ rw play_at_succ',
let g₁ : Game pw := _, change g.play n with g₁ at ih ⊢, have h₁ : ¬g₁.act,
{ change ¬g₁.s.act, rwa ih },
rwa play_move_at_not_act h₁ },
end
lemma simulate_eq_of_not_act {pw n₁ n₂ : ℕ} {a : A pw} {d : D}
(h₁ : ¬(simulate a d n₁).act)
(h₂ : ¬(simulate a d n₂).act) :
simulate a d n₁ = simulate a d n₂ :=
begin
wlog h₃ : n₂ ≤ n₁,
obtain ⟨k, rfl⟩ := nat.exists_eq_add_of_le h₃,
rw simulate_add at h₁ ⊢,
let g : Game pw := _, change simulate a d n₂ with g at h₁ h₂ ⊢,
exact play_eq_of_not_act' h₂,
end
lemma play_move_len_le {pw : ℕ} {g : Game pw} :
g.play_move.s.len ≤ g.s.len + 2 :=
begin
rw Game.play_move, split_ifs,
{ have h₁ : (play_D_move_at g h).s.len = g.s.len + 1,
{ rw hist_len_play_D_move_at },
rw play_A_move_at, split_ifs with h₂,
{ rw [hist_len_play_A_move_at', h₁] },
{ change (play_D_move_at g h).s.finish.len ≤ _, rw [hist_len_finish, h₁],
apply add_le_add_left, dec_trivial }},
{ exact le_add_right (le_refl _) },
end
lemma play_len_le {pw n : ℕ} {g : Game pw} :
(g.play n).s.len ≤ g.s.len + n * 2 :=
begin
induction n with n ih,
{ refl },
{ rw play_at_succ', let g₁ : Game pw := _, change g.play n with g₁ at ih ⊢,
transitivity, exact play_move_len_le, rw [nat.succ_mul, ←add_assoc],
apply add_le_add_right ih },
end
lemma simulate_len_le {pw n : ℕ} {a : A pw} {d : D} :
(simulate a d n).s.len ≤ n * 2 :=
begin
change ((simulate a d 0).play n).s.len ≤ n * 2 + (simulate a d 0).s.len,
rw add_comm (n * 2), exact play_len_le,
end
lemma exi_A_wins_of_invariant {P : State → Prop} {pw : ℕ} {d : D} {s₀ : State}
(h₀ : P s₀)
(hP : ∀ (s : State), P s → s.act)
(hm : ∀ (s s' : State) hs, P s → s' = apply_D_move s (d.f s hs).m →
∃ (ma : Valid_A_move pw s'.board), P (apply_A_move s' ma.m)) :
∃ (a : A pw), (init_game a d s₀).A_wins :=
begin
let a : A pw,
{ refine ⟨λ s' hs' hvm, _⟩, refine (_ : ∃ (ma : Valid_A_move pw s'.board),
∀ (s : State) hs, P s → s' = apply_D_move s (d.f s hs).m →
P (apply_A_move s' ma.m)).some,
by_cases h₁ : ∃ (s : State) hs, P s ∧ s' = apply_D_move s (d.f s hs).m,
{ rcases h₁ with ⟨s, hs, h₁, h₂⟩, specialize hm s s' hs h₁ h₂,
cases hm with ma hm, use ma, intros, assumption },
{ refine ⟨⟨_, hvm.some_spec⟩, _⟩, rintro s₁ hs₁ h₂ h₃, push_neg at h₁,
specialize h₁ s₁ hs₁, push_neg at h₁, specialize h₁ h₂, contradiction }},
use a, rintro n, apply hP, induction n with n ih,
{ assumption },
{ rw play_at_succ', let g : Game pw := _,
change (init_game a d s₀).play n with g at ih ⊢,
have hs := hP _ ih, rw play_move_at_act hs, let s := g.s,
let s' := apply_D_move s (d.f s hs).m,
have h₁ : play_D_move_at g hs = init_game a d s',
{ ext,
{ exact play_at_players_eq.1 },
{ exact play_at_players_eq.2 },
{ change apply_D_move _ _ = apply_D_move _ _, congr,
exact play_at_players_eq.2 }},
rw h₁, clear h₁, have hvm : A_has_valid_move pw s'.board,
{ specialize hm s s' hs ih rfl, cases hm with ma hma, exact ⟨_, ma.h⟩ },
rw [play_A_move_at, dif_pos], swap, { split; assumption },
change P (apply_A_move s' (a.f s' hs hvm).m),
generalize hma : a.f s' hs hvm = ma,
change Exists.some _ = _ at hma, generalize_proofs h₁ at hma,
have h₂ := h₁.some_spec s hs ih rfl, subst hma, assumption },
end
lemma play_eq_of_not_act {pw n k : ℕ} {a : A pw} {d : D} {s : State}
(h₁ : ¬((init_game a d s).play n).act)
(h₂ : ¬((init_game a d s).play k).act) :
(init_game a d s).play n = (init_game a d s).play k :=
begin
wlog h₃ : k ≤ n, obtain ⟨n, rfl⟩ := nat.exists_eq_add_of_le h₃,
rw play_add, exact play_eq_of_not_act' h₂,
end
lemma not_act_of_descend (f : State → ℕ) (P : State → Prop)
{pw n : ℕ} {a : A pw} {d : D} {s₀ : State}
(hp₀ : f s₀ < n)
(hp₁ : P s₀)
(hp₂ : ∀ (s s' : State) hs hs' hvm, P s → s' = apply_D_move s (d.f s hs).m →
P (apply_A_move s' (a.f s' hs' hvm).m))
(hp₃ : ∀ (s s' : State) hs hs' hvm, P s → s' = apply_D_move s (d.f s hs).m →
f (apply_A_move s' (a.f s' hs' hvm).m) < f s) :
¬((init_game a d s₀).play n).act :=
begin
have h₀ : ∀ (n : ℕ) (s : State), s = ((init_game a d s₀).play n).s →
s.act → P s,
{ clear' hp₀ n, rintro n _ rfl hs₁,
rw ←Game.act at hs₁, induction n with n ih,
{ exact hp₁ },
{ rw play_at_succ' at hs₁ ⊢, let g : Game pw := _,
change (init_game a d s₀).play n with g at ih hs₁ ⊢,
obtain ⟨s', hs, hs', hvm, h₁, h₂⟩ :=
play_move_state_eq_of_act_play_move hs₁, rw h₂,
have ha : g.a = a := play_at_players_eq.1, rw ha at *, clear ha,
have hd : g.d = d := play_at_players_eq.2, rw hd at *, clear hd,
apply hp₂,
{ exact ih hs },
{ exact h₁ }}},
suffices h : ∀ (n : ℕ) (s : State), s = ((init_game a d s₀).play n).s →
s.act → f s + n ≤ f s₀,
{ apply mt (h n _ rfl), push_neg, apply nat.lt_add_left, exact hp₀ },
clear' n hp₀, rintro n _ rfl hs₁, induction n with n ih,
{ refl },
{ rw play_at_succ' at hs₁ ⊢, let g : Game pw := _,
change (init_game a d s₀).play n with g at ih hs₁ ⊢,
obtain ⟨s', hs, hs', hvm, h₁, h₂⟩ :=
play_move_state_eq_of_act_play_move hs₁, rw h₂,
have ha : g.a = a := play_at_players_eq.1, rw ha at *, clear ha,
have hd : g.d = d := play_at_players_eq.2, rw hd at *, clear hd,
specialize ih hs,
have h : f (apply_A_move s' (a.f s' hs' hvm).m) < f g.s,
{ apply hp₃,
{ apply h₀,
{ refl },
{ exact hs } },
{ exact h₁ }},
replace h : f (apply_A_move s' (a.f s' hs' hvm).m) + n < f g.s + n,
{ exact add_lt_add_right h n },
rw ←nat.succ_le_iff at h, rw nat.add_succ, exact h.trans ih },
end
lemma not_act_of_descend_single_moves (f : State → ℕ) (P : State → Prop)
{pw n : ℕ} {a : A pw} {d : D} {s₀ : State}
(hp₀ : f s₀ < n)
(hp₁ : P s₀)
(hp₂ : ∀ (s : State) hs hvm, P s → P (apply_A_move s (a.f s hs hvm).m))
(hp₃ : ∀ (s : State) hs, P s → P (apply_D_move s (d.f s hs).m))
(hp₄ : ∀ (s : State) hs hvm, P s → f (apply_A_move s (a.f s hs hvm).m) ≤ f s)
(hp₅ : ∀ (s : State) hs, P s → f (apply_D_move s (d.f s hs).m) < f s) :
¬((init_game a d s₀).play n).act :=
begin
apply not_act_of_descend f P hp₀ hp₁,
{ rintro s s' hs hs' hvm h₁ rfl, apply hp₂, apply hp₃, exact h₁ },
{ rintro s s' hs hs' hvm h₁ h₂, let s₁ : State := _, change f s₁ < _,
have h₃ : f s' < f s,
{ subst s', apply hp₅, exact h₁ },
have h₄ : f s₁ ≤ f s',
{ subst s', apply hp₄, apply hp₃, exact h₁ },
exact gt_of_gt_of_ge h₃ h₄ },
end
lemma not_act_of_descend_play_move' (f : State → ℕ) (P : State → Prop)
{pw n : ℕ} {a : A pw} {d : D} {s₀ : State}
(hp₀ : f s₀ < n)
(hp₁ : P s₀)
(hp₂ : ∀ (g : Game pw), g.play_move.act → P g.s → P g.play_move.s)
(hp₃ : ∀ (g : Game pw), g.play_move.act → P g.s → f g.play_move.s < f g.s) :
¬((init_game a d s₀).play n).act :=
begin
have h : ∀ (Q : State → State → Prop),
(∀ (g : Game pw), g.play_move.act → P g.s → Q g.s g.play_move.s) →
∀ (s s' : State) hs hs' hvm, P s → s' = apply_D_move s (d.f s hs).m →
Q s (apply_A_move s' (a.f s' hs' hvm).m),
{ rintro Q hQ, rintro s s' hs hs' hvm h₁ h₂,
have hs₁ : (init_game a d s).play_move.act,
{ subst s', exact act_play_move_of_A_hvm hvm },
specialize hQ (init_game a d s) hs₁ h₁,
obtain ⟨s', hs, hs', hvm, h₃, h₄⟩ := play_move_state_eq_of_act_play_move hs₁,
rw h₄ at hQ, subst_vars, exact hQ },
apply not_act_of_descend f P hp₀ hp₁,
{ exact h (λ (s₁ s₂ : State), P s₂) hp₂ },
{ exact h (λ (s₁ s₂ : State), f s₂ < f s₁) hp₃ },
end
lemma not_act_of_descend_play_move (f : State → ℕ) (P : State → Prop)
{pw n : ℕ} {a : A pw} {d : D} {s₀ : State}
(hp₀ : f s₀ < n)
(hp₁ : P s₀)
(hp₂ : ∀ (s : State), (init_game a d s).play_move.act → P s →
P (init_game a d s).play_move.s)
(hp₃ : ∀ (s : State), (init_game a d s).play_move.act → P s →
f (init_game a d s).play_move.s < f s) :
¬((init_game a d s₀).play n).act :=
begin
have h : ∀ (Q : State → State → Prop),
(∀ (s : State), (init_game a d s).play_move.act → P s →
Q s (init_game a d s).play_move.s) →
∀ (s s' : State) hs hs' hvm, P s → s' = apply_D_move s (d.f s hs).m →
Q s (apply_A_move s' (a.f s' hs' hvm).m),
{ rintro Q hQ, rintro s s' hs hs' hvm h₁ h₂,
have hs₁ : (init_game a d s).play_move.act,
{ subst s', exact act_play_move_of_A_hvm hvm },
specialize hQ s hs₁ h₁,
obtain ⟨s', hs, hs', hvm, h₃, h₄⟩ := play_move_state_eq_of_act_play_move hs₁,
rw h₄ at hQ, subst_vars, exact hQ },
apply not_act_of_descend f P hp₀ hp₁,
{ exact h (λ (s₁ s₂ : State), P s₂) hp₂ },
{ exact h (λ (s₁ s₂ : State), f s₂ < f s₁) hp₃ },
end
lemma not_act_of_descend_play_move_valid (f : State → ℕ) (P : State → Prop)
{pw n : ℕ} {a : A pw} {d : D} {s₀ : State}
(h₀ : valid_state pw s₀)
(hp₀ : f s₀ < n)
(hp₁ : P s₀)
(hp₂ : ∀ (s : State), valid_state pw s →
(init_game a d s).play_move.act → P s →
P (init_game a d s).play_move.s)
(hp₃ : ∀ (s : State), valid_state pw s →
(init_game a d s).play_move.act → P s →
f (init_game a d s).play_move.s < f s) :
¬((init_game a d s₀).play n).act :=
begin
have h : ∀ (Q : State → State → Prop),
(∀ (s : State), valid_state pw s →
(init_game a d s).play_move.act → P s →
Q s (init_game a d s).play_move.s) →
∀ (s s' : State) hs hs' hvm, valid_state pw s →
P s → s' = apply_D_move s (d.f s hs).m →
Q s (apply_A_move s' (a.f s' hs' hvm).m),
{ rintro Q hQ, rintro s s' hs hs' hvm hh₀ h₁ h₂,
have hs₁ : (init_game a d s).play_move.act,
{ subst s', exact act_play_move_of_A_hvm hvm },
specialize hQ s hh₀ hs₁ h₁,
obtain ⟨s', hs, hs', hvm, h₃, h₄⟩ := play_move_state_eq_of_act_play_move hs₁,
rw h₄ at hQ, subst_vars, exact hQ },
apply not_act_of_descend f (λ s, valid_state pw s ∧ P s) hp₀ ⟨h₀, hp₁⟩,
{
convert h (λ (s₁ s₂ : State), P s₂) _,
{
ext,
split; intro h₁,
{
rintro s s' hs hs' hvm hh hp hr,
exact (h₁ s s' hs hs' hvm ⟨hh, hp⟩ hr).2,
},
{
rintro s s' hs hs' hvm hh hp,
fsplit,
{
-- apply valid_state_apply_A_move,
sorry
},
{
exact h₁ s s' hs hs' hvm hh.1 hh.2 hp,
},
},
},
sorry
},
{
-- apply h (λ (s₁ s₂ : State), f s₂ < f s₁),
sorry
},
end
lemma A_mem_squares_of_valid_state {pw : ℕ} {s : State}
(h : valid_state pw s) :
s.board.A ∈ s.board.squares :=
begin
rcases h with ⟨a, d, n, rfl⟩,
apply @induct_s (λ (s : State), s.board.A ∈ s.board.squares),
{ triv },
{ rintro s p h₁ h, exact h.2.2 },
{ rintro s ⟨- | p⟩ h₁ h,
{ exact h₁ },
{ simp_rw [apply_D_move, apply_D_move_b, apply_move, set.mem_diff],
exact ⟨h₁, h.1.symm⟩ }},
{ rintro s h, exact h },
end |
module SchemeParser.Parser where
import Numeric (readInt, readFloat, readHex, readOct)
import Data.Ratio
import Data.Complex
import Data.Char (digitToInt)
import Data.Maybe (listToMaybe, fromJust)
import qualified Data.Map as M
import Data.Functor (($>))
import Text.Parsec hiding (spaces)
import Text.Parsec.Prim
import SchemeParser.Types
-- parser helpers
symbol :: LispParser Char
symbol = oneOf "!$%&|*+-/:<=>?@^_~"
spaces :: LispParser ()
spaces = skipMany1 space
escapedChars :: LispParser Char
escapedChars = do
char '\\'
c <- oneOf "\"nrt\\"
return $ case c of
'n' -> '\n'
'r' -> '\r'
't' -> '\t'
_ -> c
readBin :: Integral a => String -> Maybe a
readBin = fmap fst . listToMaybe . readInt 2 (`elem` "01") digitToInt
--
-- Lisp value parsers
--
-- primitives
parseAtom :: LispParser LispVal
parseAtom = do
first <- letter <|> symbol
rest <- many (letter <|> digit <|> symbol)
let atom = first : rest
return $ LAtom atom
parseString :: LispParser LispVal
parseString = do
char '"'
x <- many $ escapedChars <|> noneOf "\""
char '"'
return $ LString x
parseChar :: LispParser LispVal
parseChar = do
try (string "#\\")
c <- try (string "newline" <|> string "space")
<|> do { x <- anyChar; notFollowedBy alphaNum; return [x]}
return $ LChar $ case c of
"newline" -> '\n'
"space" -> ' '
_ -> head c
parseBool :: LispParser LispVal
parseBool =
try (string "#t" $> LBool True) <|> try (string "#f" $> LBool False)
-- http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-9.html#%_sec_6.2.4
parseNumberRadix :: LispParser LispVal
parseNumberRadix =
parseBin <|> parseOct <|> parseDec <|> parseHex
where
parseBin = do
try (string "#b")
LNumber . fromJust . readBin <$> many1 (oneOf "01")
parseOct = do
try (string "#o")
LNumber . fst . (!! 0) . readOct <$> many1 octDigit
parseDec = do
try (string "#d")
parseNumber
parseHex = do
try (string "#x")
LNumber . fst . (!! 0) . readHex <$> many1 hexDigit
parseNumber :: LispParser LispVal
parseNumber = parseNumberRadix <|> LNumber . read <$> many1 digit
parseFloat :: LispParser LispVal
parseFloat = do
l <- many1 digit
char '.'
r <- many1 digit
return $ LFloat $ fst $ head $ readFloat (l ++ "." ++ r)
parseRatio :: LispParser LispVal
parseRatio = do
n <- many1 digit
char '/'
d <- many1 digit
return $ LRatio (read n % read d)
parseComplex :: LispParser LispVal
parseComplex = do
r <- try parseFloat <|> parseNumber
char '+'
i <- try parseFloat <|> parseNumber
char 'i'
return $ LComplex (toDouble r :+ toDouble i)
-- lists
parseList :: LispParser LispVal
parseList = LList <$> sepBy parseExpr spaces
parseDottedList :: LispParser LispVal
parseDottedList = do
hd <- endBy parseExpr spaces
tl <- char '.' >> spaces >> parseExpr
return $ LDottedList hd tl
parseQuoted :: LispParser LispVal
parseQuoted = do
char '\''
x <- parseExpr
return $ LList [LAtom "quote", x]
parseQuasiQuoted :: LispParser LispVal
parseQuasiQuoted = do
char '`'
x <- parseExpr
return $ LList [LAtom "quasiquote", x]
parseUnquoted :: LispParser LispVal
parseUnquoted = do
char ','
x <- parseExpr
return $ LList [LAtom "unquote", x]
parseUnquoteSpliced :: LispParser LispVal
parseUnquoteSpliced = do
try $ string ",@"
x <- parseExpr
return $ LList [LAtom "unquote-splice", x]
parseVector :: LispParser LispVal
parseVector = LVector <$> sepBy parseExpr spaces
--
-- helpers
--
-- Partial: only defined for LFloat and LNumber, the only constructors
-- that parseComplex passes to it.
toDouble :: LispVal -> Double
toDouble (LFloat f) = realToFrac f
toDouble (LNumber n) = fromIntegral n
--
parseExpr :: LispParser LispVal
parseExpr = parseAtom
<|> parseString
<|> parseChar
<|> try parseFloat
<|> try parseRatio
<|> try parseComplex
<|> parseNumber
<|> parseBool
<|> parseQuoted
<|> parseQuasiQuoted
<|> parseUnquoteSpliced
<|> parseUnquoted
<|> char '(' *> (try parseList <|> parseDottedList) <* char ')'
<|> try (string "#(" *> parseVector <* char ')')
|
It's a sad day in history. It seems that the Bungie Webmaster has died and been replaced by PR people. Just read the most recent Letters to the Webmaster at Bungie.com.
Saddened by this turn of events, I wrote the following letter to try and rouse the spirit of our old friend (by "our," I mean people who have been Mac geeks for a long time).
By all outward appearances, you've been replaced by an impostor. The January 2006 Letters to the Webmaster on bungie.com lacked the eccentric, venom-filled, obscure-literatary-referencing punch that the replies in all previous installments of your letters section. The Gorilla references were forced, and everything you said seemed to be concentrated through the highly polished lens of a PR department. I'm old enough and have been following Bungie for long enough to have read every Letters to the Webmaster post in the history of Bungie. I've had long conversations with the Disembodied Soul. I'm pleased to say I hold the distinction of getting not one, but two pleasant and on-topic replied from you over the years. I guess I'm old-school.
Where are the references to chainsaws and iambic pentameter? Where's the seething, just below the surface rage? Where's the sharp-wit that insults moronic questions in a way the moron can not comprehend? Where is the display of bitter irony about your station at Bungie and life as well?
It's a sad day indeed. I can only assume that Frank, or another Bungie newbie has taken up the mantel and assumed your identity. I men nothing against Frank, I was an avid reader of OXM while he was on staff there. Likewise, Bungie has produced works every bit as wonderful as my beloved Marathon trilogy with the new-kids on board so I have no qualms with them either. They just can't compare to you when answering letters from clueless teenagers.
You will be missed, O Muse.
Now, If I am mistaken and your most recent ramblings are weak due to a particularly intense bender, simply reestablish proof of your identity by eviscerating me with the power and pixel-searing prose of old. If it helps, I have scattered errors in grammar and punctuation throughout this letter. |
[GOAL]
b : ℝ
hb : 0 < b
⊢ (fun x => rexp (-b * x ^ 2)) =o[atTop] fun x => rexp (-x)
[PROOFSTEP]
have A : (fun x : ℝ => -x - -b * x ^ 2) = fun x => x * (b * x + -1) := by ext x; ring
[GOAL]
b : ℝ
hb : 0 < b
⊢ (fun x => -x - -b * x ^ 2) = fun x => x * (b * x + -1)
[PROOFSTEP]
ext x
[GOAL]
case h
b : ℝ
hb : 0 < b
x : ℝ
⊢ -x - -b * x ^ 2 = x * (b * x + -1)
[PROOFSTEP]
ring
[GOAL]
b : ℝ
hb : 0 < b
A : (fun x => -x - -b * x ^ 2) = fun x => x * (b * x + -1)
⊢ (fun x => rexp (-b * x ^ 2)) =o[atTop] fun x => rexp (-x)
[PROOFSTEP]
rw [isLittleO_exp_comp_exp_comp, A]
[GOAL]
b : ℝ
hb : 0 < b
A : (fun x => -x - -b * x ^ 2) = fun x => x * (b * x + -1)
⊢ Tendsto (fun x => x * (b * x + -1)) atTop atTop
[PROOFSTEP]
apply Tendsto.atTop_mul_atTop tendsto_id
[GOAL]
b : ℝ
hb : 0 < b
A : (fun x => -x - -b * x ^ 2) = fun x => x * (b * x + -1)
⊢ Tendsto (fun x => b * x + -1) atTop atTop
[PROOFSTEP]
exact tendsto_atTop_add_const_right atTop (-1 : ℝ) (Tendsto.const_mul_atTop hb tendsto_id)
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
⊢ (fun x => x ^ s * rexp (-b * x ^ 2)) =o[atTop] fun x => rexp (-(1 / 2) * x)
[PROOFSTEP]
apply ((isBigO_refl (fun x : ℝ => x ^ s) atTop).mul_isLittleO (exp_neg_mul_sq_isLittleO_exp_neg hb)).trans
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
⊢ (fun x => x ^ s * rexp (-x)) =o[atTop] fun x => rexp (-(1 / 2) * x)
[PROOFSTEP]
simpa only [mul_comm] using Gamma_integrand_isLittleO s
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Ioi 0)
[PROOFSTEP]
rw [← Ioc_union_Ioi_eq_Ioi (zero_le_one : (0 : ℝ) ≤ 1), integrableOn_union]
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Ioc 0 1) ∧
IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Ioi 1)
[PROOFSTEP]
constructor
[GOAL]
case left
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Ioc 0 1)
[PROOFSTEP]
rw [← integrableOn_Icc_iff_integrableOn_Ioc]
[GOAL]
case left
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Icc 0 1)
[PROOFSTEP]
refine' IntegrableOn.mul_continuousOn _ _ isCompact_Icc
[GOAL]
case left.refine'_1
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => x ^ s) (Icc 0 1)
[PROOFSTEP]
refine' (intervalIntegrable_iff_integrable_Icc_of_le zero_le_one).mp _
[GOAL]
case left.refine'_1
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntervalIntegrable (fun x => x ^ s) volume 0 1
[PROOFSTEP]
exact intervalIntegral.intervalIntegrable_rpow' hs
[GOAL]
case left.refine'_2
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ ContinuousOn (fun x => rexp (-b * x ^ 2)) (Icc 0 1)
[PROOFSTEP]
exact (continuous_exp.comp (continuous_const.mul (continuous_pow 2))).continuousOn
[GOAL]
case right
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Ioi 1)
[PROOFSTEP]
have B : (0 : ℝ) < 1 / 2 := by norm_num
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ 0 < 1 / 2
[PROOFSTEP]
norm_num
[GOAL]
case right
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
B : 0 < 1 / 2
⊢ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Ioi 1)
[PROOFSTEP]
apply integrable_of_isBigO_exp_neg B _ (IsLittleO.isBigO (rpow_mul_exp_neg_mul_sq_isLittleO_exp_neg hb _))
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
B : 0 < 1 / 2
⊢ ContinuousOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Ici 1)
[PROOFSTEP]
intro x hx
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
B : 0 < 1 / 2
x : ℝ
hx : x ∈ Ici 1
⊢ ContinuousWithinAt (fun x => x ^ s * rexp (-b * x ^ 2)) (Ici 1) x
[PROOFSTEP]
have N : x ≠ 0 := by refine' (zero_lt_one.trans_le _).ne'; exact hx
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
B : 0 < 1 / 2
x : ℝ
hx : x ∈ Ici 1
⊢ x ≠ 0
[PROOFSTEP]
refine' (zero_lt_one.trans_le _).ne'
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
B : 0 < 1 / 2
x : ℝ
hx : x ∈ Ici 1
⊢ 1 ≤ x
[PROOFSTEP]
exact hx
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
B : 0 < 1 / 2
x : ℝ
hx : x ∈ Ici 1
N : x ≠ 0
⊢ ContinuousWithinAt (fun x => x ^ s * rexp (-b * x ^ 2)) (Ici 1) x
[PROOFSTEP]
apply ((continuousAt_rpow_const _ _ (Or.inl N)).mul _).continuousWithinAt
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
B : 0 < 1 / 2
x : ℝ
hx : x ∈ Ici 1
N : x ≠ 0
⊢ ContinuousAt (fun x => rexp (-b * x ^ 2)) x
[PROOFSTEP]
exact (continuous_exp.comp (continuous_const.mul (continuous_pow 2))).continuousAt
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ Integrable fun x => x ^ s * rexp (-b * x ^ 2)
[PROOFSTEP]
rw [← integrableOn_univ, ← @Iio_union_Ici _ _ (0 : ℝ), integrableOn_union, integrableOn_Ici_iff_integrableOn_Ioi]
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Iio 0) ∧ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Ioi 0)
[PROOFSTEP]
refine' ⟨_, integrableOn_rpow_mul_exp_neg_mul_sq hb hs⟩
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => x ^ s * rexp (-b * x ^ 2)) (Iio 0)
[PROOFSTEP]
rw [←
(Measure.measurePreserving_neg (volume : Measure ℝ)).integrableOn_comp_preimage
(Homeomorph.neg ℝ).toMeasurableEquiv.measurableEmbedding]
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn ((fun x => x ^ s * rexp (-b * x ^ 2)) ∘ Neg.neg) (Neg.neg ⁻¹' Iio 0)
[PROOFSTEP]
simp only [Function.comp, neg_sq, neg_preimage, preimage_neg_Iio, neg_neg, neg_zero]
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ IntegrableOn (fun x => (-x) ^ s * rexp (-b * x ^ 2)) (Ioi 0)
[PROOFSTEP]
apply Integrable.mono' (integrableOn_rpow_mul_exp_neg_mul_sq hb hs)
[GOAL]
case hf
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ AEStronglyMeasurable (fun x => (-x) ^ s * rexp (-b * x ^ 2)) (Measure.restrict volume (Ioi 0))
[PROOFSTEP]
apply Measurable.aestronglyMeasurable
[GOAL]
case hf.hf
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ Measurable fun x => (-x) ^ s * rexp (-b * x ^ 2)
[PROOFSTEP]
exact (measurable_id'.neg.pow measurable_const).mul ((measurable_id'.pow measurable_const).const_mul (-b)).exp
[GOAL]
case h
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
⊢ ∀ᵐ (a : ℝ) ∂Measure.restrict volume (Ioi 0), ‖(-a) ^ s * rexp (-b * a ^ 2)‖ ≤ a ^ s * rexp (-b * a ^ 2)
[PROOFSTEP]
have : MeasurableSet (Ioi (0 : ℝ)) := measurableSet_Ioi
[GOAL]
case h
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
this : MeasurableSet (Ioi 0)
⊢ ∀ᵐ (a : ℝ) ∂Measure.restrict volume (Ioi 0), ‖(-a) ^ s * rexp (-b * a ^ 2)‖ ≤ a ^ s * rexp (-b * a ^ 2)
[PROOFSTEP]
filter_upwards [ae_restrict_mem this] with x hx
[GOAL]
case h
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
this : MeasurableSet (Ioi 0)
x : ℝ
hx : x ∈ Ioi 0
⊢ ‖(-x) ^ s * rexp (-b * x ^ 2)‖ ≤ x ^ s * rexp (-b * x ^ 2)
[PROOFSTEP]
have h'x : 0 ≤ x := le_of_lt hx
[GOAL]
case h
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
this : MeasurableSet (Ioi 0)
x : ℝ
hx : x ∈ Ioi 0
h'x : 0 ≤ x
⊢ ‖(-x) ^ s * rexp (-b * x ^ 2)‖ ≤ x ^ s * rexp (-b * x ^ 2)
[PROOFSTEP]
rw [Real.norm_eq_abs, abs_mul, abs_of_nonneg (exp_pos _).le]
[GOAL]
case h
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
this : MeasurableSet (Ioi 0)
x : ℝ
hx : x ∈ Ioi 0
h'x : 0 ≤ x
⊢ |(-x) ^ s| * rexp (-b * x ^ 2) ≤ x ^ s * rexp (-b * x ^ 2)
[PROOFSTEP]
apply mul_le_mul_of_nonneg_right _ (exp_pos _).le
[GOAL]
b : ℝ
hb : 0 < b
s : ℝ
hs : -1 < s
this : MeasurableSet (Ioi 0)
x : ℝ
hx : x ∈ Ioi 0
h'x : 0 ≤ x
⊢ |(-x) ^ s| ≤ x ^ s
[PROOFSTEP]
simpa [abs_of_nonneg h'x] using abs_rpow_le_abs_rpow (-x) s
[GOAL]
b : ℝ
hb : 0 < b
⊢ Integrable fun x => rexp (-b * x ^ 2)
[PROOFSTEP]
simpa using integrable_rpow_mul_exp_neg_mul_sq hb (by norm_num : (-1 : ℝ) < 0)
[GOAL]
b : ℝ
hb : 0 < b
⊢ -1 < 0
[PROOFSTEP]
norm_num
[GOAL]
b : ℝ
⊢ IntegrableOn (fun x => rexp (-b * x ^ 2)) (Ioi 0) ↔ 0 < b
[PROOFSTEP]
refine' ⟨fun h => _, fun h => (integrable_exp_neg_mul_sq h).integrableOn⟩
[GOAL]
b : ℝ
h : IntegrableOn (fun x => rexp (-b * x ^ 2)) (Ioi 0)
⊢ 0 < b
[PROOFSTEP]
by_contra' hb
[GOAL]
b : ℝ
h : IntegrableOn (fun x => rexp (-b * x ^ 2)) (Ioi 0)
hb : b ≤ 0
⊢ False
[PROOFSTEP]
have : ∫⁻ _ : ℝ in Ioi 0, 1 ≤ ∫⁻ x : ℝ in Ioi 0, ‖exp (-b * x ^ 2)‖₊ :=
by
apply lintegral_mono (fun x ↦ _)
simp only [neg_mul, ENNReal.one_le_coe_iff, ← toNNReal_one, toNNReal_le_iff_le_coe,
Real.norm_of_nonneg (exp_pos _).le, coe_nnnorm, one_le_exp_iff, Right.nonneg_neg_iff]
exact fun x ↦ mul_nonpos_of_nonpos_of_nonneg hb (sq_nonneg x)
[GOAL]
b : ℝ
h : IntegrableOn (fun x => rexp (-b * x ^ 2)) (Ioi 0)
hb : b ≤ 0
⊢ ∫⁻ (x : ℝ) in Ioi 0, 1 ≤ ∫⁻ (x : ℝ) in Ioi 0, ↑‖rexp (-b * x ^ 2)‖₊
[PROOFSTEP]
apply lintegral_mono (fun x ↦ _)
[GOAL]
b : ℝ
h : IntegrableOn (fun x => rexp (-b * x ^ 2)) (Ioi 0)
hb : b ≤ 0
⊢ ∀ (x : ℝ), 1 ≤ ↑‖rexp (-b * x ^ 2)‖₊
[PROOFSTEP]
simp only [neg_mul, ENNReal.one_le_coe_iff, ← toNNReal_one, toNNReal_le_iff_le_coe, Real.norm_of_nonneg (exp_pos _).le,
coe_nnnorm, one_le_exp_iff, Right.nonneg_neg_iff]
[GOAL]
b : ℝ
h : IntegrableOn (fun x => rexp (-b * x ^ 2)) (Ioi 0)
hb : b ≤ 0
⊢ ∀ (x : ℝ), b * x ^ 2 ≤ 0
[PROOFSTEP]
exact fun x ↦ mul_nonpos_of_nonpos_of_nonneg hb (sq_nonneg x)
[GOAL]
b : ℝ
h : IntegrableOn (fun x => rexp (-b * x ^ 2)) (Ioi 0)
hb : b ≤ 0
this : ∫⁻ (x : ℝ) in Ioi 0, 1 ≤ ∫⁻ (x : ℝ) in Ioi 0, ↑‖rexp (-b * x ^ 2)‖₊
⊢ False
[PROOFSTEP]
simpa using this.trans_lt h.2
[GOAL]
b : ℝ
hb : 0 < b
⊢ Integrable fun x => x * rexp (-b * x ^ 2)
[PROOFSTEP]
simpa using integrable_rpow_mul_exp_neg_mul_sq hb (by norm_num : (-1 : ℝ) < 1)
[GOAL]
b : ℝ
hb : 0 < b
⊢ -1 < 1
[PROOFSTEP]
norm_num
[GOAL]
b : ℂ
x : ℝ
⊢ ‖cexp (-b * ↑x ^ 2)‖ = rexp (-b.re * x ^ 2)
[PROOFSTEP]
rw [Complex.norm_eq_abs, Complex.abs_exp, ← ofReal_pow, mul_comm (-b) _, ofReal_mul_re, neg_re, mul_comm]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ Integrable fun x => cexp (-b * ↑x ^ 2)
[PROOFSTEP]
refine' ⟨(Complex.continuous_exp.comp (continuous_const.mul (continuous_ofReal.pow 2))).aestronglyMeasurable, _⟩
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ HasFiniteIntegral fun x => cexp (-b * ↑x ^ 2)
[PROOFSTEP]
rw [← hasFiniteIntegral_norm_iff]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ HasFiniteIntegral fun a => ‖cexp (-b * ↑a ^ 2)‖
[PROOFSTEP]
simp_rw [norm_cexp_neg_mul_sq]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ HasFiniteIntegral fun a => rexp (-b.re * a ^ 2)
[PROOFSTEP]
exact (integrable_exp_neg_mul_sq hb).2
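-- A restatement of the complex integrability fact just traced, assuming the name
-- `integrable_cexp_neg_mul_sq` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example (b : ℂ) (hb : 0 < b.re) :
    Integrable (fun x : ℝ => Complex.exp (-b * (x : ℂ) ^ 2)) :=
  integrable_cexp_neg_mul_sq hb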
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ Integrable fun x => ↑x * cexp (-b * ↑x ^ 2)
[PROOFSTEP]
refine' ⟨(continuous_ofReal.mul (Complex.continuous_exp.comp _)).aestronglyMeasurable, _⟩
[GOAL]
case refine'_1
b : ℂ
hb : 0 < b.re
⊢ Continuous fun x => -b * ↑x ^ 2
[PROOFSTEP]
exact continuous_const.mul (continuous_ofReal.pow 2)
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
⊢ HasFiniteIntegral fun x => ↑x * cexp (-b * ↑x ^ 2)
[PROOFSTEP]
have := (integrable_mul_exp_neg_mul_sq hb).hasFiniteIntegral
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
this : HasFiniteIntegral fun x => x * rexp (-b.re * x ^ 2)
⊢ HasFiniteIntegral fun x => ↑x * cexp (-b * ↑x ^ 2)
[PROOFSTEP]
rw [← hasFiniteIntegral_norm_iff] at this ⊢
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
this : HasFiniteIntegral fun a => ‖a * rexp (-b.re * a ^ 2)‖
⊢ HasFiniteIntegral fun a => ‖↑a * cexp (-b * ↑a ^ 2)‖
[PROOFSTEP]
convert this
[GOAL]
case h.e'_5.h
b : ℂ
hb : 0 < b.re
this : HasFiniteIntegral fun a => ‖a * rexp (-b.re * a ^ 2)‖
x✝ : ℝ
⊢ ‖↑x✝ * cexp (-b * ↑x✝ ^ 2)‖ = ‖x✝ * rexp (-b.re * x✝ ^ 2)‖
[PROOFSTEP]
rw [norm_mul, norm_mul, norm_cexp_neg_mul_sq b, Complex.norm_eq_abs, abs_ofReal, Real.norm_eq_abs,
norm_of_nonneg (exp_pos _).le]
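-- A restatement of the fact just traced, assuming the name
-- `integrable_mul_cexp_neg_mul_sq` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example (b : ℂ) (hb : 0 < b.re) :
    Integrable (fun x : ℝ => (x : ℂ) * Complex.exp (-b * (x : ℂ) ^ 2)) :=
  integrable_mul_cexp_neg_mul_sq hb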
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (r : ℝ) in Ioi 0, ↑r * cexp (-b * ↑r ^ 2) = (2 * b)⁻¹
[PROOFSTEP]
have hb' : b ≠ 0 := by contrapose! hb; rw [hb, zero_re]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ b ≠ 0
[PROOFSTEP]
contrapose! hb
[GOAL]
b : ℂ
hb : b = 0
⊢ b.re ≤ 0
[PROOFSTEP]
rw [hb, zero_re]
[GOAL]
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
⊢ ∫ (r : ℝ) in Ioi 0, ↑r * cexp (-b * ↑r ^ 2) = (2 * b)⁻¹
[PROOFSTEP]
have A : ∀ x : ℂ, HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x :=
by
intro x
convert ((hasDerivAt_pow 2 x).const_mul (-b)).cexp.const_mul (-(2 * b)⁻¹) using 1
field_simp [hb']
ring
[GOAL]
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
⊢ ∀ (x : ℂ), HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x
[PROOFSTEP]
intro x
[GOAL]
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
x : ℂ
⊢ HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x
[PROOFSTEP]
convert ((hasDerivAt_pow 2 x).const_mul (-b)).cexp.const_mul (-(2 * b)⁻¹) using 1
[GOAL]
case h.e'_7
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
x : ℂ
⊢ x * cexp (-b * x ^ 2) = -(2 * b)⁻¹ * (cexp (-b * x ^ 2) * (-b * (↑2 * x ^ (2 - 1))))
[PROOFSTEP]
field_simp [hb']
[GOAL]
case h.e'_7
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
x : ℂ
⊢ x * cexp (-(b * x ^ 2)) * (2 * b) = cexp (-(b * x ^ 2)) * (b * (2 * x))
[PROOFSTEP]
ring
[GOAL]
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
A : ∀ (x : ℂ), HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x
⊢ ∫ (r : ℝ) in Ioi 0, ↑r * cexp (-b * ↑r ^ 2) = (2 * b)⁻¹
[PROOFSTEP]
have B : Tendsto (fun y : ℝ ↦ -(2 * b)⁻¹ * cexp (-b * (y : ℂ) ^ 2)) atTop (𝓝 (-(2 * b)⁻¹ * 0)) :=
by
refine' Tendsto.const_mul _ (tendsto_zero_iff_norm_tendsto_zero.mpr _)
simp_rw [norm_cexp_neg_mul_sq b]
exact tendsto_exp_atBot.comp (Tendsto.neg_const_mul_atTop (neg_lt_zero.2 hb) (tendsto_pow_atTop two_ne_zero))
[GOAL]
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
A : ∀ (x : ℂ), HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x
⊢ Tendsto (fun y => -(2 * b)⁻¹ * cexp (-b * ↑y ^ 2)) atTop (𝓝 (-(2 * b)⁻¹ * 0))
[PROOFSTEP]
refine' Tendsto.const_mul _ (tendsto_zero_iff_norm_tendsto_zero.mpr _)
[GOAL]
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
A : ∀ (x : ℂ), HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x
⊢ Tendsto (fun e => ‖cexp (-b * ↑e ^ 2)‖) atTop (𝓝 0)
[PROOFSTEP]
simp_rw [norm_cexp_neg_mul_sq b]
[GOAL]
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
A : ∀ (x : ℂ), HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x
⊢ Tendsto (fun e => rexp (-b.re * e ^ 2)) atTop (𝓝 0)
[PROOFSTEP]
exact tendsto_exp_atBot.comp (Tendsto.neg_const_mul_atTop (neg_lt_zero.2 hb) (tendsto_pow_atTop two_ne_zero))
[GOAL]
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
A : ∀ (x : ℂ), HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x
B : Tendsto (fun y => -(2 * b)⁻¹ * cexp (-b * ↑y ^ 2)) atTop (𝓝 (-(2 * b)⁻¹ * 0))
⊢ ∫ (r : ℝ) in Ioi 0, ↑r * cexp (-b * ↑r ^ 2) = (2 * b)⁻¹
[PROOFSTEP]
convert
integral_Ioi_of_hasDerivAt_of_tendsto' (fun x _ => (A ↑x).comp_ofReal)
(integrable_mul_cexp_neg_mul_sq hb).integrableOn B using
1
[GOAL]
case h.e'_3
b : ℂ
hb : 0 < b.re
hb' : b ≠ 0
A : ∀ (x : ℂ), HasDerivAt (fun x => -(2 * b)⁻¹ * cexp (-b * x ^ 2)) (x * cexp (-b * x ^ 2)) x
B : Tendsto (fun y => -(2 * b)⁻¹ * cexp (-b * ↑y ^ 2)) atTop (𝓝 (-(2 * b)⁻¹ * 0))
⊢ (2 * b)⁻¹ = -(2 * b)⁻¹ * 0 - -(2 * b)⁻¹ * cexp (-b * ↑0 ^ 2)
[PROOFSTEP]
simp only [mul_zero, ofReal_zero, zero_pow', Ne.def, bit0_eq_zero, Nat.one_ne_zero, not_false_iff, Complex.exp_zero,
mul_one, sub_neg_eq_add, zero_add]
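-- A restatement of the radial integral just computed, assuming the name
-- `integral_mul_cexp_neg_mul_sq` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example (b : ℂ) (hb : 0 < b.re) :
    ∫ r : ℝ in Set.Ioi 0, (r : ℂ) * Complex.exp (-b * (r : ℂ) ^ 2) = (2 * b)⁻¹ :=
  integral_mul_cexp_neg_mul_sq hb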
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ (∫ (x : ℝ), cexp (-b * ↑x ^ 2)) ^ 2 = ↑π / b
[PROOFSTEP]
calc
(∫ x : ℝ, cexp (-b * (x : ℂ) ^ 2)) ^ 2 = ∫ p : ℝ × ℝ, cexp (-b * (p.1 : ℂ) ^ 2) * cexp (-b * (p.2 : ℂ) ^ 2) := by
rw [pow_two, ← integral_prod_mul]; rfl
_ = ∫ p : ℝ × ℝ, cexp (-b * ((p.1 : ℂ) ^ 2 + (p.2 : ℂ) ^ 2)) :=
by
congr
ext1 p
rw [← Complex.exp_add, mul_add]
_ = ∫ p in polarCoord.target, p.1 • cexp (-b * ((p.1 * Complex.cos p.2) ^ 2 + (p.1 * Complex.sin p.2) ^ 2)) :=
by
rw [← integral_comp_polarCoord_symm]
simp only [polarCoord_symm_apply, ofReal_mul, ofReal_cos, ofReal_sin]
_ = (∫ r in Ioi (0 : ℝ), r * cexp (-b * (r : ℂ) ^ 2)) * ∫ θ in Ioo (-π) π, 1 :=
by
rw [← set_integral_prod_mul]
congr with p : 1
rw [mul_one]
congr
conv_rhs => rw [← one_mul ((p.1 : ℂ) ^ 2), ← sin_sq_add_cos_sq (p.2 : ℂ)]
ring
_ = ↑π / b := by
have : 0 ≤ π + π := by linarith [Real.pi_pos]
simp only [integral_const, Measure.restrict_apply', measurableSet_Ioo, univ_inter, volume_Ioo, sub_neg_eq_add,
ENNReal.toReal_ofReal, this]
rw [← two_mul, real_smul, mul_one, ofReal_mul, ofReal_ofNat, integral_mul_cexp_neg_mul_sq hb]
field_simp [(by contrapose! hb; rw [hb, zero_re] : b ≠ 0)]
ring
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ (∫ (x : ℝ), cexp (-b * ↑x ^ 2)) ^ 2 = ∫ (p : ℝ × ℝ), cexp (-b * ↑p.fst ^ 2) * cexp (-b * ↑p.snd ^ 2)
[PROOFSTEP]
rw [pow_two, ← integral_prod_mul]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (z : ℝ × ℝ), cexp (-b * ↑z.fst ^ 2) * cexp (-b * ↑z.snd ^ 2) ∂Measure.prod volume volume =
∫ (p : ℝ × ℝ), cexp (-b * ↑p.fst ^ 2) * cexp (-b * ↑p.snd ^ 2)
[PROOFSTEP]
rfl
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (p : ℝ × ℝ), cexp (-b * ↑p.fst ^ 2) * cexp (-b * ↑p.snd ^ 2) = ∫ (p : ℝ × ℝ), cexp (-b * (↑p.fst ^ 2 + ↑p.snd ^ 2))
[PROOFSTEP]
congr
[GOAL]
case e_f
b : ℂ
hb : 0 < b.re
⊢ (fun p => cexp (-b * ↑p.fst ^ 2) * cexp (-b * ↑p.snd ^ 2)) = fun p => cexp (-b * (↑p.fst ^ 2 + ↑p.snd ^ 2))
[PROOFSTEP]
ext1 p
[GOAL]
case e_f.h
b : ℂ
hb : 0 < b.re
p : ℝ × ℝ
⊢ cexp (-b * ↑p.fst ^ 2) * cexp (-b * ↑p.snd ^ 2) = cexp (-b * (↑p.fst ^ 2 + ↑p.snd ^ 2))
[PROOFSTEP]
rw [← Complex.exp_add, mul_add]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (p : ℝ × ℝ), cexp (-b * (↑p.fst ^ 2 + ↑p.snd ^ 2)) =
∫ (p : ℝ × ℝ) in polarCoord.target,
p.fst • cexp (-b * ((↑p.fst * Complex.cos ↑p.snd) ^ 2 + (↑p.fst * Complex.sin ↑p.snd) ^ 2))
[PROOFSTEP]
rw [← integral_comp_polarCoord_symm]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (p : ℝ × ℝ) in polarCoord.target,
p.fst •
cexp
(-b * (↑(↑(LocalHomeomorph.symm polarCoord) p).fst ^ 2 + ↑(↑(LocalHomeomorph.symm polarCoord) p).snd ^ 2)) =
∫ (p : ℝ × ℝ) in polarCoord.target,
p.fst • cexp (-b * ((↑p.fst * Complex.cos ↑p.snd) ^ 2 + (↑p.fst * Complex.sin ↑p.snd) ^ 2))
[PROOFSTEP]
simp only [polarCoord_symm_apply, ofReal_mul, ofReal_cos, ofReal_sin]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (p : ℝ × ℝ) in polarCoord.target,
p.fst • cexp (-b * ((↑p.fst * Complex.cos ↑p.snd) ^ 2 + (↑p.fst * Complex.sin ↑p.snd) ^ 2)) =
(∫ (r : ℝ) in Ioi 0, ↑r * cexp (-b * ↑r ^ 2)) * ∫ (θ : ℝ) in Ioo (-π) π, 1
[PROOFSTEP]
rw [← set_integral_prod_mul]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (p : ℝ × ℝ) in polarCoord.target,
p.fst • cexp (-b * ((↑p.fst * Complex.cos ↑p.snd) ^ 2 + (↑p.fst * Complex.sin ↑p.snd) ^ 2)) =
∫ (z : ℝ × ℝ) in Ioi 0 ×ˢ Ioo (-π) π, ↑z.fst * cexp (-b * ↑z.fst ^ 2) * 1 ∂Measure.prod volume volume
[PROOFSTEP]
congr with p : 1
[GOAL]
case e_f.h
b : ℂ
hb : 0 < b.re
p : ℝ × ℝ
⊢ p.fst • cexp (-b * ((↑p.fst * Complex.cos ↑p.snd) ^ 2 + (↑p.fst * Complex.sin ↑p.snd) ^ 2)) =
↑p.fst * cexp (-b * ↑p.fst ^ 2) * 1
[PROOFSTEP]
rw [mul_one]
[GOAL]
case e_f.h
b : ℂ
hb : 0 < b.re
p : ℝ × ℝ
⊢ p.fst • cexp (-b * ((↑p.fst * Complex.cos ↑p.snd) ^ 2 + (↑p.fst * Complex.sin ↑p.snd) ^ 2)) =
↑p.fst * cexp (-b * ↑p.fst ^ 2)
[PROOFSTEP]
congr
[GOAL]
case e_f.h.e_a.e_z.e_a
b : ℂ
hb : 0 < b.re
p : ℝ × ℝ
⊢ (↑p.fst * Complex.cos ↑p.snd) ^ 2 + (↑p.fst * Complex.sin ↑p.snd) ^ 2 = ↑p.fst ^ 2
[PROOFSTEP]
conv_rhs => rw [← one_mul ((p.1 : ℂ) ^ 2), ← sin_sq_add_cos_sq (p.2 : ℂ)]
[GOAL]
b : ℂ
hb : 0 < b.re
p : ℝ × ℝ
| ↑p.fst ^ 2
[PROOFSTEP]
rw [← one_mul ((p.1 : ℂ) ^ 2), ← sin_sq_add_cos_sq (p.2 : ℂ)]
[GOAL]
b : ℂ
hb : 0 < b.re
p : ℝ × ℝ
| ↑p.fst ^ 2
[PROOFSTEP]
rw [← one_mul ((p.1 : ℂ) ^ 2), ← sin_sq_add_cos_sq (p.2 : ℂ)]
[GOAL]
b : ℂ
hb : 0 < b.re
p : ℝ × ℝ
| ↑p.fst ^ 2
[PROOFSTEP]
rw [← one_mul ((p.1 : ℂ) ^ 2), ← sin_sq_add_cos_sq (p.2 : ℂ)]
[GOAL]
case e_f.h.e_a.e_z.e_a
b : ℂ
hb : 0 < b.re
p : ℝ × ℝ
⊢ (↑p.fst * Complex.cos ↑p.snd) ^ 2 + (↑p.fst * Complex.sin ↑p.snd) ^ 2 =
(Complex.sin ↑p.snd ^ 2 + Complex.cos ↑p.snd ^ 2) * ↑p.fst ^ 2
[PROOFSTEP]
ring
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ (∫ (r : ℝ) in Ioi 0, ↑r * cexp (-b * ↑r ^ 2)) * ∫ (θ : ℝ) in Ioo (-π) π, 1 = ↑π / b
[PROOFSTEP]
have : 0 ≤ π + π := by linarith [Real.pi_pos]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ 0 ≤ π + π
[PROOFSTEP]
linarith [Real.pi_pos]
[GOAL]
b : ℂ
hb : 0 < b.re
this : 0 ≤ π + π
⊢ (∫ (r : ℝ) in Ioi 0, ↑r * cexp (-b * ↑r ^ 2)) * ∫ (θ : ℝ) in Ioo (-π) π, 1 = ↑π / b
[PROOFSTEP]
simp only [integral_const, Measure.restrict_apply', measurableSet_Ioo, univ_inter, volume_Ioo, sub_neg_eq_add,
ENNReal.toReal_ofReal, this]
[GOAL]
b : ℂ
hb : 0 < b.re
this : 0 ≤ π + π
⊢ (∫ (r : ℝ) in Ioi 0, ↑r * cexp (-b * ↑r ^ 2)) * (π + π) • 1 = ↑π / b
[PROOFSTEP]
rw [← two_mul, real_smul, mul_one, ofReal_mul, ofReal_ofNat, integral_mul_cexp_neg_mul_sq hb]
[GOAL]
b : ℂ
hb : 0 < b.re
this : 0 ≤ π + π
⊢ (2 * b)⁻¹ * (2 * ↑π) = ↑π / b
[PROOFSTEP]
field_simp [(by contrapose! hb; rw [hb, zero_re] : b ≠ 0)]
[GOAL]
b : ℂ
hb : 0 < b.re
this : 0 ≤ π + π
⊢ b ≠ 0
[PROOFSTEP]
contrapose! hb
[GOAL]
b : ℂ
this : 0 ≤ π + π
hb : b = 0
⊢ b.re ≤ 0
[PROOFSTEP]
rw [hb, zero_re]
[GOAL]
b : ℂ
hb : 0 < b.re
this : 0 ≤ π + π
⊢ 2 * ↑π * b = ↑π * (2 * b)
[PROOFSTEP]
ring
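-- A restatement of the squared Gaussian integral just traced, assuming the name
-- `integral_gaussian_sq_complex` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example (b : ℂ) (hb : 0 < b.re) :
    (∫ x : ℝ, Complex.exp (-b * (x : ℂ) ^ 2)) ^ 2 = (Real.pi : ℂ) / b :=
  integral_gaussian_sq_complex hb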
[GOAL]
b : ℝ
⊢ ∫ (x : ℝ), rexp (-b * x ^ 2) = sqrt (π / b)
[PROOFSTEP]
rcases le_or_lt b 0 with (hb | hb)
[GOAL]
case inl
b : ℝ
hb : b ≤ 0
⊢ ∫ (x : ℝ), rexp (-b * x ^ 2) = sqrt (π / b)
[PROOFSTEP]
rw [integral_undef, sqrt_eq_zero_of_nonpos]
[GOAL]
case inl
b : ℝ
hb : b ≤ 0
⊢ π / b ≤ 0
[PROOFSTEP]
exact div_nonpos_of_nonneg_of_nonpos pi_pos.le hb
[GOAL]
case inl
b : ℝ
hb : b ≤ 0
⊢ ¬Integrable fun x => rexp (-b * x ^ 2)
[PROOFSTEP]
simpa only [not_lt, integrable_exp_neg_mul_sq_iff] using hb
[GOAL]
case inr
b : ℝ
hb : 0 < b
⊢ ∫ (x : ℝ), rexp (-b * x ^ 2) = sqrt (π / b)
[PROOFSTEP]
refine' (sq_eq_sq _ (sqrt_nonneg _)).1 _
[GOAL]
case inr.refine'_1
b : ℝ
hb : 0 < b
⊢ 0 ≤ ∫ (x : ℝ), rexp (-b * x ^ 2)
[PROOFSTEP]
exact integral_nonneg fun x => (exp_pos _).le
[GOAL]
case inr.refine'_2
b : ℝ
hb : 0 < b
⊢ (∫ (x : ℝ), rexp (-b * x ^ 2)) ^ 2 = sqrt (π / b) ^ 2
[PROOFSTEP]
rw [← ofReal_inj, ofReal_pow, ← coe_algebraMap, IsROrC.algebraMap_eq_ofReal, ← integral_ofReal,
sq_sqrt (div_pos pi_pos hb).le, ← IsROrC.algebraMap_eq_ofReal, coe_algebraMap, ofReal_div]
[GOAL]
case inr.refine'_2
b : ℝ
hb : 0 < b
⊢ (∫ (a : ℝ), ↑(rexp (-b * a ^ 2))) ^ 2 = ↑π / ↑b
[PROOFSTEP]
convert integral_gaussian_sq_complex (by rwa [ofReal_re] : 0 < (b : ℂ).re) with _ x
[GOAL]
b : ℝ
hb : 0 < b
⊢ 0 < (↑b).re
[PROOFSTEP]
rwa [ofReal_re]
[GOAL]
case h.e'_2.h.e'_5.h.e'_7.h
b : ℝ
hb : 0 < b
x : ℝ
⊢ ↑(rexp (-b * x ^ 2)) = cexp (-↑b * ↑x ^ 2)
[PROOFSTEP]
rw [ofReal_exp, ofReal_mul, ofReal_pow, ofReal_neg]
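-- A concrete instance of the real Gaussian integral just traced: with b = 1 the
-- statement specialises, after `simp`, to the classical identity ∫ exp(-x^2) = √π.
-- Assumes the lemma name `integral_gaussian` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example : ∫ x : ℝ, Real.exp (-x ^ 2) = Real.sqrt Real.pi := by
  simpa using integral_gaussian 1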
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ContinuousAt (fun c => ∫ (x : ℝ), cexp (-c * ↑x ^ 2)) b
[PROOFSTEP]
let f : ℂ → ℝ → ℂ := fun (c : ℂ) (x : ℝ) => cexp (-c * (x : ℂ) ^ 2)
[GOAL]
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
⊢ ContinuousAt (fun c => ∫ (x : ℝ), cexp (-c * ↑x ^ 2)) b
[PROOFSTEP]
obtain ⟨d, hd, hd'⟩ := exists_between hb
[GOAL]
case intro.intro
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
⊢ ContinuousAt (fun c => ∫ (x : ℝ), cexp (-c * ↑x ^ 2)) b
[PROOFSTEP]
have f_meas : ∀ c : ℂ, AEStronglyMeasurable (f c) volume := fun c =>
by
apply Continuous.aestronglyMeasurable
exact Complex.continuous_exp.comp (continuous_const.mul (continuous_ofReal.pow 2))
[GOAL]
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
c : ℂ
⊢ AEStronglyMeasurable (f c) volume
[PROOFSTEP]
apply Continuous.aestronglyMeasurable
[GOAL]
case hf
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
c : ℂ
⊢ Continuous (f c)
[PROOFSTEP]
exact Complex.continuous_exp.comp (continuous_const.mul (continuous_ofReal.pow 2))
[GOAL]
case intro.intro
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
f_meas : ∀ (c : ℂ), AEStronglyMeasurable (f c) volume
⊢ ContinuousAt (fun c => ∫ (x : ℝ), cexp (-c * ↑x ^ 2)) b
[PROOFSTEP]
have f_cts : ∀ x : ℝ, ContinuousAt (fun c => f c x) b := fun x =>
(Complex.continuous_exp.comp (continuous_id'.neg.mul continuous_const)).continuousAt
[GOAL]
case intro.intro
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
f_meas : ∀ (c : ℂ), AEStronglyMeasurable (f c) volume
f_cts : ∀ (x : ℝ), ContinuousAt (fun c => f c x) b
⊢ ContinuousAt (fun c => ∫ (x : ℝ), cexp (-c * ↑x ^ 2)) b
[PROOFSTEP]
have f_le_bd : ∀ᶠ c : ℂ in 𝓝 b, ∀ᵐ x : ℝ, ‖f c x‖ ≤ exp (-d * x ^ 2) :=
by
refine' eventually_of_mem ((continuous_re.isOpen_preimage _ isOpen_Ioi).mem_nhds hd') _
refine' fun c hc => ae_of_all _ fun x => _
rw [norm_cexp_neg_mul_sq, exp_le_exp]
exact mul_le_mul_of_nonneg_right (neg_le_neg (le_of_lt hc)) (sq_nonneg _)
[GOAL]
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
f_meas : ∀ (c : ℂ), AEStronglyMeasurable (f c) volume
f_cts : ∀ (x : ℝ), ContinuousAt (fun c => f c x) b
⊢ ∀ᶠ (c : ℂ) in 𝓝 b, ∀ᵐ (x : ℝ), ‖f c x‖ ≤ rexp (-d * x ^ 2)
[PROOFSTEP]
refine' eventually_of_mem ((continuous_re.isOpen_preimage _ isOpen_Ioi).mem_nhds hd') _
[GOAL]
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
f_meas : ∀ (c : ℂ), AEStronglyMeasurable (f c) volume
f_cts : ∀ (x : ℝ), ContinuousAt (fun c => f c x) b
⊢ ∀ (x : ℂ), x ∈ re ⁻¹' Ioi d → ∀ᵐ (x_1 : ℝ), ‖f x x_1‖ ≤ rexp (-d * x_1 ^ 2)
[PROOFSTEP]
refine' fun c hc => ae_of_all _ fun x => _
[GOAL]
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
f_meas : ∀ (c : ℂ), AEStronglyMeasurable (f c) volume
f_cts : ∀ (x : ℝ), ContinuousAt (fun c => f c x) b
c : ℂ
hc : c ∈ re ⁻¹' Ioi d
x : ℝ
⊢ ‖f c x‖ ≤ rexp (-d * x ^ 2)
[PROOFSTEP]
rw [norm_cexp_neg_mul_sq, exp_le_exp]
[GOAL]
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
f_meas : ∀ (c : ℂ), AEStronglyMeasurable (f c) volume
f_cts : ∀ (x : ℝ), ContinuousAt (fun c => f c x) b
c : ℂ
hc : c ∈ re ⁻¹' Ioi d
x : ℝ
⊢ -c.re * x ^ 2 ≤ -d * x ^ 2
[PROOFSTEP]
exact mul_le_mul_of_nonneg_right (neg_le_neg (le_of_lt hc)) (sq_nonneg _)
[GOAL]
case intro.intro
b : ℂ
hb : 0 < b.re
f : ℂ → ℝ → ℂ := fun c x => cexp (-c * ↑x ^ 2)
d : ℝ
hd : 0 < d
hd' : d < b.re
f_meas : ∀ (c : ℂ), AEStronglyMeasurable (f c) volume
f_cts : ∀ (x : ℝ), ContinuousAt (fun c => f c x) b
f_le_bd : ∀ᶠ (c : ℂ) in 𝓝 b, ∀ᵐ (x : ℝ), ‖f c x‖ ≤ rexp (-d * x ^ 2)
⊢ ContinuousAt (fun c => ∫ (x : ℝ), cexp (-c * ↑x ^ 2)) b
[PROOFSTEP]
exact continuousAt_of_dominated (eventually_of_forall f_meas) f_le_bd (integrable_exp_neg_mul_sq hd) (ae_of_all _ f_cts)
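-- A restatement of the continuity fact just traced, assuming the name
-- `continuousAt_gaussian_integral` (used later in this trace) takes `b` and the
-- positivity hypothesis explicitly, and assuming `import Mathlib`.
import Mathlib

open MeasureTheory

example (b : ℂ) (hb : 0 < b.re) :
    ContinuousAt (fun c : ℂ => ∫ x : ℝ, Complex.exp (-c * (x : ℂ) ^ 2)) b :=
  continuousAt_gaussian_integral b hb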
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (x : ℝ), cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
[PROOFSTEP]
have nv : ∀ {b : ℂ}, 0 < re b → b ≠ 0 := by intro b hb; contrapose! hb; rw [hb]; simp
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∀ {b : ℂ}, 0 < b.re → b ≠ 0
[PROOFSTEP]
intro b hb
[GOAL]
b✝ : ℂ
hb✝ : 0 < b✝.re
b : ℂ
hb : 0 < b.re
⊢ b ≠ 0
[PROOFSTEP]
contrapose! hb
[GOAL]
b✝ : ℂ
hb✝ : 0 < b✝.re
b : ℂ
hb : b = 0
⊢ b.re ≤ 0
[PROOFSTEP]
rw [hb]
[GOAL]
b✝ : ℂ
hb✝ : 0 < b✝.re
b : ℂ
hb : b = 0
⊢ 0.re ≤ 0
[PROOFSTEP]
simp
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
⊢ ∫ (x : ℝ), cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
[PROOFSTEP]
apply
(convex_halfspace_re_gt 0).isPreconnected.eq_of_sq_eq ?_ ?_ (fun c hc => ?_) (fun {c} hc => ?_)
(by simp : 0 < re (1 : ℂ)) ?_ hb
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
⊢ 0 < 1.re
[PROOFSTEP]
simp
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
⊢ ContinuousOn (fun {b} => ∫ (x : ℝ), cexp (-b * ↑x ^ 2)) {c | 0 < c.re}
[PROOFSTEP]
exact ContinuousAt.continuousOn continuousAt_gaussian_integral
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
⊢ ContinuousOn (fun {b} => (↑π / b) ^ (1 / 2)) {c | 0 < c.re}
[PROOFSTEP]
refine'
ContinuousAt.continuousOn fun b hb =>
(continuousAt_cpow_const (Or.inl _)).comp (continuousAt_const.div continuousAt_id (nv hb))
[GOAL]
b✝ : ℂ
hb✝ : 0 < b✝.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
b : ℂ
hb : b ∈ {c | 0 < c.re}
⊢ 0 < (↑π / b).re
[PROOFSTEP]
rw [div_re, ofReal_im, ofReal_re, zero_mul, zero_div, add_zero]
[GOAL]
b✝ : ℂ
hb✝ : 0 < b✝.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
b : ℂ
hb : b ∈ {c | 0 < c.re}
⊢ 0 < π * b.re / ↑normSq b
[PROOFSTEP]
exact div_pos (mul_pos pi_pos hb) (normSq_pos.mpr (nv hb))
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
⊢ ∫ (x : ℝ), cexp (-1 * ↑x ^ 2) = (↑π / 1) ^ (1 / 2)
[PROOFSTEP]
have : ∀ x : ℝ, cexp (-(1 : ℂ) * (x : ℂ) ^ 2) = exp (-(1 : ℝ) * x ^ 2) :=
by
intro x
simp only [ofReal_exp, neg_mul, one_mul, ofReal_neg, ofReal_pow]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
⊢ ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
[PROOFSTEP]
intro x
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
x : ℝ
⊢ cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
[PROOFSTEP]
simp only [ofReal_exp, neg_mul, one_mul, ofReal_neg, ofReal_pow]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
⊢ ∫ (x : ℝ), cexp (-1 * ↑x ^ 2) = (↑π / 1) ^ (1 / 2)
[PROOFSTEP]
simp_rw [this, ← coe_algebraMap, IsROrC.algebraMap_eq_ofReal, integral_ofReal, ← IsROrC.algebraMap_eq_ofReal,
coe_algebraMap]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
⊢ ↑(∫ (a : ℝ), rexp (-1 * a ^ 2)) = (↑π / 1) ^ (1 / 2)
[PROOFSTEP]
conv_rhs =>
congr
· rw [← ofReal_one, ← ofReal_div]
· rw [← ofReal_one, ← ofReal_ofNat, ← ofReal_div]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| (↑π / 1) ^ (1 / 2)
[PROOFSTEP]
congr
· rw [← ofReal_one, ← ofReal_div]
· rw [← ofReal_one, ← ofReal_ofNat, ← ofReal_div]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| (↑π / 1) ^ (1 / 2)
[PROOFSTEP]
congr
· rw [← ofReal_one, ← ofReal_div]
· rw [← ofReal_one, ← ofReal_ofNat, ← ofReal_div]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| (↑π / 1) ^ (1 / 2)
[PROOFSTEP]
congr
[GOAL]
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| ↑π / 1
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| 1 / 2
[PROOFSTEP]
· rw [← ofReal_one, ← ofReal_div]
[GOAL]
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| ↑π / 1
[PROOFSTEP]
rw [← ofReal_one, ← ofReal_div]
[GOAL]
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| ↑π / 1
[PROOFSTEP]
rw [← ofReal_one, ← ofReal_div]
[GOAL]
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| ↑π / 1
[PROOFSTEP]
rw [← ofReal_one, ← ofReal_div]
[GOAL]
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| 1 / 2
[PROOFSTEP]
· rw [← ofReal_one, ← ofReal_ofNat, ← ofReal_div]
[GOAL]
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| 1 / 2
[PROOFSTEP]
rw [← ofReal_one, ← ofReal_ofNat, ← ofReal_div]
[GOAL]
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| 1 / 2
[PROOFSTEP]
rw [← ofReal_one, ← ofReal_ofNat, ← ofReal_div]
[GOAL]
case a
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
| 1 / 2
[PROOFSTEP]
rw [← ofReal_one, ← ofReal_ofNat, ← ofReal_div]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
⊢ ↑(∫ (a : ℝ), rexp (-1 * a ^ 2)) = ↑(π / 1) ^ ↑(1 / 2)
[PROOFSTEP]
rw [← ofReal_cpow, ofReal_inj]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
⊢ ∫ (a : ℝ), rexp (-1 * a ^ 2) = (π / 1) ^ (1 / 2)
case hx
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
⊢ 0 ≤ π / 1
[PROOFSTEP]
convert integral_gaussian (1 : ℝ) using 1
[GOAL]
case h.e'_3
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
⊢ (π / 1) ^ (1 / 2) = sqrt (π / 1)
[PROOFSTEP]
rw [sqrt_eq_rpow]
[GOAL]
case hx
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
⊢ 0 ≤ π / 1
[PROOFSTEP]
rw [div_one]
[GOAL]
case hx
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
this : ∀ (x : ℝ), cexp (-1 * ↑x ^ 2) = ↑(rexp (-1 * x ^ 2))
⊢ 0 ≤ π
[PROOFSTEP]
exact pi_pos.le
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
⊢ ((fun {b} => ∫ (x : ℝ), cexp (-b * ↑x ^ 2)) ^ 2) c = ((fun {b} => (↑π / b) ^ (1 / 2)) ^ 2) c
[PROOFSTEP]
dsimp only [Pi.pow_apply]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
⊢ (∫ (x : ℝ), cexp (-c * ↑x ^ 2)) ^ 2 = ((↑π / c) ^ (1 / 2)) ^ 2
[PROOFSTEP]
rw [integral_gaussian_sq_complex hc, sq]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
⊢ ↑π / c = (↑π / c) ^ (1 / 2) * (↑π / c) ^ (1 / 2)
[PROOFSTEP]
conv_lhs => rw [← cpow_one (↑π / c)]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
| ↑π / c
[PROOFSTEP]
rw [← cpow_one (↑π / c)]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
| ↑π / c
[PROOFSTEP]
rw [← cpow_one (↑π / c)]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
| ↑π / c
[PROOFSTEP]
rw [← cpow_one (↑π / c)]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
⊢ (↑π / c) ^ 1 = (↑π / c) ^ (1 / 2) * (↑π / c) ^ (1 / 2)
[PROOFSTEP]
rw [← cpow_add _ _ (div_ne_zero (ofReal_ne_zero.mpr pi_ne_zero) (nv hc))]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
⊢ (↑π / c) ^ 1 = (↑π / c) ^ (1 / 2 + 1 / 2)
[PROOFSTEP]
norm_num
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
⊢ (↑π / c) ^ (1 / 2) ≠ 0
[PROOFSTEP]
rw [Ne.def, cpow_eq_zero_iff, not_and_or]
[GOAL]
b : ℂ
hb : 0 < b.re
nv : ∀ {b : ℂ}, 0 < b.re → b ≠ 0
c : ℂ
hc : c ∈ {c | 0 < c.re}
⊢ ¬↑π / c = 0 ∨ ¬1 / 2 ≠ 0
[PROOFSTEP]
exact Or.inl (div_ne_zero (ofReal_ne_zero.mpr pi_ne_zero) (nv hc))
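-- A restatement of the complex Gaussian integral just traced, assuming the name
-- `integral_gaussian_complex` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example (b : ℂ) (hb : 0 < b.re) :
    ∫ x : ℝ, Complex.exp (-b * (x : ℂ) ^ 2) = ((Real.pi : ℂ) / b) ^ (1 / 2 : ℂ) :=
  integral_gaussian_complex hb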
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2) / 2
[PROOFSTEP]
have full_integral := integral_gaussian_complex hb
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : ∫ (x : ℝ), cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
⊢ ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2) / 2
[PROOFSTEP]
have : MeasurableSet (Ioi (0 : ℝ)) := measurableSet_Ioi
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : ∫ (x : ℝ), cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this : MeasurableSet (Ioi 0)
⊢ ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2) / 2
[PROOFSTEP]
rw [← integral_add_compl this (integrable_cexp_neg_mul_sq hb), compl_Ioi] at full_integral
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this : MeasurableSet (Ioi 0)
⊢ ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2) / 2
[PROOFSTEP]
suffices ∫ x : ℝ in Iic 0, cexp (-b * (x : ℂ) ^ 2) = ∫ x : ℝ in Ioi 0, cexp (-b * (x : ℂ) ^ 2)
by
rw [this, ← mul_two] at full_integral
rwa [eq_div_iff]; exact two_ne_zero
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)
⊢ ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2) / 2
[PROOFSTEP]
rw [this, ← mul_two] at full_integral
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) * 2 = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)
⊢ ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2) / 2
[PROOFSTEP]
rwa [eq_div_iff]
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) * 2 = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)
⊢ 2 ≠ 0
[PROOFSTEP]
exact two_ne_zero
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this : MeasurableSet (Ioi 0)
⊢ ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)
[PROOFSTEP]
have : ∀ c : ℝ, ∫ x in (0 : ℝ)..c, cexp (-b * (x : ℂ) ^ 2) = ∫ x in -c..0, cexp (-b * (x : ℂ) ^ 2) :=
by
intro c
have := intervalIntegral.integral_comp_sub_left (a := 0) (b := c) (fun x => cexp (-b * (x : ℂ) ^ 2)) 0
simpa [zero_sub, neg_sq, neg_zero] using this
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this : MeasurableSet (Ioi 0)
⊢ ∀ (c : ℝ), ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
[PROOFSTEP]
intro c
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this : MeasurableSet (Ioi 0)
c : ℝ
⊢ ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
[PROOFSTEP]
have := intervalIntegral.integral_comp_sub_left (a := 0) (b := c) (fun x => cexp (-b * (x : ℂ) ^ 2)) 0
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
c : ℝ
this : ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑(0 - x) ^ 2) = ∫ (x : ℝ) in 0 - c..0 - 0, cexp (-b * ↑x ^ 2)
⊢ ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
[PROOFSTEP]
simpa [zero_sub, neg_sq, neg_zero] using this
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∀ (c : ℝ), ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
⊢ ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)
[PROOFSTEP]
have t1 := intervalIntegral_tendsto_integral_Ioi 0 (integrable_cexp_neg_mul_sq hb).integrableOn tendsto_id
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∀ (c : ℝ), ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
t1 : Tendsto (fun i => ∫ (x : ℝ) in 0 ..id i, cexp (-b * ↑x ^ 2)) atTop (𝓝 (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)))
⊢ ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)
[PROOFSTEP]
have t2 :
Tendsto (fun c : ℝ => ∫ x : ℝ in (0 : ℝ)..c, cexp (-b * (x : ℂ) ^ 2)) atTop
(𝓝 (∫ x : ℝ in Iic 0, cexp (-b * (x : ℂ) ^ 2))) :=
by
simp_rw [this]
refine' intervalIntegral_tendsto_integral_Iic _ _ tendsto_neg_atTop_atBot
apply (integrable_cexp_neg_mul_sq hb).integrableOn
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∀ (c : ℝ), ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
t1 : Tendsto (fun i => ∫ (x : ℝ) in 0 ..id i, cexp (-b * ↑x ^ 2)) atTop (𝓝 (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)))
⊢ Tendsto (fun c => ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2)) atTop (𝓝 (∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2)))
[PROOFSTEP]
simp_rw [this]
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∀ (c : ℝ), ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
t1 : Tendsto (fun i => ∫ (x : ℝ) in 0 ..id i, cexp (-b * ↑x ^ 2)) atTop (𝓝 (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)))
⊢ Tendsto (fun c => ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)) atTop (𝓝 (∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2)))
[PROOFSTEP]
refine' intervalIntegral_tendsto_integral_Iic _ _ tendsto_neg_atTop_atBot
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∀ (c : ℝ), ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
t1 : Tendsto (fun i => ∫ (x : ℝ) in 0 ..id i, cexp (-b * ↑x ^ 2)) atTop (𝓝 (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)))
⊢ IntegrableOn (fun x => cexp (-b * ↑x ^ 2)) (Iic 0)
[PROOFSTEP]
apply (integrable_cexp_neg_mul_sq hb).integrableOn
[GOAL]
b : ℂ
hb : 0 < b.re
full_integral : (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)) + ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = (↑π / b) ^ (1 / 2)
this✝ : MeasurableSet (Ioi 0)
this : ∀ (c : ℝ), ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in -c..0, cexp (-b * ↑x ^ 2)
t1 : Tendsto (fun i => ∫ (x : ℝ) in 0 ..id i, cexp (-b * ↑x ^ 2)) atTop (𝓝 (∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)))
t2 : Tendsto (fun c => ∫ (x : ℝ) in 0 ..c, cexp (-b * ↑x ^ 2)) atTop (𝓝 (∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2)))
⊢ ∫ (x : ℝ) in Iic 0, cexp (-b * ↑x ^ 2) = ∫ (x : ℝ) in Ioi 0, cexp (-b * ↑x ^ 2)
[PROOFSTEP]
exact tendsto_nhds_unique t2 t1
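-- A restatement of the half-line version just traced, assuming the name
-- `integral_gaussian_complex_Ioi` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example (b : ℂ) (hb : 0 < b.re) :
    ∫ x : ℝ in Set.Ioi 0, Complex.exp (-b * (x : ℂ) ^ 2) =
      ((Real.pi : ℂ) / b) ^ (1 / 2 : ℂ) / 2 :=
  integral_gaussian_complex_Ioi hb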
[GOAL]
b : ℝ
⊢ ∫ (x : ℝ) in Ioi 0, rexp (-b * x ^ 2) = sqrt (π / b) / 2
[PROOFSTEP]
rcases le_or_lt b 0 with (hb | hb)
[GOAL]
case inl
b : ℝ
hb : b ≤ 0
⊢ ∫ (x : ℝ) in Ioi 0, rexp (-b * x ^ 2) = sqrt (π / b) / 2
[PROOFSTEP]
rw [integral_undef, sqrt_eq_zero_of_nonpos, zero_div]
[GOAL]
case inl
b : ℝ
hb : b ≤ 0
⊢ π / b ≤ 0
case inl
b : ℝ
hb : b ≤ 0
⊢ ¬Integrable fun x => rexp (-b * x ^ 2)
[PROOFSTEP]
exact div_nonpos_of_nonneg_of_nonpos pi_pos.le hb
[GOAL]
case inl
b : ℝ
hb : b ≤ 0
⊢ ¬Integrable fun x => rexp (-b * x ^ 2)
[PROOFSTEP]
rwa [← IntegrableOn, integrableOn_Ioi_exp_neg_mul_sq_iff, not_lt]
[GOAL]
case inr
b : ℝ
hb : 0 < b
⊢ ∫ (x : ℝ) in Ioi 0, rexp (-b * x ^ 2) = sqrt (π / b) / 2
[PROOFSTEP]
rw [← IsROrC.ofReal_inj (K := ℂ), ← integral_ofReal, ← IsROrC.algebraMap_eq_ofReal, coe_algebraMap]
[GOAL]
case inr
b : ℝ
hb : 0 < b
⊢ ∫ (a : ℝ) in Ioi 0, ↑(rexp (-b * a ^ 2)) = ↑(sqrt (π / b) / 2)
[PROOFSTEP]
convert integral_gaussian_complex_Ioi (by rwa [ofReal_re] : 0 < (b : ℂ).re)
[GOAL]
b : ℝ
hb : 0 < b
⊢ 0 < (↑b).re
[PROOFSTEP]
rwa [ofReal_re]
[GOAL]
case h.e'_2.h.e'_7.h
b : ℝ
hb : 0 < b
x✝ : ℝ
⊢ ↑(rexp (-b * x✝ ^ 2)) = cexp (-↑b * ↑x✝ ^ 2)
[PROOFSTEP]
simp
[GOAL]
case h.e'_3
b : ℝ
hb : 0 < b
⊢ ↑(sqrt (π / b) / 2) = (↑π / ↑b) ^ (1 / 2) / 2
[PROOFSTEP]
rw [sqrt_eq_rpow, ← ofReal_div, ofReal_div, ofReal_cpow]
[GOAL]
case h.e'_3
b : ℝ
hb : 0 < b
⊢ ↑(π / b) ^ ↑(1 / 2) / ↑2 = ↑(π / b) ^ (1 / 2) / 2
case h.e'_3.hx
b : ℝ
hb : 0 < b
⊢ 0 ≤ π / b
[PROOFSTEP]
norm_num
[GOAL]
case h.e'_3.hx
b : ℝ
hb : 0 < b
⊢ 0 ≤ π / b
[PROOFSTEP]
exact (div_pos pi_pos hb).le
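-- A concrete instance of the real half-line Gaussian integral just traced (b = 1),
-- assuming the lemma name `integral_gaussian_Ioi` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example : ∫ x : ℝ in Set.Ioi 0, Real.exp (-x ^ 2) = Real.sqrt Real.pi / 2 := by
  simpa using integral_gaussian_Ioi 1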
[GOAL]
⊢ Gamma (1 / 2) = sqrt π
[PROOFSTEP]
rw [Gamma_eq_integral one_half_pos, ← integral_comp_rpow_Ioi_of_pos zero_lt_two]
[GOAL]
⊢ ∫ (x : ℝ) in Ioi 0, (2 * x ^ (2 - 1)) • (rexp (-x ^ 2) * (x ^ 2) ^ (1 / 2 - 1)) = sqrt π
[PROOFSTEP]
convert congr_arg (fun x : ℝ => 2 * x) (integral_gaussian_Ioi 1) using 1
[GOAL]
case h.e'_2
⊢ ∫ (x : ℝ) in Ioi 0, (2 * x ^ (2 - 1)) • (rexp (-x ^ 2) * (x ^ 2) ^ (1 / 2 - 1)) =
2 * ∫ (x : ℝ) in Ioi 0, rexp (-1 * x ^ 2)
[PROOFSTEP]
rw [← integral_mul_left]
[GOAL]
case h.e'_2
⊢ ∫ (x : ℝ) in Ioi 0, (2 * x ^ (2 - 1)) • (rexp (-x ^ 2) * (x ^ 2) ^ (1 / 2 - 1)) =
∫ (a : ℝ) in Ioi 0, 2 * rexp (-1 * a ^ 2)
[PROOFSTEP]
refine' set_integral_congr measurableSet_Ioi fun x hx => _
[GOAL]
case h.e'_2
x : ℝ
hx : x ∈ Ioi 0
⊢ (2 * x ^ (2 - 1)) • (rexp (-x ^ 2) * (x ^ 2) ^ (1 / 2 - 1)) = 2 * rexp (-1 * x ^ 2)
[PROOFSTEP]
dsimp only
[GOAL]
case h.e'_2
x : ℝ
hx : x ∈ Ioi 0
⊢ (2 * x ^ (2 - 1)) • (rexp (-x ^ 2) * (x ^ 2) ^ (1 / 2 - 1)) = 2 * rexp (-1 * x ^ 2)
[PROOFSTEP]
have : (x ^ (2 : ℝ)) ^ (1 / (2 : ℝ) - 1) = x⁻¹ :=
by
rw [← rpow_mul (le_of_lt hx)]
norm_num
rw [rpow_neg (le_of_lt hx), rpow_one]
[GOAL]
x : ℝ
hx : x ∈ Ioi 0
⊢ (x ^ 2) ^ (1 / 2 - 1) = x⁻¹
[PROOFSTEP]
rw [← rpow_mul (le_of_lt hx)]
[GOAL]
x : ℝ
hx : x ∈ Ioi 0
⊢ x ^ (2 * (1 / 2 - 1)) = x⁻¹
[PROOFSTEP]
norm_num
[GOAL]
x : ℝ
hx : x ∈ Ioi 0
⊢ x ^ (-1) = x⁻¹
[PROOFSTEP]
rw [rpow_neg (le_of_lt hx), rpow_one]
[GOAL]
case h.e'_2
x : ℝ
hx : x ∈ Ioi 0
this : (x ^ 2) ^ (1 / 2 - 1) = x⁻¹
⊢ (2 * x ^ (2 - 1)) • (rexp (-x ^ 2) * (x ^ 2) ^ (1 / 2 - 1)) = 2 * rexp (-1 * x ^ 2)
[PROOFSTEP]
rw [smul_eq_mul, this]
[GOAL]
case h.e'_2
x : ℝ
hx : x ∈ Ioi 0
this : (x ^ 2) ^ (1 / 2 - 1) = x⁻¹
⊢ 2 * x ^ (2 - 1) * (rexp (-x ^ 2) * x⁻¹) = 2 * rexp (-1 * x ^ 2)
[PROOFSTEP]
field_simp [(ne_of_lt (show 0 < x from hx)).symm]
[GOAL]
case h.e'_2
x : ℝ
hx : x ∈ Ioi 0
this : (x ^ 2) ^ (1 / 2 - 1) = x⁻¹
⊢ 2 * x ^ (2 - 1) * rexp (-x ^ 2) = 2 * rexp (-x ^ 2) * x
[PROOFSTEP]
norm_num
[GOAL]
case h.e'_2
x : ℝ
hx : x ∈ Ioi 0
this : (x ^ 2) ^ (1 / 2 - 1) = x⁻¹
⊢ 2 * x * rexp (-x ^ 2) = 2 * rexp (-x ^ 2) * x
[PROOFSTEP]
ring
[GOAL]
case h.e'_3
⊢ sqrt π = 2 * (sqrt (π / 1) / 2)
[PROOFSTEP]
rw [div_one, ← mul_div_assoc, mul_comm, mul_div_cancel _ (two_ne_zero' ℝ)]
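-- The value Γ(1/2) = √π just traced, plus the immediate consequence Γ(1/2)² = π.
-- Assumes the lemma name `Real.Gamma_one_half_eq` used later in this trace, plus the
-- standard Mathlib names `Real.sq_sqrt` and `Real.pi_pos`, and `import Mathlib`.
import Mathlib

example : Real.Gamma (1 / 2) = Real.sqrt Real.pi :=
  Real.Gamma_one_half_eq

example : Real.Gamma (1 / 2) ^ 2 = Real.pi := by
  rw [Real.Gamma_one_half_eq, Real.sq_sqrt Real.pi_pos.le]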
[GOAL]
⊢ Gamma (1 / 2) = ↑π ^ (1 / 2)
[PROOFSTEP]
convert congr_arg ((↑) : ℝ → ℂ) Real.Gamma_one_half_eq
[GOAL]
case h.e'_2
⊢ Gamma (1 / 2) = ↑(Real.Gamma (1 / 2))
[PROOFSTEP]
simpa only [one_div, ofReal_inv, ofReal_ofNat] using Gamma_ofReal (1 / 2)
[GOAL]
case h.e'_3
⊢ ↑π ^ (1 / 2) = ↑(sqrt π)
[PROOFSTEP]
rw [sqrt_eq_rpow, ofReal_cpow pi_pos.le, ofReal_div, ofReal_ofNat, ofReal_one]
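-- The complex counterpart just traced; the lemma name `Complex.Gamma_one_half_eq`
-- is an assumption here (it is not spelled out anywhere in this trace).
import Mathlib

example : Complex.Gamma (1 / 2) = (Real.pi : ℂ) ^ (1 / 2 : ℂ) :=
  Complex.Gamma_one_half_eq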
[GOAL]
b✝ b : ℂ
c T : ℝ
⊢ ‖cexp (-b * (↑T + ↑c * I) ^ 2)‖ = rexp (-(b.re * T ^ 2 - ↑2 * b.im * c * T - b.re * c ^ 2))
[PROOFSTEP]
rw [Complex.norm_eq_abs, Complex.abs_exp, neg_mul, neg_re, ← re_add_im b]
[GOAL]
b✝ b : ℂ
c T : ℝ
⊢ rexp (-((↑b.re + ↑b.im * I) * (↑T + ↑c * I) ^ 2).re) =
rexp (-((↑b.re + ↑b.im * I).re * T ^ 2 - ↑2 * (↑b.re + ↑b.im * I).im * c * T - (↑b.re + ↑b.im * I).re * c ^ 2))
[PROOFSTEP]
simp only [sq, re_add_im, mul_re, mul_im, add_re, add_im, ofReal_re, ofReal_im, I_re, I_im]
[GOAL]
b✝ b : ℂ
c T : ℝ
⊢ rexp
(-(b.re * ((T + (c * 0 - 0 * 1)) * (T + (c * 0 - 0 * 1)) - (0 + (c * 1 + 0 * 0)) * (0 + (c * 1 + 0 * 0))) -
b.im * ((T + (c * 0 - 0 * 1)) * (0 + (c * 1 + 0 * 0)) + (0 + (c * 1 + 0 * 0)) * (T + (c * 0 - 0 * 1))))) =
rexp (-(b.re * (T * T) - ↑2 * b.im * c * T - b.re * (c * c)))
[PROOFSTEP]
ring_nf
[GOAL]
b : ℂ
hb : b.re ≠ 0
c T : ℝ
⊢ ‖cexp (-b * (↑T + ↑c * I) ^ 2)‖ = rexp (-(b.re * (T - b.im * c / b.re) ^ 2 - c ^ 2 * (b.im ^ 2 / b.re + b.re)))
[PROOFSTEP]
have :
b.re * T ^ 2 - 2 * b.im * c * T - b.re * c ^ 2 =
b.re * (T - b.im * c / b.re) ^ 2 - c ^ 2 * (b.im ^ 2 / b.re + b.re) :=
by field_simp; ring
[GOAL]
b : ℂ
hb : b.re ≠ 0
c T : ℝ
⊢ b.re * T ^ 2 - ↑2 * b.im * c * T - b.re * c ^ 2 = b.re * (T - b.im * c / b.re) ^ 2 - c ^ 2 * (b.im ^ 2 / b.re + b.re)
[PROOFSTEP]
field_simp
[GOAL]
b : ℂ
hb : b.re ≠ 0
c T : ℝ
⊢ (b.re * T ^ 2 - 2 * b.im * c * T - b.re * c ^ 2) * (b.re ^ 2 * b.re) =
b.re * (T * b.re - b.im * c) ^ 2 * b.re - b.re ^ 2 * (c ^ 2 * (b.im ^ 2 + b.re * b.re))
[PROOFSTEP]
ring
[GOAL]
b : ℂ
hb : b.re ≠ 0
c T : ℝ
this :
b.re * T ^ 2 - ↑2 * b.im * c * T - b.re * c ^ 2 = b.re * (T - b.im * c / b.re) ^ 2 - c ^ 2 * (b.im ^ 2 / b.re + b.re)
⊢ ‖cexp (-b * (↑T + ↑c * I) ^ 2)‖ = rexp (-(b.re * (T - b.im * c / b.re) ^ 2 - c ^ 2 * (b.im ^ 2 / b.re + b.re)))
[PROOFSTEP]
rw [norm_cexp_neg_mul_sq_add_mul_I, this]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
⊢ ‖verticalIntegral b c T‖ ≤ 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
have vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (T + y * I) ^ 2)‖ ≤ exp (-(b.re * T ^ 2 - (2 : ℝ) * |b.im| * |c| * T - b.re * c ^ 2)) :=
by
intro T hT c y hy
rw [norm_cexp_neg_mul_sq_add_mul_I b, exp_le_exp, neg_le_neg_iff]
refine' sub_le_sub (sub_le_sub (le_refl _) (mul_le_mul_of_nonneg_right _ hT)) _
· (conv_lhs => rw [mul_assoc]); (conv_rhs => rw [mul_assoc])
refine' mul_le_mul_of_nonneg_left ((le_abs_self _).trans _) zero_le_two
rw [abs_mul]
exact mul_le_mul_of_nonneg_left hy (abs_nonneg _)
· refine' mul_le_mul_of_nonneg_left _ hb.le
rwa [sq_le_sq]
-- now main proof
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
⊢ ∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
intro T hT c y hy
[GOAL]
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
rw [norm_cexp_neg_mul_sq_add_mul_I b, exp_le_exp, neg_le_neg_iff]
[GOAL]
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2 ≤ b.re * T ^ 2 - ↑2 * b.im * y * T - b.re * y ^ 2
[PROOFSTEP]
refine' sub_le_sub (sub_le_sub (le_refl _) (mul_le_mul_of_nonneg_right _ hT)) _
[GOAL]
case refine'_1
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ ↑2 * b.im * y ≤ 2 * |b.im| * |c|
[PROOFSTEP]
conv_lhs => rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
| ↑2 * b.im * y
[PROOFSTEP]
rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
| ↑2 * b.im * y
[PROOFSTEP]
rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
| ↑2 * b.im * y
[PROOFSTEP]
rw [mul_assoc]
[GOAL]
case refine'_1
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ ↑2 * (b.im * y) ≤ 2 * |b.im| * |c|
[PROOFSTEP]
conv_rhs => rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
| 2 * |b.im| * |c|
[PROOFSTEP]
rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
| 2 * |b.im| * |c|
[PROOFSTEP]
rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
| 2 * |b.im| * |c|
[PROOFSTEP]
rw [mul_assoc]
[GOAL]
case refine'_1
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ ↑2 * (b.im * y) ≤ 2 * (|b.im| * |c|)
[PROOFSTEP]
refine' mul_le_mul_of_nonneg_left ((le_abs_self _).trans _) zero_le_two
[GOAL]
case refine'_1
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ |b.im * y| ≤ |b.im| * |c|
[PROOFSTEP]
rw [abs_mul]
[GOAL]
case refine'_1
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ |b.im| * |y| ≤ |b.im| * |c|
[PROOFSTEP]
exact mul_le_mul_of_nonneg_left hy (abs_nonneg _)
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ b.re * y ^ 2 ≤ b.re * c ^ 2
[PROOFSTEP]
refine' mul_le_mul_of_nonneg_left _ hb.le
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
c✝ T✝ : ℝ
hT✝ : 0 ≤ T✝
T : ℝ
hT : 0 ≤ T
c y : ℝ
hy : |y| ≤ |c|
⊢ y ^ 2 ≤ c ^ 2
[PROOFSTEP]
rwa [sq_le_sq]
-- now main proof
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ‖verticalIntegral b c T‖ ≤ 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
refine' (intervalIntegral.norm_integral_le_of_norm_le_const _).trans _
[GOAL]
case refine'_1
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ℝ
case refine'_2
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ∀ (x : ℝ), x ∈ Ι 0 c → ‖I * (cexp (-b * (↑T + ↑x * I) ^ 2) - cexp (-b * (↑T - ↑x * I) ^ 2))‖ ≤ ?refine'_1
case refine'_3
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ?refine'_1 * |c - 0| ≤ 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
pick_goal 3
[GOAL]
case refine'_3
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ?refine'_1 * |c - 0| ≤ 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
rw [sub_zero]
[GOAL]
case refine'_3
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ?refine'_1 * |c| ≤ 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
case refine'_1
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ℝ
[PROOFSTEP]
conv_lhs => simp only [mul_comm _ |c|]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| ?refine'_1 * |c|
[PROOFSTEP]
simp only [mul_comm _ |c|]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| ?refine'_1 * |c|
[PROOFSTEP]
simp only [mul_comm _ |c|]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| ?refine'_1 * |c|
[PROOFSTEP]
simp only [mul_comm _ |c|]
[GOAL]
case refine'_3
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ |c| * ?refine'_1 ≤ 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
case refine'_1
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ℝ
[PROOFSTEP]
conv_rhs =>
conv =>
congr
rw [mul_comm]
rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
conv =>
congr
rw [mul_comm]
rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
conv =>
congr
rw [mul_comm]
rw [mul_assoc]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
conv =>
congr
rw [mul_comm]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
congr
rw [mul_comm]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
congr
[GOAL]
case a
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| 2 * |c|
case a
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
rw [mul_comm]
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
| |c| * 2 * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
rw [mul_assoc]
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
⊢ ∀ (x : ℝ),
x ∈ Ι 0 c →
‖I * (cexp (-b * (↑T + ↑x * I) ^ 2) - cexp (-b * (↑T - ↑x * I) ^ 2))‖ ≤
2 * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
intro y hy
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ι 0 c
⊢ ‖I * (cexp (-b * (↑T + ↑y * I) ^ 2) - cexp (-b * (↑T - ↑y * I) ^ 2))‖ ≤
2 * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
have absy : |y| ≤ |c| := by
rcases le_or_lt 0 c with (h | h)
· rw [uIoc_of_le h] at hy
rw [abs_of_nonneg h, abs_of_pos hy.1]
exact hy.2
· rw [uIoc_of_lt h] at hy
rw [abs_of_neg h, abs_of_nonpos hy.2, neg_le_neg_iff]
exact hy.1.le
[GOAL]
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ι 0 c
⊢ |y| ≤ |c|
[PROOFSTEP]
rcases le_or_lt 0 c with (h | h)
[GOAL]
case inl
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ι 0 c
h : 0 ≤ c
⊢ |y| ≤ |c|
[PROOFSTEP]
rw [uIoc_of_le h] at hy
[GOAL]
case inl
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ioc 0 c
h : 0 ≤ c
⊢ |y| ≤ |c|
[PROOFSTEP]
rw [abs_of_nonneg h, abs_of_pos hy.1]
[GOAL]
case inl
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ioc 0 c
h : 0 ≤ c
⊢ y ≤ c
[PROOFSTEP]
exact hy.2
[GOAL]
case inr
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ι 0 c
h : c < 0
⊢ |y| ≤ |c|
[PROOFSTEP]
rw [uIoc_of_lt h] at hy
[GOAL]
case inr
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ioc c 0
h : c < 0
⊢ |y| ≤ |c|
[PROOFSTEP]
rw [abs_of_neg h, abs_of_nonpos hy.2, neg_le_neg_iff]
[GOAL]
case inr
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ioc c 0
h : c < 0
⊢ c ≤ y
[PROOFSTEP]
exact hy.1.le
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ι 0 c
absy : |y| ≤ |c|
⊢ ‖I * (cexp (-b * (↑T + ↑y * I) ^ 2) - cexp (-b * (↑T - ↑y * I) ^ 2))‖ ≤
2 * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
rw [norm_mul, Complex.norm_eq_abs, abs_I, one_mul, two_mul]
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ι 0 c
absy : |y| ≤ |c|
⊢ ‖cexp (-b * (↑T + ↑y * I) ^ 2) - cexp (-b * (↑T - ↑y * I) ^ 2)‖ ≤
rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2)) +
rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
refine' (norm_sub_le _ _).trans (add_le_add (vert_norm_bound hT absy) _)
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ι 0 c
absy : |y| ≤ |c|
⊢ ‖cexp (-b * (↑T - ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
rw [← abs_neg y] at absy
[GOAL]
case refine'_2
b : ℂ
hb : 0 < b.re
c T : ℝ
hT : 0 ≤ T
vert_norm_bound :
∀ {T : ℝ},
0 ≤ T →
∀ {c y : ℝ},
|y| ≤ |c| → ‖cexp (-b * (↑T + ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
y : ℝ
hy : y ∈ Ι 0 c
absy : |(-y)| ≤ |c|
⊢ ‖cexp (-b * (↑T - ↑y * I) ^ 2)‖ ≤ rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))
[PROOFSTEP]
simpa only [neg_mul, ofReal_neg] using vert_norm_bound hT absy
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (verticalIntegral b c) atTop (𝓝 0)
[PROOFSTEP]
rw [tendsto_zero_iff_norm_tendsto_zero]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (fun e => ‖verticalIntegral b c e‖) atTop (𝓝 0)
[PROOFSTEP]
refine'
tendsto_of_tendsto_of_tendsto_of_le_of_le' tendsto_const_nhds _ (eventually_of_forall fun _ => norm_nonneg _)
((eventually_ge_atTop (0 : ℝ)).mp (eventually_of_forall fun T hT => verticalIntegral_norm_le hb c hT))
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (fun T => 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))) atTop (𝓝 0)
[PROOFSTEP]
rw [(by ring : 0 = 2 * |c| * 0)]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ 0 = 2 * |c| * 0
[PROOFSTEP]
ring
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (fun T => 2 * |c| * rexp (-(b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2))) atTop (𝓝 (2 * |c| * 0))
[PROOFSTEP]
refine' (tendsto_exp_atBot.comp (tendsto_neg_atTop_atBot.comp _)).const_mul _
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (fun T => b.re * T ^ 2 - 2 * |b.im| * |c| * T - b.re * c ^ 2) atTop atTop
[PROOFSTEP]
apply tendsto_atTop_add_const_right
[GOAL]
case hf
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (fun x => b.re * x ^ 2 - 2 * |b.im| * |c| * x) atTop atTop
[PROOFSTEP]
simp_rw [sq, ← mul_assoc, ← sub_mul]
[GOAL]
case hf
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (fun x => (b.re * x - 2 * |b.im| * |c|) * x) atTop atTop
[PROOFSTEP]
refine' Tendsto.atTop_mul_atTop (tendsto_atTop_add_const_right _ _ _) tendsto_id
[GOAL]
case hf
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (fun x => b.re * x) atTop atTop
[PROOFSTEP]
exact (tendsto_const_mul_atTop_of_pos hb).mpr tendsto_id
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Integrable fun x => cexp (-b * (↑x + ↑c * I) ^ 2)
[PROOFSTEP]
refine'
⟨(Complex.continuous_exp.comp
(continuous_const.mul ((continuous_ofReal.add continuous_const).pow 2))).aestronglyMeasurable,
_⟩
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ HasFiniteIntegral fun x => cexp (-b * (↑x + ↑c * I) ^ 2)
[PROOFSTEP]
rw [← hasFiniteIntegral_norm_iff]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ HasFiniteIntegral fun a => ‖cexp (-b * (↑a + ↑c * I) ^ 2)‖
[PROOFSTEP]
simp_rw [norm_cexp_neg_mul_sq_add_mul_I' hb.ne', neg_sub _ (c ^ 2 * _), sub_eq_add_neg _ (b.re * _), Real.exp_add]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ HasFiniteIntegral fun a => rexp (c ^ 2 * (b.im ^ 2 / b.re + b.re)) * rexp (-(b.re * (a - b.im * c / b.re) ^ 2))
[PROOFSTEP]
suffices Integrable fun x : ℝ => exp (-(b.re * x ^ 2)) by
exact (Integrable.comp_sub_right this (b.im * c / b.re)).hasFiniteIntegral.const_mul _
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
this : Integrable fun x => rexp (-(b.re * x ^ 2))
⊢ HasFiniteIntegral fun a => rexp (c ^ 2 * (b.im ^ 2 / b.re + b.re)) * rexp (-(b.re * (a - b.im * c / b.re) ^ 2))
[PROOFSTEP]
exact (Integrable.comp_sub_right this (b.im * c / b.re)).hasFiniteIntegral.const_mul _
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Integrable fun x => rexp (-(b.re * x ^ 2))
[PROOFSTEP]
simp_rw [← neg_mul]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Integrable fun x => rexp (-b.re * x ^ 2)
[PROOFSTEP]
apply integrable_exp_neg_mul_sq hb
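-- A restatement of the shifted-contour integrability fact just traced, assuming the
-- name `integrable_cexp_neg_mul_sq_add_real_mul_I` used later in this trace and `import Mathlib`.
import Mathlib

open MeasureTheory

example (b : ℂ) (hb : 0 < b.re) (c : ℝ) :
    Integrable (fun x : ℝ => Complex.exp (-b * (x + c * Complex.I) ^ 2)) :=
  integrable_cexp_neg_mul_sq_add_real_mul_I hb c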
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ ∫ (x : ℝ), cexp (-b * (↑x + ↑c * I) ^ 2) = (↑π / b) ^ (1 / 2)
[PROOFSTEP]
refine'
tendsto_nhds_unique
(intervalIntegral_tendsto_integral (integrable_cexp_neg_mul_sq_add_real_mul_I hb c) tendsto_neg_atTop_atBot
tendsto_id)
_
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
⊢ Tendsto (fun i => ∫ (x : ℝ) in -i..id i, cexp (-b * (↑x + ↑c * I) ^ 2)) atTop (𝓝 ((↑π / b) ^ (1 / 2)))
[PROOFSTEP]
set I₁ := fun T => ∫ x : ℝ in -T..T, cexp (-b * (x + c * I) ^ 2) with HI₁
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
⊢ Tendsto (fun i => ∫ (x : ℝ) in -i..id i, cexp (-b * (↑x + ↑c * I) ^ 2)) atTop (𝓝 ((↑π / b) ^ (1 / 2)))
[PROOFSTEP]
let I₂ := fun T : ℝ => ∫ x : ℝ in -T..T, cexp (-b * (x : ℂ) ^ 2)
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
⊢ Tendsto (fun i => ∫ (x : ℝ) in -i..id i, cexp (-b * (↑x + ↑c * I) ^ 2)) atTop (𝓝 ((↑π / b) ^ (1 / 2)))
[PROOFSTEP]
let I₄ := fun T : ℝ => ∫ y : ℝ in (0 : ℝ)..c, cexp (-b * (T + y * I) ^ 2)
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
⊢ Tendsto (fun i => ∫ (x : ℝ) in -i..id i, cexp (-b * (↑x + ↑c * I) ^ 2)) atTop (𝓝 ((↑π / b) ^ (1 / 2)))
[PROOFSTEP]
let I₅ := fun T : ℝ => ∫ y : ℝ in (0 : ℝ)..c, cexp (-b * (-T + y * I) ^ 2)
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
⊢ Tendsto (fun i => ∫ (x : ℝ) in -i..id i, cexp (-b * (↑x + ↑c * I) ^ 2)) atTop (𝓝 ((↑π / b) ^ (1 / 2)))
[PROOFSTEP]
have C : ∀ T : ℝ, I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0 :=
by
intro T
have :=
integral_boundary_rect_eq_zero_of_differentiableOn (fun z => cexp (-b * z ^ 2)) (-T) (T + c * I)
(by
refine' Differentiable.differentiableOn (Differentiable.const_mul _ _).cexp
exact differentiable_pow 2)
simpa only [neg_im, ofReal_im, neg_zero, ofReal_zero, zero_mul, add_zero, neg_re, ofReal_re, add_re, mul_re, I_re,
mul_zero, I_im, tsub_zero, add_im, mul_im, mul_one, zero_add, Algebra.id.smul_eq_mul, ofReal_neg] using this
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
⊢ ∀ (T : ℝ), I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
[PROOFSTEP]
intro T
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
⊢ I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
[PROOFSTEP]
have :=
integral_boundary_rect_eq_zero_of_differentiableOn (fun z => cexp (-b * z ^ 2)) (-T) (T + c * I)
(by
refine' Differentiable.differentiableOn (Differentiable.const_mul _ _).cexp
exact differentiable_pow 2)
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
⊢ DifferentiableOn ℂ (fun z => cexp (-b * z ^ 2)) (uIcc (-↑T).re (↑T + ↑c * I).re ×ℂ uIcc (-↑T).im (↑T + ↑c * I).im)
[PROOFSTEP]
refine' Differentiable.differentiableOn (Differentiable.const_mul _ _).cexp
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
⊢ Differentiable ℂ fun z => z ^ 2
[PROOFSTEP]
exact differentiable_pow 2
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
this :
(((∫ (x : ℝ) in (-↑T).re..(↑T + ↑c * I).re, (fun z => cexp (-b * z ^ 2)) (↑x + ↑(-↑T).im * I)) -
∫ (x : ℝ) in (-↑T).re..(↑T + ↑c * I).re, (fun z => cexp (-b * z ^ 2)) (↑x + ↑(↑T + ↑c * I).im * I)) +
I • ∫ (y : ℝ) in (-↑T).im..(↑T + ↑c * I).im, (fun z => cexp (-b * z ^ 2)) (↑(↑T + ↑c * I).re + ↑y * I)) -
I • ∫ (y : ℝ) in (-↑T).im..(↑T + ↑c * I).im, (fun z => cexp (-b * z ^ 2)) (↑(-↑T).re + ↑y * I) =
0
⊢ I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
[PROOFSTEP]
simpa only [neg_im, ofReal_im, neg_zero, ofReal_zero, zero_mul, add_zero, neg_re, ofReal_re, add_re, mul_re, I_re,
mul_zero, I_im, tsub_zero, add_im, mul_im, mul_one, zero_add, Algebra.id.smul_eq_mul, ofReal_neg] using this
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
C : ∀ (T : ℝ), I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
⊢ Tendsto (fun i => ∫ (x : ℝ) in -i..id i, cexp (-b * (↑x + ↑c * I) ^ 2)) atTop (𝓝 ((↑π / b) ^ (1 / 2)))
[PROOFSTEP]
simp_rw [id.def, ← HI₁]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
C : ∀ (T : ℝ), I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
⊢ Tendsto I₁ atTop (𝓝 ((↑π / b) ^ (1 / 2)))
[PROOFSTEP]
have : I₁ = fun T : ℝ => I₂ T + verticalIntegral b c T :=
by
ext1 T
specialize C T
rw [sub_eq_zero] at C
unfold verticalIntegral
rw [integral_const_mul, intervalIntegral.integral_sub]
· simp_rw [(fun a b => by rw [sq]; ring_nf : ∀ a b : ℂ, (a - b * I) ^ 2 = (-a + b * I) ^ 2)]
change I₁ T = I₂ T + I * (I₄ T - I₅ T)
rw [mul_sub, ← C]
abel
all_goals apply Continuous.intervalIntegrable; continuity
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
C : ∀ (T : ℝ), I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
⊢ I₁ = fun T => I₂ T + verticalIntegral b c T
[PROOFSTEP]
ext1 T
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
C : ∀ (T : ℝ), I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
T : ℝ
⊢ I₁ T = I₂ T + verticalIntegral b c T
[PROOFSTEP]
specialize C T
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
⊢ I₁ T = I₂ T + verticalIntegral b c T
[PROOFSTEP]
rw [sub_eq_zero] at C
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ I₁ T = I₂ T + verticalIntegral b c T
[PROOFSTEP]
unfold verticalIntegral
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ I₁ T = I₂ T + ∫ (y : ℝ) in 0 ..c, I * (cexp (-b * (↑T + ↑y * I) ^ 2) - cexp (-b * (↑T - ↑y * I) ^ 2))
[PROOFSTEP]
rw [integral_const_mul, intervalIntegral.integral_sub]
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ I₁ T =
I₂ T + I * ((∫ (x : ℝ) in 0 ..c, cexp (-b * (↑T + ↑x * I) ^ 2)) - ∫ (x : ℝ) in 0 ..c, cexp (-b * (↑T - ↑x * I) ^ 2))
[PROOFSTEP]
simp_rw [(fun a b => by rw [sq]; ring_nf : ∀ a b : ℂ, (a - b * I) ^ 2 = (-a + b * I) ^ 2)]
[GOAL]
b✝ : ℂ
hb : 0 < b✝.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b✝ * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b✝ * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b✝ * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b✝ * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b✝ * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
a b : ℂ
⊢ (a - b * I) ^ 2 = (-a + b * I) ^ 2
[PROOFSTEP]
rw [sq]
[GOAL]
b✝ : ℂ
hb : 0 < b✝.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b✝ * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b✝ * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b✝ * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b✝ * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b✝ * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
a b : ℂ
⊢ (a - b * I) * (a - b * I) = (-a + b * I) ^ 2
[PROOFSTEP]
ring_nf
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2) =
(∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)) +
I * ((∫ (x : ℝ) in 0 ..c, cexp (-b * (↑T + ↑x * I) ^ 2)) - ∫ (x : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑x * I) ^ 2))
[PROOFSTEP]
change I₁ T = I₂ T + I * (I₄ T - I₅ T)
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ I₁ T = I₂ T + I * (I₄ T - I₅ T)
[PROOFSTEP]
rw [mul_sub, ← C]
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ I₁ T = I₂ T + (I * I₄ T - (I₂ T - I₁ T + I * I₄ T))
[PROOFSTEP]
abel
[GOAL]
case h
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ I₁ T = I₂ T + (I * I₄ T - (I₂ T - I₁ T + I * I₄ T))
[PROOFSTEP]
abel
[GOAL]
case h.hf
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ IntervalIntegrable (fun x => cexp (-b * (↑T + ↑x * I) ^ 2)) volume 0 c
case h.hg
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ IntervalIntegrable (fun x => cexp (-b * (↑T - ↑x * I) ^ 2)) volume 0 c
[PROOFSTEP]
all_goals apply Continuous.intervalIntegrable; continuity
[GOAL]
case h.hf
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ IntervalIntegrable (fun x => cexp (-b * (↑T + ↑x * I) ^ 2)) volume 0 c
[PROOFSTEP]
apply Continuous.intervalIntegrable
[GOAL]
case h.hf.hu
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ Continuous fun x => cexp (-b * (↑T + ↑x * I) ^ 2)
[PROOFSTEP]
continuity
[GOAL]
case h.hg
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ IntervalIntegrable (fun x => cexp (-b * (↑T - ↑x * I) ^ 2)) volume 0 c
[PROOFSTEP]
apply Continuous.intervalIntegrable
[GOAL]
case h.hg.hu
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
T : ℝ
C : I₂ T - I₁ T + I * I₄ T = I * I₅ T
⊢ Continuous fun x => cexp (-b * (↑T - ↑x * I) ^ 2)
[PROOFSTEP]
continuity
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
C : ∀ (T : ℝ), I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
this : I₁ = fun T => I₂ T + verticalIntegral b c T
⊢ Tendsto I₁ atTop (𝓝 ((↑π / b) ^ (1 / 2)))
[PROOFSTEP]
rw [this, ← add_zero ((π / b : ℂ) ^ (1 / 2 : ℂ)), ← integral_gaussian_complex hb]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
C : ∀ (T : ℝ), I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
this : I₁ = fun T => I₂ T + verticalIntegral b c T
⊢ Tendsto (fun T => I₂ T + verticalIntegral b c T) atTop (𝓝 ((∫ (x : ℝ), cexp (-b * ↑x ^ 2)) + 0))
[PROOFSTEP]
refine' Tendsto.add _ (tendsto_verticalIntegral hb c)
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℝ
I₁ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
HI₁ : I₁ = fun T => ∫ (x : ℝ) in -T..T, cexp (-b * (↑x + ↑c * I) ^ 2)
I₂ : ℝ → ℂ := fun T => ∫ (x : ℝ) in -T..T, cexp (-b * ↑x ^ 2)
I₄ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (↑T + ↑y * I) ^ 2)
I₅ : ℝ → ℂ := fun T => ∫ (y : ℝ) in 0 ..c, cexp (-b * (-↑T + ↑y * I) ^ 2)
C : ∀ (T : ℝ), I₂ T - I₁ T + I * I₄ T - I * I₅ T = 0
this : I₁ = fun T => I₂ T + verticalIntegral b c T
⊢ Tendsto (fun T => I₂ T) atTop (𝓝 (∫ (x : ℝ), cexp (-b * ↑x ^ 2)))
[PROOFSTEP]
exact intervalIntegral_tendsto_integral (integrable_cexp_neg_mul_sq hb) tendsto_neg_atTop_atBot tendsto_id
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℂ
⊢ ∫ (x : ℝ), cexp (-b * (↑x + c) ^ 2) = (↑π / b) ^ (1 / 2)
[PROOFSTEP]
rw [← re_add_im c]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℂ
⊢ ∫ (x : ℝ), cexp (-b * (↑x + (↑c.re + ↑c.im * I)) ^ 2) = (↑π / b) ^ (1 / 2)
[PROOFSTEP]
simp_rw [← add_assoc, ← ofReal_add]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℂ
⊢ ∫ (x : ℝ), cexp (-b * (↑(x + c.re) + ↑c.im * I) ^ 2) = (↑π / b) ^ (1 / 2)
[PROOFSTEP]
rw [integral_add_right_eq_self fun x : ℝ => cexp (-b * (↑x + ↑c.im * I) ^ 2)]
[GOAL]
b : ℂ
hb : 0 < b.re
c : ℂ
⊢ ∫ (x : ℝ), cexp (-b * (↑x + ↑c.im * I) ^ 2) = (↑π / b) ^ (1 / 2)
[PROOFSTEP]
apply integral_cexp_neg_mul_sq_add_real_mul_I hb
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
⊢ ∫ (x : ℝ), cexp (I * t * ↑x) * cexp (-b * ↑x ^ 2) = cexp (-t ^ 2 / (4 * b)) * (↑π / b) ^ (1 / 2)
[PROOFSTEP]
have : b ≠ 0 := by contrapose! hb; rw [hb, zero_re]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
⊢ b ≠ 0
[PROOFSTEP]
contrapose! hb
[GOAL]
b t : ℂ
hb : b = 0
⊢ b.re ≤ 0
[PROOFSTEP]
rw [hb, zero_re]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
this : b ≠ 0
⊢ ∫ (x : ℝ), cexp (I * t * ↑x) * cexp (-b * ↑x ^ 2) = cexp (-t ^ 2 / (4 * b)) * (↑π / b) ^ (1 / 2)
[PROOFSTEP]
simp_rw [← Complex.exp_add]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
this : b ≠ 0
⊢ ∫ (x : ℝ), cexp (I * t * ↑x + -b * ↑x ^ 2) = cexp (-t ^ 2 / (4 * b)) * (↑π / b) ^ (1 / 2)
[PROOFSTEP]
have : ∀ x : ℂ, I * t * x + -b * x ^ 2 = -t ^ 2 / ((4 : ℂ) * b) + -b * (x + -I * t / 2 / b) ^ 2 :=
by
intro x
ring_nf
rw [I_sq]
field_simp; ring
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
this : b ≠ 0
⊢ ∀ (x : ℂ), I * t * x + -b * x ^ 2 = -t ^ 2 / (4 * b) + -b * (x + -I * t / 2 / b) ^ 2
[PROOFSTEP]
intro x
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
this : b ≠ 0
x : ℂ
⊢ I * t * x + -b * x ^ 2 = -t ^ 2 / (4 * b) + -b * (x + -I * t / 2 / b) ^ 2
[PROOFSTEP]
ring_nf
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
this : b ≠ 0
x : ℂ
⊢ I * t * x - x ^ 2 * b =
I * t * x * b * b⁻¹ + I ^ 2 * t ^ 2 * b * b⁻¹ ^ 2 * ↑(Int.negOfNat 1) * (↑(Int.ofNat 1) / ↑4) +
(t ^ 2 * b⁻¹ * ↑(Int.negOfNat 1) * (↑(Int.ofNat 1) / ↑4) - x ^ 2 * b)
[PROOFSTEP]
rw [I_sq]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
this : b ≠ 0
x : ℂ
⊢ I * t * x - x ^ 2 * b =
I * t * x * b * b⁻¹ + -1 * t ^ 2 * b * b⁻¹ ^ 2 * ↑(Int.negOfNat 1) * (↑(Int.ofNat 1) / ↑4) +
(t ^ 2 * b⁻¹ * ↑(Int.negOfNat 1) * (↑(Int.ofNat 1) / ↑4) - x ^ 2 * b)
[PROOFSTEP]
field_simp
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
this : b ≠ 0
x : ℂ
⊢ (I * t * x - x ^ 2 * b) * (b ^ 2 * 4 * (b * 4)) =
(I * t * x * (b ^ 2 * 4) + t ^ 2 * b) * (b * 4) + (-t ^ 2 - b * 4 * (x ^ 2 * b)) * (b ^ 2 * 4)
[PROOFSTEP]
ring
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℂ
this✝ : b ≠ 0
this : ∀ (x : ℂ), I * t * x + -b * x ^ 2 = -t ^ 2 / (4 * b) + -b * (x + -I * t / 2 / b) ^ 2
⊢ ∫ (x : ℝ), cexp (I * t * ↑x + -b * ↑x ^ 2) = cexp (-t ^ 2 / (4 * b)) * (↑π / b) ^ (1 / 2)
[PROOFSTEP]
simp_rw [this, Complex.exp_add, integral_mul_left, integral_cexp_neg_mul_sq_add_const hb]
[GOAL]
b : ℂ
hb : 0 < b.re
⊢ (𝓕 fun x => cexp (-↑π * b * ↑x ^ 2)) = fun t => 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2)
[PROOFSTEP]
ext1 t
[GOAL]
case h
b : ℂ
hb : 0 < b.re
t : ℝ
⊢ 𝓕 (fun x => cexp (-↑π * b * ↑x ^ 2)) t = 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2)
[PROOFSTEP]
simp_rw [fourierIntegral_eq_integral_exp_smul, smul_eq_mul]
[GOAL]
case h
b : ℂ
hb : 0 < b.re
t : ℝ
⊢ ∫ (v : ℝ), cexp (↑(-2 * π * v * t) * I) * cexp (-↑π * b * ↑v ^ 2) = 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2)
[PROOFSTEP]
have h1 : 0 < re (π * b) := by rw [ofReal_mul_re]; exact mul_pos pi_pos hb
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℝ
⊢ 0 < (↑π * b).re
[PROOFSTEP]
rw [ofReal_mul_re]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℝ
⊢ 0 < π * b.re
[PROOFSTEP]
exact mul_pos pi_pos hb
[GOAL]
case h
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
⊢ ∫ (v : ℝ), cexp (↑(-2 * π * v * t) * I) * cexp (-↑π * b * ↑v ^ 2) = 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2)
[PROOFSTEP]
have h2 : b ≠ 0 := by contrapose! hb; rw [hb, zero_re]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
⊢ b ≠ 0
[PROOFSTEP]
contrapose! hb
[GOAL]
b : ℂ
t : ℝ
h1 : 0 < (↑π * b).re
hb : b = 0
⊢ b.re ≤ 0
[PROOFSTEP]
rw [hb, zero_re]
[GOAL]
case h
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ ∫ (v : ℝ), cexp (↑(-2 * π * v * t) * I) * cexp (-↑π * b * ↑v ^ 2) = 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2)
[PROOFSTEP]
convert _root_.fourier_transform_gaussian h1 (-2 * π * t) using 1
[GOAL]
case h.e'_2
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ ∫ (v : ℝ), cexp (↑(-2 * π * v * t) * I) * cexp (-↑π * b * ↑v ^ 2) =
∫ (x : ℝ), cexp (I * (-2 * ↑π * ↑t) * ↑x) * cexp (-(↑π * b) * ↑x ^ 2)
[PROOFSTEP]
congr 1 with x : 1
[GOAL]
case h.e'_2.e_f.h
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
x : ℝ
⊢ cexp (↑(-2 * π * x * t) * I) * cexp (-↑π * b * ↑x ^ 2) = cexp (I * (-2 * ↑π * ↑t) * ↑x) * cexp (-(↑π * b) * ↑x ^ 2)
[PROOFSTEP]
congr 2
[GOAL]
case h.e'_2.e_f.h.e_a.e_z
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
x : ℝ
⊢ ↑(-2 * π * x * t) * I = I * (-2 * ↑π * ↑t) * ↑x
case h.e'_2.e_f.h.e_a.e_z
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
x : ℝ
⊢ -↑π * b * ↑x ^ 2 = -(↑π * b) * ↑x ^ 2
[PROOFSTEP]
any_goals push_cast ; ring
[GOAL]
case h.e'_2.e_f.h.e_a.e_z
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
x : ℝ
⊢ ↑(-2 * π * x * t) * I = I * (-2 * ↑π * ↑t) * ↑x
[PROOFSTEP]
push_cast
[GOAL]
case h.e'_2.e_f.h.e_a.e_z
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
x : ℝ
⊢ -2 * ↑π * ↑x * ↑t * I = I * (-2 * ↑π * ↑t) * ↑x
[PROOFSTEP]
ring
[GOAL]
case h.e'_2.e_f.h.e_a.e_z
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
x : ℝ
⊢ -↑π * b * ↑x ^ 2 = -(↑π * b) * ↑x ^ 2
[PROOFSTEP]
push_cast
[GOAL]
case h.e'_2.e_f.h.e_a.e_z
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
x : ℝ
⊢ -↑π * b * ↑x ^ 2 = -(↑π * b) * ↑x ^ 2
[PROOFSTEP]
ring
[GOAL]
case h.e'_3
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2) = cexp (-(-2 * ↑π * ↑t) ^ 2 / (4 * (↑π * b))) * (↑π / (↑π * b)) ^ (1 / 2)
[PROOFSTEP]
conv_lhs => rw [mul_comm]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
| 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2)
[PROOFSTEP]
rw [mul_comm]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
| 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2)
[PROOFSTEP]
rw [mul_comm]
[GOAL]
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
| 1 / b ^ (1 / 2) * cexp (-↑π / b * ↑t ^ 2)
[PROOFSTEP]
rw [mul_comm]
[GOAL]
case h.e'_3
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ cexp (-↑π / b * ↑t ^ 2) * (1 / b ^ (1 / 2)) = cexp (-(-2 * ↑π * ↑t) ^ 2 / (4 * (↑π * b))) * (↑π / (↑π * b)) ^ (1 / 2)
[PROOFSTEP]
congr 2
[GOAL]
case h.e'_3.e_a.e_z
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ -↑π / b * ↑t ^ 2 = -(-2 * ↑π * ↑t) ^ 2 / (4 * (↑π * b))
[PROOFSTEP]
field_simp [ofReal_ne_zero.mpr pi_ne_zero]
[GOAL]
case h.e'_3.e_a.e_z
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ ↑π * ↑t ^ 2 * (4 * (↑π * b)) = (2 * ↑π * ↑t) ^ 2 * b
[PROOFSTEP]
ring
[GOAL]
case h.e'_3.e_a
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ 1 / b ^ (1 / 2) = (↑π / (↑π * b)) ^ (1 / 2)
[PROOFSTEP]
rw [← div_div, div_self (ofReal_ne_zero.mpr pi_ne_zero), one_div, one_div b, inv_cpow]
[GOAL]
case h.e'_3.e_a.hx
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ arg b ≠ π
[PROOFSTEP]
rw [Ne.def, arg_eq_pi_iff, not_and_or, not_lt]
[GOAL]
case h.e'_3.e_a.hx
b : ℂ
hb : 0 < b.re
t : ℝ
h1 : 0 < (↑π * b).re
h2 : b ≠ 0
⊢ 0 ≤ b.re ∨ ¬b.im = 0
[PROOFSTEP]
exact Or.inl hb.le
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℝ
ha : 0 < a
s : ℝ
⊢ Tendsto (fun x => |x| ^ s * rexp (-a * x ^ 2)) (cocompact ℝ) (𝓝 0)
[PROOFSTEP]
conv in rexp _ => rw [← sq_abs]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℝ
ha : 0 < a
s x : ℝ
| rexp (-a * x ^ 2)
[PROOFSTEP]
rw [← sq_abs]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℝ
ha : 0 < a
s x : ℝ
| rexp (-a * x ^ 2)
[PROOFSTEP]
rw [← sq_abs]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℝ
ha : 0 < a
s x : ℝ
| rexp (-a * x ^ 2)
[PROOFSTEP]
rw [← sq_abs]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℝ
ha : 0 < a
s : ℝ
⊢ Tendsto (fun x => |x| ^ s * rexp (-a * |x| ^ 2)) (cocompact ℝ) (𝓝 0)
[PROOFSTEP]
erw [cocompact_eq, ← comap_abs_atTop,
@tendsto_comap'_iff _ _ _ (fun y => y ^ s * rexp (-a * y ^ 2)) _ _ _
(mem_atTop_sets.mpr ⟨0, fun b hb => ⟨b, abs_of_nonneg hb⟩⟩)]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℝ
ha : 0 < a
s : ℝ
⊢ Tendsto (fun y => y ^ s * rexp (-a * y ^ 2)) atTop (𝓝 0)
[PROOFSTEP]
exact
(rpow_mul_exp_neg_mul_sq_isLittleO_exp_neg ha s).tendsto_zero_of_tendsto
(tendsto_exp_atBot.comp <| tendsto_id.neg_const_mul_atTop (neg_lt_zero.mpr one_half_pos))
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s : ℝ
⊢ (fun x => cexp (-a * ↑x ^ 2)) =o[cocompact ℝ] fun x => |x| ^ s
[PROOFSTEP]
rw [← isLittleO_norm_left]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s : ℝ
⊢ (fun x => ‖cexp (-a * ↑x ^ 2)‖) =o[cocompact ℝ] fun x => |x| ^ s
[PROOFSTEP]
simp_rw [norm_cexp_neg_mul_sq]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s : ℝ
⊢ (fun x => rexp (-a.re * x ^ 2)) =o[cocompact ℝ] fun x => |x| ^ s
[PROOFSTEP]
apply isLittleO_of_tendsto'
[GOAL]
case hgf
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s : ℝ
⊢ ∀ᶠ (x : ℝ) in cocompact ℝ, |x| ^ s = 0 → rexp (-a.re * x ^ 2) = 0
[PROOFSTEP]
refine' Eventually.filter_mono cocompact_le_cofinite _
[GOAL]
case hgf
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s : ℝ
⊢ ∀ᶠ (x : ℝ) in cofinite, |x| ^ s = 0 → rexp (-a.re * x ^ 2) = 0
[PROOFSTEP]
refine' (eventually_cofinite_ne 0).mp (eventually_of_forall fun x hx h => _)
[GOAL]
case hgf
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s x : ℝ
hx : x ≠ 0
h : |x| ^ s = 0
⊢ rexp (-a.re * x ^ 2) = 0
[PROOFSTEP]
exact ((rpow_pos_of_pos (abs_pos.mpr hx) _).ne' h).elim
[GOAL]
case a
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s : ℝ
⊢ Tendsto (fun x => rexp (-a.re * x ^ 2) / |x| ^ s) (cocompact ℝ) (𝓝 0)
[PROOFSTEP]
refine'
Tendsto.congr' (Eventually.filter_mono cocompact_le_cofinite _)
(tendsto_zero_iff_norm_tendsto_zero.mp <| tendsto_rpow_abs_mul_exp_neg_mul_sq_cocompact ha (-s))
[GOAL]
case a
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s : ℝ
⊢ ∀ᶠ (x : ℝ) in cofinite, ‖|x| ^ (-s) * rexp (-a.re * x ^ 2)‖ = (fun x => rexp (-a.re * x ^ 2) / |x| ^ s) x
[PROOFSTEP]
refine' (eventually_cofinite_ne 0).mp (eventually_of_forall fun x _ => _)
[GOAL]
case a
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s x : ℝ
x✝ : x ≠ 0
⊢ ‖|x| ^ (-s) * rexp (-a.re * x ^ 2)‖ = (fun x => rexp (-a.re * x ^ 2) / |x| ^ s) x
[PROOFSTEP]
dsimp only
[GOAL]
case a
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
s x : ℝ
x✝ : x ≠ 0
⊢ ‖|x| ^ (-s) * rexp (-a.re * x ^ 2)‖ = rexp (-a.re * x ^ 2) / |x| ^ s
[PROOFSTEP]
rw [norm_mul, norm_of_nonneg (rpow_nonneg_of_nonneg (abs_nonneg _) _), mul_comm, rpow_neg (abs_nonneg x),
div_eq_mul_inv, norm_of_nonneg (exp_pos _).le]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
⊢ ∑' (n : ℤ), cexp (-↑π * a * ↑n ^ 2) = 1 / a ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π / a * ↑n ^ 2)
[PROOFSTEP]
let f := fun x : ℝ => cexp (-π * a * (x : ℂ) ^ 2)
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
⊢ ∑' (n : ℤ), cexp (-↑π * a * ↑n ^ 2) = 1 / a ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π / a * ↑n ^ 2)
[PROOFSTEP]
have h1 : 0 < (↑π * a).re := by
rw [ofReal_mul_re]
exact mul_pos pi_pos ha
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
⊢ 0 < (↑π * a).re
[PROOFSTEP]
rw [ofReal_mul_re]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
⊢ 0 < π * a.re
[PROOFSTEP]
exact mul_pos pi_pos ha
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
⊢ ∑' (n : ℤ), cexp (-↑π * a * ↑n ^ 2) = 1 / a ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π / a * ↑n ^ 2)
[PROOFSTEP]
have h2 : 0 < (↑π / a).re := by
rw [div_eq_mul_inv, ofReal_mul_re, inv_re]
refine' mul_pos pi_pos (div_pos ha <| normSq_pos.mpr _)
contrapose! ha
rw [ha, zero_re]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
⊢ 0 < (↑π / a).re
[PROOFSTEP]
rw [div_eq_mul_inv, ofReal_mul_re, inv_re]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
⊢ 0 < π * (a.re / ↑normSq a)
[PROOFSTEP]
refine' mul_pos pi_pos (div_pos ha <| normSq_pos.mpr _)
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
⊢ a ≠ 0
[PROOFSTEP]
contrapose! ha
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
ha : a = 0
⊢ a.re ≤ 0
[PROOFSTEP]
rw [ha, zero_re]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
⊢ ∑' (n : ℤ), cexp (-↑π * a * ↑n ^ 2) = 1 / a ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π / a * ↑n ^ 2)
[PROOFSTEP]
have f_bd : f =O[cocompact ℝ] fun x => |x| ^ (-2 : ℝ) :=
by
convert (isLittleO_exp_neg_mul_sq_cocompact h1 (-2)).isBigO using 2
dsimp only
congr 1
ring
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
⊢ f =O[cocompact ℝ] fun x => |x| ^ (-2)
[PROOFSTEP]
convert (isLittleO_exp_neg_mul_sq_cocompact h1 (-2)).isBigO using 2
[GOAL]
case h.e'_7.h
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
x✝ : ℝ
⊢ f x✝ = cexp (-(↑π * a) * ↑x✝ ^ 2)
[PROOFSTEP]
dsimp only
[GOAL]
case h.e'_7.h
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
x✝ : ℝ
⊢ cexp (-↑π * a * ↑x✝ ^ 2) = cexp (-(↑π * a) * ↑x✝ ^ 2)
[PROOFSTEP]
congr 1
[GOAL]
case h.e'_7.h.e_z
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
x✝ : ℝ
⊢ -↑π * a * ↑x✝ ^ 2 = -(↑π * a) * ↑x✝ ^ 2
[PROOFSTEP]
ring
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
f_bd : f =O[cocompact ℝ] fun x => |x| ^ (-2)
⊢ ∑' (n : ℤ), cexp (-↑π * a * ↑n ^ 2) = 1 / a ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π / a * ↑n ^ 2)
[PROOFSTEP]
have Ff_bd : 𝓕 f =O[cocompact ℝ] fun x => |x| ^ (-2 : ℝ) :=
by
rw [fourier_transform_gaussian_pi ha]
convert (isLittleO_exp_neg_mul_sq_cocompact h2 (-2)).isBigO.const_mul_left ((1 : ℂ) / a ^ (1 / 2 : ℂ)) using 2
congr 1
ring_nf
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
f_bd : f =O[cocompact ℝ] fun x => |x| ^ (-2)
⊢ 𝓕 f =O[cocompact ℝ] fun x => |x| ^ (-2)
[PROOFSTEP]
rw [fourier_transform_gaussian_pi ha]
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
f_bd : f =O[cocompact ℝ] fun x => |x| ^ (-2)
⊢ (fun t => 1 / a ^ (1 / 2) * cexp (-↑π / a * ↑t ^ 2)) =O[cocompact ℝ] fun x => |x| ^ (-2)
[PROOFSTEP]
convert (isLittleO_exp_neg_mul_sq_cocompact h2 (-2)).isBigO.const_mul_left ((1 : ℂ) / a ^ (1 / 2 : ℂ)) using 2
[GOAL]
case h.e'_7.h
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
f_bd : f =O[cocompact ℝ] fun x => |x| ^ (-2)
x✝ : ℝ
⊢ 1 / a ^ (1 / 2) * cexp (-↑π / a * ↑x✝ ^ 2) = 1 / a ^ (1 / 2) * cexp (-(↑π / a) * ↑x✝ ^ 2)
[PROOFSTEP]
congr 1
[GOAL]
case h.e'_7.h.e_a
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
f_bd : f =O[cocompact ℝ] fun x => |x| ^ (-2)
x✝ : ℝ
⊢ cexp (-↑π / a * ↑x✝ ^ 2) = cexp (-(↑π / a) * ↑x✝ ^ 2)
[PROOFSTEP]
ring_nf
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℂ
ha : 0 < a.re
f : ℝ → ℂ := fun x => cexp (-↑π * a * ↑x ^ 2)
h1 : 0 < (↑π * a).re
h2 : 0 < (↑π / a).re
f_bd : f =O[cocompact ℝ] fun x => |x| ^ (-2)
Ff_bd : 𝓕 f =O[cocompact ℝ] fun x => |x| ^ (-2)
⊢ ∑' (n : ℤ), cexp (-↑π * a * ↑n ^ 2) = 1 / a ^ (1 / 2) * ∑' (n : ℤ), cexp (-↑π / a * ↑n ^ 2)
[PROOFSTEP]
simpa only [fourier_transform_gaussian_pi ha, tsum_mul_left, Function.comp] using
Real.tsum_eq_tsum_fourierIntegral_of_rpow_decay
(Complex.continuous_exp.comp (continuous_const.mul (continuous_ofReal.pow 2)) : Continuous f) one_lt_two f_bd Ff_bd
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℝ
ha : 0 < a
⊢ ∑' (n : ℤ), rexp (-π * a * ↑n ^ 2) = 1 / a ^ (1 / 2) * ∑' (n : ℤ), rexp (-π / a * ↑n ^ 2)
[PROOFSTEP]
simpa only [← ofReal_inj, ofReal_mul, ofReal_tsum, ofReal_exp, ofReal_div, ofReal_pow, ofReal_int_cast, ofReal_neg,
ofReal_cpow ha.le, ofReal_ofNat, ofReal_one] using
Complex.tsum_exp_neg_mul_int_sq (by rwa [ofReal_re] : 0 < (a : ℂ).re)
[GOAL]
E : Type u_1
inst✝ : NormedAddCommGroup E
a : ℝ
ha : 0 < a
⊢ 0 < (↑a).re
[PROOFSTEP]
rwa [ofReal_re]
|
In the Gros Islet Quarter on St. Lucia, rough seas damaged a jetty and grounded a yacht. In Soufrière, four homes were destroyed by the storm surge, which also made some areas impassable. The storm surge also flooded parts of the Anse la Raye Quarter, leading officials to declare a mandatory evacuation of the area. On Montserrat, very little damage was reported. A few minor landslides occurred in rural areas; no impact was caused by them. On Nevis there was relatively little damage, although the beachfront part of the Four Seasons Resort was severely damaged and was subsequently closed for an extended period. Throughout St. Kitts and Nevis, damage was estimated at $19 million.
|
lemma primitive_part_eq_0_iff [simp]: "primitive_part p = 0 \<longleftrightarrow> p = 0" |
module Either where
data Either (A : Set) (B : Set) : Set where
left : A → Either A B
right : B → Either A B
[_,_] : ∀ {A B} {C : Set} → (A → C) → (B → C) → Either A B → C
[ f , g ] (left x) = f x
[ f , g ] (right x) = g x
|
export transform_solutions
_parse_expression(exp) = exp isa String ? Num(eval(Meta.parse(exp))) : exp
"""
$(TYPEDSIGNATURES)
Takes a `Result` object and a string `f` representing a Symbolics.jl expression.
Returns an array with the values of `f` evaluated for the respective solutions.
Additional substitution rules can be specified in `rules` in the format `("a" => val)` or `(a => val)`
"""
function transform_solutions(res::Result, f::String; rules=Dict())
    # a string is used as input because a macro would not "see" the user's namespace, while the user's namespace does not "see" the variables defined here
transformed = [Vector{ComplexF64}(undef, length(res.solutions[1])) for k in res.solutions] # preallocate
# define variables in rules in this namespace
new_keys = declare_variable.(string.(keys(Dict(rules))))
expr = f isa String ? _parse_expression(f) : f
fixed_subs = merge(res.fixed_parameters, Dict(zip(new_keys, values(Dict(rules)))))
expr = substitute_all(expr, Dict(fixed_subs))
vars = res.problem.variables
all_symbols = cat(vars, collect(keys(res.swept_parameters)), dims=1)
comp_func = build_function(expr, all_symbols)
f = eval(comp_func)
# preallocate an array for the numerical values, rewrite parts of it
# when looping through the solutions
vals = Vector{ComplexF64}(undef, length(all_symbols))
n_vars = length(vars)
n_pars = length(all_symbols) - n_vars
for idx in CartesianIndices(res.solutions)
params_values = res.swept_parameters[Tuple(idx)...]
vals[end-n_pars+1:end] .= params_values # param values are common to all branches
for (branch,soln) in enumerate(res.solutions[idx])
vals[1:n_vars] .= soln
transformed[idx][branch] = Base.invokelatest(f, vals)
end
end
return transformed
end
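# A minimal usage sketch for `transform_solutions` (illustrative only). The `res`
# object and the variable/parameter names below ("u1", "v1", "a") are hypothetical
# placeholders, assuming a `Result` was obtained from an earlier steady-state sweep:
#
#   amplitudes = transform_solutions(res, "sqrt(u1^2 + v1^2)")
#   shifted    = transform_solutions(res, "u1 + a"; rules=Dict("a" => 0.1))
#
# Each element of the returned array contains the expression evaluated at the
# corresponding solution branch for that point of the sweep.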
# a simplified version meant to work with arrays of solutions
# cannot parse parameter values; mainly meant for time-dependent results
function transform_solutions(soln::Vector, f::String, harm_eq::HarmonicEquation)
vars = _remove_brackets(get_variables(harm_eq))
transformed = Vector{ComplexF64}(undef, length(soln))
# parse the input with Symbolics
expr = HarmonicBalance._parse_expression(f)
rule(u) = Dict(zip(vars, u))
transformed = map( x -> substitute_all(expr, rule(x)), soln)
return convert(typeof(soln[1]), transformed)
end |
Guadalupe, CA, July 23, 2003—A surprising 1,200 applications were handed out by Peoples' Self-Help Housing last week with more requests still coming in for Guadalupe's affordable housing, Riverview Townhomes.
"This is pretty overwhelming," said Roger Barr, of Peoples' Self-Help Housing's rental division. "It's about 3 times more than expected and it really shows the demand and interest in affordable housing in the Guadalupe area-and is also reflective of the need for more affordable housing on the Central Coast," Said Barr.
A lottery process will then determine the order of applications to be reviewed and qualified.
Peoples' Self-Help Housing distributed applications for 80 rental townhomes last weekend at the Riverview Townhomes in Guadalupe. The apartment complex will be completed in the fall, 2003, and the qualified residents will begin moving in at that point.
Wells Fargo is an important partner in this project as the construction lender and a financial sponsor. |
#include "wallet/unittests/test_helpers.h"
#include "utility/logger.h"
#include "websocket/websocket_server.h"
#include "websocket/sessions.h"
#include <boost/beast/core.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <cstdlib>
#include <iostream>
#include <string>
#include <thread>
#include <iostream>
WALLET_TEST_INIT
using namespace beam;
namespace
{
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
namespace ssl = boost::asio::ssl; // from <boost/asio/ssl.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
void LoadRootCertificates(ssl::context& ctx, boost::system::error_code& ec)
{
std::string const cert =
"-----BEGIN CERTIFICATE-----\n"
"MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs\n"
"MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\n"
"d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\n"
"ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL\n"
"MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\n"
"LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\n"
"RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm\n"
"+9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW\n"
"PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM\n"
"xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB\n"
"Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3\n"
"hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg\n"
"EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF\n"
"MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA\n"
"FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec\n"
"nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z\n"
"eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF\n"
"hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2\n"
"Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe\n"
"vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep\n"
"+OkuE6N36B9K\n"
"-----END CERTIFICATE-----\n"
"-----BEGIN CERTIFICATE-----\n"
"MIIDaDCCAlCgAwIBAgIJAO8vBu8i8exWMA0GCSqGSIb3DQEBCwUAMEkxCzAJBgNV\n"
"BAYTAlVTMQswCQYDVQQIDAJDQTEtMCsGA1UEBwwkTG9zIEFuZ2VsZXNPPUJlYXN0\n"
"Q049d3d3LmV4YW1wbGUuY29tMB4XDTE3MDUwMzE4MzkxMloXDTQ0MDkxODE4Mzkx\n"
"MlowSTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMS0wKwYDVQQHDCRMb3MgQW5n\n"
"ZWxlc089QmVhc3RDTj13d3cuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA\n"
"A4IBDwAwggEKAoIBAQDJ7BRKFO8fqmsEXw8v9YOVXyrQVsVbjSSGEs4Vzs4cJgcF\n"
"xqGitbnLIrOgiJpRAPLy5MNcAXE1strVGfdEf7xMYSZ/4wOrxUyVw/Ltgsft8m7b\n"
"Fu8TsCzO6XrxpnVtWk506YZ7ToTa5UjHfBi2+pWTxbpN12UhiZNUcrRsqTFW+6fO\n"
"9d7xm5wlaZG8cMdg0cO1bhkz45JSl3wWKIES7t3EfKePZbNlQ5hPy7Pd5JTmdGBp\n"
"yY8anC8u4LPbmgW0/U31PH0rRVfGcBbZsAoQw5Tc5dnb6N2GEIbq3ehSfdDHGnrv\n"
"enu2tOK9Qx6GEzXh3sekZkxcgh+NlIxCNxu//Dk9AgMBAAGjUzBRMB0GA1UdDgQW\n"
"BBTZh0N9Ne1OD7GBGJYz4PNESHuXezAfBgNVHSMEGDAWgBTZh0N9Ne1OD7GBGJYz\n"
"4PNESHuXezAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCmTJVT\n"
"LH5Cru1vXtzb3N9dyolcVH82xFVwPewArchgq+CEkajOU9bnzCqvhM4CryBb4cUs\n"
"gqXWp85hAh55uBOqXb2yyESEleMCJEiVTwm/m26FdONvEGptsiCmF5Gxi0YRtn8N\n"
"V+KhrQaAyLrLdPYI7TrwAOisq2I1cD0mt+xgwuv/654Rl3IhOMx+fKWKJ9qLAiaE\n"
"fQyshjlPP9mYVxWOxqctUdQ8UnsUKKGEUcVrA08i1OAnVKlPFjKBvk+r7jpsTPcr\n"
"9pWXTO9JrYMML7d+XRSZA1n3856OqZDX4403+9FnXCvfcLZLLKTBvwwFgEFGpzjK\n"
"UEVbkhd5qstF6qWK\n"
"-----END CERTIFICATE-----\n";
;
ctx.add_certificate_authority(
boost::asio::buffer(cert.data(), cert.size()), ec);
if (ec)
return;
}
void LoadServerCertificate(WebSocketServer::Options& options)
{
/*
The certificate was generated from CMD.EXE on Windows 10 using:
winpty openssl dhparam -out dh.pem 2048
winpty openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 10000 -out cert.pem -subj "//C=US\ST=CA\L=Los Angeles\O=Beast\CN=www.example.com"
*/
std::string const cert =
"-----BEGIN CERTIFICATE-----\n"
"MIIC4TCCAcmgAwIBAgIUZI8OTxIJc4I3qP93o3+7kkauMwkwDQYJKoZIhvcNAQEL\n"
"BQAwADAeFw0yMTA4MzAxMjUyMzdaFw00OTAxMTUxMjUyMzdaMAAwggEiMA0GCSqG\n"
"SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCuuDs1tuf8otgGWO9fKW2RNeIfi9aH7u0r\n"
"5VtFh8PLZiJjItzzNykxJWKGMlEyjS+R8czNSWylxgxvEQLyoUOKnDIO+zrS84T/\n"
"XxRPPqukB/UuruuWMPmjumXPLp0MePSPAzY4IAMsS89ve0zoNh4R2Zzj33Y47L8R\n"
"wWxW6Q5mRprT2o1UCJcledACWF4drqWDHOLNk2VOobdsfSLAKT+z2xq69wm3qBzQ\n"
"2HZROsCsqQOUVG0pYlLevH65a/rlJ1kbYg4Tnf5ldcQdEJuaZFm6o9rn4xioQjRg\n"
"4suG6/aFEyBTEgRlxRRmnSkqg7/UDVxKcaBkHsZoBEnJsWlExLC1AgMBAAGjUzBR\n"
"MB0GA1UdDgQWBBRESesuAscp2fHe/v9xUMMbIipiqzAfBgNVHSMEGDAWgBRESesu\n"
"Ascp2fHe/v9xUMMbIipiqzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA\n"
"A4IBAQB4L2wS5w+MGEvZYsPk9f7ryN2c029ZJ5shmxOX4xYa9ig1ZloNox+v/05A\n"
"CUVHnTaxAsFhZunbkXLQ5KAVzVuXQk2ljlWWhZjp5ImqQSTbkY2wzBNHIPbnSNEV\n"
"CuprV51JDZrB1Q0zKulCq2ia1Og29FGclUHrL2QdMR30UZyV8HaCez0tEp4QSKNk\n"
"tCd2tIZo8+n9UYCKAgp5FMhxpyL5DL94TCTTG1Lf4tEICeZdKV89a0d3eAN79kC7\n"
"Tm2A/1SNQouuLdv+tQ/gMAfMCzqOMTqZ85oJOyqXnxAA9cvje5nm4pBn5Q/6H4ry\n"
"oJXOdnELFSfHV5wdsQeq5IyO5A5X\n"
"-----END CERTIFICATE-----\n";
std::string const key =
"-----BEGIN PRIVATE KEY-----\n"
"MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCuuDs1tuf8otgG\n"
"WO9fKW2RNeIfi9aH7u0r5VtFh8PLZiJjItzzNykxJWKGMlEyjS+R8czNSWylxgxv\n"
"EQLyoUOKnDIO+zrS84T/XxRPPqukB/UuruuWMPmjumXPLp0MePSPAzY4IAMsS89v\n"
"e0zoNh4R2Zzj33Y47L8RwWxW6Q5mRprT2o1UCJcledACWF4drqWDHOLNk2VOobds\n"
"fSLAKT+z2xq69wm3qBzQ2HZROsCsqQOUVG0pYlLevH65a/rlJ1kbYg4Tnf5ldcQd\n"
"EJuaZFm6o9rn4xioQjRg4suG6/aFEyBTEgRlxRRmnSkqg7/UDVxKcaBkHsZoBEnJ\n"
"sWlExLC1AgMBAAECggEAHnmF4lWX2ynwMhM7FUcdlFFosoXqkmUrOxfTNqp6jTSw\n"
"VMhU75s0dR0HNU77eKzFmlgpl7jx2WxU6N53vChCpp+d350UYo0VKpHD8hqFR6QX\n"
"sN/TuaurL1KoxV1lCTLjvTobL+jthMFoWhKQlIQz9HsXcWudrEom/YrWQcZ+u3nV\n"
"KDGia5XVQxvD7+vHJ5ppZTM5wB5cUM6VNqwvkXO3Ts3zXJQ+fwbce1I4yg7xzm/m\n"
"aBq1+z0BtkfxAa+W/D/rx89gPGVapi8r0vved+5ei9B7KMyHCKgwrlxpJyOJ87S5\n"
"A+g8HzFatQPoleqUfGZluXxtoFQSK6B4FtNwPKy8AQKBgQDl99uLONrPSnat4b+v\n"
"5i/VcvvA9Ot4ryOnIul6KaNbp3Gr/DyVchrHEkvu7+HVV75kYhObReG9AsMn7a/B\n"
"1rxbria5SbgJm0r+HW4oJzkO5DO1L5gOdbQiYxw5Jff+vuD/fBIyXNKKNFrzO5Cj\n"
"mtbmYDzZ9gP4s3wQE5XATy69AQKBgQDCf1oH+AM7r10BH69A1HNvnYqdamEZ0Bem\n"
"VG32Q0YGZCDS0UfJ8ybo/Sw0VRplEE3WFkGrQg4lleJ3kYRDuFUtInNXNmPWvuJH\n"
"pCJt/WsnIrF5j7hPWY/9G0pg8KuP4I/yvIAIVXYXesh06akhN9OsIuhQtCnueejS\n"
"jWG1/aYPtQKBgA8rWVUGrBBOZiO0J3PP6EnZKtggj8PHMb/doq8HPhpWoj3pBooJ\n"
"G9ET2ORq+GedQRbYDVkJtAlGvF7O4/ASXRxjEXTZcwVXNAwtHs4RQEdGME78U7ho\n"
"dThrdzoh0gkAyFCx+3VNACpTp8gxnqncFd7ebEUoYDywgjeBQziLQJcBAoGAfB+J\n"
"9UvxxEVFtVHjJhxvDuwbahpZnX+PmDaJdn+4UJvV1rR4fAkQ69+mNj+ZeKXPBrFt\n"
"dz3QiWv9+xCCuDULJqK1uRKc5I8tGUtGLatslq0tVcbCeOFPYtfnv7XXxxoow2BI\n"
"1Qi1NIbHJtV3ehuGmnQsjlRr7iUe0EAp+1rEf4ECgYEAlG0xduNOFoIDcwkvVPE9\n"
"K131PQZG2gPdj++sxtn+HArwkLfCehWF0rzTgwkTGAetnWIozkIGUMZyARGCNkf/\n"
"KLah5NgArdQ5hPu8hR8E5VoZ7NqMgHmYchsEm9aHX6YdKZKFMHVAjii0OxFNI44G\n"
"CnK97hhaayZ2X4FP2y75Ve0=\n"
"-----END PRIVATE KEY-----\n";
std::string const dh =
"-----BEGIN DH PARAMETERS-----\n"
"MIIBCAKCAQEAnu2ndoqGLBD+d/Sb7YCHTRYX+Eka4ps/M5j9WRjXsfZT4zsOaKDp\n"
"8S0b6lyf09908RPyNnA9DrQC4kvlwYJ5xVoPTGG31w6B89BfiiUsykWczzXYWJNO\n"
"wdOm0L2gpNDJgSlYhThG5ajNFwPVxCIkkmV82ysRoQyJbWhCtlIwRfuPWBVtuF2y\n"
"/NpxMCD77hZD+VoqXdu3TfaCaA/bsGBQQtk5cJ2Xdaz/ZLLiNIpw2wGSe5p4Q4Uu\n"
"GkIcZYhMsDM8sqQ0FE7J0Exx15vwTeGGtkRqdDne7IMdNsAbOO/QCX8dzm+qraI5\n"
"XzKu+eTfSns9AeJTbxNDRy2M0OF/GbeIUwIBAg==\n"
"-----END DH PARAMETERS-----\n";
options.certificate = cert;
options.key = key;
options.dhParams = dh;
}
int SendMessage(std::string host, const std::string& port, const std::string& text, std::function<void()> cb)
{
try
{
// The io_context is required for all I/O
net::io_context ioc;
// These objects perform our I/O
tcp::resolver resolver{ ioc };
websocket::stream<tcp::socket> ws{ ioc };
// Look up the domain name
auto const results = resolver.resolve(host, port);
// Make the connection on the IP address we get from a lookup
auto ep = net::connect(ws.next_layer(), results);
// Update the host_ string. This will provide the value of the
// Host HTTP header during the WebSocket handshake.
// See https://tools.ietf.org/html/rfc7230#section-5.4
host += ':' + std::to_string(ep.port());
// Set a decorator to change the User-Agent of the handshake
ws.set_option(websocket::stream_base::decorator(
[](websocket::request_type& req)
{
req.set(http::field::user_agent,
std::string(BOOST_BEAST_VERSION_STRING) +
" websocket-client-coro");
}));
// Perform the websocket handshake
ws.handshake(host, "/");
// Send the message
ws.write(net::buffer(std::string(text)));
// This buffer will hold the incoming message
beast::flat_buffer buffer;
// Read a message into our buffer
ws.read(buffer);
// Close the WebSocket connection
ws.close(websocket::close_code::normal);
std::stringstream ss;
ss << beast::make_printable(buffer.data());
WALLET_CHECK(ss.str() == "test messagetest message");
}
catch (std::exception const& e)
{
std::cerr << "Error: " << e.what() << std::endl;
WALLET_CHECK(false);
}
catch (...)
{
WALLET_CHECK(false);
// std::cerr << "Error: " << e. << std::endl;
}
cb();
return WALLET_CHECK_RESULT;
}
int SendSecureMessage(std::string host, const std::string& port, const std::string& text, size_t connectionNum, std::function<void()> cb)
{
try
{
// The io_context is required for all I/O
net::io_context ioc;
// The SSL context is required, and holds certificates
ssl::context ctx{ ssl::context::tlsv13_client };
// This holds the root certificate used for verification
boost::system::error_code ec;
LoadRootCertificates(ctx, ec);
// These objects perform our I/O
tcp::resolver resolver{ ioc };
// Look up the domain name
auto const results = resolver.resolve(host, port);
std::vector<websocket::stream<beast::ssl_stream<tcp::socket>>> wsockets;
size_t count = connectionNum;
wsockets.reserve(count);
for (; count > 0; --count)
{
auto& ws = wsockets.emplace_back(ioc, ctx);
// Make the connection on the IP address we get from a lookup
net::connect(ws.next_layer().next_layer(), results.begin(), results.end());
// Perform the SSL handshake
ws.next_layer().handshake(ssl::stream_base::client);
// Set a decorator to change the User-Agent of the handshake
ws.set_option(websocket::stream_base::decorator(
[](websocket::request_type& req)
{
req.set(http::field::user_agent,
std::string(BOOST_BEAST_VERSION_STRING) +
" websocket-client");
}));
// Perform the websocket handshake
ws.handshake(host, "/");
}
for (int j = 0; j < 2; ++j)
{
for (auto& ws : wsockets)
{
for (int i = 0; i < 2; ++i)
{
// create test message
std::stringstream ss;
ss << text << i;
auto testMessage = ss.str();
// Send the message
ws.write(net::buffer(testMessage));
// This buffer will hold the incoming message
beast::flat_buffer buffer;
// Read a message into our buffer
ws.read(buffer);
std::stringstream ss2;
ss2 << beast::make_printable(buffer.data());
WALLET_CHECK(ss2.str() == testMessage + testMessage);
}
}
}
for (auto& ws : wsockets)
{
// Close the WebSocket connection
ws.close(websocket::close_code::normal);
}
}
catch (std::exception const& e)
{
std::cerr << "Error: " << e.what() << std::endl;
WALLET_CHECK(false);
}
catch (...)
{
WALLET_CHECK(false);
// std::cerr << "Error: " << e. << std::endl;
}
cb();
return WALLET_CHECK_RESULT;
}
void fail(beast::error_code ec, char const* what)
{
std::cerr << what << ": " << ec.message() << "\n";
WALLET_CHECK(false);
}
void do_session(
std::string host,
std::string const& port,
std::string const& text,
net::io_context& ioc,
ssl::context& ctx,
net::yield_context yield)
{
beast::error_code ec;
// These objects perform our I/O
tcp::resolver resolver(ioc);
websocket::stream<
beast::ssl_stream<beast::tcp_stream>> ws(ioc, ctx);
// Look up the domain name
auto const results = resolver.async_resolve(host, port, yield[ec]);
if (ec)
return fail(ec, "resolve");
// Set a timeout on the operation
beast::get_lowest_layer(ws).expires_after(std::chrono::seconds(30));
// Make the connection on the IP address we get from a lookup
auto ep = beast::get_lowest_layer(ws).async_connect(results, yield[ec]);
if (ec)
return fail(ec, "connect");
// Update the host_ string. This will provide the value of the
// Host HTTP header during the WebSocket handshake.
// See https://tools.ietf.org/html/rfc7230#section-5.4
host += ':' + std::to_string(ep.port());
// Set a timeout on the operation
beast::get_lowest_layer(ws).expires_after(std::chrono::seconds(30));
// Set a decorator to change the User-Agent of the handshake
ws.set_option(websocket::stream_base::decorator(
[](websocket::request_type& req)
{
req.set(http::field::user_agent,
std::string(BOOST_BEAST_VERSION_STRING) +
" websocket-client-coro");
}));
// Perform the SSL handshake
ws.next_layer().async_handshake(ssl::stream_base::client, yield[ec]);
if (ec)
return fail(ec, "ssl_handshake");
// Turn off the timeout on the tcp_stream, because
// the websocket stream has its own timeout system.
beast::get_lowest_layer(ws).expires_never();
// Set suggested timeout settings for the websocket
ws.set_option(
websocket::stream_base::timeout::suggested(
beast::role_type::client));
// Perform the websocket handshake
ws.async_handshake(host, "/", yield[ec]);
if (ec)
return fail(ec, "handshake");
for (int i = 0; i < 1000; ++i)
{
// create test message
std::stringstream ss;
ss << text << i;
auto testMessage = ss.str();
// Send the message
ws.async_write(net::buffer(testMessage), yield[ec]);
if (ec)
return fail(ec, "write");
// This buffer will hold the incoming message
beast::flat_buffer buffer;
// Read a message into our buffer
ws.async_read(buffer, yield[ec]);
if (ec)
return fail(ec, "read");
std::stringstream ss2;
ss2 << beast::make_printable(buffer.data());
auto t = ss2.str();
WALLET_CHECK(std::equal(t.begin(), t.begin()+ testMessage.size(), testMessage.begin(), testMessage.end()));
WALLET_CHECK(std::equal(t.begin() + testMessage.size(), t.end(), testMessage.begin(), testMessage.end()));
}
// Close the WebSocket connection
ws.async_close(websocket::close_code::normal, yield[ec]);
if (ec)
return fail(ec, "close");
// If we get here then the connection is closed gracefully
}
int SendSecureMessage2(std::string host, const std::string& port, const std::string& text, size_t connectionNum, std::function<void()> cb)
{
std::cout << "Client started..." << std::endl;
try
{
// The io_context is required for all I/O
net::io_context ioc;
// The SSL context is required, and holds certificates
ssl::context ctx{ ssl::context::tlsv13_client };
// This holds the root certificate used for verification
boost::system::error_code ec;
LoadRootCertificates(ctx, ec);
for (size_t i = 0; i < connectionNum; ++i)
{
boost::asio::spawn(ioc, std::bind(
&do_session,
std::string(host),
std::string(port),
std::string(text),
std::ref(ioc),
std::ref(ctx),
std::placeholders::_1));
}
ioc.run();
}
catch (std::exception const& e)
{
std::cerr << "Error: " << e.what() << std::endl;
WALLET_CHECK(false);
}
catch (...)
{
WALLET_CHECK(false);
// std::cerr << "Error: " << e. << std::endl;
}
cb();
std::cout << "Client done" << std::endl;
return WALLET_CHECK_RESULT;
}
template<typename H>
class MyWebSocketServer : public WebSocketServer
{
public:
MyWebSocketServer(SafeReactor::Ptr reactor, const Options& options)
: WebSocketServer(std::move(reactor), options)
{
}
virtual ~MyWebSocketServer() = default;
private:
WebSocketServer::ClientHandler::Ptr ReactorThread_onNewWSClient(WebSocketServer::SendFunc wsSend, WebSocketServer::CloseFunc wsClose) override
{
return std::make_shared<H>(wsSend, wsClose);
}
};
void PlainWebsocketTest()
{
std::cout << "Plain Web Socket test" << std::endl;
try
{
struct MyClientHandler : WebSocketServer::ClientHandler
{
WebSocketServer::SendFunc m_wsSend;
WebSocketServer::CloseFunc m_wsClose;
                MyClientHandler(WebSocketServer::SendFunc wsSend, WebSocketServer::CloseFunc wsClose)
                    : m_wsSend(wsSend)
                    , m_wsClose(wsClose)
                {}
void ReactorThread_onWSDataReceived(std::string&& message) override
{
std::cout << "Message: " << message << std::endl;
WALLET_CHECK(message == "test message");
m_wsSend(message + message);
}
};
SafeReactor::Ptr safeReactor = SafeReactor::create();
io::Reactor::Ptr reactor = safeReactor->ptr();
io::Reactor::Scope scope(*reactor);
WebSocketServer::Options options;
options.port = 8200;
MyWebSocketServer<MyClientHandler> server(safeReactor, options);
std::thread t1(SendMessage, "127.0.0.1", "8200", "test message", [reactor]() {reactor->stop(); });
reactor->run();
t1.join();
}
catch (...)
{
WALLET_CHECK(false);
}
}
void SecureWebsocketTest(size_t clientCount = 1, size_t connections=10)
{
std::cout << "Secure Web Socket test [" << clientCount<< "," << connections << "]" <<std::endl;
try
{
struct MyClientHandler : WebSocketServer::ClientHandler
{
WebSocketServer::SendFunc m_wsSend;
WebSocketServer::CloseFunc m_wsClose;
                MyClientHandler(WebSocketServer::SendFunc wsSend, WebSocketServer::CloseFunc wsClose)
                    : m_wsSend(wsSend)
                    , m_wsClose(wsClose)
                {}
void ReactorThread_onWSDataReceived(std::string&& message) override
{
//std::cout << "Secure Message: " << message << std::endl;
//WALLET_CHECK(message == "test message");
m_wsSend(message + message);
}
};
SafeReactor::Ptr safeReactor = SafeReactor::create();
io::Reactor::Ptr reactor = safeReactor->ptr();
io::Reactor::Scope scope(*reactor);
size_t count = clientCount *3;
auto timer = io::Timer::create(*reactor);
auto cb = [&count, reactor, &timer]()
{
//std::cout << count << std::endl;
if (--count == 0)
{
if (WebsocketSession<SecureWebsocketSession>::counter == 0)
{
reactor->stop();
}
else
{
timer->start(10000, false, [reactor]() {reactor->stop(); });
}
}
};
WebSocketServer::Options options;
options.port = 8202;
options.useTls = true;
LoadServerCertificate(options);
MyWebSocketServer<MyClientHandler> server(safeReactor, options);
std::vector<std::thread> threads;
threads.reserve(count);
std::string message = "test message";
message.resize(500000);
std::generate(message.begin(), message.end(), []() { return 'a'; });// (std::string::value_type)std::rand(); });
for (size_t i = 0; i < clientCount; ++i)
{
threads.emplace_back(SendSecureMessage2, "127.0.0.1", "8202", message, connections, cb);
threads.emplace_back(SendSecureMessage, "127.0.0.1", "8202", "test message", 2, cb);
threads.emplace_back(SendMessage, "127.0.0.1", "8202", "test message", cb);
}
reactor->run();
for (auto& t : threads)
{
t.join();
}
std::cout << "counter: " << WebsocketSession<SecureWebsocketSession>::counter << std::endl;
WALLET_CHECK(WebsocketSession<SecureWebsocketSession>::counter == 0);
}
catch (...)
{
WALLET_CHECK(false);
}
}
}
int main()
{
int logLevel = LOG_LEVEL_WARNING;
auto logger = beam::Logger::create(logLevel, logLevel);
PlainWebsocketTest();
//SecureWebsocketTest();
SecureWebsocketTest(1, 1);
return WALLET_CHECK_RESULT;
} |
lemmas prime_dvd_mult_int = prime_dvd_mult_iff[where ?'a = int] |
import unitb.models.nondet
import unitb.refinement.basic
namespace nondet
open temporal
open predicate
open unitb
universe variable u
section defs
variables {α β : Type}
structure evt_ref (lbl : Type) (mc : program α) (ea : event α) (ecs : lbl → event α) : Type :=
(witness : lbl → α → Prop)
(witness_fis : ⦃ ∃∃ e, witness e ⦄)
(sim : ∀ ec, ⟦ (ecs ec).step_of ⟧ ⟹ ⟦ ea.step_of ⟧)
(delay : ∀ ec, witness ec ⋀ ea.coarse_sch ⋀ ea.fine_sch ↦ witness ec ⋀ (ecs ec).coarse_sch in mc)
(stable : ∀ ec, unless_except mc (witness ec ⋀ (ecs ec).coarse_sch) (-ea.coarse_sch) { e | ∃ l, ecs l = e })
(resched : ∀ ec, ea.coarse_sch ⋀ ea.fine_sch ⋀ witness ec ↦ (ecs ec).fine_sch in mc)
structure refined (ma mc : program α) : Type :=
(sim_init : mc^.first ⟹ ma^.first)
(ref : option mc.lbl → option ma.lbl → Prop)
(evt_sim : ∀ ec, ⟦ mc.step_of ec ⟧ ⟹ ∃∃ ea : { ea // ref ec ea }, ⟦ ma.step_of ea.val ⟧)
(events : ∀ ae, evt_ref { ec // ref ec ae } mc (ma.event ae) (λ ec, mc.event ec.val) )
lemma refined.sim {ma mc : program α}
(R : refined ma mc)
: ⟦ is_step mc ⟧ ⟹ ⟦ is_step ma ⟧ :=
begin
simp [is_step_exists_event'],
intro τ,
intros H,
cases H with ce H,
apply exists_imp_exists' subtype.val _ (R.evt_sim ce τ H),
intro, apply id,
end
end defs
section soundness
parameters {α β : Type}
parameter (ma : program α)
parameter (mc : program α)
open temporal
parameter R : refined ma mc
parameter τ : stream α
parameter M₁ : system_sem.ex mc τ
section schedules
parameter e : option ma.lbl
@[reducible]
def imp_lbl := { ec : option mc.lbl // R.ref ec e }
def AC := (program.event ma e).coarse_sch
def AF := (program.event ma e).fine_sch
def W (e' : imp_lbl) := (R.events e).witness e'
def CC (e' : option mc.lbl) := mc.coarse_sch_of e'
def CF (e' : option mc.lbl) := mc.fine_sch_of e'
parameter abs_coarse : (◇◻(•AC ⋀ -⟦ ma.step_of e ⟧)) τ
parameter abs_fine : (◻◇•AF) τ
include M₁
include abs_coarse
include abs_fine
lemma abs_coarse_and_fine
: (◻◇(•AC ⋀ •AF)) τ :=
begin
apply coincidence,
{ apply stable_entails_stable _ _ abs_coarse,
apply λ _, and.left },
{ apply abs_fine },
end
lemma conc_coarse : ∃ e', (◇◻(• W e' ⋀ • CC e'.val) ) τ :=
begin
have H : ((∃∃ e', ◇◻(• W ma mc R e e' ⋀ • CC mc e'.val))
⋁ ◻◇((-•AC ma e) ⋁ ∃∃ e' : imp_lbl ma mc R e, ⟦ mc.step_of e'.val ⟧)) τ,
{ rw exists_action,
apply p_or_p_imp_p_or_right _ (unless_sem_exists' M₁.safety (R.events e).stable _),
{ apply inf_often_entails_inf_often,
apply p_or_p_imp_p_or_right' _,
apply action_entails_action,
intros σ σ',
simp [imp_lbl,mem_set_of],
intros ec H x H' H₁,
existsi x,
cases H with H₀ H,
cases H with H₂ STEP,
unfold program.step_of,
simp [H₂,event.step_of,STEP,H₀,H₁,H'], },
have H' := leads_to.gen_disj' (R.events e).delay,
apply inf_often_of_leads_to (system_sem.leads_to_sem H' _ M₁),
simp,
have H' := ew_eq_true (R.events e).witness_fis,
rw [← p_and_over_p_exists_right
,← p_and_over_p_exists_right],
simp [H'],
apply abs_coarse_and_fine ma mc _ M₁ _ abs_coarse abs_fine, },
simp at H,
cases H with H H,
{ exfalso,
revert abs_coarse,
change ¬ _,
rw [p_not_eq_not,not_eventually,not_henceforth,p_not_p_and,p_not_p_not_iff_self],
apply inf_often_entails_inf_often _ _ H,
apply p_or_p_imp_p_or_right',
rw p_exists_entails_eq_p_forall_entails,
intros ec,
apply (R.events e).sim _ , },
{ simp [H], },
end
lemma conc_fine : ∀ e',
(◇◻•W e') τ →
(◻◇•CF e'.val) τ :=
begin
intros e' H,
have H' := system_sem.leads_to_sem ((R.events e).resched e') _ M₁,
apply inf_often_of_leads_to H',
rw p_and_comm,
apply coincidence H,
apply abs_coarse_and_fine _ _ _ M₁ _ abs_coarse abs_fine,
end
end schedules
include M₁
include R
theorem soundness : system_sem.ex ma τ :=
begin
apply nondet.program.ex.mk,
{ apply R.sim_init,
apply M₁.init },
{ intro i,
apply R.sim,
apply M₁.safety },
{ intros e COARSE₀ FINE₀,
apply assume_neg _, intro ACT,
have COARSE₁ : (◇◻(•AC ma e ⋀ -⟦program.step_of ma e⟧)) τ,
{ rw [p_not_eq_not,not_henceforth,not_eventually] at ACT,
apply stable_and_of_stable_of_stable COARSE₀ ACT },
clear COARSE₀ ACT,
cases conc_coarse ma mc R τ M₁ _ COARSE₁ FINE₀ with e' C_COARSE',
have C_COARSE : (◇◻•CC mc e'.val) τ,
{ apply stable_entails_stable _ _ C_COARSE',
intro, apply and.right },
have WIT : (◇◻•W ma mc R e e') τ,
{ apply stable_entails_stable _ _ C_COARSE',
intro, apply and.left },
have C_FINE := conc_fine ma mc R τ M₁ e COARSE₁ FINE₀ e' WIT,
apply inf_often_entails_inf_often _ _ (M₁.liveness _ C_COARSE C_FINE),
have H := (R.events e).sim e',
apply H, },
end
end soundness
end nondet
|
#ifndef BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_CLANG_HPP_INCLUDED
#define BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_CLANG_HPP_INCLUDED
// MS compatible compilers support #pragma once
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
#pragma once
#endif
// detail/sp_counted_base_clang.hpp - __c11 clang intrinsics
//
// Copyright (c) 2007, 2013, 2015 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/smart_ptr/detail/sp_noexcept.hpp>
#include <boost/smart_ptr/detail/sp_typeinfo_.hpp>
namespace boost {
namespace detail {
typedef _Atomic(boost::int_least32_t) atomic_int_least32_t;
inline void atomic_increment(atomic_int_least32_t *pw) BOOST_SP_NOEXCEPT {
__c11_atomic_fetch_add(pw, 1, __ATOMIC_RELAXED);
}
inline boost::int_least32_t
atomic_decrement(atomic_int_least32_t *pw) BOOST_SP_NOEXCEPT {
return __c11_atomic_fetch_sub(pw, 1, __ATOMIC_ACQ_REL);
}
inline boost::int_least32_t
atomic_conditional_increment(atomic_int_least32_t *pw) BOOST_SP_NOEXCEPT {
// long r = *pw;
// if( r != 0 ) ++*pw;
// return r;
boost::int_least32_t r = __c11_atomic_load(pw, __ATOMIC_RELAXED);
for (;;) {
if (r == 0) {
return r;
}
if (__c11_atomic_compare_exchange_weak(pw, &r, r + 1, __ATOMIC_RELAXED,
__ATOMIC_RELAXED)) {
return r;
}
}
}
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wweak-vtables"
#endif
class BOOST_SYMBOL_VISIBLE sp_counted_base {
private:
sp_counted_base(sp_counted_base const &);
sp_counted_base &operator=(sp_counted_base const &);
atomic_int_least32_t use_count_; // #shared
atomic_int_least32_t weak_count_; // #weak + (#shared != 0)
public:
sp_counted_base() BOOST_SP_NOEXCEPT {
__c11_atomic_init(&use_count_, 1);
__c11_atomic_init(&weak_count_, 1);
}
virtual ~sp_counted_base() /*BOOST_SP_NOEXCEPT*/
{}
// dispose() is called when use_count_ drops to zero, to release
// the resources managed by *this.
virtual void dispose() BOOST_SP_NOEXCEPT = 0; // nothrow
// destroy() is called when weak_count_ drops to zero.
virtual void destroy() BOOST_SP_NOEXCEPT // nothrow
{
delete this;
}
virtual void *get_deleter(sp_typeinfo_ const &ti) BOOST_SP_NOEXCEPT = 0;
virtual void *get_local_deleter(sp_typeinfo_ const &ti) BOOST_SP_NOEXCEPT = 0;
virtual void *get_untyped_deleter() BOOST_SP_NOEXCEPT = 0;
void add_ref_copy() BOOST_SP_NOEXCEPT { atomic_increment(&use_count_); }
bool add_ref_lock() BOOST_SP_NOEXCEPT // true on success
{
return atomic_conditional_increment(&use_count_) != 0;
}
void release() BOOST_SP_NOEXCEPT {
if (atomic_decrement(&use_count_) == 1) {
dispose();
weak_release();
}
}
void weak_add_ref() BOOST_SP_NOEXCEPT { atomic_increment(&weak_count_); }
void weak_release() BOOST_SP_NOEXCEPT {
if (atomic_decrement(&weak_count_) == 1) {
destroy();
}
}
long use_count() const BOOST_SP_NOEXCEPT {
return __c11_atomic_load(const_cast<atomic_int_least32_t *>(&use_count_),
__ATOMIC_ACQUIRE);
}
};
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
} // namespace detail
} // namespace boost
#endif // #ifndef BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_CLANG_HPP_INCLUDED
|
open import Agda.Builtin.List
open import Agda.Builtin.Unit
open import Agda.Builtin.Nat
open import Agda.Builtin.String
renaming (primShowNat to show)
open import Agda.Builtin.Reflection
renaming (bindTC to _>>=_; returnTC to return)
pattern vArg t = arg (arg-info visible relevant) t
pattern var₀ x = var x []
infixr 10 _++_
_++_ = primStringAppend
postulate
whatever : ∀ {a} {A : Set a} → A
macro
test1 : Nat → Term → TC _
test1 n _ =
extendContext (vArg (quoteTerm Nat)) do
var₀ i ← quoteTC n where _ → whatever
m ← unquoteTC {A = Nat} (var₀ 0)
var₀ j ← quoteTC m where _ → whatever
extendContext (vArg (quoteTerm Nat)) do
var₀ k ← quoteTC n where _ → whatever
var₀ l ← quoteTC m where _ → whatever
typeError (strErr (show i ++ show k ++ show j ++ show l) ∷ [])
test2 : Term → TC _
test2 hole = do
st ← quoteTC Set
t ← extendContext (vArg st) do
v ← unquoteTC {A = Set} (var₀ zero)
extendContext (vArg (var₀ zero)) do
_ ← unquoteTC {A = v} (var₀ zero)
return tt
u ← quoteTC t
unify hole u
test3 : Nat → Term → TC _
test3 n _ = do
m ← extendContext (vArg (quoteTerm Nat)) (return n)
var₀ i ← quoteTC m where _ → whatever
typeError (strErr (show i) ∷ [])
localvar : Term → TC _
localvar _ = do
m ← extendContext (vArg (quoteTerm Nat)) (unquoteTC {A = Nat} (var₀ 0))
typeError (strErr (show m) ∷ [])
|
Formal statement is: lemma bigthetaI [intro]: "f \<in> O[F](g) \<Longrightarrow> f \<in> \<Omega>[F](g) \<Longrightarrow> f \<in> \<Theta>[F](g)" Informal statement is: If $f$ is $O(g)$ and $\Omega(g)$, then $f$ is $\Theta(g)$. |
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE FlexibleInstances #-}
module Math.Matrix
( DimVector (..)
, DimMatrix (..)
, GPConstraint
, withMat
, toMatrix
, mulM
, transposeM
, mapMM
, mapDiagonalM
, invSM
, substractMeanM
, (+^^)
, (-^^)
, (*^^)
, cholM
, foldAllSM
, sumAllSM
, trace2SM
, toDimMatrix
, detSM
, toMatrixM'
, zipWithDim
, zipWithArray
, identM
, delayMatrix
, linearSolveM
, vectorLength
, randomMatrixD
, matrixRowsNum
, matrixColsNum
, pinvSM
, mulPM
, eigSHM
)
where
import Universum hiding (Vector, transpose, map, natVal,
zipWith)
import GHC.TypeLits hiding (someNatVal)
import Data.Array.Repa
import Data.Random.Normal (normals)
import Data.Vector.Unboxed.Base (Unbox)
import Data.Vinyl.TypeLevel (AllConstrained)
--import Numeric.Dimensions
import Numeric.LinearAlgebra.Repa hiding (Matrix, Vector)
import System.Random (Random, RandomGen)
import PCA.Types
import PCA.Util hiding (randomMatrixD, zipWithArray)
newtype DimVector r (n :: Nat) a
= DimVector { runVector :: Vector r a }
deriving Eq
newtype DimMatrix r (y :: Nat) (x :: Nat) a
= DimMatrix { getInternal :: Matrix r a}
instance Functor (DimVector D n) where
fmap f (DimVector vector) = DimVector (smap f vector)
withMat
:: Matrix D a
-> (forall x y. (KnownNat x, KnownNat y) => DimMatrix D x y a -> k)
-> k
withMat m f =
let (Z :. x :. y) = extent m
in
case someNatVal (fromIntegral x) of
SomeNat (Proxy :: Proxy m) -> case someNatVal (fromIntegral y) of
SomeNat (Proxy :: Proxy n) -> f (DimMatrix @_ @m @n m)
mulM
:: forall y1 x1 y2 x2 a.
( AllConstrained KnownNat [x1, x2, y1, y2]
, Numeric a
, x1 ~ y2
)
=> DimMatrix D y1 x1 a
-> DimMatrix D y2 x2 a
-> DimMatrix D y1 x2 a
mulM (DimMatrix m1) (DimMatrix m2) = DimMatrix $ delay $ m1 `mulS` m2
transposeM
:: (KnownNat y, KnownNat x)
=> DimMatrix D y x a
-> DimMatrix D x y a
transposeM (DimMatrix m) = DimMatrix $ transpose m
mapMM
::
( KnownNat y
, KnownNat x
, Unbox a
, Unbox b
)
=> (a -> b)
-> DimMatrix D y x a
-> DimMatrix D y x b
mapMM f (DimMatrix m) = DimMatrix $ map f m
mapDiagonalM
::
( KnownNat y
, KnownNat x
, Unbox a
)
=> (a -> a)
-> DimMatrix D y x a
-> DimMatrix D y x a
mapDiagonalM f (DimMatrix m) = DimMatrix $ mapDiagonal f m
invSM
::
( KnownNat y
, KnownNat x
, Field a
, Numeric a
, y ~ x
)
=> DimMatrix D y x a
-> DimMatrix D y x a
invSM (DimMatrix m) = DimMatrix $ delay $ invS m
substractMeanM
::
( KnownNat y
, KnownNat x
)
=> DimMatrix D y x Double
-> DimMatrix D y x Double
substractMeanM (DimMatrix m) = DimMatrix $ substractMean m
infixl 6 +^^, -^^
infixl 7 *^^
(+^^)
:: forall y1 x1 y2 x2 a.
( AllConstrained KnownNat [x1, x2, y1, y2]
, x1 ~ x2
, y1 ~ y2
, Num a
)
=> DimMatrix D y1 x1 a
-> DimMatrix D y2 x2 a
-> DimMatrix D y2 x2 a
(+^^) (DimMatrix m1) (DimMatrix m2) = DimMatrix $ m1 +^ m2
(-^^)
:: forall y1 x1 y2 x2 a.
( AllConstrained KnownNat [x1, x2, y1, y2]
, x1 ~ x2
, y1 ~ y2
, Num a
)
=> DimMatrix D y1 x1 a
-> DimMatrix D y2 x2 a
-> DimMatrix D y2 x2 a
(-^^) (DimMatrix m1) (DimMatrix m2) = DimMatrix $ m1 -^ m2
(*^^)
:: forall y1 x1 y2 x2 a.
( AllConstrained KnownNat [x1, x2, y1, y2]
, x1 ~ x2
, y1 ~ y2
, Num a
)
=> DimMatrix D y1 x1 a
-> DimMatrix D y2 x2 a
-> DimMatrix D y2 x2 a
(*^^) (DimMatrix m1) (DimMatrix m2) = DimMatrix $ m1 *^ m2
cholM
::
( KnownNat y
, KnownNat x
, Field a
, y ~ x
)
=> DimMatrix D y x a
-> DimMatrix D y x a
cholM (DimMatrix m) = DimMatrix $ delay $ chol $ trustSym $ computeS m
sumAllSM
::
( KnownNat y
, KnownNat x
, Num a)
=> DimMatrix D y x a
-> a
sumAllSM (DimMatrix m) = sumAllS m
foldAllSM
:: (KnownNat y, KnownNat x)
=> (Double -> Double -> Double)
-> Double
-> DimMatrix D y x Double
-> Double
foldAllSM f initValue (DimMatrix m) = foldAllS f initValue m
detSM
:: (KnownNat y, KnownNat x)
=> DimMatrix D y x Double
-> Double
detSM (DimMatrix m) = detS m
trace2SM
:: (KnownNat x, KnownNat y)
=> DimMatrix D x y Double
-> Double
trace2SM (DimMatrix m) = trace2S $ computeS m
toDimMatrix
::
( Source r a
, KnownNat m
, KnownNat n
)
=> DimVector r m a
-> Int
-> DimMatrix D m n a
toDimMatrix (DimVector arr) desiredSize =
DimMatrix (toMatrix arr desiredSize)
toMatrixM'
:: (Source r a, KnownNat n, KnownNat m)
=> DimVector r n a
-> DimMatrix D n m a
toMatrixM' (DimVector arr) =
DimMatrix $ fromFunction (Z :. dimension :. 1) generator
where
dimension = size . extent $ arr
generator (Z :. rows :. _) = linearIndex arr rows
zipWithDim
::
( Source r1 a
, Source r2 b
, KnownNat m
, KnownNat n
)
=> (a -> b -> c)
-> DimMatrix r1 m n a
-> DimMatrix r2 m n b
-> DimMatrix D m n c
zipWithDim f (DimMatrix mat1) (DimMatrix mat2) =
DimMatrix $ zipWith f mat1 mat2
zipWithArray
::
( KnownNat n
, KnownNat m
)
=> (a -> b -> c)
-> DimVector D n a
-> DimMatrix D n m b
-> DimMatrix D n m c
zipWithArray f (DimVector array1) (DimMatrix array2) =
DimMatrix $ zipWith f (toMatrix array1 n) array2
where
(Z :. n :. _) = extent array2
identM
:: forall m n a.
( KnownNat m
, KnownNat n
, GPConstraint a
, m ~ n
)
=> DimMatrix D m n a
identM =
let dim = fromEnum $
natVal @m @Proxy Proxy in DimMatrix $
identD dim
delayMatrix
::
( KnownNat m
, KnownNat n
, Source r a
)
=> DimMatrix r m n a
-> DimMatrix D m n a
delayMatrix (DimMatrix matrix) = DimMatrix (delay matrix)
linearSolveM
::
( Field a
, AllConstrained KnownNat '[m, n]
)
=> DimMatrix D m m a
-> DimMatrix D m n a
-> Maybe (DimMatrix D m n a)
linearSolveM (DimMatrix mat1) (DimMatrix mat2) =
case linearSolveS mat1 mat2 of
Nothing -> Nothing
Just sol -> Just . delayMatrix . DimMatrix $ sol
vectorLength
:: forall r m a. KnownNat m
=> DimVector r m a
-> Int
vectorLength _ = fromEnum $ natVal (Proxy @m)
matrixRowsNum
:: forall r m n a. KnownNat m
=> DimMatrix r m n a
-> Int
matrixRowsNum _ = fromEnum $ natVal (Proxy @m)
matrixColsNum
:: forall r m n a. KnownNat n
=> DimMatrix r m n a
-> Int
matrixColsNum _ = fromEnum $ natVal (Proxy @n)
randomMatrixD
:: forall a g m n.
( RandomGen g
, Random a
, Unbox a
, Floating a
, KnownNat m
, KnownNat n
)
=> g
-> (Int, Int)
-> DimMatrix D m n a
randomMatrixD gen (rows, cols) =
let randomList = take (rows * cols) (normals gen) in
DimMatrix . delay $ fromListUnboxed (Z :. rows :. cols) randomList
type GPConstraint a =
( Field a
, Random a
, Unbox a
, Floating a
, Eq a
)
{-
hermToMatrixM
:: Herm a
-> DimMatrix D m n a
hermToMatrixM = undefined
-}
pinvSM
::
( KnownNat y
, KnownNat x
, Field a
, Numeric a
, y ~ x
)
=> DimMatrix D y x a
-> DimMatrix D y x a
pinvSM (DimMatrix m) = DimMatrix . delay $ pinvS m
mulPM
:: forall y1 x1 y2 x2 a.
( AllConstrained KnownNat [x1, x2, y1, y2]
, Numeric a
, x1 ~ y2
)
=> DimMatrix D y1 x1 a
-> DimMatrix D y2 x2 a
-> DimMatrix D y1 x2 a
mulPM (DimMatrix m) (DimMatrix n) =
DimMatrix . delay . runIdentity $ m `mulP` n
eigSHM
:: (Field a, Numeric a)
=> Herm a
-> (DimVector D m Double, DimMatrix D m m a)
eigSHM hermM =
(DimVector $ delay $ fst out, DimMatrix $ delay $ snd out)
where
out = eigSH hermM
|
/-
Copyright (c) 2022 Tomaz Gomes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Tomaz Gomes.
-/
import data.list.sort tactic
import data.nat.log
/-!
# Timed Insertion Sort
This file defines a new version of Insertion Sort that, besides sorting the input list, counts the
number of comparisons made during the execution of the algorithm. Also, it presents proofs of
its time complexity and its equivalence to the one defined in data/list/sort.lean
## Main Definition
- Timed.insertion_sort : list α → (list α × ℕ)
## Main Results
- Timed.insertion_sort_complexity :
∀ l : list α, (Timed.insertion_sort r l).snd ≤ l.length * l.length
- Timed.insertion_sort_equivalence :
∀ l : list α, (Timed.insertion_sort r l).fst = list.insertion_sort r l
-/
variables {α : Type} (r : α → α → Prop) [decidable_rel r]
local infix ` ≼ ` : 50 := r
namespace Timed
@[simp] def ordered_insert (a : α) : list α → (list α × ℕ)
| [] := ([a], 0)
| (h :: t) := if a ≼ h then (a :: h :: t, 1)
else let (l', n) := ordered_insert t in (h :: l', n + 1)
#eval ordered_insert (≤) 2 [5, 3, 1, 4]
-- ([2, 5, 3, 1, 4], 1)
#eval ordered_insert (≤) 9 [1, 0, 8]
-- ([1, 0, 8, 9], 3)
@[simp] def insertion_sort : list α → (list α × ℕ)
| [] := ([], 0)
| (h :: t) := let (l', n) := (insertion_sort t), (l'', m) := ordered_insert r h l'
in (l'', n + m)
#eval insertion_sort (≤) [1, 2, 3, 4, 5]
-- ([1, 2, 3, 4, 5], 4)
#eval insertion_sort (≤) [5, 4, 3, 2, 1]
-- ([1, 2, 3, 4, 5], 10)
theorem ordered_insert_complexity (a : α) :
∀ l : list α, (ordered_insert r a l).snd ≤ l.length :=
begin
intro l,
induction l,
{ simp only [list.length, ordered_insert], },
{ simp only [list.length, ordered_insert], split_ifs,
{ simp only [zero_le, le_add_iff_nonneg_left], },
{ cases (ordered_insert r a l_tl),
unfold ordered_insert,
linarith,
}
}
end
theorem ordered_insert_equivalence (a : α) : ∀ l : list α,
(ordered_insert r a l).fst = list.ordered_insert r a l :=
begin
intro l,
induction l,
{ simp only [list.ordered_insert_nil, ordered_insert, eq_self_iff_true, and_self], },
{ simp only [list.ordered_insert, ordered_insert], split_ifs,
{ simp only [eq_self_iff_true, and_self], },
{ cases (ordered_insert r a l_tl),
unfold ordered_insert,
simp only [true_and, eq_self_iff_true],
exact l_ih,
}
}
end
theorem ordered_insert_length (a : α) : ∀ l : list α,
(ordered_insert r a l).fst.length = l.length + 1 :=
begin
intro l,
rw ordered_insert_equivalence r a l,
exact list.ordered_insert_length r l a,
end
theorem insertion_sort_preserves_length : ∀ l : list α,
(insertion_sort r l).fst.length = l.length :=
begin
intro l,
induction l,
{ simp only [insertion_sort], },
{ simp only [insertion_sort, list.length],
cases (insertion_sort r l_tl) with sorted_tl _,
unfold insertion_sort,
have ordered_length : (ordered_insert r l_hd sorted_tl).fst.length = sorted_tl.length + 1 :=
ordered_insert_length r l_hd sorted_tl,
cases (ordered_insert r l_hd sorted_tl) with sorted_list _,
unfold insertion_sort,
rw ordered_length,
rw l_ih,
}
end
theorem insertion_sort_complexity :
∀ l : list α, (insertion_sort r l).snd ≤ l.length * l.length :=
begin
intro l,
induction l,
{ simp only [insertion_sort, list.length, mul_zero], },
{ simp only [insertion_sort, list.length],
have same_lengths : (insertion_sort r l_tl).fst.length = l_tl.length :=
insertion_sort_preserves_length r l_tl,
cases (insertion_sort r l_tl) with sorted_tl ops,
unfold insertion_sort,
have hh : (ordered_insert r l_hd sorted_tl).snd ≤ sorted_tl.length :=
ordered_insert_complexity r l_hd sorted_tl,
cases (ordered_insert r l_hd sorted_tl),
unfold insertion_sort,
linarith,
}
end
theorem insertion_sort_equivalence : ∀ l : list α,
(insertion_sort r l).fst = list.insertion_sort r l :=
begin
intro l,
induction l,
{ simp only [insertion_sort, list.insertion_sort], },
{ simp only [insertion_sort, list.insertion_sort],
rw ← l_ih,
cases insertion_sort r l_tl,
unfold insertion_sort,
rw ← ordered_insert_equivalence r l_hd fst,
cases ordered_insert r l_hd fst,
unfold insertion_sort,
}
end
end Timed
|
Provide technical support to customers and employees in the form of sales presentations, product related training programs, and customer seminars.
Provide Product Line specific Technical Sales support for the organization through direct sales presentations, training programs, and customer seminars.
Develop sales aids in the form of presentation material, CDs, documentation, and procedure manuals required for specific projects.
Assist with the development of quotations and Product Line related bid packages.
Provide technical support for Regional Sales to present current technology in a direct selling effort.
Train Regional Sales on current technologies.
Act as a market input source to Product Line Management for new product opportunities.
Assist in development of annual Revenue Budget on Product Line related basis.
Develop and present technical papers.
Interface between customers, Product Line Managers, and Engineers on new product requirements. |
---
author: Nathan Carter ([email protected])
---
This answer assumes you have imported SymPy as follows.
```python
from sympy import * # load all math functions
init_printing( use_latex='mathjax' ) # use pretty math output
```
Let's create a simple example. We'll be approximating $f(x)=\sin x$
centered at $a=0$ with a Taylor series of degree $n=5$. We will be
applying our approximation at $x_0=1$. What is the error bound?
```python
var( 'x' )
formula = sin(x)
a = 0
x_0 = 1
n = 5
```
We will not ask SymPy to compute the formula exactly, but will instead
have it sample a large number of $c$ values from the interval in question,
and compute the maximum over those samples. (The exact solution can be too
hard for SymPy to compute.)
```python
# Get 1001 evenly-spaced c values:
cs = [ Min(x_0,a) + abs(x_0-a)*i/1000 for i in range(1001) ]
# Create the formula |f^(n+1)(x)|:
formula2 = abs( diff( formula, x, n+1 ) )
# Find the max of it on all the 1000 values:
m = Max( *[ formula2.subs(x,c) for c in cs ] )
# Compute the error bound:
N( abs(x_0-a)**(n+1) / factorial(n+1) * m )
```
$\displaystyle 0.00116870970112208$
The error is at most $0.00116871\ldots$.
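As an optional sanity check (reusing the variables defined above), we can compare
this bound with the actual error made by the degree-$n$ Taylor polynomial at $x_0$,
built here with SymPy's `series` function.

```python
# Build the degree-n Taylor polynomial of the formula, centered at a:
taylor_poly = formula.series( x, a, n+1 ).removeO()
# Compute the actual approximation error at x_0:
N( abs( formula.subs(x,x_0) - taylor_poly.subs(x,x_0) ) )
```

The actual error comes out to roughly $0.000196$, which is indeed below the bound
computed above.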
|
[STATEMENT]
theorem p20: "\<forall>x y. \<exists>z. \<forall>w. P(x) \<and> Q(y) \<longrightarrow> R(z) \<and> S(w) \<Longrightarrow>
\<exists>x y. P(x) \<and> Q(y) \<Longrightarrow> \<exists>z. R(z)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>x y. \<exists>z. \<forall>w. P x \<and> Q y \<longrightarrow> R z \<and> S w; \<exists>x y. P x \<and> Q y\<rbrakk> \<Longrightarrow> \<exists>z. R z
[PROOF STEP]
@proof
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>x y. \<exists>z. \<forall>w. P x \<and> Q y \<longrightarrow> R z \<and> S w; \<exists>x y. P x \<and> Q y\<rbrakk> \<Longrightarrow> \<exists>z. R z
[PROOF STEP]
@obtain x y where "P(x)" "Q(y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>x y. \<exists>z. \<forall>w. P x \<and> Q y \<longrightarrow> R z \<and> S w; \<exists>x y. P x \<and> Q y\<rbrakk> \<Longrightarrow> \<exists>z. R z
[PROOF STEP]
@obtain z where "\<forall>w. P(x) \<and> Q(y) \<longrightarrow> R(z) \<and> S(w)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>x y. \<exists>z. \<forall>w. P x \<and> Q y \<longrightarrow> R z \<and> S w; \<exists>x y. P x \<and> Q y\<rbrakk> \<Longrightarrow> \<exists>z. R z
[PROOF STEP]
@qed |
data Size : Set where
↑ : Size → Size
↑ ()
data N : Size → Set where
suc : ∀{i} (a : N i) → N (↑ i)
data Val : ∀{i} (t : N i) → Set where
val : ∀{i} (n : N i) → Val (suc n)
record R (j : Size) : Set where
field num : N j
data W {j} (ft : R j) : Set where
immed : (v : Val (R.num ft)) → W ft
postulate
E : ∀{j} (ft : R j) (P : (w : W ft) → Set) → Set
test : ∀ {j} (ft : R j) → Set
test {j} ft = E {j} ft testw
where
testw : ∀ {ft : R _} (w : W ft) → Set
testw (immed (val a)) = test record{ num = a }
-- testw passes without quantification over ft
-- or with _ := j
{- OLD ERROR
Cannot instantiate the metavariable _35 to solution ↑ i since it
contains the variable i which is not in scope of the metavariable
or irrelevant in the metavariable but relevant in the solution
when checking that the pattern val a has type Val (R.num ft₁)
-}
|
/* Copyright (C) 2021 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#ifndef __phono3py_H__
#define __phono3py_H__
#if defined(MKL_LAPACKE) || defined(SCIPY_MKL_H)
#include <mkl.h>
#else
#include <lapacke.h>
#endif
#include "phonoc_array.h"
long ph3py_get_interaction(
Darray *fc3_normal_squared, const char *g_zero, const Darray *frequencies,
const lapack_complex_double *eigenvectors, const long (*triplets)[3],
const long num_triplets, const long (*bz_grid_addresses)[3],
const long D_diag[3], const long Q[3][3], const double *fc3,
const long is_compact_fc3, const double (*svecs)[3],
const long multi_dims[2], const long (*multi)[2], const double *masses,
const long *p2s_map, const long *s2p_map, const long *band_indices,
const long symmetrize_fc3_q, const double cutoff_frequency);
long ph3py_get_pp_collision(
double *imag_self_energy,
const long relative_grid_address[24][4][3], /* thm */
const double *frequencies, const lapack_complex_double *eigenvectors,
const long (*triplets)[3], const long num_triplets,
const long *triplet_weights, const long (*bz_grid_addresses)[3], /* thm */
const long *bz_map, /* thm */
const long bz_grid_type, const long D_diag[3], const long Q[3][3],
const double *fc3, const long is_compact_fc3, const double (*svecs)[3],
const long multi_dims[2], const long (*multi)[2], const double *masses,
const long *p2s_map, const long *s2p_map, const Larray *band_indices,
const Darray *temperatures, const long is_NU, const long symmetrize_fc3_q,
const double cutoff_frequency);
long ph3py_get_pp_collision_with_sigma(
double *imag_self_energy, const double sigma, const double sigma_cutoff,
const double *frequencies, const lapack_complex_double *eigenvectors,
const long (*triplets)[3], const long num_triplets,
const long *triplet_weights, const long (*bz_grid_addresses)[3],
const long D_diag[3], const long Q[3][3], const double *fc3,
const long is_compact_fc3, const double (*svecs)[3],
const long multi_dims[2], const long (*multi)[2], const double *masses,
const long *p2s_map, const long *s2p_map, const Larray *band_indices,
const Darray *temperatures, const long is_NU, const long symmetrize_fc3_q,
const double cutoff_frequency);
void ph3py_get_imag_self_energy_at_bands_with_g(
double *imag_self_energy, const Darray *fc3_normal_squared,
const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const double *g, const char *g_zero,
const double temperature, const double cutoff_frequency,
const long num_frequency_points, const long frequency_point_index);
void ph3py_get_detailed_imag_self_energy_at_bands_with_g(
double *detailed_imag_self_energy, double *imag_self_energy_N,
double *imag_self_energy_U, const Darray *fc3_normal_squared,
const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const long (*bz_grid_addresses)[3],
const double *g, const char *g_zero, const double temperature,
const double cutoff_frequency);
void ph3py_get_real_self_energy_at_bands(
double *real_self_energy, const Darray *fc3_normal_squared,
const long *band_indices, const double *frequencies,
const long (*triplets)[3], const long *triplet_weights,
const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency);
void ph3py_get_real_self_energy_at_frequency_point(
double *real_self_energy, const double frequency_point,
const Darray *fc3_normal_squared, const long *band_indices,
const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency);
void ph3py_get_collision_matrix(
double *collision_matrix, const Darray *fc3_normal_squared,
const double *frequencies, const long (*triplets)[3],
const long *triplets_map, const long *map_q,
const long *rotated_grid_points, const double *rotations_cartesian,
const double *g, const long num_ir_gp, const long num_gp,
const long num_rot, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency);
void ph3py_get_reducible_collision_matrix(
double *collision_matrix, const Darray *fc3_normal_squared,
const double *frequencies, const long (*triplets)[3],
const long *triplets_map, const long *map_q, const double *g,
const long num_gp, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency);
void ph3py_get_isotope_scattering_strength(
double *gamma, const long grid_point, const double *mass_variances,
const double *frequencies, const lapack_complex_double *eigenvectors,
const long num_grid_points, const long *band_indices, const long num_band,
const long num_band0, const double sigma, const double cutoff_frequency);
void ph3py_get_thm_isotope_scattering_strength(
double *gamma, const long grid_point, const long *ir_grid_points,
const long *weights, const double *mass_variances,
const double *frequencies, const lapack_complex_double *eigenvectors,
const long num_ir_grid_points, const long *band_indices,
const long num_band, const long num_band0,
const double *integration_weights, const double cutoff_frequency);
void ph3py_distribute_fc3(double *fc3, const long target, const long source,
const long *atom_mapping, const long num_atom,
const double *rot_cart);
void ph3py_rotate_delta_fc2(double (*fc3)[3][3][3],
const double (*delta_fc2s)[3][3],
const double *inv_U,
const double (*site_sym_cart)[3][3],
const long *rot_map_syms, const long num_atom,
const long num_site_sym, const long num_disp);
void ph3py_get_permutation_symmetry_fc3(double *fc3, const long num_atom);
void ph3py_get_permutation_symmetry_compact_fc3(
double *fc3, const long p2s[], const long s2pp[], const long nsym_list[],
const long perms[], const long n_satom, const long n_patom);
void ph3py_transpose_compact_fc3(double *fc3, const long p2s[],
const long s2pp[], const long nsym_list[],
const long perms[], const long n_satom,
const long n_patom, const long t_type);
long ph3py_get_triplets_reciprocal_mesh_at_q(
long *map_triplets, long *map_q, const long grid_point, const long mesh[3],
const long is_time_reversal, const long num_rot,
const long (*rec_rotations)[3][3], const long swappable);
long ph3py_get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point,
const long (*bz_grid_addresses)[3],
const long *bz_map, const long *map_triplets,
const long num_map_triplets,
const long D_diag[3], const long Q[3][3],
const long bz_grid_type);
long ph3py_get_integration_weight(
double *iw, char *iw_zero, const double *frequency_points,
const long num_band0, const long relative_grid_address[24][4][3],
const long mesh[3], const long (*triplets)[3], const long num_triplets,
const long (*bz_grid_addresses)[3], const long *bz_map,
const long bz_grid_type, const double *frequencies1, const long num_band1,
const double *frequencies2, const long num_band2, const long tp_type,
const long openmp_per_triplets, const long openmp_per_bands);
void ph3py_get_integration_weight_with_sigma(
double *iw, char *iw_zero, const double sigma, const double sigma_cutoff,
const double *frequency_points, const long num_band0,
const long (*triplets)[3], const long num_triplets,
const double *frequencies, const long num_band, const long tp_type);
long ph3py_get_grid_index_from_address(const long address[3],
const long mesh[3]);
void ph3py_get_gr_grid_addresses(long gr_grid_addresses[][3],
const long D_diag[3]);
long ph3py_get_reciprocal_rotations(long rec_rotations[48][3][3],
const long (*rotations)[3][3],
const long num_rot,
const long is_time_reversal);
long ph3py_transform_rotations(long (*transformed_rots)[3][3],
const long (*rotations)[3][3],
const long num_rot, const long D_diag[3],
const long Q[3][3]);
long ph3py_get_snf3x3(long D_diag[3], long P[3][3], long Q[3][3],
const long A[3][3]);
long ph3py_transform_rotations(long (*transformed_rots)[3][3],
const long (*rotations)[3][3],
const long num_rot, const long D_diag[3],
const long Q[3][3]);
long ph3py_get_ir_grid_map(long *ir_grid_map, const long D_diag[3],
const long PS[3], const long (*grg_rotations)[3][3],
const long num_rot);
long ph3py_get_bz_grid_addresses(long (*bz_grid_addresses)[3], long *bz_map,
long *bzg2grg, const long D_diag[3],
const long Q[3][3], const long PS[3],
const double rec_lattice[3][3],
const long type);
long ph3py_rotate_bz_grid_index(const long bz_grid_index,
const long rotation[3][3],
const long (*bz_grid_addresses)[3],
const long *bz_map, const long D_diag[3],
const long PS[3], const long bz_grid_type);
void ph3py_symmetrize_collision_matrix(double *collision_matrix,
const long num_column,
const long num_temp,
const long num_sigma);
void ph3py_expand_collision_matrix(double *collision_matrix,
const long *rot_grid_points,
const long *ir_grid_points,
const long num_ir_gp,
const long num_grid_points,
const long num_rot, const long num_sigma,
const long num_temp, const long num_band);
long ph3py_get_neighboring_gird_points(
long *relative_grid_points, const long *grid_points,
const long (*relative_grid_address)[3], const long mesh[3],
const long (*bz_grid_addresses)[3], const long *bz_map,
const long bz_grid_type, const long num_grid_points,
const long num_relative_grid_address);
long ph3py_get_thm_integration_weights_at_grid_points(
double *iw, const double *frequency_points, const long num_band0,
const long num_band, const long num_gp,
const long (*relative_grid_address)[4][3], const long D_diag[3],
const long *grid_points, const long (*bz_grid_addresses)[3],
const long *bz_map, const long bz_grid_type, const double *frequencies,
const long *gp2irgp_map, const char function);
#endif
|
% Fig. W1 Web Appendix W8 Feedback Control of Dynamic Systems, 6e
% Franklin, Powell, Emami
%
clear all;
close all;
F=[0 1;0 0];
G=[0;1];
H=[1 0];
J=0;
T=1;
[Phi,Gam]=c2d(F,G,T);
j=sqrt(-1);
Pc=[.78+.18*j;.78-.18*j];
K=acker(Phi,Gam,Pc);
Pe=[.2+.2*j;.2-.2*j];
L=acker(Phi',H',Pe)';
[A,B,C,D]=dreg(Phi,Gam,H,J,K,L);
A=Phi-Gam*K-L*H;
B=L;
C=K;
D=0;
[Ac,Bc,Cc,Dc]=series(A,B,C,D,Phi,Gam,H,J);
[Acl,Bcl,Ccl,Dcl]=feedback(Ac,Bc,Cc,Dc,0,0,0,1);
tf=30;
N=tf/T+1;
td=0:1:tf;
yd=dstep(Acl,Bcl,Ccl,Dcl,1,N);
axis([0 30 0 1.5])
plot(td,yd,'-',td,yd,'*'),
xlabel('time (sec)')
ylabel('plant output y(t)')
title('Fig. 8.20 Step response of the continuous and digital systems')
nicegrid;
hold on
% use command structure from section 7.8
Nx=[1;0];
A2=[Phi -Gam*K;
L*H Phi-L*H-Gam*K];
B2=[Gam*K*Nx;Gam*K*Nx];
C2=[1 0 0 0];
D2=0;
y2=dstep(A2,B2,C2,D2,1,N);
plot(td,y2,'-',td,y2,'o')
text(6.5,.25,'o-----o-----o-----o Command structure from Fig 7.48(b)')
text(6.5,.45,'*-----*-----*-----* Command structure from Fig 7.15')
hold off
|
(*
This is the definition of formal syntax for Dan Grossman's Thesis,
"SAFE PROGRAMMING AT THE C LEVEL OF ABSTRACTION".
Path Extension
*)
Set Implicit Arguments.
Require Export Cyclone_Formal_Syntax Cyclone_Static_Semantics_Kinding_And_Context_Well_Formedness.
Require Export Cyclone_Dynamic_Semantics.
Require Export Cyclone_Classes Cyclone_Inductions Cyclone_LN_Tactics Cyclone_LN_Extra_Lemmas_And_Automation.
Require Export Cyclone_WFC_Lemmas.
Require Export Cyclone_WFU_Lemmas.
Require Export Cyclone_Context_Weakening_Proof.
Require Export Cyclone_Substitutions_Proof.
Require Export Cyclone_LN_Types_Lemmas.
Require Export Cyclone_Get_Lemmas.
Require Export Cyclone_Admit_Environment.
Open Scope list_scope.
(* Dan is probably thinking like this but not saying it in the text of the proof. *)
(* Thesis difference, using getd. *)
Lemma get'_path_extension_r:
forall v p v' v'' pe,
Value v ->
Value v' ->
Value v'' ->
get' v p v' ->
get' v' (cons pe nil) v'' ->
get' v (app p (cons pe nil)) v''.
Proof.
introv vv vv' vv'' getd.
induction getd; intros.
simpl; auto.
apply IHgetd in H2; auto.
constructor*.
apply IHgetd in H2; auto.
constructor*.
apply IHgetd in H1; auto.
constructor*.
Qed.
Ltac invert_exists :=
repeat
match goal with
| H : exists _ _, _ |- _ => inversions H
| H : exists _, _ |- _ => inversions H
| H : Value (cpair _ _) |- _ => inversions H
end.
Lemma A_10_Path_Extension_1_A_pair:
forall v p v',
get' v p v' ->
(forall v0 v1,
v' = (cpair v0 v1) ->
get' v (app p (cons (i_pe zero_pe) nil)) v0 /\
get' v (app p (cons (i_pe one_pe) nil)) v1) .
Proof.
introv getd.
induction getd; intros; split; subst; inversions* H;
try solve[constructor*];
try solve[ apply get'_path_extension_r with (v':= (cpair v2 v3)); auto];
try solve[apply get'_path_extension_r with (v':= (cpair v0 v2)); auto].
Qed.
Ltac invert_pathed_get :=
match goal with
| H : get' _ (app _ _) _ |- _ => inversions* H
end.
Lemma A_10_Path_Extension_1_A_no_pair:
forall v p v',
get' v p v' ->
( ~(exists v0 v1, v' = (cpair v0 v1)) ->
~(exists i p' v'', get' v (app p (cons (i_pe i) p')) v'')).
Proof.
introv getd.
induction getd; intros; unfolds; intros;
invert_exists;
invert_pathed_get.
Qed.
Lemma A_10_Path_Extension_1_A_pack:
forall v p v',
get' v p v' ->
forall t' v0 t k,
v' = (pack t' v0 (etype aliases k t)) ->
get' v (app p (cons u_pe nil)) v0.
Proof.
introv getd.
induction getd; intros; subst; inversions* H;
simpl;
try solve[constructor*].
Qed.
Lemma A_10_Path_Extension_1_A_no_pack:
forall v p v',
get' v p v' ->
~(exists t' v0 t k, v' = (pack t' v0 (etype aliases k t))) ->
~(exists p' v'', get' v (app p (cons u_pe p')) v'').
Proof.
introv getd.
induction getd; intros; unfolds; intros;
try solve[invert_exists; invert_pathed_get].
invert_exists.
simpl in H1.
inversions* H1.
unfolds in H0.
contradict H0.
exists* tau' v1 tau k.
Qed.
(* ? Extend both ps? *)
Lemma gettype_path_extension_r:
forall u x p t p' t',
gettype u x p t p' t' ->
forall pe t'',
gettype u x (app p (cons pe nil)) t' (app (cons pe nil) p') t'' ->
gettype u x (app p (cons pe nil)) t p' t''.
Proof.
introv gettyped.
induction gettyped; intros; simpl; auto.
destruct pe. destruct i.
Admitted.
Lemma A_10_Path_Extension_2_cross:
forall u x p t p' t',
gettype u x p t p' t' ->
forall t0 t1,
t' = (cross t0 t1) ->
(gettype u x p t (app p' (cons (i_pe zero_pe) nil)) t0 /\
gettype u x p t (app p' (cons (i_pe one_pe ) nil)) t1).
Proof.
introv gettyped.
induction gettyped; intros; try solve[split; subst; simpl; constructor*];
try solve[intros; split; subst; simpl; constructor*; try apply* IHgettyped].
try solve[intros; split; subst; simpl; apply gettype_etype with (tau'':= tau''); auto;
apply* IHgettyped].
Qed.
Lemma fix_path:
forall (a :PE),
(a :: nil) = (app (a :: nil) nil).
Proof.
auto.
Qed.
Lemma gettype_nil_agreement:
forall u x p t t',
gettype u x p t nil t' ->
t = t'.
Admitted.
Lemma gettype_nil_get_agreement:
forall u x p t t' t'',
gettype u x p t nil t' ->
LVPE.V.get (x, p) u = Some t'' ->
(t = t' /\ t = t'').
Admitted.
Lemma intermediate_type:
forall u x t p p' t',
gettype u x p t p' t' ->
exists t'' pe,
gettype u x p t (pe :: p') t'' ->
gettype u x (p & pe) t'' p' t'.
Admitted.
(* this is a weak version of an intermediate type theorem. *)
Lemma punt:
forall x p u tau0 k t0 t2 p',
LVPE.V.get (x, p) u = Some tau0 ->
forall pe,
gettype u x (p ++ pe :: nil) t0 p' (etype aliases k t2) ->
LVPE.V.get (x, p ++ pe :: nil) u = Some tau0.
Admitted.
Lemma A_10_Path_Extension_2_etype:
forall p',
forall u x p t t',
gettype u x p t p' t' ->
forall k t0,
t' = (etype aliases k t0) ->
forall tau,
LVPE.V.get (x,p) u = Some tau ->
(gettype u x p t (app p' (cons u_pe nil)) (T.open_rec 0 tau t0)).
Proof.
introv gettyped.
induction gettyped; intros; simpl; subst.
apply gettype_etype with (tau'':= tau0); auto.
constructor.
apply IHgettyped with (k0:=k); auto.
lets P: punt x p u tau0 k.
specialize (P t0 t2 p' H0 (i_pe zero_pe)).
admit. (* fucking append again *)
constructor.
apply IHgettyped with (k0:=k); auto.
lets P: punt x p u tau0 k.
specialize (P t0 t2 p' H0 (i_pe one_pe)).
admit. (* LVPE.V.get (x, p ++ i_pe zero_pe :: nil) u = Some tau0 *)
apply gettype_etype with (tau'':= tau''); auto.
apply IHgettyped with (k:=k0); auto.
lets P: punt x p u tau0 k.
specialize (P t0 t0 p' H1 u_pe).
(* No, fails on etype vs open rec. *)
admit.
(* breaks at the induction unless I find some way to radically change the types/paths.
intros p'.
induction p'; intros.
apply gettype_nil_get_agreement with (t'':= tau) in H; auto.
inversions H; subst.
simpl.
apply gettype_etype with (tau'':= (etype aliases k t0)); auto.
destruct a; try destruct i; subst; inversion H; subst.
admit.
admit.
simpl.
constructor.
assert(BROKEN:
gettype u x (append p (cons (i_pe zero_pe) nil)) t1 p'
(etype aliases k t0)). admit.
assert(TE: etype aliases k t0 = etype aliases k t0); auto.
(*(p ++ i_pe zero_pe :: nil) *)
specialize (IHp' u x).
(* BROKEN k t0 TE *)
(* LVPE get goal wrong *)
admit.
admit.
specialize (IHp' u x p).
*)
(*
(* Dan does it by p then left most in p'. *)
induction p; intros; induction p'; intros; subst.
apply gettype_nil_get_agreement with (t'':= tau) in H; auto.
inversions* H; simpl.
apply gettype_etype with (tau'':= tau); auto.
destruct a; try destruct i.
inversions* H; simpl.
(* Stuck.
inversion H6; subst; simpl; auto.
admit.
simpl; subst.
apply gettype_nil_get_agreement with (t'':= tau) in H; auto.
inversions* H; subst.
apply gettype_etype with (tau'':= (etype aliases k t0)); auto.
admit.
*)
(* by gettyped
introv gettyped.
induction gettyped; intros; subst.
simpl.
rewrite fix_path.
apply gettype_etype with (tau'':= tau0); auto.
intros; subst; simpl; constructor*.
(* inversions gettyped. *)
apply IHgettyped with (k0:=k); auto.
admit.
intros; subst; simpl; constructor*.
(* inversions gettyped. *)
apply IHgettyped with (k0:=k); auto.
admit.
simpl.
apply gettype_etype with (tau'':=tau''); auto.
apply IHgettyped with (k:=k0); auto.
admit.
Qed.
*)
Admitted.
*) |
lemma to_fract_uminus [simp]: "to_fract (-x) = -to_fract x" |
[STATEMENT]
lemma analz_insert_L [simp]:
"analz (insert (L l) H) = insert (L l) (analz (set l \<union> H))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. analz (insert (L l) H) = insert (L l) (analz (set l \<union> H))
[PROOF STEP]
apply (rule equalityI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. analz (insert (L l) H) \<subseteq> insert (L l) (analz (set l \<union> H))
2. insert (L l) (analz (set l \<union> H)) \<subseteq> analz (insert (L l) H)
[PROOF STEP]
apply (rule subsetI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. x \<in> analz (insert (L l) H) \<Longrightarrow> x \<in> insert (L l) (analz (set l \<union> H))
2. insert (L l) (analz (set l \<union> H)) \<subseteq> analz (insert (L l) H)
[PROOF STEP]
apply (erule analz.induct, auto)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. x \<in> analz (set l \<union> H) \<Longrightarrow> x \<in> analz (insert (L l) H)
[PROOF STEP]
apply (erule analz.induct, auto)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>X. X \<in> set l \<Longrightarrow> X \<in> analz (insert (L l) H)
[PROOF STEP]
using analz.Inj
[PROOF STATE]
proof (prove)
using this:
?X \<in> ?H \<Longrightarrow> ?X \<in> analz ?H
goal (1 subgoal):
1. \<And>X. X \<in> set l \<Longrightarrow> X \<in> analz (insert (L l) H)
[PROOF STEP]
by blast |
Formal statement is: lemma i_squared [simp]: "\<i> * \<i> = -1" Informal statement is: $i^2 = -1$. |
Require Export XR_Rle.
Require Export XR_Rplus_lt_compat.
Require Export XR_Rplus_lt_compat_r.
Local Open Scope R_scope.
Lemma Rplus_lt_le_compat :
forall r1 r2 r3 r4, r1 < r2 -> r3 <= r4 -> r1 + r3 < r2 + r4.
Proof.
intros u v w x.
intros huv hwx.
unfold "<=" in hwx.
destruct hwx as [ hwx | heq ].
{
apply Rplus_lt_compat.
{ exact huv. }
{ exact hwx. }
}
{
subst w.
apply Rplus_lt_compat_r.
exact huv.
}
Qed.
|
structure vector (R : Type) :=
vec :: (x : R) (y : R) (z : R)
namespace vector
variables {R : Type} [comm_ring R]
def zero : vector R := vec 0 0 0
def add (a b : vector R) :=
vec (a.x + b.x) (a.y + b.y) (a.z + b.z)
def neg (a : vector R) :=
vec (-a.x) (-a.y) (-a.z)
def cross_prod (a b : vector R) :=
vec (a.y * b.z - a.z * b.y) (a.x * b.z - a.z * b.x) (a.x * b.y - a.y * b.x)
def inner_prod (a b : vector R) :=
a.x * b.x + a.y * b.y +a.z * b.z
def scalar (n : R) (a : vector R) :=
vec (n * a.x) (n * a.y) (n * a.z)
theorem vector.ext (α : Type) (a b c d e f : α) :
a = d → b = e → c = f → vec a b c = vec d e f :=
begin
intro Had, intro Hbe, intro Hcf,
rw [Had,Hbe,Hcf]
end
theorem vector.add_comm :
∀ (a b : vector R) ,
add a b = add b a :=
begin
intro a , intro b ,
unfold add,
simp
end
theorem vector.add_asso :
∀ (a b c : vector R) ,
add a (add b c) = add (add a b) c :=
begin
intro a , intro b , intro c ,
unfold add,
simp
end
theorem vector.inner_prod_comm :
∀ (a b : vector R) ,
inner_prod a b = inner_prod b a :=
begin
intro a ,
intro b ,
unfold inner_prod,
rw [mul_comm, add_assoc, add_comm],
rw [mul_comm, add_assoc, add_comm],
rw mul_comm, simp
end
theorem vector.zero_zero :
∀ (a : vector R) ,
add zero a = add a zero :=
begin
intro a,
unfold add, unfold zero,
simp,
end
theorem vector.zero_add :
∀ (a : vector R) ,
add zero a = a :=
begin
intro a,
unfold add, unfold zero,
cases a with a.x a.y a.z,
dsimp, congr,
rw zero_add, rw zero_add, rw zero_add
end
theorem vector.add_zero :
∀ (a : vector R) ,
add a zero = a :=
begin
intro a,
unfold add, unfold zero,
cases a with a.x a.y a.z,
dsimp, simp
end
theorem vector.vec_assoc :
∀ (a b : vector R) (n : R) ,
scalar n (add a b) = add (scalar n a) (scalar n b) :=
begin
intro a, intro b, intro n,
unfold add, unfold scalar,
dsimp, congr,
rw mul_add, rw mul_add, rw mul_add
end
theorem vector.num_assoc :
∀ (a : vector R) (m n : R) ,
scalar (m + n) a = add (scalar m a) (scalar n a) :=
begin
intro a, intro m, intro n,
unfold add, unfold scalar,
dsimp, congr,
rw add_mul, rw add_mul, rw add_mul
end
theorem vector.cross_prod_comm :
∀ (a b : vector R) ,
neg (cross_prod b a) = cross_prod a b :=
begin
intro a, intro b,
unfold neg, unfold cross_prod,
dsimp, congr, simp,
rw [mul_comm, add_comm, mul_comm], simp,
rw [mul_comm, add_comm, mul_comm], simp,
rw [mul_comm, add_comm, mul_comm], simp,
end
theorem mul_comm1 :
∀ (a b c : R) ,
a * b * c = c * a * b :=
begin
intro a, intro b, intro c,
rw [mul_comm, ← mul_assoc]
end
theorem mul_comm2 :
∀ (a b c : R) ,
a * b * c = b * c *a :=
begin
intro a, intro b, intro c,
rw [mul_assoc, mul_comm]
end
theorem Jacobian_Identity :
∀ (a b c : vector R) ,
add (cross_prod a (cross_prod b c)) (add (cross_prod b (cross_prod c a)) (cross_prod c (cross_prod a b))) = zero :=
begin
intro a, intro b, intro c,
unfold add, unfold cross_prod, unfold zero,
dsimp, congr,
rw [mul_add, mul_add, mul_add, mul_add, mul_add, mul_add],
rw mul_comm, simp, rw mul_comm, simp, rw mul_comm, simp,
rw mul_comm, simp, rw mul_comm, simp, rw mul_comm, simp,
rw [mul_comm1, add_comm, add_assoc],
rw [mul_comm1, add_comm, add_assoc, add_assoc],
rw [mul_comm1, add_comm, add_assoc, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc],
rw [mul_comm, add_comm], simp,
rw [mul_add, mul_add, mul_add, mul_add, mul_add, mul_add],
rw mul_comm, simp, rw mul_comm, simp, rw mul_comm, simp,
rw mul_comm, simp, rw mul_comm, simp, rw mul_comm, simp,
rw [mul_comm2, add_comm, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc],
rw [mul_comm, add_comm], simp,
rw [mul_add, mul_add, mul_add, mul_add, mul_add, mul_add],
rw mul_comm, simp, rw mul_comm, simp, rw mul_comm, simp,
rw mul_comm, simp, rw mul_comm, simp, rw mul_comm, simp,
rw [mul_comm2, add_comm, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm2, add_comm, add_assoc, add_assoc],
rw [mul_comm1, add_comm, add_assoc, add_assoc],
rw [mul_comm1, add_comm, add_assoc, add_assoc],
rw [mul_comm1, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc, add_assoc],
rw [mul_comm, add_comm, add_assoc],
rw [mul_comm, add_comm], simp,
end
end vector |
function wathen_test11 ( )
%*****************************************************************************80
%
%% WATHEN_TEST11 assemble, factor and solve using WATHEN_ST + CG_ST.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 05 June 2014
%
% Author:
%
% John Burkardt
%
fprintf ( 1, '\n' );
fprintf ( 1, 'WATHEN_TEST11\n' );
fprintf ( 1, ' Assemble, factor and solve a Wathen system\n' );
fprintf ( 1, ' defined by WATHEN_ST and CG_ST.\n' );
fprintf ( 1, '\n' );
nx = 1;
ny = 1;
fprintf ( 1, ' Elements in X direction NX = %d\n', nx );
fprintf ( 1, ' Elements in Y direction NY = %d\n', ny );
fprintf ( 1, ' Number of elements = %d\n', nx * ny );
%
% Compute the number of unknowns.
%
n = wathen_order ( nx, ny );
fprintf ( 1, ' Number of nodes N = %d\n', n );
%
% Compute the matrix size.
%
nz_num = wathen_st_size ( nx, ny );
fprintf ( 1, ' Number of nonzeros = %d\n', nz_num );
%
% Set up a random solution X1.
%
seed = 123456789;
[ x1, seed ] = r8vec_uniform_01 ( n, seed );
%
% Compute the matrix.
%
seed = 123456789;
[ row, col, a, seed ] = wathen_st ( nx, ny, nz_num, seed );
%
% Compute the corresponding right hand side B.
%
b = mv_st ( n, n, nz_num, row, col, a, x1 );
%
% Solve the linear system.
%
x2 = ones ( n, 1 );
x2 = cg_st ( n, nz_num, row, col, a, b, x2 );
%
% Compute the maximum solution error.
%
e = max ( abs ( x1 - x2 ) );
fprintf ( 1, ' Maximum solution error is %g\n', e );
return
end
|
lemma closed_nonpos_Reals_complex [simp]: "closed (\<real>\<^sub>\<le>\<^sub>0 :: complex set)" |
-----------------------------------------------------------------------------
-- |
-- Module : FlashADC
-- Copyright : José Edil Guimarães de Medeiros
-- License : BSD-style (see the file LICENSE)
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
-- This module shows the model of a flash architecture ADC.
--
-- =Running the demo
--
-- To run the demo you need ForSyDe installed in your
-- environment. For plotting, you need the
-- <http://gnuplot.sourceforge.net Gnuplot package>.
--
-- To simulate a 4-bit converter over 10000 samples of a sine wave input,
-- run in @ghci@:
--
-- >>> simulate 4 10000
--
-- To plot the response for a sine wave input, run in @ghci@:
--
-- >>> plotOutput 4 0.01 1000
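--
-- To plot an FFT of the output (the FFT depth should be a power of two;
-- these particular values are only a suggestion), run in @ghci@:
--
-- >>> plotFFT 4 1024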
-----------------------------------------------------------------------------
module ForSyDe.Shallow.Example.Synchronous.FlashADC where
import ForSyDe.Shallow
import Data.Complex
type Resistance = Double
type Voltage = Double
type Code = Integer
type Bits = Integer
type Time = Double
type Frequency = Double
-- | 'flashADC' is the top level module.
flashADC :: [Resistance] -- ^ Resistance values
-> Signal Voltage -- ^ Input signal
-> Signal Code -- ^ Output signal
flashADC resistors input = decoder $ compNetwork input $ resNetwork resistors
-- | 'decoder' takes the thermometer code and outputs integers.
decoder :: [Signal Bits] -- ^ Bit inputs
-> Signal Code -- ^ Output signal
decoder = foldl1 (zipWithSY (+))
-- | 'compNetwork' implements the comparator array.
compNetwork :: Signal Voltage -- ^ ADC input signal
-> [Voltage] -- ^ Voltage thresholds
-> [Signal Bits] -- ^ Bit outputs
compNetwork input = zipWith (\i v -> mapSY (comparator v) i) (repeat input)
-- | 'comparator' is the one bit quantizer.
comparator :: Voltage -- ^ (+) input
-> Voltage -- ^ (-) input
-> Bits -- ^ Output
comparator i v
| v <= i = 0
| otherwise = 1
-- | 'resNetwork' implements the resistor voltage scaling network.
resNetwork :: [Resistance] -- ^ Resistor values
-> [Voltage] -- ^ Threshold voltages
resNetwork resistors = init $ tail $ scanl (\v r -> v + vdd * r / (sumR)) 0 resistors
where vdd = 1
sumR = sum resistors
-- | 'simulate' takes the system parameters and runs the simulation
-- with a sine wave input.
simulate :: Int -- ^ ADC resolution
-> Int -- ^ Number of samples
-> Signal Code -- ^ ADC output
simulate res n = flashADC resistors input
where resistors = replicate (2^res) 1
input = sineWave' 0.5 0.5 50 n
-- | 'plotFFT' runs the simulation with a sine wave input
plotFFT :: Int -- ^ ADC resolution
-> Int -- ^ FFT depth (power of 2)
-> IO String -- ^ plot
plotFFT res n = plotCT' (toRational 1.0) [(g_out, "g")]
where g_out = d2aConverter DAhold (toRational 1.0) $ signal g
a = fromSignal $ simulate res n
b = (:+) <$> (map ((\x -> x/2^res - 0.5).fromIntegral) a)
c = zipWith ($) b $ repeat 0.0
d = vector c
e = fromVector $ fft n d
f = map ((*20).(logBase 10).(\x -> 2*x/(fromIntegral n)) . magnitude) e
g = (take (n `div` 2) f)
-- | 'plotOutput' uses the CTLib plot capabilities to plot the
-- output. In a later version, a plotter to Synchornous signals will be
-- developed.
plotOutput :: Int -- ^ Resolution
-> Double -- ^ Discretization timestep
-> Int -- ^ Number of samples
-> IO String -- ^ plot
plotOutput res t n = plotCT' (toRational t) [(output, "output")]
where output = d2aConverter DAhold (toRational t) adcSignal
adcSignal = signal $ map (toRational) $ fromSignal $
simulate res n
-- | 'sineWave' is an auxiliary function that creates a sine wave signal for
-- simulation purposes.
sineWave' :: Voltage -- ^ Amplitude
-> Voltage -- ^ Offset
-> Frequency -- ^ Frequency
-> Int -- ^ Number of samples
-> Signal Time -- ^ Output signal
sineWave' amp offset freq n = signal sineW
where sineW = map (\t -> (amp/2) * sin(2*pi*freq*t) + offset) grid
grid = linspace 0 1 n
-- | 'linspace' is an auxiliary function that creates a uniformly
-- spaced sampling grid for simulation purposes.
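--
-- For example:
--
-- >>> linspace 0 1 5
-- [0.0,0.25,0.5,0.75,1.0]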
linspace :: Time -- ^ Start time
-> Time -- ^ Stop time
-> Int -- ^ Number of samples
-> [Time] -- ^ Sampling grid
linspace start stop n = map (scale) nodes
where n' = fromIntegral n
nodes = map (fromIntegral) [0..(n-1)]
scale t = t * length / (n' - 1) + start
length = stop - start
|
\section{Built-in Module \sectcode{gdbm}}
\bimodindex{gdbm}
This module is nearly identical to the \code{dbm} module, but uses
GDBM instead. Its interface is identical, and not repeated here.
Warning: the file formats created by gdbm and dbm are incompatible.
\bimodindex{dbm}
|
example (p q : Prop) (hp : p) : p ∨ q :=
by { left, assumption }
|
= USS Breese (DD-122) =
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import sin, cos
import numpy as np
import matplotlib.pyplot as plt
from best_fit import best_fit
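# For each item (i, (x,), y) yielded by best_fit, plot f over [0, 10],
# mark the point (x, y), and save the figure as "<i>.png".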
for i, (x,), y in best_fit:
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
f = lambda x: x + 10*sin(5*x) + 7*cos(4*x)
xs = np.linspace(0, 10, 1000)
ys = [f(i) for i in xs]
ax.plot(xs, ys)
ax.scatter([x], [y], facecolor='r', s=100)
ax.set_xlabel('x')
ax.set_ylabel('y')
fig.savefig('{}.png'.format(i))
print('save {}.png'.format(i))
plt.close(fig)
|
------------------------------------------------------------------------
-- A library for working with dependently typed syntax
-- Nils Anders Danielsson
------------------------------------------------------------------------
-- This library leans heavily on two of Conor McBride's papers:
--
-- * Type-Preserving Renaming and Substitution.
--
-- * Outrageous but Meaningful Coincidences: Dependent type-safe
-- syntax and evaluation.
-- This module gives a brief overview of the modules in the library.
module README where
------------------------------------------------------------------------
-- The library
-- Contexts, variables, context morphisms, context extensions, etc.
import deBruijn.Context
-- Parallel substitutions (defined using an inductive family).
import deBruijn.Substitution.Data.Basics
-- A map function for the substitutions.
import deBruijn.Substitution.Data.Map
-- Some simple substitution combinators. (Given a term type which
-- supports weakening and transformation of variables to terms, various
-- substitutions are defined and various lemmas are proved.)
import deBruijn.Substitution.Data.Simple
-- Given an operation which applies a substitution to a term,
-- satisfying some properties, more operations and lemmas are
-- defined/proved.
--
-- (This module reexports various other modules.)
import deBruijn.Substitution.Data.Application
-- A module which repackages (and reexports) the development under
-- deBruijn.Substitution.Data.
import deBruijn.Substitution.Data
-- Some modules mirroring the development under
-- deBruijn.Substitution.Data, but using substitutions defined as
-- functions rather than data.
--
-- The functional version of substitutions is in some respects easier
-- to work with than the one based on data, but in other respects more
-- awkward. I maintain both developments so that they can be compared.
import deBruijn.Substitution.Function.Basics
import deBruijn.Substitution.Function.Map
import deBruijn.Substitution.Function.Simple
-- The two definitions of substitutions are isomorphic (assuming
-- extensionality).
import deBruijn.Substitution.Isomorphic
------------------------------------------------------------------------
-- An example showing how the library can be used
-- A well-typed representation of a dependently typed language.
import README.DependentlyTyped.Term
-- Normal and neutral terms.
import README.DependentlyTyped.NormalForm
-- Instantiation of deBruijn.Substitution.Data for terms.
import README.DependentlyTyped.Term.Substitution
-- Instantiation of deBruijn.Substitution.Data for normal and neutral
-- terms.
import README.DependentlyTyped.NormalForm.Substitution
-- Normalisation by evaluation.
import README.DependentlyTyped.NBE
-- Various equality checkers (some complete, all sound).
import README.DependentlyTyped.Equality-checker
-- Raw terms.
import README.DependentlyTyped.Raw-term
-- A type-checker (sound).
import README.DependentlyTyped.Type-checker
-- A definability result: A "closed value" is the semantics of a
-- closed term if and only if it satisfies all "Kripke predicates".
import README.DependentlyTyped.Definability
-- An observation: There is a term without a corresponding syntactic
-- type (given some assumptions).
import README.DependentlyTyped.Term-without-type
-- Another observation: If the "Outrageous but Meaningful
-- Coincidences" approach is used to formalise a language, then you
-- can end up with an extensional type theory (with equality
-- reflection).
import README.DependentlyTyped.Extensional-type-theory
-- Inductively defined beta-eta-equality.
import README.DependentlyTyped.Beta-Eta
-- TODO: Add an untyped example.
|
/-
Copyright (c) 2016 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura, Mario Carneiro, Johannes Hölzl, Damiano Testa,
Yuyang Zhao
! This file was ported from Lean 3 source module algebra.order.monoid.lemmas
! leanprover-community/mathlib commit 2ed7e4aec72395b6a7c3ac4ac7873a7a43ead17c
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Algebra.CovariantAndContravariant
import Mathlib.Init.Data.Ordering.Basic
import Mathlib.Order.MinMax
import Mathlib.Tactic.Contrapose
import Mathlib.Tactic.PushNeg
import Mathlib.Tactic.Use
/-!
# Ordered monoids
This file develops the basics of ordered monoids.
## Implementation details
Unfortunately, the number of `'` appended to lemmas in this file
may differ between the multiplicative and the additive version of a lemma.
The reason is that we did not want to change existing names in the library.
## Remark
Almost no monoid is actually present in this file: most assumptions have been generalized to
`Mul` or `MulOneClass`.
-/
-- TODO: If possible, uniformize lemma names, taking special care of `'`,
-- after the `ordered`-refactor is done.
open Function
variable {α β : Type _}
section Mul
variable [Mul α]
section LE
variable [LE α]
/- The prime on this lemma is present only on the multiplicative version. The unprimed version
is taken by the analogous lemma for semiring, with an extra non-negativity assumption. -/
@[to_additive add_le_add_left]
theorem mul_le_mul_left' [CovariantClass α α (· * ·) (· ≤ ·)] {b c : α} (bc : b ≤ c) (a : α) :
a * b ≤ a * c :=
CovariantClass.elim _ bc
#align mul_le_mul_left' mul_le_mul_left'
#align add_le_add_left add_le_add_left
@[to_additive le_of_add_le_add_left]
theorem le_of_mul_le_mul_left' [ContravariantClass α α (· * ·) (· ≤ ·)] {a b c : α}
(bc : a * b ≤ a * c) :
b ≤ c :=
ContravariantClass.elim _ bc
#align le_of_mul_le_mul_left' le_of_mul_le_mul_left'
#align le_of_add_le_add_left le_of_add_le_add_left
/- The prime on this lemma is present only on the multiplicative version. The unprimed version
is taken by the analogous lemma for semiring, with an extra non-negativity assumption. -/
@[to_additive add_le_add_right]
theorem mul_le_mul_right' [i : CovariantClass α α (swap (· * ·)) (· ≤ ·)] {b c : α} (bc : b ≤ c)
(a : α) :
b * a ≤ c * a :=
i.elim a bc
#align mul_le_mul_right' mul_le_mul_right'
#align add_le_add_right add_le_add_right
@[to_additive le_of_add_le_add_right]
theorem le_of_mul_le_mul_right' [i : ContravariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α}
(bc : b * a ≤ c * a) :
b ≤ c :=
i.elim a bc
#align le_of_mul_le_mul_right' le_of_mul_le_mul_right'
#align le_of_add_le_add_right le_of_add_le_add_right
@[to_additive (attr := simp)]
theorem mul_le_mul_iff_left [CovariantClass α α (· * ·) (· ≤ ·)]
[ContravariantClass α α (· * ·) (· ≤ ·)] (a : α) {b c : α} :
a * b ≤ a * c ↔ b ≤ c :=
rel_iff_cov α α (· * ·) (· ≤ ·) a
#align mul_le_mul_iff_left mul_le_mul_iff_left
#align add_le_add_iff_left add_le_add_iff_left
@[to_additive (attr := simp)]
theorem mul_le_mul_iff_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)]
[ContravariantClass α α (swap (· * ·)) (· ≤ ·)] (a : α) {b c : α} :
b * a ≤ c * a ↔ b ≤ c :=
rel_iff_cov α α (swap (· * ·)) (· ≤ ·) a
#align mul_le_mul_iff_right mul_le_mul_iff_right
#align add_le_add_iff_right add_le_add_iff_right
end LE
section LT
variable [LT α]
@[to_additive (attr := simp)]
theorem mul_lt_mul_iff_left [CovariantClass α α (· * ·) (· < ·)]
[ContravariantClass α α (· * ·) (· < ·)] (a : α) {b c : α} :
a * b < a * c ↔ b < c :=
rel_iff_cov α α (· * ·) (· < ·) a
#align mul_lt_mul_iff_left mul_lt_mul_iff_left
#align add_lt_add_iff_left add_lt_add_iff_left
@[to_additive (attr := simp)]
theorem mul_lt_mul_iff_right [CovariantClass α α (swap (· * ·)) (· < ·)]
[ContravariantClass α α (swap (· * ·)) (· < ·)] (a : α) {b c : α} :
b * a < c * a ↔ b < c :=
rel_iff_cov α α (swap (· * ·)) (· < ·) a
#align mul_lt_mul_iff_right mul_lt_mul_iff_right
#align add_lt_add_iff_right add_lt_add_iff_right
@[to_additive add_lt_add_left]
theorem mul_lt_mul_left' [CovariantClass α α (· * ·) (· < ·)] {b c : α} (bc : b < c) (a : α) :
a * b < a * c :=
CovariantClass.elim _ bc
#align mul_lt_mul_left' mul_lt_mul_left'
#align add_lt_add_left add_lt_add_left
@[to_additive lt_of_add_lt_add_left]
theorem lt_of_mul_lt_mul_left' [ContravariantClass α α (· * ·) (· < ·)] {a b c : α}
(bc : a * b < a * c) :
b < c :=
ContravariantClass.elim _ bc
#align lt_of_mul_lt_mul_left' lt_of_mul_lt_mul_left'
#align lt_of_add_lt_add_left lt_of_add_lt_add_left
@[to_additive add_lt_add_right]
theorem mul_lt_mul_right' [i : CovariantClass α α (swap (· * ·)) (· < ·)] {b c : α} (bc : b < c)
(a : α) :
b * a < c * a :=
i.elim a bc
#align mul_lt_mul_right' mul_lt_mul_right'
#align add_lt_add_right add_lt_add_right
@[to_additive lt_of_add_lt_add_right]
theorem lt_of_mul_lt_mul_right' [i : ContravariantClass α α (swap (· * ·)) (· < ·)] {a b c : α}
(bc : b * a < c * a) :
b < c :=
i.elim a bc
#align lt_of_mul_lt_mul_right' lt_of_mul_lt_mul_right'
#align lt_of_add_lt_add_right lt_of_add_lt_add_right
end LT
section Preorder
variable [Preorder α]
@[to_additive]
theorem mul_lt_mul_of_lt_of_lt [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)]
{a b c d : α} (h₁ : a < b) (h₂ : c < d) : a * c < b * d :=
calc
a * c < a * d := mul_lt_mul_left' h₂ a
_ < b * d := mul_lt_mul_right' h₁ d
#align mul_lt_mul_of_lt_of_lt mul_lt_mul_of_lt_of_lt
#align add_lt_add_of_lt_of_lt add_lt_add_of_lt_of_lt
alias add_lt_add_of_lt_of_lt ← add_lt_add
#align add_lt_add add_lt_add
@[to_additive]
theorem mul_lt_mul_of_le_of_lt [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d : α} (h₁ : a ≤ b) (h₂ : c < d) :
a * c < b * d :=
(mul_le_mul_right' h₁ _).trans_lt (mul_lt_mul_left' h₂ b)
#align mul_lt_mul_of_le_of_lt mul_lt_mul_of_le_of_lt
#align add_lt_add_of_le_of_lt add_lt_add_of_le_of_lt
@[to_additive]
theorem mul_lt_mul_of_lt_of_le [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)] {a b c d : α} (h₁ : a < b) (h₂ : c ≤ d) :
a * c < b * d :=
(mul_le_mul_left' h₂ _).trans_lt (mul_lt_mul_right' h₁ d)
#align mul_lt_mul_of_lt_of_le mul_lt_mul_of_lt_of_le
#align add_lt_add_of_lt_of_le add_lt_add_of_lt_of_le
/-- Only assumes left strict covariance. -/
@[to_additive "Only assumes left strict covariance"]
theorem Left.mul_lt_mul [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d : α} (h₁ : a < b) (h₂ : c < d) :
a * c < b * d :=
mul_lt_mul_of_le_of_lt h₁.le h₂
#align left.mul_lt_mul Left.mul_lt_mul
#align left.add_lt_add Left.add_lt_add
/-- Only assumes right strict covariance. -/
@[to_additive "Only assumes right strict covariance"]
theorem Right.mul_lt_mul [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)] {a b c d : α}
(h₁ : a < b) (h₂ : c < d) :
a * c < b * d :=
mul_lt_mul_of_lt_of_le h₁ h₂.le
#align right.mul_lt_mul Right.mul_lt_mul
#align right.add_lt_add Right.add_lt_add
@[to_additive add_le_add]
theorem mul_le_mul' [CovariantClass α α (· * ·) (· ≤ ·)] [CovariantClass α α (swap (· * ·)) (· ≤ ·)]
{a b c d : α} (h₁ : a ≤ b) (h₂ : c ≤ d) :
a * c ≤ b * d :=
(mul_le_mul_left' h₂ _).trans (mul_le_mul_right' h₁ d)
#align mul_le_mul' mul_le_mul'
#align add_le_add add_le_add
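/- For instance, `mul_le_mul'` shows that squaring is monotone once both covariance
assumptions hold: -/
example [CovariantClass α α (· * ·) (· ≤ ·)] [CovariantClass α α (swap (· * ·)) (· ≤ ·)]
    {a b : α} (h : a ≤ b) : a * a ≤ b * b :=
  mul_le_mul' h h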
@[to_additive]
theorem mul_le_mul_three [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d e f : α} (h₁ : a ≤ d) (h₂ : b ≤ e)
(h₃ : c ≤ f) :
a * b * c ≤ d * e * f :=
mul_le_mul' (mul_le_mul' h₁ h₂) h₃
#align mul_le_mul_three mul_le_mul_three
#align add_le_add_three add_le_add_three
@[to_additive]
theorem mul_lt_of_mul_lt_left [CovariantClass α α (· * ·) (· ≤ ·)] {a b c d : α} (h : a * b < c)
(hle : d ≤ b) :
a * d < c :=
(mul_le_mul_left' hle a).trans_lt h
#align mul_lt_of_mul_lt_left mul_lt_of_mul_lt_left
#align add_lt_of_add_lt_left add_lt_of_add_lt_left
@[to_additive]
theorem mul_le_of_mul_le_left [CovariantClass α α (· * ·) (· ≤ ·)] {a b c d : α} (h : a * b ≤ c)
(hle : d ≤ b) :
a * d ≤ c :=
@act_rel_of_rel_of_act_rel _ _ _ (· ≤ ·) _ _ a _ _ _ hle h
#align mul_le_of_mul_le_left mul_le_of_mul_le_left
#align add_le_of_add_le_left add_le_of_add_le_left
@[to_additive]
theorem mul_lt_of_mul_lt_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d : α}
(h : a * b < c) (hle : d ≤ a) :
d * b < c :=
(mul_le_mul_right' hle b).trans_lt h
#align mul_lt_of_mul_lt_right mul_lt_of_mul_lt_right
#align add_lt_of_add_lt_right add_lt_of_add_lt_right
@[to_additive]
theorem mul_le_of_mul_le_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d : α}
(h : a * b ≤ c) (hle : d ≤ a) :
d * b ≤ c :=
(mul_le_mul_right' hle b).trans h
#align mul_le_of_mul_le_right mul_le_of_mul_le_right
#align add_le_of_add_le_right add_le_of_add_le_right
@[to_additive]
theorem lt_mul_of_lt_mul_left [CovariantClass α α (· * ·) (· ≤ ·)] {a b c d : α} (h : a < b * c)
(hle : c ≤ d) :
a < b * d :=
h.trans_le (mul_le_mul_left' hle b)
#align lt_mul_of_lt_mul_left lt_mul_of_lt_mul_left
#align lt_add_of_lt_add_left lt_add_of_lt_add_left
@[to_additive]
theorem le_mul_of_le_mul_left [CovariantClass α α (· * ·) (· ≤ ·)] {a b c d : α} (h : a ≤ b * c)
(hle : c ≤ d) :
a ≤ b * d :=
@rel_act_of_rel_of_rel_act _ _ _ (· ≤ ·) _ _ b _ _ _ hle h
#align le_mul_of_le_mul_left le_mul_of_le_mul_left
#align le_add_of_le_add_left le_add_of_le_add_left
@[to_additive]
theorem lt_mul_of_lt_mul_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d : α}
(h : a < b * c) (hle : b ≤ d) :
a < d * c :=
h.trans_le (mul_le_mul_right' hle c)
#align lt_mul_of_lt_mul_right lt_mul_of_lt_mul_right
#align lt_add_of_lt_add_right lt_add_of_lt_add_right
@[to_additive]
theorem le_mul_of_le_mul_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d : α}
(h : a ≤ b * c) (hle : b ≤ d) :
a ≤ d * c :=
h.trans (mul_le_mul_right' hle c)
#align le_mul_of_le_mul_right le_mul_of_le_mul_right
#align le_add_of_le_add_right le_add_of_le_add_right
end Preorder
section PartialOrder
variable [PartialOrder α]
@[to_additive]
theorem mul_left_cancel'' [ContravariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (h : a * b = a * c) :
b = c :=
(le_of_mul_le_mul_left' h.le).antisymm (le_of_mul_le_mul_left' h.ge)
#align mul_left_cancel'' mul_left_cancel''
#align add_left_cancel'' add_left_cancel''
@[to_additive]
theorem mul_right_cancel'' [ContravariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α}
(h : a * b = c * b) :
a = c :=
le_antisymm (le_of_mul_le_mul_right' h.le) (le_of_mul_le_mul_right' h.ge)
#align mul_right_cancel'' mul_right_cancel''
#align add_right_cancel'' add_right_cancel''
end PartialOrder
section LinearOrder
variable [LinearOrder α] {a b c d : α} [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)]
@[to_additive] lemma min_le_max_of_mul_le_mul (h : a * b ≤ c * d) : min a b ≤ max c d :=
by simp_rw [min_le_iff, le_max_iff]; contrapose! h; exact mul_lt_mul_of_lt_of_lt h.1.1 h.2.2
#align min_le_max_of_add_le_add min_le_max_of_add_le_add
#align min_le_max_of_mul_le_mul min_le_max_of_mul_le_mul
end LinearOrder
end Mul
-- using one
section MulOneClass
variable [MulOneClass α]
section LE
variable [LE α]
@[to_additive le_add_of_nonneg_right]
theorem le_mul_of_one_le_right' [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α} (h : 1 ≤ b) :
a ≤ a * b :=
calc
a = a * 1 := (mul_one a).symm
_ ≤ a * b := mul_le_mul_left' h a
#align le_mul_of_one_le_right' le_mul_of_one_le_right'
#align le_add_of_nonneg_right le_add_of_nonneg_right
@[to_additive add_le_of_nonpos_right]
theorem mul_le_of_le_one_right' [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α} (h : b ≤ 1) :
a * b ≤ a :=
calc
a * b ≤ a * 1 := mul_le_mul_left' h a
_ = a := mul_one a
#align mul_le_of_le_one_right' mul_le_of_le_one_right'
#align add_le_of_nonpos_right add_le_of_nonpos_right
@[to_additive le_add_of_nonneg_left]
theorem le_mul_of_one_le_left' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α} (h : 1 ≤ b) :
a ≤ b * a :=
calc
a = 1 * a := (one_mul a).symm
_ ≤ b * a := mul_le_mul_right' h a
#align le_mul_of_one_le_left' le_mul_of_one_le_left'
#align le_add_of_nonneg_left le_add_of_nonneg_left
@[to_additive add_le_of_nonpos_left]
theorem mul_le_of_le_one_left' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α} (h : b ≤ 1) :
b * a ≤ a :=
calc
b * a ≤ 1 * a := mul_le_mul_right' h a
_ = a := one_mul a
#align mul_le_of_le_one_left' mul_le_of_le_one_left'
#align add_le_of_nonpos_left add_le_of_nonpos_left
@[to_additive]
theorem one_le_of_le_mul_right [ContravariantClass α α (· * ·) (· ≤ ·)] {a b : α} (h : a ≤ a * b) :
1 ≤ b :=
le_of_mul_le_mul_left' <| by simpa only [mul_one]
#align one_le_of_le_mul_right one_le_of_le_mul_right
#align nonneg_of_le_add_right nonneg_of_le_add_right
@[to_additive]
theorem le_one_of_mul_le_right [ContravariantClass α α (· * ·) (· ≤ ·)] {a b : α} (h : a * b ≤ a) :
b ≤ 1 :=
le_of_mul_le_mul_left' <| by simpa only [mul_one]
#align le_one_of_mul_le_right le_one_of_mul_le_right
#align nonpos_of_add_le_right nonpos_of_add_le_right
@[to_additive]
theorem one_le_of_le_mul_left [ContravariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α}
(h : b ≤ a * b) :
1 ≤ a :=
le_of_mul_le_mul_right' <| by simpa only [one_mul]
#align one_le_of_le_mul_left one_le_of_le_mul_left
#align nonneg_of_le_add_left nonneg_of_le_add_left
@[to_additive]
theorem le_one_of_mul_le_left [ContravariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α}
(h : a * b ≤ b) :
a ≤ 1 :=
le_of_mul_le_mul_right' <| by simpa only [one_mul]
#align le_one_of_mul_le_left le_one_of_mul_le_left
#align nonpos_of_add_le_left nonpos_of_add_le_left
@[to_additive (attr := simp) le_add_iff_nonneg_right]
theorem le_mul_iff_one_le_right' [CovariantClass α α (· * ·) (· ≤ ·)]
[ContravariantClass α α (· * ·) (· ≤ ·)] (a : α) {b : α} :
a ≤ a * b ↔ 1 ≤ b :=
Iff.trans (by rw [mul_one]) (mul_le_mul_iff_left a)
#align le_mul_iff_one_le_right' le_mul_iff_one_le_right'
#align le_add_iff_nonneg_right le_add_iff_nonneg_right
@[to_additive (attr := simp) le_add_iff_nonneg_left]
theorem le_mul_iff_one_le_left' [CovariantClass α α (swap (· * ·)) (· ≤ ·)]
[ContravariantClass α α (swap (· * ·)) (· ≤ ·)] (a : α) {b : α} :
a ≤ b * a ↔ 1 ≤ b :=
Iff.trans (by rw [one_mul]) (mul_le_mul_iff_right a)
#align le_mul_iff_one_le_left' le_mul_iff_one_le_left'
#align le_add_iff_nonneg_left le_add_iff_nonneg_left
@[to_additive (attr := simp) add_le_iff_nonpos_right]
theorem mul_le_iff_le_one_right' [CovariantClass α α (· * ·) (· ≤ ·)]
[ContravariantClass α α (· * ·) (· ≤ ·)] (a : α) {b : α} :
a * b ≤ a ↔ b ≤ 1 :=
Iff.trans (by rw [mul_one]) (mul_le_mul_iff_left a)
#align mul_le_iff_le_one_right' mul_le_iff_le_one_right'
#align add_le_iff_nonpos_right add_le_iff_nonpos_right
@[to_additive (attr := simp) add_le_iff_nonpos_left]
theorem mul_le_iff_le_one_left' [CovariantClass α α (swap (· * ·)) (· ≤ ·)]
[ContravariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α} :
a * b ≤ b ↔ a ≤ 1 :=
Iff.trans (by rw [one_mul]) (mul_le_mul_iff_right b)
#align mul_le_iff_le_one_left' mul_le_iff_le_one_left'
#align add_le_iff_nonpos_left add_le_iff_nonpos_left
end LE
section LT
variable [LT α]
@[to_additive lt_add_of_pos_right]
theorem lt_mul_of_one_lt_right' [CovariantClass α α (· * ·) (· < ·)] (a : α) {b : α} (h : 1 < b) :
a < a * b :=
calc
a = a * 1 := (mul_one a).symm
_ < a * b := mul_lt_mul_left' h a
#align lt_mul_of_one_lt_right' lt_mul_of_one_lt_right'
#align lt_add_of_pos_right lt_add_of_pos_right
@[to_additive add_lt_of_neg_right]
theorem mul_lt_of_lt_one_right' [CovariantClass α α (· * ·) (· < ·)] (a : α) {b : α} (h : b < 1) :
a * b < a :=
calc
a * b < a * 1 := mul_lt_mul_left' h a
_ = a := mul_one a
#align mul_lt_of_lt_one_right' mul_lt_of_lt_one_right'
#align add_lt_of_neg_right add_lt_of_neg_right
@[to_additive lt_add_of_pos_left]
theorem lt_mul_of_one_lt_left' [CovariantClass α α (swap (· * ·)) (· < ·)] (a : α) {b : α}
(h : 1 < b) :
a < b * a :=
calc
a = 1 * a := (one_mul a).symm
_ < b * a := mul_lt_mul_right' h a
#align lt_mul_of_one_lt_left' lt_mul_of_one_lt_left'
#align lt_add_of_pos_left lt_add_of_pos_left
@[to_additive add_lt_of_neg_left]
theorem mul_lt_of_lt_one_left' [CovariantClass α α (swap (· * ·)) (· < ·)] (a : α) {b : α}
(h : b < 1) :
b * a < a :=
calc
b * a < 1 * a := mul_lt_mul_right' h a
_ = a := one_mul a
#align mul_lt_of_lt_one_left' mul_lt_of_lt_one_left'
#align add_lt_of_neg_left add_lt_of_neg_left
@[to_additive]
theorem one_lt_of_lt_mul_right [ContravariantClass α α (· * ·) (· < ·)] {a b : α} (h : a < a * b) :
1 < b :=
lt_of_mul_lt_mul_left' <| by simpa only [mul_one]
#align one_lt_of_lt_mul_right one_lt_of_lt_mul_right
#align pos_of_lt_add_right pos_of_lt_add_right
@[to_additive]
theorem lt_one_of_mul_lt_right [ContravariantClass α α (· * ·) (· < ·)] {a b : α} (h : a * b < a) :
b < 1 :=
lt_of_mul_lt_mul_left' <| by simpa only [mul_one]
#align lt_one_of_mul_lt_right lt_one_of_mul_lt_right
#align neg_of_add_lt_right neg_of_add_lt_right
@[to_additive]
theorem one_lt_of_lt_mul_left [ContravariantClass α α (swap (· * ·)) (· < ·)] {a b : α}
(h : b < a * b) :
1 < a :=
lt_of_mul_lt_mul_right' <| by simpa only [one_mul]
#align one_lt_of_lt_mul_left one_lt_of_lt_mul_left
#align pos_of_lt_add_left pos_of_lt_add_left
@[to_additive]
theorem lt_one_of_mul_lt_left [ContravariantClass α α (swap (· * ·)) (· < ·)] {a b : α}
(h : a * b < b) :
a < 1 :=
lt_of_mul_lt_mul_right' <| by simpa only [one_mul]
#align lt_one_of_mul_lt_left lt_one_of_mul_lt_left
#align neg_of_add_lt_left neg_of_add_lt_left
@[to_additive (attr := simp) lt_add_iff_pos_right]
theorem lt_mul_iff_one_lt_right' [CovariantClass α α (· * ·) (· < ·)]
[ContravariantClass α α (· * ·) (· < ·)] (a : α) {b : α} :
a < a * b ↔ 1 < b :=
Iff.trans (by rw [mul_one]) (mul_lt_mul_iff_left a)
#align lt_mul_iff_one_lt_right' lt_mul_iff_one_lt_right'
#align lt_add_iff_pos_right lt_add_iff_pos_right
@[to_additive (attr := simp) lt_add_iff_pos_left]
theorem lt_mul_iff_one_lt_left' [CovariantClass α α (swap (· * ·)) (· < ·)]
[ContravariantClass α α (swap (· * ·)) (· < ·)] (a : α) {b : α} : a < b * a ↔ 1 < b :=
Iff.trans (by rw [one_mul]) (mul_lt_mul_iff_right a)
#align lt_mul_iff_one_lt_left' lt_mul_iff_one_lt_left'
#align lt_add_iff_pos_left lt_add_iff_pos_left
@[to_additive (attr := simp) add_lt_iff_neg_left]
theorem mul_lt_iff_lt_one_left' [CovariantClass α α (· * ·) (· < ·)]
[ContravariantClass α α (· * ·) (· < ·)] {a b : α} :
a * b < a ↔ b < 1 :=
Iff.trans (by rw [mul_one]) (mul_lt_mul_iff_left a)
#align mul_lt_iff_lt_one_left' mul_lt_iff_lt_one_left'
#align add_lt_iff_neg_left add_lt_iff_neg_left
@[to_additive (attr := simp) add_lt_iff_neg_right]
theorem mul_lt_iff_lt_one_right' [CovariantClass α α (swap (· * ·)) (· < ·)]
[ContravariantClass α α (swap (· * ·)) (· < ·)] {a : α} (b : α) : a * b < b ↔ a < 1 :=
Iff.trans (by rw [one_mul]) (mul_lt_mul_iff_right b)
#align mul_lt_iff_lt_one_right' mul_lt_iff_lt_one_right'
#align add_lt_iff_neg_right add_lt_iff_neg_right
end LT
section Preorder
variable [Preorder α]
/-! Lemmas of the form `b ≤ c → a ≤ 1 → b * a ≤ c`,
which assume left covariance. -/
@[to_additive]
theorem mul_le_of_le_of_le_one [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (hbc : b ≤ c)
(ha : a ≤ 1) :
b * a ≤ c :=
calc
b * a ≤ b * 1 := mul_le_mul_left' ha b
_ = b := mul_one b
_ ≤ c := hbc
#align mul_le_of_le_of_le_one mul_le_of_le_of_le_one
#align add_le_of_le_of_nonpos add_le_of_le_of_nonpos
@[to_additive]
theorem mul_lt_of_le_of_lt_one [CovariantClass α α (· * ·) (· < ·)] {a b c : α} (hbc : b ≤ c)
(ha : a < 1) :
b * a < c :=
calc
b * a < b * 1 := mul_lt_mul_left' ha b
_ = b := mul_one b
_ ≤ c := hbc
#align mul_lt_of_le_of_lt_one mul_lt_of_le_of_lt_one
#align add_lt_of_le_of_neg add_lt_of_le_of_neg
@[to_additive]
theorem mul_lt_of_lt_of_le_one [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (hbc : b < c)
(ha : a ≤ 1) :
b * a < c :=
calc
b * a ≤ b * 1 := mul_le_mul_left' ha b
_ = b := mul_one b
_ < c := hbc
#align mul_lt_of_lt_of_le_one mul_lt_of_lt_of_le_one
#align add_lt_of_lt_of_nonpos add_lt_of_lt_of_nonpos
@[to_additive]
theorem mul_lt_of_lt_of_lt_one [CovariantClass α α (· * ·) (· < ·)] {a b c : α} (hbc : b < c)
(ha : a < 1) :
b * a < c :=
calc
b * a < b * 1 := mul_lt_mul_left' ha b
_ = b := mul_one b
_ < c := hbc
#align mul_lt_of_lt_of_lt_one mul_lt_of_lt_of_lt_one
#align add_lt_of_lt_of_neg add_lt_of_lt_of_neg
@[to_additive]
theorem mul_lt_of_lt_of_lt_one' [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (hbc : b < c)
(ha : a < 1) :
b * a < c :=
mul_lt_of_lt_of_le_one hbc ha.le
#align mul_lt_of_lt_of_lt_one' mul_lt_of_lt_of_lt_one'
#align add_lt_of_lt_of_neg' add_lt_of_lt_of_neg'
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.mul_le_one`. -/
@[to_additive "Assumes left covariance.
The lemma assuming right covariance is `Right.add_nonpos`."]
theorem Left.mul_le_one [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α} (ha : a ≤ 1) (hb : b ≤ 1) :
a * b ≤ 1 :=
mul_le_of_le_of_le_one ha hb
#align left.mul_le_one Left.mul_le_one
#align left.add_nonpos Left.add_nonpos
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.mul_lt_one_of_le_of_lt`. -/
@[to_additive Left.add_neg_of_nonpos_of_neg
"Assumes left covariance.
The lemma assuming right covariance is `Right.add_neg_of_nonpos_of_neg`."]
theorem Left.mul_lt_one_of_le_of_lt [CovariantClass α α (· * ·) (· < ·)] {a b : α} (ha : a ≤ 1)
(hb : b < 1) :
a * b < 1 :=
mul_lt_of_le_of_lt_one ha hb
#align left.mul_lt_one_of_le_of_lt Left.mul_lt_one_of_le_of_lt
#align left.add_neg_of_nonpos_of_neg Left.add_neg_of_nonpos_of_neg
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.mul_lt_one_of_lt_of_le`. -/
@[to_additive Left.add_neg_of_neg_of_nonpos
"Assumes left covariance.
The lemma assuming right covariance is `Right.add_neg_of_neg_of_nonpos`."]
theorem Left.mul_lt_one_of_lt_of_le [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α} (ha : a < 1)
(hb : b ≤ 1) :
a * b < 1 :=
mul_lt_of_lt_of_le_one ha hb
#align left.mul_lt_one_of_lt_of_le Left.mul_lt_one_of_lt_of_le
#align left.add_neg_of_neg_of_nonpos Left.add_neg_of_neg_of_nonpos
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.mul_lt_one`. -/
@[to_additive "Assumes left covariance.
The lemma assuming right covariance is `Right.add_neg`."]
theorem Left.mul_lt_one [CovariantClass α α (· * ·) (· < ·)] {a b : α} (ha : a < 1) (hb : b < 1) :
a * b < 1 :=
mul_lt_of_lt_of_lt_one ha hb
#align left.mul_lt_one Left.mul_lt_one
#align left.add_neg Left.add_neg
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.mul_lt_one'`. -/
@[to_additive "Assumes left covariance.
The lemma assuming right covariance is `Right.add_neg'`."]
theorem Left.mul_lt_one' [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α} (ha : a < 1) (hb : b < 1) :
a * b < 1 :=
mul_lt_of_lt_of_lt_one' ha hb
#align left.mul_lt_one' Left.mul_lt_one'
#align left.add_neg' Left.add_neg'
/-! Lemmas of the form `b ≤ c → 1 ≤ a → b ≤ c * a`,
which assume left covariance. -/
@[to_additive]
theorem le_mul_of_le_of_one_le [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (hbc : b ≤ c)
(ha : 1 ≤ a) :
b ≤ c * a :=
calc
b ≤ c := hbc
_ = c * 1 := (mul_one c).symm
_ ≤ c * a := mul_le_mul_left' ha c
#align le_mul_of_le_of_one_le le_mul_of_le_of_one_le
#align le_add_of_le_of_nonneg le_add_of_le_of_nonneg
@[to_additive]
theorem lt_mul_of_le_of_one_lt [CovariantClass α α (· * ·) (· < ·)] {a b c : α} (hbc : b ≤ c)
(ha : 1 < a) :
b < c * a :=
calc
b ≤ c := hbc
_ = c * 1 := (mul_one c).symm
_ < c * a := mul_lt_mul_left' ha c
#align lt_mul_of_le_of_one_lt lt_mul_of_le_of_one_lt
#align lt_add_of_le_of_pos lt_add_of_le_of_pos
@[to_additive]
theorem lt_mul_of_lt_of_one_le [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (hbc : b < c)
(ha : 1 ≤ a) :
b < c * a :=
calc
b < c := hbc
_ = c * 1 := (mul_one c).symm
_ ≤ c * a := mul_le_mul_left' ha c
#align lt_mul_of_lt_of_one_le lt_mul_of_lt_of_one_le
#align lt_add_of_lt_of_nonneg lt_add_of_lt_of_nonneg
@[to_additive]
theorem lt_mul_of_lt_of_one_lt [CovariantClass α α (· * ·) (· < ·)] {a b c : α} (hbc : b < c)
(ha : 1 < a) :
b < c * a :=
calc
b < c := hbc
_ = c * 1 := (mul_one c).symm
_ < c * a := mul_lt_mul_left' ha c
#align lt_mul_of_lt_of_one_lt lt_mul_of_lt_of_one_lt
#align lt_add_of_lt_of_pos lt_add_of_lt_of_pos
@[to_additive]
theorem lt_mul_of_lt_of_one_lt' [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (hbc : b < c)
(ha : 1 < a) :
b < c * a :=
lt_mul_of_lt_of_one_le hbc ha.le
#align lt_mul_of_lt_of_one_lt' lt_mul_of_lt_of_one_lt'
#align lt_add_of_lt_of_pos' lt_add_of_lt_of_pos'
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.one_le_mul`. -/
@[to_additive Left.add_nonneg "Assumes left covariance.
The lemma assuming right covariance is `Right.add_nonneg`."]
theorem Left.one_le_mul [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α} (ha : 1 ≤ a) (hb : 1 ≤ b) :
1 ≤ a * b :=
le_mul_of_le_of_one_le ha hb
#align left.one_le_mul Left.one_le_mul
#align left.add_nonneg Left.add_nonneg
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.one_lt_mul_of_le_of_lt`. -/
@[to_additive Left.add_pos_of_nonneg_of_pos
"Assumes left covariance.
The lemma assuming right covariance is `Right.add_pos_of_nonneg_of_pos`."]
theorem Left.one_lt_mul_of_le_of_lt [CovariantClass α α (· * ·) (· < ·)] {a b : α} (ha : 1 ≤ a)
(hb : 1 < b) :
1 < a * b :=
lt_mul_of_le_of_one_lt ha hb
#align left.one_lt_mul_of_le_of_lt Left.one_lt_mul_of_le_of_lt
#align left.add_pos_of_nonneg_of_pos Left.add_pos_of_nonneg_of_pos
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.one_lt_mul_of_lt_of_le`. -/
@[to_additive Left.add_pos_of_pos_of_nonneg
"Assumes left covariance.
The lemma assuming right covariance is `Right.add_pos_of_pos_of_nonneg`."]
theorem Left.one_lt_mul_of_lt_of_le [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α} (ha : 1 < a)
(hb : 1 ≤ b) :
1 < a * b :=
lt_mul_of_lt_of_one_le ha hb
#align left.one_lt_mul_of_lt_of_le Left.one_lt_mul_of_lt_of_le
#align left.add_pos_of_pos_of_nonneg Left.add_pos_of_pos_of_nonneg
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.one_lt_mul`. -/
@[to_additive Left.add_pos "Assumes left covariance.
The lemma assuming right covariance is `Right.add_pos`."]
theorem Left.one_lt_mul [CovariantClass α α (· * ·) (· < ·)] {a b : α} (ha : 1 < a) (hb : 1 < b) :
1 < a * b :=
lt_mul_of_lt_of_one_lt ha hb
#align left.one_lt_mul Left.one_lt_mul
#align left.add_pos Left.add_pos
/-- Assumes left covariance.
The lemma assuming right covariance is `Right.one_lt_mul'`. -/
@[to_additive Left.add_pos' "Assumes left covariance.
The lemma assuming right covariance is `Right.add_pos'`."]
theorem Left.one_lt_mul' [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α} (ha : 1 < a) (hb : 1 < b) :
1 < a * b :=
lt_mul_of_lt_of_one_lt' ha hb
#align left.one_lt_mul' Left.one_lt_mul'
#align left.add_pos' Left.add_pos'
/-! Lemmas of the form `a ≤ 1 → b ≤ c → a * b ≤ c`,
which assume right covariance. -/
@[to_additive]
theorem mul_le_of_le_one_of_le [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α} (ha : a ≤ 1)
(hbc : b ≤ c) :
a * b ≤ c :=
calc
a * b ≤ 1 * b := mul_le_mul_right' ha b
_ = b := one_mul b
_ ≤ c := hbc
#align mul_le_of_le_one_of_le mul_le_of_le_one_of_le
#align add_le_of_nonpos_of_le add_le_of_nonpos_of_le
@[to_additive]
theorem mul_lt_of_lt_one_of_le [CovariantClass α α (swap (· * ·)) (· < ·)] {a b c : α} (ha : a < 1)
(hbc : b ≤ c) :
a * b < c :=
calc
a * b < 1 * b := mul_lt_mul_right' ha b
_ = b := one_mul b
_ ≤ c := hbc
#align mul_lt_of_lt_one_of_le mul_lt_of_lt_one_of_le
#align add_lt_of_neg_of_le add_lt_of_neg_of_le
@[to_additive]
theorem mul_lt_of_le_one_of_lt [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α} (ha : a ≤ 1)
(hb : b < c) :
a * b < c :=
calc
a * b ≤ 1 * b := mul_le_mul_right' ha b
_ = b := one_mul b
_ < c := hb
#align mul_lt_of_le_one_of_lt mul_lt_of_le_one_of_lt
#align add_lt_of_nonpos_of_lt add_lt_of_nonpos_of_lt
@[to_additive]
theorem mul_lt_of_lt_one_of_lt [CovariantClass α α (swap (· * ·)) (· < ·)] {a b c : α} (ha : a < 1)
(hb : b < c) :
a * b < c :=
calc
a * b < 1 * b := mul_lt_mul_right' ha b
_ = b := one_mul b
_ < c := hb
#align mul_lt_of_lt_one_of_lt mul_lt_of_lt_one_of_lt
#align add_lt_of_neg_of_lt add_lt_of_neg_of_lt
@[to_additive]
theorem mul_lt_of_lt_one_of_lt' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α} (ha : a < 1)
(hbc : b < c) :
a * b < c :=
mul_lt_of_le_one_of_lt ha.le hbc
#align mul_lt_of_lt_one_of_lt' mul_lt_of_lt_one_of_lt'
#align add_lt_of_neg_of_lt' add_lt_of_neg_of_lt'
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.mul_le_one`. -/
@[to_additive "Assumes right covariance.
The lemma assuming left covariance is `Left.add_nonpos`."]
theorem Right.mul_le_one [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α} (ha : a ≤ 1)
(hb : b ≤ 1) :
a * b ≤ 1 :=
mul_le_of_le_one_of_le ha hb
#align right.mul_le_one Right.mul_le_one
#align right.add_nonpos Right.add_nonpos
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.mul_lt_one_of_lt_of_le`. -/
@[to_additive Right.add_neg_of_neg_of_nonpos
"Assumes right covariance.
The lemma assuming left covariance is `Left.add_neg_of_neg_of_nonpos`."]
theorem Right.mul_lt_one_of_lt_of_le [CovariantClass α α (swap (· * ·)) (· < ·)] {a b : α}
(ha : a < 1) (hb : b ≤ 1) :
a * b < 1 :=
mul_lt_of_lt_one_of_le ha hb
#align right.mul_lt_one_of_lt_of_le Right.mul_lt_one_of_lt_of_le
#align right.add_neg_of_neg_of_nonpos Right.add_neg_of_neg_of_nonpos
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.mul_lt_one_of_le_of_lt`. -/
@[to_additive Right.add_neg_of_nonpos_of_neg
"Assumes right covariance.
The lemma assuming left covariance is `Left.add_neg_of_nonpos_of_neg`."]
theorem Right.mul_lt_one_of_le_of_lt [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α}
(ha : a ≤ 1) (hb : b < 1) :
a * b < 1 :=
mul_lt_of_le_one_of_lt ha hb
#align right.mul_lt_one_of_le_of_lt Right.mul_lt_one_of_le_of_lt
#align right.add_neg_of_nonpos_of_neg Right.add_neg_of_nonpos_of_neg
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.mul_lt_one`. -/
@[to_additive "Assumes right covariance.
The lemma assuming left covariance is `Left.add_neg`."]
theorem Right.mul_lt_one [CovariantClass α α (swap (· * ·)) (· < ·)] {a b : α} (ha : a < 1)
(hb : b < 1) :
a * b < 1 :=
mul_lt_of_lt_one_of_lt ha hb
#align right.mul_lt_one Right.mul_lt_one
#align right.add_neg Right.add_neg
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.mul_lt_one'`. -/
@[to_additive "Assumes right covariance.
The lemma assuming left covariance is `Left.add_neg'`."]
theorem Right.mul_lt_one' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α} (ha : a < 1)
(hb : b < 1) :
a * b < 1 :=
mul_lt_of_lt_one_of_lt' ha hb
#align right.mul_lt_one' Right.mul_lt_one'
#align right.add_neg' Right.add_neg'
/-! Lemmas of the form `1 ≤ a → b ≤ c → b ≤ a * c`,
which assume right covariance. -/
@[to_additive]
theorem le_mul_of_one_le_of_le [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α} (ha : 1 ≤ a)
(hbc : b ≤ c) :
b ≤ a * c :=
calc
b ≤ c := hbc
_ = 1 * c := (one_mul c).symm
_ ≤ a * c := mul_le_mul_right' ha c
#align le_mul_of_one_le_of_le le_mul_of_one_le_of_le
#align le_add_of_nonneg_of_le le_add_of_nonneg_of_le
@[to_additive]
theorem lt_mul_of_one_lt_of_le [CovariantClass α α (swap (· * ·)) (· < ·)] {a b c : α} (ha : 1 < a)
(hbc : b ≤ c) :
b < a * c :=
calc
b ≤ c := hbc
_ = 1 * c := (one_mul c).symm
_ < a * c := mul_lt_mul_right' ha c
#align lt_mul_of_one_lt_of_le lt_mul_of_one_lt_of_le
#align lt_add_of_pos_of_le lt_add_of_pos_of_le
@[to_additive]
theorem lt_mul_of_one_le_of_lt [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α} (ha : 1 ≤ a)
(hbc : b < c) :
b < a * c :=
calc
b < c := hbc
_ = 1 * c := (one_mul c).symm
_ ≤ a * c := mul_le_mul_right' ha c
#align lt_mul_of_one_le_of_lt lt_mul_of_one_le_of_lt
#align lt_add_of_nonneg_of_lt lt_add_of_nonneg_of_lt
@[to_additive]
theorem lt_mul_of_one_lt_of_lt [CovariantClass α α (swap (· * ·)) (· < ·)] {a b c : α} (ha : 1 < a)
(hbc : b < c) :
b < a * c :=
calc
b < c := hbc
_ = 1 * c := (one_mul c).symm
_ < a * c := mul_lt_mul_right' ha c
#align lt_mul_of_one_lt_of_lt lt_mul_of_one_lt_of_lt
#align lt_add_of_pos_of_lt lt_add_of_pos_of_lt
@[to_additive]
theorem lt_mul_of_one_lt_of_lt' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α} (ha : 1 < a)
(hbc : b < c) :
b < a * c :=
lt_mul_of_one_le_of_lt ha.le hbc
#align lt_mul_of_one_lt_of_lt' lt_mul_of_one_lt_of_lt'
#align lt_add_of_pos_of_lt' lt_add_of_pos_of_lt'
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.one_le_mul`. -/
@[to_additive Right.add_nonneg "Assumes right covariance.
The lemma assuming left covariance is `Left.add_nonneg`."]
theorem Right.one_le_mul [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α} (ha : 1 ≤ a)
(hb : 1 ≤ b) :
1 ≤ a * b :=
le_mul_of_one_le_of_le ha hb
#align right.one_le_mul Right.one_le_mul
#align right.add_nonneg Right.add_nonneg
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.one_lt_mul_of_lt_of_le`. -/
@[to_additive Right.add_pos_of_pos_of_nonneg
"Assumes right covariance.
The lemma assuming left covariance is `Left.add_pos_of_pos_of_nonneg`."]
theorem Right.one_lt_mul_of_lt_of_le [CovariantClass α α (swap (· * ·)) (· < ·)] {a b : α}
(ha : 1 < a) (hb : 1 ≤ b) :
1 < a * b :=
lt_mul_of_one_lt_of_le ha hb
#align right.one_lt_mul_of_lt_of_le Right.one_lt_mul_of_lt_of_le
#align right.add_pos_of_pos_of_nonneg Right.add_pos_of_pos_of_nonneg
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.one_lt_mul_of_le_of_lt`. -/
@[to_additive Right.add_pos_of_nonneg_of_pos
"Assumes right covariance.
The lemma assuming left covariance is `Left.add_pos_of_nonneg_of_pos`."]
theorem Right.one_lt_mul_of_le_of_lt [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α}
(ha : 1 ≤ a) (hb : 1 < b) :
1 < a * b :=
lt_mul_of_one_le_of_lt ha hb
#align right.one_lt_mul_of_le_of_lt Right.one_lt_mul_of_le_of_lt
#align right.add_pos_of_nonneg_of_pos Right.add_pos_of_nonneg_of_pos
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.one_lt_mul`. -/
@[to_additive Right.add_pos "Assumes right covariance.
The lemma assuming left covariance is `Left.add_pos`."]
theorem Right.one_lt_mul [CovariantClass α α (swap (· * ·)) (· < ·)] {a b : α} (ha : 1 < a)
(hb : 1 < b) :
1 < a * b :=
lt_mul_of_one_lt_of_lt ha hb
#align right.one_lt_mul Right.one_lt_mul
#align right.add_pos Right.add_pos
/-- Assumes right covariance.
The lemma assuming left covariance is `Left.one_lt_mul'`. -/
@[to_additive Right.add_pos' "Assumes right covariance.
The lemma assuming left covariance is `Left.add_pos'`."]
theorem Right.one_lt_mul' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α} (ha : 1 < a)
(hb : 1 < b) :
1 < a * b :=
lt_mul_of_one_lt_of_lt' ha hb
#align right.one_lt_mul' Right.one_lt_mul'
#align right.add_pos' Right.add_pos'
alias Left.mul_le_one ← mul_le_one'
#align mul_le_one' mul_le_one'
alias Left.mul_lt_one_of_le_of_lt ← mul_lt_one_of_le_of_lt
#align mul_lt_one_of_le_of_lt mul_lt_one_of_le_of_lt
alias Left.mul_lt_one_of_lt_of_le ← mul_lt_one_of_lt_of_le
#align mul_lt_one_of_lt_of_le mul_lt_one_of_lt_of_le
alias Left.mul_lt_one ← mul_lt_one
#align mul_lt_one mul_lt_one
alias Left.mul_lt_one' ← mul_lt_one'
#align mul_lt_one' mul_lt_one'
attribute [to_additive add_nonpos "**Alias** of `Left.add_nonpos`."] mul_le_one'
#align add_nonpos add_nonpos
attribute [to_additive add_neg_of_nonpos_of_neg "**Alias** of `Left.add_neg_of_nonpos_of_neg`."]
mul_lt_one_of_le_of_lt
#align add_neg_of_nonpos_of_neg add_neg_of_nonpos_of_neg
attribute [to_additive add_neg_of_neg_of_nonpos "**Alias** of `Left.add_neg_of_neg_of_nonpos`."]
mul_lt_one_of_lt_of_le
#align add_neg_of_neg_of_nonpos add_neg_of_neg_of_nonpos
attribute [to_additive "**Alias** of `Left.add_neg`."] mul_lt_one
#align add_neg add_neg
attribute [to_additive "**Alias** of `Left.add_neg'`."] mul_lt_one'
#align add_neg' add_neg'
alias Left.one_le_mul ← one_le_mul
#align one_le_mul one_le_mul
alias Left.one_lt_mul_of_le_of_lt ← one_lt_mul_of_le_of_lt'
#align one_lt_mul_of_le_of_lt' one_lt_mul_of_le_of_lt'
alias Left.one_lt_mul_of_lt_of_le ← one_lt_mul_of_lt_of_le'
#align one_lt_mul_of_lt_of_le' one_lt_mul_of_lt_of_le'
alias Left.one_lt_mul ← one_lt_mul'
#align one_lt_mul' one_lt_mul'
alias Left.one_lt_mul' ← one_lt_mul''
#align one_lt_mul'' one_lt_mul''
attribute [to_additive add_nonneg "**Alias** of `Left.add_nonneg`."] one_le_mul
#align add_nonneg add_nonneg
attribute [to_additive add_pos_of_nonneg_of_pos "**Alias** of `Left.add_pos_of_nonneg_of_pos`."]
one_lt_mul_of_le_of_lt'
#align add_pos_of_nonneg_of_pos add_pos_of_nonneg_of_pos
attribute [to_additive add_pos_of_pos_of_nonneg "**Alias** of `Left.add_pos_of_pos_of_nonneg`."]
one_lt_mul_of_lt_of_le'
#align add_pos_of_pos_of_nonneg add_pos_of_pos_of_nonneg
attribute [to_additive add_pos "**Alias** of `Left.add_pos`."] one_lt_mul'
#align add_pos add_pos
attribute [to_additive add_pos' "**Alias** of `Left.add_pos'`."] one_lt_mul''
#align add_pos' add_pos'
@[to_additive]
theorem lt_of_mul_lt_of_one_le_left [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (h : a * b < c)
(hle : 1 ≤ b) :
a < c :=
(le_mul_of_one_le_right' hle).trans_lt h
#align lt_of_mul_lt_of_one_le_left lt_of_mul_lt_of_one_le_left
#align lt_of_add_lt_of_nonneg_left lt_of_add_lt_of_nonneg_left
@[to_additive]
theorem le_of_mul_le_of_one_le_left [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (h : a * b ≤ c)
(hle : 1 ≤ b) :
a ≤ c :=
(le_mul_of_one_le_right' hle).trans h
#align le_of_mul_le_of_one_le_left le_of_mul_le_of_one_le_left
#align le_of_add_le_of_nonneg_left le_of_add_le_of_nonneg_left
@[to_additive]
theorem lt_of_lt_mul_of_le_one_left [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (h : a < b * c)
(hle : c ≤ 1) :
a < b :=
h.trans_le (mul_le_of_le_one_right' hle)
#align lt_of_lt_mul_of_le_one_left lt_of_lt_mul_of_le_one_left
#align lt_of_lt_add_of_nonpos_left lt_of_lt_add_of_nonpos_left
@[to_additive]
theorem le_of_le_mul_of_le_one_left [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α} (h : a ≤ b * c)
(hle : c ≤ 1) :
a ≤ b :=
h.trans (mul_le_of_le_one_right' hle)
#align le_of_le_mul_of_le_one_left le_of_le_mul_of_le_one_left
#align le_of_le_add_of_nonpos_left le_of_le_add_of_nonpos_left
@[to_additive]
theorem lt_of_mul_lt_of_one_le_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α}
(h : a * b < c) (hle : 1 ≤ a) :
b < c :=
(le_mul_of_one_le_left' hle).trans_lt h
#align lt_of_mul_lt_of_one_le_right lt_of_mul_lt_of_one_le_right
#align lt_of_add_lt_of_nonneg_right lt_of_add_lt_of_nonneg_right
@[to_additive]
theorem le_of_mul_le_of_one_le_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α}
(h : a * b ≤ c) (hle : 1 ≤ a) :
b ≤ c :=
(le_mul_of_one_le_left' hle).trans h
#align le_of_mul_le_of_one_le_right le_of_mul_le_of_one_le_right
#align le_of_add_le_of_nonneg_right le_of_add_le_of_nonneg_right
@[to_additive]
theorem lt_of_lt_mul_of_le_one_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α}
(h : a < b * c) (hle : b ≤ 1) :
a < c :=
h.trans_le (mul_le_of_le_one_left' hle)
#align lt_of_lt_mul_of_le_one_right lt_of_lt_mul_of_le_one_right
#align lt_of_lt_add_of_nonpos_right lt_of_lt_add_of_nonpos_right
@[to_additive]
theorem le_of_le_mul_of_le_one_right [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c : α}
(h : a ≤ b * c) (hle : b ≤ 1) :
a ≤ c :=
h.trans (mul_le_of_le_one_left' hle)
#align le_of_le_mul_of_le_one_right le_of_le_mul_of_le_one_right
#align le_of_le_add_of_nonpos_right le_of_le_add_of_nonpos_right
end Preorder
section PartialOrder
variable [PartialOrder α]
@[to_additive]
theorem mul_eq_one_iff' [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α} (ha : 1 ≤ a) (hb : 1 ≤ b) :
a * b = 1 ↔ a = 1 ∧ b = 1 :=
Iff.intro
(fun hab : a * b = 1 =>
have : a ≤ 1 := hab ▸ le_mul_of_le_of_one_le le_rfl hb
have : a = 1 := le_antisymm this ha
have : b ≤ 1 := hab ▸ le_mul_of_one_le_of_le ha le_rfl
have : b = 1 := le_antisymm this hb
And.intro ‹a = 1› ‹b = 1›)
(by rintro ⟨rfl, rfl⟩; rw [mul_one])
-- porting note: original proof of the second implication,
-- `fun ⟨ha', hb'⟩ => by rw [ha', hb', mul_one]`,
-- had its `to_additive`-ization fail due to some bug
#align mul_eq_one_iff' mul_eq_one_iff'
#align add_eq_zero_iff' add_eq_zero_iff'
@[to_additive] lemma mul_le_mul_iff_of_ge [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)] {a₁ a₂ b₁ b₂ : α} (ha : a₁ ≤ a₂) (hb : b₁ ≤ b₂) :
a₂ * b₂ ≤ a₁ * b₁ ↔ a₁ = a₂ ∧ b₁ = b₂ := by
refine' ⟨fun h ↦ _, by rintro ⟨rfl, rfl⟩; rfl⟩
simp only [eq_iff_le_not_lt, ha, hb, true_and]
refine' ⟨fun ha ↦ h.not_lt _, fun hb ↦ h.not_lt _⟩
{ exact mul_lt_mul_of_lt_of_le ha hb }
{ exact mul_lt_mul_of_le_of_lt ha hb }
#align add_le_add_iff_of_ge add_le_add_iff_of_ge
#align mul_le_mul_iff_of_ge mul_le_mul_iff_of_ge
section Left
variable [CovariantClass α α (· * ·) (· ≤ ·)] {a b : α}
@[to_additive eq_zero_of_add_nonneg_left]
theorem eq_one_of_one_le_mul_left (ha : a ≤ 1) (hb : b ≤ 1) (hab : 1 ≤ a * b) : a = 1 :=
ha.eq_of_not_lt fun h => hab.not_lt <| mul_lt_one_of_lt_of_le h hb
#align eq_one_of_one_le_mul_left eq_one_of_one_le_mul_left
#align eq_zero_of_add_nonneg_left eq_zero_of_add_nonneg_left
@[to_additive]
theorem eq_one_of_mul_le_one_left (ha : 1 ≤ a) (hb : 1 ≤ b) (hab : a * b ≤ 1) : a = 1 :=
ha.eq_of_not_gt fun h => hab.not_lt <| one_lt_mul_of_lt_of_le' h hb
#align eq_one_of_mul_le_one_left eq_one_of_mul_le_one_left
#align eq_zero_of_add_nonpos_left eq_zero_of_add_nonpos_left
end Left
section Right
variable [CovariantClass α α (swap (· * ·)) (· ≤ ·)] {a b : α}
@[to_additive eq_zero_of_add_nonneg_right]
theorem eq_one_of_one_le_mul_right (ha : a ≤ 1) (hb : b ≤ 1) (hab : 1 ≤ a * b) : b = 1 :=
hb.eq_of_not_lt fun h => hab.not_lt <| Right.mul_lt_one_of_le_of_lt ha h
#align eq_one_of_one_le_mul_right eq_one_of_one_le_mul_right
#align eq_zero_of_add_nonneg_right eq_zero_of_add_nonneg_right
@[to_additive]
theorem eq_one_of_mul_le_one_right (ha : 1 ≤ a) (hb : 1 ≤ b) (hab : a * b ≤ 1) : b = 1 :=
hb.eq_of_not_gt fun h => hab.not_lt <| Right.one_lt_mul_of_le_of_lt ha h
#align eq_one_of_mul_le_one_right eq_one_of_mul_le_one_right
#align eq_zero_of_add_nonpos_right eq_zero_of_add_nonpos_right
end Right
end PartialOrder
section LinearOrder
variable [LinearOrder α]
theorem exists_square_le [CovariantClass α α (· * ·) (· < ·)] (a : α) : ∃ b : α, b * b ≤ a := by
by_cases h:a < 1
· use a
have : a * a < a * 1 := mul_lt_mul_left' h a
rw [mul_one] at this
exact le_of_lt this
· use 1
push_neg at h
rwa [mul_one]
#align exists_square_le exists_square_le
end LinearOrder
end MulOneClass
section Semigroup
variable [Semigroup α]
section PartialOrder
variable [PartialOrder α]
/- This is not an instance, since we want to have an instance from `LeftCancelSemigroup`s
to the appropriate `CovariantClass`. -/
/-- A semigroup with a partial order in which left multiplication is order-reflecting
(i.e. `a * b ≤ a * c → b ≤ c`) is a `LeftCancelSemigroup`. -/
@[to_additive
      "An additive semigroup with a partial order in which left addition is order-reflecting
      (i.e. `a + b ≤ a + c → b ≤ c`) is an `AddLeftCancelSemigroup`."]
def Contravariant.toLeftCancelSemigroup [ContravariantClass α α (· * ·) (· ≤ ·)] :
LeftCancelSemigroup α :=
{ ‹Semigroup α› with mul_left_cancel := fun a b c => mul_left_cancel'' }
#align contravariant.to_left_cancel_semigroup Contravariant.toLeftCancelSemigroup
#align contravariant.to_left_cancel_add_semigroup Contravariant.toAddLeftCancelSemigroup
/- This is not an instance, since we want to have an instance from `RightCancelSemigroup`s
to the appropriate `CovariantClass`. -/
/-- A semigroup with a partial order in which right multiplication is order-reflecting
(i.e. `b * a ≤ c * a → b ≤ c`) is a `RightCancelSemigroup`. -/
@[to_additive
      "An additive semigroup with a partial order in which right addition is order-reflecting
      (i.e. `b + a ≤ c + a → b ≤ c`) is an `AddRightCancelSemigroup`."]
def Contravariant.toRightCancelSemigroup [ContravariantClass α α (swap (· * ·)) (· ≤ ·)] :
RightCancelSemigroup α :=
{ ‹Semigroup α› with mul_right_cancel := fun a b c => mul_right_cancel'' }
#align contravariant.to_right_cancel_semigroup Contravariant.toRightCancelSemigroup
#align contravariant.to_right_cancel_add_semigroup Contravariant.toAddRightCancelSemigroup
@[to_additive]
theorem Left.mul_eq_mul_iff_eq_and_eq [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] [ContravariantClass α α (· * ·) (· ≤ ·)]
[ContravariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d : α} (hac : a ≤ c) (hbd : b ≤ d) :
a * b = c * d ↔ a = c ∧ b = d := by
refine' ⟨fun h => _, fun h => congr_arg₂ (· * ·) h.1 h.2⟩
rcases hac.eq_or_lt with (rfl | hac)
· exact ⟨rfl, mul_left_cancel'' h⟩
rcases eq_or_lt_of_le hbd with (rfl | hbd)
· exact ⟨mul_right_cancel'' h, rfl⟩
exact ((Left.mul_lt_mul hac hbd).ne h).elim
#align left.mul_eq_mul_iff_eq_and_eq Left.mul_eq_mul_iff_eq_and_eq
#align left.add_eq_add_iff_eq_and_eq Left.add_eq_add_iff_eq_and_eq
@[to_additive]
theorem Right.mul_eq_mul_iff_eq_and_eq [CovariantClass α α (· * ·) (· ≤ ·)]
[ContravariantClass α α (· * ·) (· ≤ ·)] [CovariantClass α α (swap (· * ·)) (· < ·)]
[ContravariantClass α α (swap (· * ·)) (· ≤ ·)] {a b c d : α} (hac : a ≤ c) (hbd : b ≤ d) :
a * b = c * d ↔ a = c ∧ b = d := by
refine' ⟨fun h => _, fun h => congr_arg₂ (· * ·) h.1 h.2⟩
rcases hac.eq_or_lt with (rfl | hac)
· exact ⟨rfl, mul_left_cancel'' h⟩
rcases eq_or_lt_of_le hbd with (rfl | hbd)
· exact ⟨mul_right_cancel'' h, rfl⟩
exact ((Right.mul_lt_mul hac hbd).ne h).elim
#align right.mul_eq_mul_iff_eq_and_eq Right.mul_eq_mul_iff_eq_and_eq
#align right.add_eq_add_iff_eq_and_eq Right.add_eq_add_iff_eq_and_eq
alias Left.mul_eq_mul_iff_eq_and_eq ← mul_eq_mul_iff_eq_and_eq
#align mul_eq_mul_iff_eq_and_eq mul_eq_mul_iff_eq_and_eq
attribute [to_additive] mul_eq_mul_iff_eq_and_eq
#align add_eq_add_iff_eq_and_eq add_eq_add_iff_eq_and_eq
end PartialOrder
end Semigroup
section Mono
variable [Mul α] [Preorder α] [Preorder β] {f g : β → α} {s : Set β}
@[to_additive const_add]
theorem Monotone.const_mul' [CovariantClass α α (· * ·) (· ≤ ·)] (hf : Monotone f) (a : α) :
Monotone fun x => a * f x := fun _ _ h => mul_le_mul_left' (hf h) a
#align monotone.const_mul' Monotone.const_mul'
#align monotone.const_add Monotone.const_add
@[to_additive const_add]
theorem MonotoneOn.const_mul' [CovariantClass α α (· * ·) (· ≤ ·)] (hf : MonotoneOn f s) (a : α) :
MonotoneOn (fun x => a * f x) s := fun _ hx _ hy h => mul_le_mul_left' (hf hx hy h) a
#align monotone_on.const_mul' MonotoneOn.const_mul'
#align monotone_on.const_add MonotoneOn.const_add
@[to_additive const_add]
theorem Antitone.const_mul' [CovariantClass α α (· * ·) (· ≤ ·)] (hf : Antitone f) (a : α) :
Antitone fun x => a * f x := fun _ _ h => mul_le_mul_left' (hf h) a
#align antitone.const_mul' Antitone.const_mul'
#align antitone.const_add Antitone.const_add
@[to_additive const_add]
theorem AntitoneOn.const_mul' [CovariantClass α α (· * ·) (· ≤ ·)] (hf : AntitoneOn f s) (a : α) :
AntitoneOn (fun x => a * f x) s := fun _ hx _ hy h => mul_le_mul_left' (hf hx hy h) a
#align antitone_on.const_mul' AntitoneOn.const_mul'
#align antitone_on.const_add AntitoneOn.const_add
@[to_additive add_const]
theorem Monotone.mul_const' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] (hf : Monotone f) (a : α) :
Monotone fun x => f x * a := fun _ _ h => mul_le_mul_right' (hf h) a
#align monotone.mul_const' Monotone.mul_const'
#align monotone.add_const Monotone.add_const
@[to_additive add_const]
theorem MonotoneOn.mul_const' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] (hf : MonotoneOn f s)
(a : α) :
MonotoneOn (fun x => f x * a) s := fun _ hx _ hy h => mul_le_mul_right' (hf hx hy h) a
#align monotone_on.mul_const' MonotoneOn.mul_const'
#align monotone_on.add_const MonotoneOn.add_const
@[to_additive add_const]
theorem Antitone.mul_const' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] (hf : Antitone f) (a : α) :
Antitone fun x => f x * a := fun _ _ h => mul_le_mul_right' (hf h) a
#align antitone.mul_const' Antitone.mul_const'
#align antitone.add_const Antitone.add_const
@[to_additive add_const]
theorem AntitoneOn.mul_const' [CovariantClass α α (swap (· * ·)) (· ≤ ·)] (hf : AntitoneOn f s)
(a : α) :
AntitoneOn (fun x => f x * a) s := fun _ hx _ hy h => mul_le_mul_right' (hf hx hy h) a
#align antitone_on.mul_const' AntitoneOn.mul_const'
#align antitone_on.add_const AntitoneOn.add_const
/-- The product of two monotone functions is monotone. -/
@[to_additive add "The sum of two monotone functions is monotone."]
theorem Monotone.mul' [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] (hf : Monotone f) (hg : Monotone g) :
Monotone fun x => f x * g x := fun _ _ h => mul_le_mul' (hf h) (hg h)
#align monotone.mul' Monotone.mul'
#align monotone.add Monotone.add
/-- The product of two monotone functions is monotone. -/
@[to_additive add "The sum of two monotone functions is monotone."]
theorem MonotoneOn.mul' [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] (hf : MonotoneOn f s) (hg : MonotoneOn g s) :
MonotoneOn (fun x => f x * g x) s := fun _ hx _ hy h =>
mul_le_mul' (hf hx hy h) (hg hx hy h)
#align monotone_on.mul' MonotoneOn.mul'
#align monotone_on.add MonotoneOn.add
/-- The product of two antitone functions is antitone. -/
@[to_additive add "The sum of two antitone functions is antitone."]
theorem Antitone.mul' [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] (hf : Antitone f) (hg : Antitone g) :
Antitone fun x => f x * g x := fun _ _ h => mul_le_mul' (hf h) (hg h)
#align antitone.mul' Antitone.mul'
#align antitone.add Antitone.add
/-- The product of two antitone functions is antitone. -/
@[to_additive add "The sum of two antitone functions is antitone."]
theorem AntitoneOn.mul' [CovariantClass α α (· * ·) (· ≤ ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] (hf : AntitoneOn f s) (hg : AntitoneOn g s) :
AntitoneOn (fun x => f x * g x) s :=
fun _ hx _ hy h => mul_le_mul' (hf hx hy h) (hg hx hy h)
#align antitone_on.mul' AntitoneOn.mul'
#align antitone_on.add AntitoneOn.add
section Left
variable [CovariantClass α α (· * ·) (· < ·)]
@[to_additive const_add]
theorem StrictMono.const_mul' (hf : StrictMono f) (c : α) : StrictMono fun x => c * f x :=
fun _ _ ab => mul_lt_mul_left' (hf ab) c
#align strict_mono.const_mul' StrictMono.const_mul'
#align strict_mono.const_add StrictMono.const_add
@[to_additive const_add]
theorem StrictMonoOn.const_mul' (hf : StrictMonoOn f s) (c : α) :
StrictMonoOn (fun x => c * f x) s :=
fun _ ha _ hb ab => mul_lt_mul_left' (hf ha hb ab) c
#align strict_mono_on.const_mul' StrictMonoOn.const_mul'
#align strict_mono_on.const_add StrictMonoOn.const_add
@[to_additive const_add]
theorem StrictAnti.const_mul' (hf : StrictAnti f) (c : α) : StrictAnti fun x => c * f x :=
fun _ _ ab => mul_lt_mul_left' (hf ab) c
#align strict_anti.const_mul' StrictAnti.const_mul'
#align strict_anti.const_add StrictAnti.const_add
@[to_additive const_add]
theorem StrictAntiOn.const_mul' (hf : StrictAntiOn f s) (c : α) :
StrictAntiOn (fun x => c * f x) s :=
fun _ ha _ hb ab => mul_lt_mul_left' (hf ha hb ab) c
#align strict_anti_on.const_mul' StrictAntiOn.const_mul'
#align strict_anti_on.const_add StrictAntiOn.const_add
end Left
section Right
variable [CovariantClass α α (swap (· * ·)) (· < ·)]
@[to_additive add_const]
theorem StrictMono.mul_const' (hf : StrictMono f) (c : α) : StrictMono fun x => f x * c :=
fun _ _ ab => mul_lt_mul_right' (hf ab) c
#align strict_mono.mul_const' StrictMono.mul_const'
#align strict_mono.add_const StrictMono.add_const
@[to_additive add_const]
theorem StrictMonoOn.mul_const' (hf : StrictMonoOn f s) (c : α) :
StrictMonoOn (fun x => f x * c) s :=
fun _ ha _ hb ab => mul_lt_mul_right' (hf ha hb ab) c
#align strict_mono_on.mul_const' StrictMonoOn.mul_const'
#align strict_mono_on.add_const StrictMonoOn.add_const
@[to_additive add_const]
theorem StrictAnti.mul_const' (hf : StrictAnti f) (c : α) : StrictAnti fun x => f x * c :=
fun _ _ ab => mul_lt_mul_right' (hf ab) c
#align strict_anti.mul_const' StrictAnti.mul_const'
#align strict_anti.add_const StrictAnti.add_const
@[to_additive add_const]
theorem StrictAntiOn.mul_const' (hf : StrictAntiOn f s) (c : α) :
StrictAntiOn (fun x => f x * c) s :=
fun _ ha _ hb ab => mul_lt_mul_right' (hf ha hb ab) c
#align strict_anti_on.mul_const' StrictAntiOn.mul_const'
#align strict_anti_on.add_const StrictAntiOn.add_const
end Right
/-- The product of two strictly monotone functions is strictly monotone. -/
@[to_additive add "The sum of two strictly monotone functions is strictly monotone."]
theorem StrictMono.mul' [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)] (hf : StrictMono f) (hg : StrictMono g) :
StrictMono fun x => f x * g x := fun _ _ ab =>
mul_lt_mul_of_lt_of_lt (hf ab) (hg ab)
#align strict_mono.mul' StrictMono.mul'
#align strict_mono.add StrictMono.add
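/- Added illustration (not part of the upstream file; the instances below are assumed locally):
`StrictMono.mul'` combines strict monotonicity pointwise, so e.g. squaring is strictly monotone
whenever multiplication is strictly covariant on both sides. -/
example {M : Type _} [Mul M] [Preorder M] [CovariantClass M M (· * ·) (· < ·)]
    [CovariantClass M M (swap (· * ·)) (· < ·)] : StrictMono fun a : M => a * a :=
  strictMono_id.mul' strictMono_id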
/-- The product of two strictly monotone functions is strictly monotone. -/
@[to_additive add "The sum of two strictly monotone functions is strictly monotone."]
theorem StrictMonoOn.mul' [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)] (hf : StrictMonoOn f s) (hg : StrictMonoOn g s) :
StrictMonoOn (fun x => f x * g x) s :=
fun _ ha _ hb ab => mul_lt_mul_of_lt_of_lt (hf ha hb ab) (hg ha hb ab)
#align strict_mono_on.mul' StrictMonoOn.mul'
#align strict_mono_on.add StrictMonoOn.add
/-- The product of two strictly antitone functions is strictly antitone. -/
@[to_additive add "The sum of two strictly antitone functions is strictly antitone."]
theorem StrictAnti.mul' [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)] (hf : StrictAnti f) (hg : StrictAnti g) :
StrictAnti fun x => f x * g x :=
fun _ _ ab => mul_lt_mul_of_lt_of_lt (hf ab) (hg ab)
#align strict_anti.mul' StrictAnti.mul'
#align strict_anti.add StrictAnti.add
/-- The product of two strictly antitone functions is strictly antitone. -/
@[to_additive add "The sum of two strictly antitone functions is strictly antitone."]
theorem StrictAntiOn.mul' [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· < ·)] (hf : StrictAntiOn f s) (hg : StrictAntiOn g s) :
StrictAntiOn (fun x => f x * g x) s :=
fun _ ha _ hb ab => mul_lt_mul_of_lt_of_lt (hf ha hb ab) (hg ha hb ab)
#align strict_anti_on.mul' StrictAntiOn.mul'
#align strict_anti_on.add StrictAntiOn.add
/-- The product of a monotone function and a strictly monotone function is strictly monotone. -/
@[to_additive add_strictMono "The sum of a monotone function and a strictly monotone function is
strictly monotone."]
theorem Monotone.mul_strictMono' [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] {f g : β → α} (hf : Monotone f)
(hg : StrictMono g) :
StrictMono fun x => f x * g x :=
fun _ _ h => mul_lt_mul_of_le_of_lt (hf h.le) (hg h)
#align monotone.mul_strict_mono' Monotone.mul_strictMono'
#align monotone.add_strict_mono Monotone.add_strictMono
/-- The product of a monotone function and a strictly monotone function is strictly monotone. -/
@[to_additive add_strictMono "The sum of a monotone function and a strictly monotone function is
strictly monotone."]
theorem MonotoneOn.mul_strictMono' [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] {f g : β → α} (hf : MonotoneOn f s)
(hg : StrictMonoOn g s) : StrictMonoOn (fun x => f x * g x) s :=
fun _ hx _ hy h => mul_lt_mul_of_le_of_lt (hf hx hy h.le) (hg hx hy h)
#align monotone_on.mul_strict_mono' MonotoneOn.mul_strictMono'
#align monotone_on.add_strict_mono MonotoneOn.add_strictMono
/-- The product of an antitone function and a strictly antitone function is strictly antitone. -/
@[to_additive add_strictAnti "The sum of an antitone function and a strictly antitone function is
strictly antitone."]
theorem Antitone.mul_strictAnti' [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] {f g : β → α} (hf : Antitone f)
(hg : StrictAnti g) :
StrictAnti fun x => f x * g x :=
fun _ _ h => mul_lt_mul_of_le_of_lt (hf h.le) (hg h)
#align antitone.mul_strict_anti' Antitone.mul_strictAnti'
#align antitone.add_strict_anti Antitone.add_strictAnti
/-- The product of an antitone function and a strictly antitone function is strictly antitone. -/
@[to_additive add_strictAnti "The sum of an antitone function and a strictly antitone function is
strictly antitone."]
theorem AntitoneOn.mul_strictAnti' [CovariantClass α α (· * ·) (· < ·)]
[CovariantClass α α (swap (· * ·)) (· ≤ ·)] {f g : β → α} (hf : AntitoneOn f s)
(hg : StrictAntiOn g s) :
StrictAntiOn (fun x => f x * g x) s :=
fun _ hx _ hy h => mul_lt_mul_of_le_of_lt (hf hx hy h.le) (hg hx hy h)
#align antitone_on.mul_strict_anti' AntitoneOn.mul_strictAnti'
#align antitone_on.add_strict_anti AntitoneOn.add_strictAnti
variable [CovariantClass α α (· * ·) (· ≤ ·)] [CovariantClass α α (swap (· * ·)) (· < ·)]
/-- The product of a strictly monotone function and a monotone function is strictly monotone. -/
@[to_additive add_monotone "The sum of a strictly monotone function and a monotone function is
strictly monotone."]
theorem StrictMono.mul_monotone' (hf : StrictMono f) (hg : Monotone g) :
StrictMono fun x => f x * g x :=
fun _ _ h => mul_lt_mul_of_lt_of_le (hf h) (hg h.le)
#align strict_mono.mul_monotone' StrictMono.mul_monotone'
#align strict_mono.add_monotone StrictMono.add_monotone
/-- The product of a strictly monotone function and a monotone function is strictly monotone. -/
@[to_additive add_monotone "The sum of a strictly monotone function and a monotone function is
strictly monotone."]
theorem StrictMonoOn.mul_monotone' (hf : StrictMonoOn f s) (hg : MonotoneOn g s) :
StrictMonoOn (fun x => f x * g x) s :=
fun _ hx _ hy h => mul_lt_mul_of_lt_of_le (hf hx hy h) (hg hx hy h.le)
#align strict_mono_on.mul_monotone' StrictMonoOn.mul_monotone'
#align strict_mono_on.add_monotone StrictMonoOn.add_monotone
/-- The product of a strictly antitone function and an antitone function is strictly antitone. -/
@[to_additive add_antitone "The sum of a strictly antitone function and an antitone function is
strictly antitone."]
theorem StrictAnti.mul_antitone' (hf : StrictAnti f) (hg : Antitone g) :
StrictAnti fun x => f x * g x :=
fun _ _ h => mul_lt_mul_of_lt_of_le (hf h) (hg h.le)
#align strict_anti.mul_antitone' StrictAnti.mul_antitone'
#align strict_anti.add_antitone StrictAnti.add_antitone
/-- The product of a strictly antitone function and an antitone function is strictly antitone. -/
@[to_additive add_antitone "The sum of a strictly antitone function and an antitone function is
strictly antitone."]
theorem StrictAntiOn.mul_antitone' (hf : StrictAntiOn f s) (hg : AntitoneOn g s) :
StrictAntiOn (fun x => f x * g x) s :=
fun _ hx _ hy h => mul_lt_mul_of_lt_of_le (hf hx hy h) (hg hx hy h.le)
#align strict_anti_on.mul_antitone' StrictAntiOn.mul_antitone'
#align strict_anti_on.add_antitone StrictAntiOn.add_antitone
@[to_additive (attr := simp) cmp_add_left]
theorem cmp_mul_left' {α : Type _} [Mul α] [LinearOrder α] [CovariantClass α α (· * ·) (· < ·)]
(a b c : α) :
cmp (a * b) (a * c) = cmp b c :=
(strictMono_id.const_mul' a).cmp_map_eq b c
#align cmp_mul_left' cmp_mul_left'
#align cmp_add_left cmp_add_left
@[to_additive (attr := simp) cmp_add_right]
theorem cmp_mul_right' {α : Type _} [Mul α] [LinearOrder α]
[CovariantClass α α (swap (· * ·)) (· < ·)] (a b c : α) :
cmp (a * c) (b * c) = cmp a b :=
(strictMono_id.mul_const' c).cmp_map_eq a b
#align cmp_mul_right' cmp_mul_right'
#align cmp_add_right cmp_add_right
end Mono
/-- An element `a : α` is `MulLECancellable` if `x ↦ a * x` is order-reflecting.
We will make a separate version of many lemmas that require `[ContravariantClass α α (*) (≤)]` with
`MulLECancellable` assumptions instead. These lemmas can then be instantiated to specific types,
like `ENNReal`, where we can replace the assumption `AddLECancellable x` by `x ≠ ∞`.
-/
@[to_additive
"An element `a : α` is `AddLECancellable` if `x ↦ a + x` is order-reflecting.
We will make a separate version of many lemmas that require `[ContravariantClass α α (+) (≤)]` with
`AddLECancellable` assumptions instead. These lemmas can then be instantiated to specific types,
like `ENNReal`, where we can replace the assumption `AddLECancellable x` by `x ≠ ∞`. "]
def MulLECancellable [Mul α] [LE α] (a : α) : Prop :=
∀ ⦃b c⦄, a * b ≤ a * c → b ≤ c
#align mul_le_cancellable MulLECancellable
#align add_le_cancellable AddLECancellable
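/- Added usage sketch (not part of the upstream file; all hypotheses are stated locally as
assumptions): a cancellable element can be stripped from the left of an inequality simply by
applying the definition. -/
example [Mul α] [LE α] {x y z : α} (hx : MulLECancellable x) (h : x * y ≤ x * z) : y ≤ z :=
  hx h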
@[to_additive]
theorem Contravariant.MulLECancellable [Mul α] [LE α] [ContravariantClass α α (· * ·) (· ≤ ·)]
{a : α} :
MulLECancellable a :=
fun _ _ => le_of_mul_le_mul_left'
#align contravariant.mul_le_cancellable Contravariant.MulLECancellable
#align contravariant.add_le_cancellable Contravariant.AddLECancellable
@[to_additive]
theorem mulLECancellable_one [Monoid α] [LE α] : MulLECancellable (1 : α) := fun a b => by
simpa only [one_mul] using id
#align mul_le_cancellable_one mulLECancellable_one
#align add_le_cancellable_zero addLECancellable_zero
namespace MulLECancellable
@[to_additive]
protected theorem Injective [Mul α] [PartialOrder α] {a : α} (ha : MulLECancellable a) :
Injective ((· * ·) a) :=
fun _ _ h => le_antisymm (ha h.le) (ha h.ge)
#align mul_le_cancellable.injective MulLECancellable.Injective
#align add_le_cancellable.injective AddLECancellable.Injective
@[to_additive]
protected theorem inj [Mul α] [PartialOrder α] {a b c : α} (ha : MulLECancellable a) :
a * b = a * c ↔ b = c :=
ha.Injective.eq_iff
#align mul_le_cancellable.inj MulLECancellable.inj
#align add_le_cancellable.inj AddLECancellable.inj
@[to_additive]
protected theorem injective_left [CommSemigroup α] [PartialOrder α] {a : α}
(ha : MulLECancellable a) :
Injective (· * a) := fun b c h => ha.Injective <| by dsimp; rwa [mul_comm a, mul_comm a]
#align mul_le_cancellable.injective_left MulLECancellable.injective_left
#align add_le_cancellable.injective_left AddLECancellable.injective_left
@[to_additive]
protected theorem inj_left [CommSemigroup α] [PartialOrder α] {a b c : α}
(hc : MulLECancellable c) :
a * c = b * c ↔ a = b :=
hc.injective_left.eq_iff
#align mul_le_cancellable.inj_left MulLECancellable.inj_left
#align add_le_cancellable.inj_left AddLECancellable.inj_left
variable [LE α]
@[to_additive]
protected theorem mul_le_mul_iff_left [Mul α] [CovariantClass α α (· * ·) (· ≤ ·)] {a b c : α}
(ha : MulLECancellable a) : a * b ≤ a * c ↔ b ≤ c :=
⟨fun h => ha h, fun h => mul_le_mul_left' h a⟩
#align mul_le_cancellable.mul_le_mul_iff_left MulLECancellable.mul_le_mul_iff_left
#align add_le_cancellable.add_le_add_iff_left AddLECancellable.add_le_add_iff_left
@[to_additive]
protected theorem mul_le_mul_iff_right [CommSemigroup α] [CovariantClass α α (· * ·) (· ≤ ·)]
{a b c : α} (ha : MulLECancellable a) :
b * a ≤ c * a ↔ b ≤ c := by rw [mul_comm b, mul_comm c, ha.mul_le_mul_iff_left]
#align mul_le_cancellable.mul_le_mul_iff_right MulLECancellable.mul_le_mul_iff_right
#align add_le_cancellable.add_le_add_iff_right AddLECancellable.add_le_add_iff_right
@[to_additive]
protected theorem le_mul_iff_one_le_right [MulOneClass α] [CovariantClass α α (· * ·) (· ≤ ·)]
{a b : α} (ha : MulLECancellable a) :
a ≤ a * b ↔ 1 ≤ b :=
Iff.trans (by rw [mul_one]) ha.mul_le_mul_iff_left
#align mul_le_cancellable.le_mul_iff_one_le_right MulLECancellable.le_mul_iff_one_le_right
#align add_le_cancellable.le_add_iff_nonneg_right AddLECancellable.le_add_iff_nonneg_right
@[to_additive]
protected theorem mul_le_iff_le_one_right [MulOneClass α] [CovariantClass α α (· * ·) (· ≤ ·)]
{a b : α} (ha : MulLECancellable a) :
a * b ≤ a ↔ b ≤ 1 :=
Iff.trans (by rw [mul_one]) ha.mul_le_mul_iff_left
#align mul_le_cancellable.mul_le_iff_le_one_right MulLECancellable.mul_le_iff_le_one_right
#align add_le_cancellable.add_le_iff_nonpos_right AddLECancellable.add_le_iff_nonpos_right
@[to_additive]
protected theorem le_mul_iff_one_le_left [CommMonoid α] [CovariantClass α α (· * ·) (· ≤ ·)]
{a b : α} (ha : MulLECancellable a) :
a ≤ b * a ↔ 1 ≤ b := by rw [mul_comm, ha.le_mul_iff_one_le_right]
#align mul_le_cancellable.le_mul_iff_one_le_left MulLECancellable.le_mul_iff_one_le_left
#align add_le_cancellable.le_add_iff_nonneg_left AddLECancellable.le_add_iff_nonneg_left
@[to_additive]
protected theorem mul_le_iff_le_one_left [CommMonoid α] [CovariantClass α α (· * ·) (· ≤ ·)]
{a b : α} (ha : MulLECancellable a) :
b * a ≤ a ↔ b ≤ 1 := by rw [mul_comm, ha.mul_le_iff_le_one_right]
#align mul_le_cancellable.mul_le_iff_le_one_left MulLECancellable.mul_le_iff_le_one_left
#align add_le_cancellable.add_le_iff_nonpos_left AddLECancellable.add_le_iff_nonpos_left
end MulLECancellable
section Bit
set_option linter.deprecated false
variable [Add α] [Preorder α]
@[deprecated]
theorem bit0_mono [CovariantClass α α (· + ·) (· ≤ ·)] [CovariantClass α α (swap (· + ·))
(· ≤ ·)] :
Monotone (bit0 : α → α) := fun _ _ h => add_le_add h h
#align bit0_mono bit0_mono
@[deprecated]
theorem bit0_strictMono [CovariantClass α α (· + ·) (· < ·)]
[CovariantClass α α (swap (· + ·)) (· < ·)] :
StrictMono (bit0 : α → α) := fun _ _ h => add_lt_add h h
#align bit0_strict_mono bit0_strictMono
end Bit
|
module Lib where
import Data.List ()
import Numeric.LinearAlgebra
( Linear (scale),
Matrix,
Vector,
cmap,
maxIndex,
outer,
(#>),
)
import Text.CSV ()
{-
Sooooooooooooo
What do I need to do
Need to make the output a vector
make the weights a matrix
make the learning stage be an outer product that's then
subtracted from the weights
I need to implement sigmoid?
-}
predict ::
Matrix Double -> -- curried - Weights
Vector Double -> -- curried - Input vector
Vector Double -- Guess
predict w x = cmap sigmoid (w #> x) -- apply the sigmoid elementwise to w #> x, as in predictEpoch
predictEpoch ::
Matrix Double -> -- w - Weights
  [Vector Double] -> -- x - An epoch of input vectors
[Vector Double] -- A guess for each input in the epoch
predictEpoch w x = map (cmap sigmoid) (fmap (w #>) x)
sigmoid :: Double -> Double
sigmoid x = 1.0 / (1.0 + exp (negate x))
stocTrain ::
Matrix Double -> -- w - Weights
  Vector Double -> -- x - Single input vector
Vector Double -> -- l - label
Matrix Double -- Trained Weights
stocTrain w x l = w - delta
where
error = predict w x - l
sigDeriv = error * cmap (1 -) error
delta = scale 0.001 $ sigDeriv `outer` x
trainEpoch ::
Matrix Double -> -- w - Weights
[Vector Double] -> -- x - An epoch of inputs
[Vector Double] -> -- l - An epoch of labels
Matrix Double -- Weights
trainEpoch w (x : xs) (l : ls) = trainEpoch (stocTrain w x l) xs ls
trainEpoch w [] [] = w
train ::
Int -> -- t - number of epochs to train on
Matrix Double -> -- w - Weights
[Vector Double] -> -- x - An epoch of inputs
[Vector Double] -> -- l - An epoch of labels
Matrix Double -- Weights
train t w xs ls
| t == 0 = w
| otherwise = train (t - 1) (trainEpoch w xs ls) xs ls
successRate ::
Matrix Double -> -- w - Weights
[Vector Double] -> -- x - An epoch of input vectors
[Vector Double] -> -- l - An epoch of labels
  Double -- Fraction of inputs whose predicted class matches the label
successRate w x l = intDiv correct total
where
correct = sum (zipWith checkSame l (predictEpoch w x))
total = length l
intDiv :: Int -> Int -> Double
intDiv n l = fromIntegral n / fromIntegral l
-- validate w x l = fmap maxIndex (predictEpoch w x)
checkSame :: Vector Double -> Vector Double -> Int
checkSame x y
| maxIndex x == maxIndex y = 1
| otherwise = 0
{-
so we need to:
stick the ending bias 1 onto each input array
turn the weights into a matrix
turn the input vectors in a list of vectors
turn the labels into a list of one hot encoded vectors
-}
-- csvToTrainingPred :: [[String]] -> [Double]
-- csvToTrainingPred s = predictEpoch trainedWeights scaledInputs
-- where
-- (labels, unscaledInputs) = labelInputSplit (map (map read) (init s))
-- scaledInputs = scaleInput unscaledInputs
-- trainedWeights = train 1 scaledInputs labels [0 ..]
-- scaleInput :: [[Double]] -> [[Double]]
-- scaleInput = map (map (/ 255))
-- labelInputSplit :: [[Double]] -> ([Double], [[Double]])
-- labelInputSplit xs = (head t, transpose (tail t))
-- where
-- t = transpose xs
|
= = Provisions = =
|
newtwitteravatar <- function(tweetname = "j_colomb", klimaimagepath = "images/klima.jpg") {
  a <- rtweet::lookup_users(tweetname)
  download.file(sub("_normal.", ".", a$profile_image_url), paste0("images/avatar.jpg"), mode = "wb")
  bgd <- magick::image_read(klimaimagepath) %>%
    magick::image_scale("400x400")
  img <- magick::image_read("images/avatar.jpg") %>%
    magick::image_scale("400x400") %>%
    magick::image_convert("png") %>%
    magick::image_charcoal(radius = 1.8, sigma = 0.8) %>%
    magick::image_transparent("white", fuzz = 20)
  imgnew <- c(bgd, img)
  magick::image_mosaic(imgnew) %>%
    magick::image_convert("jpg") %>%
    magick::image_write("images/newavatar.jpg")
}
newtwitteravatar() |
open import Agda.Builtin.Nat
open import Agda.Builtin.Reflection
open import Agda.Builtin.Unit
macro
five : Term → TC ⊤
five hole = unify hole (lit (nat 5))
-- Here you get hole = _X (λ {n} → y {_n})
-- and fail to solve _n.
yellow : ({n : Nat} → Set) → Nat
yellow y = five
-- Here you get hole = _X ⦃ n ⦄ (λ ⦃ n' ⦄ → y ⦃ _n ⦄)
-- and fail to solve _n due to the multiple candidates n and n'.
more-yellow : ⦃ n : Nat ⦄ → (⦃ n : Nat ⦄ → Set) → Nat
more-yellow y = five
|
[STATEMENT]
lemma floor_eq_iff: "\<lfloor>x\<rfloor> = a \<longleftrightarrow> of_int a \<le> x \<and> x < of_int a + 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lfloor>x\<rfloor> = a) = (of_int a \<le> x \<and> x < of_int a + (1::'a))
[PROOF STEP]
using floor_correct floor_unique
[PROOF STATE]
proof (prove)
using this:
of_int \<lfloor>?x\<rfloor> \<le> ?x \<and> ?x < of_int (\<lfloor>?x\<rfloor> + 1)
\<lbrakk>of_int ?z \<le> ?x; ?x < of_int ?z + (1::?'a)\<rbrakk> \<Longrightarrow> \<lfloor>?x\<rfloor> = ?z
goal (1 subgoal):
1. (\<lfloor>x\<rfloor> = a) = (of_int a \<le> x \<and> x < of_int a + (1::'a))
[PROOF STEP]
by auto |
(** Equality.v *)
From Babel Require Import TerminalDogma.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Lemma refl_iff_True (T : Type) (a : T) :
a = a <-> True.
Proof. by []. Qed.
|
# load a dataset
using RDatasets
iris = dataset("datasets", "iris");
println(iris)
# load the StatPlots recipes (for DataFrames) available via:
# Pkg.add("StatPlots")
using StatPlots
# Scatter plot with some custom settings
@df iris scatter(:SepalLength, :SepalWidth, group=:Species,
title = "Iris awesome plot from dataset",
xlabel = "Length", ylabel = "Width",
m=(0.5, [:cross :hex :star7], 12),
bg=RGB(.2,.2,.2))
# save a png
png("iris") |
[STATEMENT]
lemma norm_Pair_le:
shows "norm (x, y) \<le> norm x + norm y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. norm (x, y) \<le> norm x + norm y
[PROOF STEP]
unfolding norm_Pair
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sqrt ((norm x)\<^sup>2 + (norm y)\<^sup>2) \<le> norm x + norm y
[PROOF STEP]
by (metis norm_ge_zero sqrt_sum_squares_le_sum) |
/-
Copyright (c) 2021 David Wärn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Wärn, Eric Wieser, Joachim Breitner
! This file was ported from Lean 3 source module group_theory.is_free_group
! leanprover-community/mathlib commit f7fc89d5d5ff1db2d1242c7bb0e9062ce47ef47c
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.GroupTheory.FreeGroup
/-!
# Free groups structures on arbitrary types
This file defines a type class for types that are free groups, together with the usual operations.
The type class can be instantiated by providing an isomorphism to the canonical free group, or by
proving that the universal property holds.
For the explicit construction of free groups, see `GroupTheory/FreeGroup`.
## Main definitions
* `IsFreeGroup G` - a typeclass to indicate that `G` is free over some generators
* `IsFreeGroup.of` - the canonical injection of `G`'s generators into `G`
* `IsFreeGroup.lift` - the universal property of the free group
## Main results
* `IsFreeGroup.toFreeGroup` - any free group with generators `A` is equivalent to `FreeGroup A`.
* `IsFreeGroup.unique_lift` - the universal property of a free group
* `IsFreeGroup.ofUniqueLift` - constructing `IsFreeGroup` from the universal property
-/
universe u
/- ./././Mathport/Syntax/Translate/Command.lean:388:30: infer kinds are unsupported in Lean 4:
#[`MulEquiv] [] -/
/- Porting Note regarding the comment above:
The mathlib3 version makes `G` explicit in `is_free_group.mul_equiv`. -/
/-- `IsFreeGroup G` means that `G` is isomorphic to a free group. -/
class IsFreeGroup (G : Type u) [Group G] where
/-- The generators of a free group. -/
Generators : Type u
/-- The multiplicative equivalence between "the" free group on the generators, and
the given group `G`.
Note: `IsFreeGroup.MulEquiv'` should not be used directly.
`IsFreeGroup.MulEquiv` should be used instead because it makes `G` an explicit variable.-/
MulEquiv' : FreeGroup Generators ≃* G
#align is_free_group IsFreeGroup
instance (X : Type _) : IsFreeGroup (FreeGroup X) where
Generators := X
MulEquiv' := MulEquiv.refl _
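/- Added sanity-check example (not part of the upstream file): for the canonical instance above,
the generator type of `FreeGroup X` is definitionally `X` itself. -/
example (X : Type _) : IsFreeGroup.Generators (FreeGroup X) = X :=
  rfl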
namespace IsFreeGroup
variable (G : Type _) [Group G] [IsFreeGroup G]
/-- Any free group is isomorphic to "the" free group. -/
def MulEquiv : FreeGroup (Generators G) ≃* G := IsFreeGroup.MulEquiv'
/-- Any free group is isomorphic to "the" free group. -/
@[simps!]
def toFreeGroup : G ≃* FreeGroup (Generators G) :=
(MulEquiv G).symm
#align is_free_group.to_free_group IsFreeGroup.toFreeGroup
#align is_free_group.to_free_group_apply IsFreeGroup.toFreeGroup_apply
#align is_free_group.to_free_group_symm_apply IsFreeGroup.toFreeGroup_symm_apply
variable {G}
/-- The canonical injection of G's generators into G -/
def of : Generators G → G :=
(MulEquiv G).toFun ∘ FreeGroup.of
#align is_free_group.of IsFreeGroup.of
@[simp]
theorem of_eq_freeGroup_of {A : Type u} : @of (FreeGroup A) _ _ = FreeGroup.of :=
rfl
#align is_free_group.of_eq_free_group_of IsFreeGroup.of_eq_freeGroup_of
variable {H : Type _} [Group H]
/-- The equivalence between functions on the generators and group homomorphisms from a free group
given by those generators. -/
def lift : (Generators G → H) ≃ (G →* H) :=
FreeGroup.lift.trans
{ toFun := fun f => f.comp (MulEquiv G).symm.toMonoidHom
invFun := fun f => f.comp (MulEquiv G).toMonoidHom
left_inv := fun f => by
ext
simp
right_inv := fun f => by
ext
simp }
#align is_free_group.lift IsFreeGroup.lift
@[simp]
theorem lift_of (f : Generators G → H) (a : Generators G) : lift f (of a) = f a :=
congr_fun (lift.symm_apply_apply f) a
#align is_free_group.lift_of IsFreeGroup.lift_of
@[simp]
theorem lift_symm_apply (f : G →* H) (a : Generators G) : (lift.symm f) a = f (of a) :=
rfl
#align is_free_group.lift_symm_apply IsFreeGroup.lift_symm_apply
@[ext 1050] --Porting note: increased priority, but deliberately less than for example
--`FreeProduct.ext_hom`
theorem ext_hom ⦃f g : G →* H⦄ (h : ∀ a : Generators G, f (of a) = g (of a)) : f = g :=
lift.symm.injective (funext h)
#align is_free_group.ext_hom IsFreeGroup.ext_hom
/-- The universal property of a free group: a function from the generators of `G` to another
group extends in a unique way to a homomorphism from `G`.
Note that since `IsFreeGroup.lift` is expressed as a bijection, it already
expresses the universal property. -/
theorem unique_lift (f : Generators G → H) : ∃! F : G →* H, ∀ a, F (of a) = f a := by
simpa only [Function.funext_iff] using lift.symm.bijective.existsUnique f
#align is_free_group.unique_lift IsFreeGroup.unique_lift
/-- If a group satisfies the universal property of a free group, then it is a free group, where
the universal property is expressed as in `IsFreeGroup.lift` and its properties. -/
def ofLift {G : Type u} [Group G] (X : Type u) (of : X → G)
(lift : ∀ {H : Type u} [Group H], (X → H) ≃ (G →* H))
(lift_of : ∀ {H : Type u} [Group H], ∀ (f : X → H) (a), lift f (of a) = f a) : IsFreeGroup G
where
Generators := X
MulEquiv' :=
MonoidHom.toMulEquiv (FreeGroup.lift of) (lift FreeGroup.of)
(by
apply FreeGroup.ext_hom; intro x
simp only [MonoidHom.coe_comp, Function.comp_apply, MonoidHom.id_apply, FreeGroup.lift.of,
lift_of])
(by
let lift_symm_of : ∀ {H : Type u} [Group H], ∀ (f : G →* H) (a), lift.symm f a = f (of a) :=
by intro H _ f a ; simp [← lift_of (lift.symm f)]
apply lift.symm.injective; ext x
simp only [MonoidHom.coe_comp, Function.comp_apply, MonoidHom.id_apply, FreeGroup.lift.of,
lift_of, lift_symm_of])
#align is_free_group.of_lift IsFreeGroup.ofLift
/-- If a group satisfies the universal property of a free group, then it is a free group, where
the universal property is expressed as in `IsFreeGroup.unique_lift`. -/
noncomputable def ofUniqueLift {G : Type u} [Group G] (X : Type u) (of : X → G)
(h : ∀ {H : Type u} [Group H] (f : X → H), ∃! F : G →* H, ∀ a, F (of a) = f a) :
IsFreeGroup G :=
let lift {H : Type u} [Group H] : (X → H) ≃ (G →* H) :=
{ toFun := fun f => Classical.choose (h f)
invFun := fun F => F ∘ of
left_inv := fun f => funext (Classical.choose_spec (h f)).left
right_inv := fun F => ((Classical.choose_spec (h (F ∘ of))).right F fun _ => rfl).symm }
let lift_of {H : Type u} [Group H] (f : X → H) (a : X) : lift f (of a) = f a :=
congr_fun (lift.symm_apply_apply f) a
ofLift X of @lift @lift_of
#align is_free_group.of_unique_lift IsFreeGroup.ofUniqueLift
/-- Being a free group transports across group isomorphisms. -/
def ofMulEquiv {H : Type _} [Group H] (h : G ≃* H) : IsFreeGroup H
where
Generators := Generators G
MulEquiv' := (MulEquiv G).trans h
#align is_free_group.of_mul_equiv IsFreeGroup.ofMulEquiv
end IsFreeGroup
|
[GOAL]
⊢ StableUnderComposition fun {R S} [CommRing R] [CommRing S] f => IsIntegral f
[PROOFSTEP]
introv R hf hg
[GOAL]
R S T : Type u_1
inst✝² : CommRing R
inst✝¹ : CommRing S
inst✝ : CommRing T
f : R →+* S
g : S →+* T
hf : IsIntegral f
hg : IsIntegral g
⊢ IsIntegral (comp g f)
[PROOFSTEP]
exact RingHom.isIntegral_trans _ _ hf hg
[GOAL]
⊢ RespectsIso fun {R S} [CommRing R] [CommRing S] f => IsIntegral f
[PROOFSTEP]
apply isIntegral_stableUnderComposition.respectsIso
[GOAL]
⊢ ∀ {R S : Type u_1} [inst : CommRing R] [inst_1 : CommRing S] (e : R ≃+* S), IsIntegral (RingEquiv.toRingHom e)
[PROOFSTEP]
introv x
[GOAL]
R S : Type u_1
inst✝¹ : CommRing R
inst✝ : CommRing S
e : R ≃+* S
x : S
⊢ IsIntegralElem (RingEquiv.toRingHom e) x
[PROOFSTEP]
rw [← e.apply_symm_apply x]
[GOAL]
R S : Type u_1
inst✝¹ : CommRing R
inst✝ : CommRing S
e : R ≃+* S
x : S
⊢ IsIntegralElem (RingEquiv.toRingHom e) (↑e (↑(RingEquiv.symm e) x))
[PROOFSTEP]
apply RingHom.is_integral_map
[GOAL]
⊢ StableUnderBaseChange fun {R S} [CommRing R] [CommRing S] f => IsIntegral f
[PROOFSTEP]
refine' StableUnderBaseChange.mk _ isIntegral_respectsIso _
[GOAL]
⊢ ∀ ⦃R S T : Type u_1⦄ [inst : CommRing R] [inst_1 : CommRing S] [inst_2 : CommRing T] [inst_3 : Algebra R S]
[inst_4 : Algebra R T], IsIntegral (algebraMap R T) → IsIntegral includeLeftRingHom
[PROOFSTEP]
introv h x
[GOAL]
R S T : Type u_1
inst✝⁴ : CommRing R
inst✝³ : CommRing S
inst✝² : CommRing T
inst✝¹ : Algebra R S
inst✝ : Algebra R T
h : IsIntegral (algebraMap R T)
x : S ⊗[R] T
⊢ IsIntegralElem includeLeftRingHom x
[PROOFSTEP]
refine' TensorProduct.induction_on x _ _ _
[GOAL]
case refine'_1
R S T : Type u_1
inst✝⁴ : CommRing R
inst✝³ : CommRing S
inst✝² : CommRing T
inst✝¹ : Algebra R S
inst✝ : Algebra R T
h : IsIntegral (algebraMap R T)
x : S ⊗[R] T
⊢ IsIntegralElem includeLeftRingHom 0
[PROOFSTEP]
apply isIntegral_zero
[GOAL]
case refine'_2
R S T : Type u_1
inst✝⁴ : CommRing R
inst✝³ : CommRing S
inst✝² : CommRing T
inst✝¹ : Algebra R S
inst✝ : Algebra R T
h : IsIntegral (algebraMap R T)
x : S ⊗[R] T
⊢ ∀ (x : S) (y : T), IsIntegralElem includeLeftRingHom (x ⊗ₜ[R] y)
[PROOFSTEP]
intro x y
[GOAL]
case refine'_2
R S T : Type u_1
inst✝⁴ : CommRing R
inst✝³ : CommRing S
inst✝² : CommRing T
inst✝¹ : Algebra R S
inst✝ : Algebra R T
h : IsIntegral (algebraMap R T)
x✝ : S ⊗[R] T
x : S
y : T
⊢ IsIntegralElem includeLeftRingHom (x ⊗ₜ[R] y)
[PROOFSTEP]
exact IsIntegral.tmul x (h y)
[GOAL]
case refine'_3
R S T : Type u_1
inst✝⁴ : CommRing R
inst✝³ : CommRing S
inst✝² : CommRing T
inst✝¹ : Algebra R S
inst✝ : Algebra R T
h : IsIntegral (algebraMap R T)
x : S ⊗[R] T
⊢ ∀ (x y : S ⊗[R] T),
IsIntegralElem includeLeftRingHom x →
IsIntegralElem includeLeftRingHom y → IsIntegralElem includeLeftRingHom (x + y)
[PROOFSTEP]
intro x y hx hy
[GOAL]
case refine'_3
R S T : Type u_1
inst✝⁴ : CommRing R
inst✝³ : CommRing S
inst✝² : CommRing T
inst✝¹ : Algebra R S
inst✝ : Algebra R T
h : IsIntegral (algebraMap R T)
x✝ x y : S ⊗[R] T
hx : IsIntegralElem includeLeftRingHom x
hy : IsIntegralElem includeLeftRingHom y
⊢ IsIntegralElem includeLeftRingHom (x + y)
[PROOFSTEP]
exact isIntegral_add hx hy
|
def allPairs (xs : List α) (ys : List β) : List (α × β) :=
let rec aux
| [], r => r
| x::xs, r =>
let rec aux₂
| [], r => r
| y::ys, r => (x, y) :: r
aux₂ ys (aux xs r)
aux xs []
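-- Added note: `aux₂` above discards the recursive tail `ys`, so `allPairs` only ever pairs `x`
-- with the head of `ys`; `allPairsFixed` below recurses on `ys` and produces all pairs.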
def allPairsFixed (xs : List α) (ys : List β) : List (α × β) :=
let rec aux
| [], r => r
| x::xs, r =>
let rec aux₂
| [], r => r
| y::ys, r => aux₂ ys ((x, y) :: r)
aux₂ ys (aux xs r)
aux xs []
#eval allPairsFixed [1, 2, 3] ['a', 'b']
example : (allPairsFixed [1, 2, 3] ['a', 'b']) = [(1, 'b'), (1, 'a'), (2, 'b'), (2, 'a'), (3, 'b'), (3, 'a')] :=
rfl
example : (allPairsFixed (List.iota 3) (List.iota 4) |>.length) = 12 :=
rfl
|
import data.real.basic
--OUTPUT 1
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
begin
have h1 : a + b + c > 0, from sorry,
have h2 : a + b + c = 3 * (a + b + c) / 2, from sorry,
have h3 : a + b + c = 3 * (a + b + c) / 2, from sorry,
have h4 : a + b + c = 3 * (a + b + c) / 2, from sorry,
have h5 : a + b + c = 3 * (a + b + c) / 2, from sorry,
have h6 : a + b + c = 3 * (a + b + c) / 2, from sorry,
have h7 : a + b + c = 3 * (a + b + c) / 2, from sorry,
calc (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) : sorry,
end
--OUTPUT 2
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
begin
calc (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥
(a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) : by sorry
... = 9 / 2 : by sorry
... = 9 / ((b + c) + (a + c) + (a + b)) : by sorry
... ≥ 3 / ((b + c) + (a + c) + (a + b)) : by sorry
... ≥ 3 / 2 : by sorry,
end
--OUTPUT 3
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
begin
have h1 : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ ((a + b + c) / (b + c)) + ((a + b + c) / (a + c)) + ((a + b + c) / (a + b)), from sorry,
have h2 : ((a + b + c) / (b + c)) + ((a + b + c) / (a + c)) + ((a + b + c) / (a + b)) ≥ ((9 * (a + b + c)) / ((b + c) + (a + c) + (a + b))), from sorry,
have h3 : ((9 * (a + b + c)) / ((b + c) + (a + c) + (a + b))) ≥ (((1 / (b + c)) + (1 / (a + c)) + (1 / (a + b))) / 3), from sorry,
have h4 : (((1 / (b + c)) + (1 / (a + c)) + (1 / (a + b))) / 3) ≥ (3 / ((b + c) + (a + c) + (a + b))), from sorry,
have h5 : (1 / (b + c)) + (1 / (a + c)) + (1 / (a + b)) ≥ 3, from sorry,
show (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2), from sorry,
end
--OUTPUT 4
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
begin
have h1 : (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) ≥ 9 / 2, from sorry,
have h2 : (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) ≥ (9 * (a + b + c)) / ((b + c) + (a + c) + (a + b)), from sorry,
have h3 : ((1 / (b + c)) + (1 / (a + c)) + (1 / (a + b))) / 3 ≥ (3 / ((b + c) + (a + c) + (a + b))), from sorry,
show (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2), from sorry,
end
--OUTPUT 5
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
begin
calc (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) : by {
have h1 : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) = (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b), from sorry,
have h2 : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (9 / 2) * ((a + b + c) / ((b + c) + (a + c) + (a + b))), from sorry,
have h3 : (a + b + c) / ((b + c) + (a + c) + (a + b)) = (1 / 2), from sorry,
show (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2), from sorry,
},
end
--OUTPUT 6
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
by {
calc (a / (b + c)) + (b / (a + c)) + (c / (a + b))
≥ ((a + b + c) / (b + c)) + ((a + b + c) / (a + c)) + ((a + b + c) / (a + b)) : sorry
... ≥ (9 / 2) : sorry
... ≥ (3 / 2) : sorry,
}
/--`theorem`
Real Multiplication Distributes over Addition
Let $a$, $b$ and $c$ be real numbers.
Then:
:$a \cdot \paren {b + c} = a \cdot b + a \cdot c$
`proof`
{{begin-eqn}}
{{eqn | l = a \cdot \paren {b + c}
| o = =
| r = a \cdot b + a \cdot c
}}
{{eqn | ll= \leadstoandfrom
| l = a \cdot b + a \cdot c
| o = =
| r = a \cdot \paren {b + c}
| c = by commutativity of multiplication
}}
{{end-eqn}}
{{qed}}
-/
theorem real_multiplication_distributes_over_addition (a b c : ℝ) : a * (b + c) = a * b + a * c :=
begin
calc a * (b + c) = a * (c + b) : by sorry
... = a * c + a * b : by sorry
... = a * b + a * c : by sorry,
end
--OUTPUT 7
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
begin
have h1 : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b), from sorry,
have h2 : (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) ≥ (9 * (a + b + c)) / ( (b + c) + (a + c) + (a + b)), from sorry,
have h3 : (9 * (a + b + c)) / ( (b + c) + (a + c) + (a + b)) ≥ (3 / ((b + c) + (a + c) + (a + b))), from sorry,
have h4 : (3 / ((b + c) + (a + c) + (a + b))) ≥ (3 / 2), from sorry,
show (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2), from sorry,
end
--OUTPUT 8
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
begin
have h1 : (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) ≥ 9 / 2,
begin
calc (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b)
= (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) : by sorry
... = (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) : by sorry
... = (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) : by sorry
... ≥ 3 / 2 : by sorry,
end,
show (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2),
begin
calc (a / (b + c)) + (b / (a + c)) + (c / (a + b))
= (a + b + c) / (b + c) + (a + b + c) / (a + c) + (a + b + c) / (a + b) : by sorry
... ≥ (9 * (a + b + c)) / (2 * (b + c + a + c + a + b)) : by sorry
... ≥ (9 * (a + b + c)) / (2 * (3 * (a + b + c))) : by sorry
... ≥ (3 / 2) : by sorry,
end,
end
/- FEW SHOT PROMPTS TO CODEX(START)
/--`theorem`
Power Set is Closed under Intersection
Let $S$ be a set.
Let $\powerset S$ be the power set of $S$.
Then:
:$\forall A, B \in \powerset S: A \cap B \in \powerset S$
`proof`
Let $A, B \in \powerset S$.
Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$.
From Intersection is Subset we have that $A \cap B \subseteq A$.
It follows from Subset Relation is Transitive that $A \cap B \subseteq S$.
Thus $A \cap B \in \powerset S$ and closure is proved.
{{qed}}
-/
theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S :=
begin
assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S),
have h1 : (A ⊆ S) ∧ (B ⊆ S), from sorry,
have h2 : (A ∩ B) ⊆ A, from sorry,
have h3 : (A ∩ B) ⊆ S, from sorry,
show (A ∩ B) ∈ 𝒫 S, from sorry,
end
/--`theorem`
Square of Sum
:$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$
`proof`
Follows from the distribution of multiplication over addition:
{{begin-eqn}}
{{eqn | l = \left({x + y}\right)^2
| r = \left({x + y}\right) \cdot \left({x + y}\right)
}}
{{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right)
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x^2 + 2xy + y^2
| c =
}}
{{end-eqn}}
{{qed}}
-/
theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
begin
calc (x + y)^2 = (x+y)*(x+y) : by sorry
... = x*(x+y) + y*(x+y) : by sorry
... = x*x + x*y + y*x + y*y : by sorry
... = x^2 + 2*x*y + y^2 : by sorry,
end
/--`theorem`
Identity of Group is Unique
Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$.
`proof`
From Group has Latin Square Property, there exists a unique $x \in G$ such that:
:$a x = b$
and there exists a unique $y \in G$ such that:
:$y a = b$
Setting $b = a$, this becomes:
There exists a unique $x \in G$ such that:
:$a x = a$
and there exists a unique $y \in G$ such that:
:$y a = a$
These $x$ and $y$ are both $e$, by definition of identity element.
{{qed}}
-/
theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a :=
begin
have h1 : ∀ a b : G, ∃! x : G, a * x = b, from sorry,
have h2 : ∀ a b : G, ∃! y : G, y * a = b, from sorry,
have h3 : ∀ a : G, ∃! x : G, a * x = a, from sorry,
have h4 : ∀ a : G, ∃! y : G, y * a = a, from sorry,
have h5 : ∀ a : G, classical.some (h3 a) = (1 : G), from sorry,
have h6 : ∀ a : G, classical.some (h4 a) = (1 : G), from sorry,
show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by {
use (1 : G),
have h7 : ∀ e : G, (∀ a : G, e * a = a ∧ a * e = a) → e = 1, from by {
assume (e : G) (h7 : ∀ a : G, e * a = a ∧ a * e = a),
have h8 : ∀ a : G, e = classical.some (h3 a), from sorry,
have h9 : ∀ a : G, e = classical.some (h4 a), from sorry,
show e = (1 : G), from sorry,
},
sorry,
}
end
/--`theorem`
Nesbitt's inequality
Let $a$, $b$ and $c$ be positive real numbers.
Then:
:$\dfrac a {b + c} + \dfrac b {a + c} + \dfrac c {a + b} \ge \dfrac 3 2$
`proof`
{{begin-eqn}}
{{eqn | l = \frac a {b + c} + \frac b {a + c} + \frac c {a + b}
| o = \ge
| r = \dfrac 3 2
}}
{{eqn | ll= \leadstoandfrom
| l = \frac {a + b + c} {b + c} + \frac {a + b + c} {a + c} + \frac {a + b + c} {a + b}
| o = \ge
| r = \frac 9 2
| c = by adding $3$
}}
{{eqn | ll= \leadstoandfrom
| l = \frac {a + b + c} {b + c} + \frac {a + b + c} {a + c} + \frac {a + b + c} {a + b}
| o = \ge
| r = \frac {9 \paren {a + b + c} } {\paren {b + c} + \paren {a + c} + \paren {a + b} }
| c = as $\dfrac {a + b + c} {\paren {b + c} + \paren {a + c} + \paren {a + b} } = \dfrac 1 2$
}}
{{eqn | ll= \leadstoandfrom
| l = \frac {\frac 1 {b + c} + \frac 1 {a + c} + \frac 1 {a + b} } 3
| o = \ge
| r = \frac 3 {\paren {b + c} + \paren {a + c} + \paren {a + b} }
| c = dividing by $3 \paren {a + b + c}$
}}
{{end-eqn}}
These are the arithmetic mean and the harmonic mean of $\dfrac 1 {b + c}$, $\dfrac 1 {a + c}$ and $\dfrac 1 {a + b}$.
From Arithmetic Mean is Never Less than Harmonic Mean the last inequality is true.
Thus Nesbitt's Inequality holds.
{{qed}}
-/
theorem nesbitt_inequality {a b c : ℝ} (ha : 0 < a) (hb : 0 < b) (hc : 0 < c) : (a / (b + c)) + (b / (a + c)) + (c / (a + b)) ≥ (3 / 2) :=
FEW SHOT PROMPTS TO CODEX(END)-/
|
function [E,GReuss,GHill] = shearModulus(S,h,u)
% shear modulus for an compliance tensor
%
% Syntax
%
% [GV,GR,GVRH] = shearModulus(S) % the isotropic case
%
% E = shearModulus(S,h,u) % the anisotropic case with plane h and shear direction u
% E = shearModulus(S,[],u)
% E = shearModulus(S,h,[])
%
% Input
% C - elastic @stiffnessTensor
% h - shear plane @vector3d
% u - shear direction @vector3d
%
% Output
% E - shear modulus
% GVoigt - Voigt effective shear modulus, upper bound
% GReuss - Reuss effective shear modulus, lower bound
% GHill - Hill effective shear modulus
%
% Description
%
% $$E = \frac{1}{4 S_{ijkl} h_i u_j h_k u_l}$$
%
% See also
% complianceTensor/YoungsModulus complianceTensor/volumeCompressibility complianceTensor/ChristoffelTensor
if nargin == 1 % the isotropic case
% compute stifness tensor as 6x6 matrices
C = matrix(inv(S),'voigt');
S = matrix(S,'voigt');
% the Voigt upper bound
% GV = ((C(1,1)+C(2,2)+C(3,3))-(C(1,2)+C(2,3)+C(3,1))+3*(C(4,4)+C(5,5)+C(6,6)))/15
GVoigt = ((C(1,1,:) + C(2,2) + C(3,3,:)) ...
- (C(1,2,:) + C(2,3,:) + C(3,1,:)) ...
+ 3 * (C(4,4,:) + C(5,5,:) + C(6,6,:))) ./ 15;
% the Reuss lower bound
% GR = 15/(4*(S(1,1)+S(2,2)+S(3,3))-4*(S(1,2)+S(2,3)+S(3,1))+3*(S(4,4)+S(5,5)+S(6,6)))
GReuss = 15 ./ (4 * (S(1,1,:) + S(2,2,:) + S(3,3,:)) ...
- 4 * (S(1,2,:) + S(2,3,:) + S(3,1,:)) ...
+ 3 * (S(4,4,:) + S(5,5,:) + S(6,6,:)));
% Voigt Reuss Hill average
GHill = 0.5.*(GVoigt + GReuss);
E = GVoigt;
elseif nargin == 2 || isempty(u)
  % only the plane h is given - return the shear modulus as a function of the shear direction u
  E = S2FunHarmonicSym.quadrature(@(u) shearModulus(S,h,u),'bandwidth',4,S.CS);
elseif isempty(h)
  % only the direction u is given - return the shear modulus as a function of the plane normal h
  E = S2FunHarmonicSym.quadrature(@(h) shearModulus(S,h,u),'bandwidth',4,S.CS);
else
% the anisotropic shear modulus
E = 0.25./EinsteinSum(S,[-1 -2 -3 -4],h,-1,u,-2,h,-3,u,-4);
end |
## Data Tables
# Display your matrices or data.tables in sorting
# and filtering format
install.packages('DT')
library('DT')
datatable(iris, options = list(pageLength = 7))
## Time-series Line Graph
install.packages('dygraphs')
library('dygraphs')
dygraph(nhtemp, main = "New Haven Temperatures") %>%
dyRangeSelector(dateWindow = c("1926-01-01", "1970-01-01"))
## Scatterplot
devtools::install_github('hrbrmstr/metricsgraphics')
library('metricsgraphics')
mjs_plot(mtcars, x=wt, y=mpg) %>%
mjs_point(color_accessor=carb, size_accessor=carb) %>%
mjs_labs(x="Weight of Car", y="Miles per Gallon")
## Network Graph
install.packages('networkD3')
library('networkD3')
data(MisLinks, MisNodes)
forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
Group = "group", opacity = 0.4)
## ThreeJS
install.packages('threejs')
library('threejs')
z <- seq(-10, 10, 0.01)
x <- cos(z*4)
y <- sin(z*4)
scatterplot3js(x,y,z, color=rainbow(length(z)))
## Diagrams and Flowcharts
install.packages('DiagrammeR')
library('DiagrammeR')
grViz("
digraph {
layout = twopi
node [shape = circle]
A -> {B C D}
}")
|
From ch2o_compcert Require Export locals_example_ch2o_core_c ch2o_safety_bigstep.
From ch2o Require Export restricted_smallstep.
Local Open Scope string_scope.
Theorem locals_example_ch2o_safe: ch2o_safe_program Γ δ (λ z, z = 2%Z).
Proof.
eapply ch2o_safe_program_bigstep with (1:=Γ_valid) (2:=δ_valid) (3:=δ_main) (4:=eq_refl) (6:=eq_refl).
assert (17 ÷ 2 = 8)%Z. reflexivity.
assert (15 ÷ 5 = 3)%Z. reflexivity.
assert (8 ÷ 3 = 2)%Z. reflexivity.
assert (2 = (17 ÷ 2) ÷ (15 ÷ 5))%Z. reflexivity.
rewrite H2 at 2.
repeat econstructor; try (unfold int_lower || unfold int_upper); simpl; lia.
Qed.
|
function det = sgbdi ( abd, lda, n, ml, mu, ipvt )
%*****************************************************************************80
%
%% SGBDI computes the determinant of a band matrix factored by SGBCO or SGBFA.
%
% Discussion:
%
% If the inverse is needed, use SGBSL N times.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 09 November 2006
%
% Author:
%
% MATLAB version by John Burkardt.
%
% Reference:
%
% Dongarra, Moler, Bunch and Stewart,
% LINPACK User's Guide,
% SIAM, (Society for Industrial and Applied Mathematics),
% 3600 University City Science Center,
% Philadelphia, PA, 19104-2688.
% ISBN 0-89871-172-X
%
% Parameters:
%
% Input, real ABD(LDA,N), the output from SGBCO or SGBFA.
%
% Input, integer LDA, the leading dimension of the array ABD.
%
% Input, integer N, the order of the matrix.
%
% Input, integer ML, MU, the number of diagonals below and above the
% main diagonal. 0 <= ML < N, 0 <= MU < N.
%
% Input, integer IPVT(N), the pivot vector from SGBCO or SGBFA.
%
% Output, real DET(2), the determinant of the original matrix.
% determinant = DET(1) * 10.0**DET(2)
% with 1.0 <= abs ( DET(1) ) < 10.0 or DET(1) = 0.0.
%
ten = 10.0;
m = ml + mu + 1;
det(1) = 1.0;
det(2) = 0.0;
for i = 1 : n
if ( ipvt(i) ~= i )
det(1) = -det(1);
end
det(1) = abd(m,i) * det(1);
if ( det(1) == 0.0 )
return
end
while ( abs ( det(1) ) < 1.0 )
det(1) = ten * det(1);
det(2) = det(2) - 1.0;
end
while ( ten <= abs ( det(1) ) )
det(1) = det(1) / ten;
det(2) = det(2) + 1.0;
end
end
return
end
|
R version 3.2.3 (2015-12-10) -- "Wooden Christmas-Tree"
Copyright (C) 2015 The R Foundation for Statistical Computing
Platform: x86_64-w64-mingw32/x64 (64-bit)
R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.
R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.
Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.
[Workspace loaded from ~/.RData]
protein = read.csv("d:/Europenaprotein.csv",header=T)
head(protein)
set.seed(123456789)
groupMeat <- kmeans(protein[,c("WhiteMeat","RedMeat")], centers=3, nstart=10)
groupMeat
o=order(groupMeat$cluster)
data.frame(protein$Country[o],groupMeat$cluster[o])
plot(protein$Red, protein$White, type="n", xlim=c(3,19), xlab="Red Meat", ylab="White Meat")
text(x=protein$Red, y=protein$White, labels=protein$Country,col=groupMeat$cluster+1)
set.seed(123456789)
groupProtein <- kmeans(protein[,-1], centers=7, nstart=10)
o=order(groupProtein$cluster)
data.frame(protein$Country[o],groupProtein$cluster[o])
library(cluster)
clusplot(protein[,-1], groupProtein$cluster, main='2D representation of the Cluster solution', color=TRUE, shade=TRUE, labels=2, lines=0)
foodagg=agnes(protein,diss=FALSE,metric="euclidian")
foodagg
plot(foodagg, main='Dendrogram')
groups <- cutree(foodagg, k=4)
rect.hclust(foodagg, k=4, border="red") |
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
(*
* Tactic for solving monadic equalities, such as:
*
 * liftE (return 3) = returnOk 3
*
* Theorems of the form:
*
* ((a, s') \<in> fst (A s)) = P a s s'
*
* and
*
* snd (A s) = P s
*
* are added to the "monad_eq" set.
*)
theory MonadEq
imports
In_Monad
NonDetMonadVCG
begin
(* Setup "monad_eq" attributes. *)
ML \<open>
structure MonadEqThms = Named_Thms (
val name = Binding.name "monad_eq"
val description = "monad equality-prover theorems"
)
\<close>
attribute_setup monad_eq = \<open>
Attrib.add_del
(Thm.declaration_attribute MonadEqThms.add_thm)
(Thm.declaration_attribute MonadEqThms.del_thm)\<close>
"Monad equality-prover theorems"
(* Setup tactic. *)
ML \<open>
fun monad_eq_tac ctxt =
let
(* Set a simpset as being hidden, so warnings are not printed from it. *)
val ctxt' = Context_Position.set_visible false ctxt
in
CHANGED (clarsimp_tac (ctxt' addsimps (MonadEqThms.get ctxt')) 1)
end
\<close>
method_setup monad_eq = \<open>
Method.sections Clasimp.clasimp_modifiers >> (K (SIMPLE_METHOD o monad_eq_tac))\<close>
"prove equality on monads"
lemma monad_eq_simp_state[monad_eq]:
"((A :: ('s, 'a) nondet_monad) s = B s') =
((\<forall>r t. (r, t) \<in> fst (A s) \<longrightarrow> (r, t) \<in> fst (B s'))
\<and> (\<forall>r t. (r, t) \<in> fst (B s') \<longrightarrow> (r, t) \<in> fst (A s))
\<and> (snd (A s) = snd (B s')))"
by (auto intro!: set_eqI prod_eqI)
lemma monad_eq_simp[monad_eq]:
"((A :: ('s, 'a) nondet_monad) = B) =
((\<forall>r t s. (r, t) \<in> fst (A s) \<longrightarrow> (r, t) \<in> fst (B s))
\<and> (\<forall>r t s. (r, t) \<in> fst (B s) \<longrightarrow> (r, t) \<in> fst (A s))
\<and> (\<forall>x. snd (A x) = snd (B x)))"
by (auto intro!: set_eqI prod_eqI)
declare in_monad[monad_eq]
declare in_bindE[monad_eq]
(* Test *)
lemma "returnOk 3 = liftE (return 3)"
apply monad_eq
oops
end
|
/-
Copyright (c) 2018 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Simon Hudon
-/
import data.pfunctor.multivariate.W
import data.qpf.multivariate.basic
/-!
# The initial algebra of a multivariate qpf is again a qpf.
For an `(n+1)`-ary QPF `F (α₀,..,αₙ)`, we take the least fixed point of `F` with
respect to its last argument `αₙ`. The result is an `n`-ary functor: `fix F (α₀,..,αₙ₋₁)`.
Making `fix F` into a functor allows us to take the fixed point, compose with other functors
and take a fixed point again.
## Main definitions
* `fix.mk` - constructor
* `fix.dest` - destructor
* `fix.rec` - recursor: basis for defining functions by structural recursion on `fix F α`
* `fix.drec` - dependent recursor: generalization of `fix.rec` where
the result type of the function is allowed to depend on the `fix F α` value
* `fix.rec_eq` - defining equation for `recursor`
* `fix.ind` - induction principle for `fix F α`
## Implementation notes
For `F` a QPF, we define `fix F α` in terms of the W-type of the polynomial functor `P` of `F`.
We define the relation `Wequiv` and take its quotient as the definition of `fix F α`.
```lean
inductive Wequiv {α : typevec n} : q.P.W α → q.P.W α → Prop
| ind (a : q.P.A) (f' : q.P.drop.B a ⟹ α) (f₀ f₁ : q.P.last.B a → q.P.W α) :
(∀ x, Wequiv (f₀ x) (f₁ x)) → Wequiv (q.P.W_mk a f' f₀) (q.P.W_mk a f' f₁)
| abs (a₀ : q.P.A) (f'₀ : q.P.drop.B a₀ ⟹ α) (f₀ : q.P.last.B a₀ → q.P.W α)
(a₁ : q.P.A) (f'₁ : q.P.drop.B a₁ ⟹ α) (f₁ : q.P.last.B a₁ → q.P.W α) :
abs ⟨a₀, q.P.append_contents f'₀ f₀⟩ = abs ⟨a₁, q.P.append_contents f'₁ f₁⟩ →
Wequiv (q.P.W_mk a₀ f'₀ f₀) (q.P.W_mk a₁ f'₁ f₁)
| trans (u v w : q.P.W α) : Wequiv u v → Wequiv v w → Wequiv u w
```
See [avigad-carneiro-hudon2019] for more details.
## Reference
* Jeremy Avigad, Mario M. Carneiro and Simon Hudon.
[*Data Types as Quotients of Polynomial Functors*][avigad-carneiro-hudon2019]
-/
universes u v
namespace mvqpf
open typevec
open mvfunctor (liftp liftr)
open_locale mvfunctor
variables {n : ℕ} {F : typevec.{u} (n+1) → Type u} [mvfunctor F] [q : mvqpf F]
include q
/-- `recF` is used as a basis for defining the recursor on `fix F α`. `recF`
traverses recursively the W-type generated by `q.P` using a function on `F`
as a recursive step -/
def recF {α : typevec n} {β : Type*} (g : F (α.append1 β) → β) : q.P.W α → β :=
q.P.W_rec (λ a f' f rec, g (abs ⟨a, split_fun f' rec⟩))
theorem recF_eq {α : typevec n} {β : Type*} (g : F (α.append1 β) → β)
(a : q.P.A) (f' : q.P.drop.B a ⟹ α) (f : q.P.last.B a → q.P.W α) :
recF g (q.P.W_mk a f' f) = g (abs ⟨a, split_fun f' (recF g ∘ f)⟩) :=
by rw [recF, mvpfunctor.W_rec_eq]; refl
theorem recF_eq' {α : typevec n} {β : Type*} (g : F (α.append1 β) → β)
(x : q.P.W α) :
recF g x = g (abs ((append_fun id (recF g)) <$$> q.P.W_dest' x)) :=
begin
apply q.P.W_cases _ x,
intros a f' f,
rw [recF_eq, q.P.W_dest'_W_mk, mvpfunctor.map_eq, append_fun_comp_split_fun,
typevec.id_comp]
end
/-- Equivalence relation on W-types that represent the same `fix F`
value -/
inductive Wequiv {α : typevec n} : q.P.W α → q.P.W α → Prop
| ind (a : q.P.A) (f' : q.P.drop.B a ⟹ α) (f₀ f₁ : q.P.last.B a → q.P.W α) :
(∀ x, Wequiv (f₀ x) (f₁ x)) → Wequiv (q.P.W_mk a f' f₀) (q.P.W_mk a f' f₁)
| abs (a₀ : q.P.A) (f'₀ : q.P.drop.B a₀ ⟹ α) (f₀ : q.P.last.B a₀ → q.P.W α)
(a₁ : q.P.A) (f'₁ : q.P.drop.B a₁ ⟹ α) (f₁ : q.P.last.B a₁ → q.P.W α) :
abs ⟨a₀, q.P.append_contents f'₀ f₀⟩ = abs ⟨a₁, q.P.append_contents f'₁ f₁⟩ →
Wequiv (q.P.W_mk a₀ f'₀ f₀) (q.P.W_mk a₁ f'₁ f₁)
| trans (u v w : q.P.W α) : Wequiv u v → Wequiv v w → Wequiv u w
theorem recF_eq_of_Wequiv (α : typevec n) {β : Type*} (u : F (α.append1 β) → β)
(x y : q.P.W α) :
Wequiv x y → recF u x = recF u y :=
begin
apply q.P.W_cases _ x,
intros a₀ f'₀ f₀,
apply q.P.W_cases _ y,
intros a₁ f'₁ f₁,
intro h, induction h,
case mvqpf.Wequiv.ind : a f' f₀ f₁ h ih { simp only [recF_eq, function.comp, ih] },
case mvqpf.Wequiv.abs : a₀ f'₀ f₀ a₁ f'₁ f₁ h
{ simp only [recF_eq', abs_map, mvpfunctor.W_dest'_W_mk, h] },
case mvqpf.Wequiv.trans : x y z e₁ e₂ ih₁ ih₂
{ exact eq.trans ih₁ ih₂ }
end
theorem Wequiv.abs' {α : typevec n} (x y : q.P.W α)
(h : abs (q.P.W_dest' x) = abs (q.P.W_dest' y)) :
Wequiv x y :=
begin
revert h,
apply q.P.W_cases _ x,
intros a₀ f'₀ f₀,
apply q.P.W_cases _ y,
intros a₁ f'₁ f₁,
apply Wequiv.abs
end
theorem Wequiv.refl {α : typevec n} (x : q.P.W α) : Wequiv x x :=
by apply q.P.W_cases _ x; intros a f' f; exact Wequiv.abs a f' f a f' f rfl
theorem Wequiv.symm {α : typevec n} (x y : q.P.W α) : Wequiv x y → Wequiv y x :=
begin
intro h, induction h,
case mvqpf.Wequiv.ind : a f' f₀ f₁ h ih
{ exact Wequiv.ind _ _ _ _ ih },
case mvqpf.Wequiv.abs : a₀ f'₀ f₀ a₁ f'₁ f₁ h
{ exact Wequiv.abs _ _ _ _ _ _ h.symm },
case mvqpf.Wequiv.trans : x y z e₁ e₂ ih₁ ih₂
{ exact mvqpf.Wequiv.trans _ _ _ ih₂ ih₁}
end
/-- maps every element of the W type to a canonical representative -/
def Wrepr {α : typevec n} : q.P.W α → q.P.W α := recF (q.P.W_mk' ∘ repr)
theorem Wrepr_W_mk {α : typevec n}
(a : q.P.A) (f' : q.P.drop.B a ⟹ α) (f : q.P.last.B a → q.P.W α) :
Wrepr (q.P.W_mk a f' f) =
q.P.W_mk' (repr (abs ((append_fun id Wrepr) <$$> ⟨a, q.P.append_contents f' f⟩))) :=
by rw [Wrepr, recF_eq', q.P.W_dest'_W_mk]; refl
theorem Wrepr_equiv {α : typevec n} (x : q.P.W α) : Wequiv (Wrepr x) x :=
begin
apply q.P.W_ind _ x, intros a f' f ih,
apply Wequiv.trans _ (q.P.W_mk' ((append_fun id Wrepr) <$$> ⟨a, q.P.append_contents f' f⟩)),
{ apply Wequiv.abs',
rw [Wrepr_W_mk, q.P.W_dest'_W_mk', q.P.W_dest'_W_mk', abs_repr] },
rw [q.P.map_eq, mvpfunctor.W_mk', append_fun_comp_split_fun, id_comp],
apply Wequiv.ind, exact ih
end
theorem Wequiv_map {α β : typevec n} (g : α ⟹ β) (x y : q.P.W α) :
Wequiv x y → Wequiv (g <$$> x) (g <$$> y) :=
begin
intro h, induction h,
case mvqpf.Wequiv.ind : a f' f₀ f₁ h ih
{ rw [q.P.W_map_W_mk, q.P.W_map_W_mk], apply Wequiv.ind, apply ih },
case mvqpf.Wequiv.abs : a₀ f'₀ f₀ a₁ f'₁ f₁ h
{ rw [q.P.W_map_W_mk, q.P.W_map_W_mk], apply Wequiv.abs,
show abs (q.P.obj_append1 a₀ (g ⊚ f'₀) (λ x, q.P.W_map g (f₀ x))) =
abs (q.P.obj_append1 a₁ (g ⊚ f'₁) (λ x, q.P.W_map g (f₁ x))),
rw [←q.P.map_obj_append1, ←q.P.map_obj_append1, abs_map, abs_map, h] },
case mvqpf.Wequiv.trans : x y z e₁ e₂ ih₁ ih₂
{ apply mvqpf.Wequiv.trans, apply ih₁, apply ih₂ }
end
/--
Define the fixed point as the quotient of trees under the equivalence relation.
-/
def W_setoid (α : typevec n) : setoid (q.P.W α) :=
⟨Wequiv, @Wequiv.refl _ _ _ _ _, @Wequiv.symm _ _ _ _ _, @Wequiv.trans _ _ _ _ _⟩
local attribute [instance] W_setoid
/-- Least fixed point of the functor `F`. The result is a functor with one fewer parameter
than the input. For `F a b c` a ternary functor, `fix F` is a binary functor such that
```lean
fix F a b = F a b (fix F a b)
```
-/
def fix {n : ℕ} (F : typevec (n+1) → Type*) [mvfunctor F] [q : mvqpf F] (α : typevec n) :=
quotient (W_setoid α : setoid (q.P.W α))
attribute [nolint has_inhabited_instance] fix
/-- `fix F` is a functor -/
def fix.map {α β : typevec n} (g : α ⟹ β) : fix F α → fix F β :=
quotient.lift (λ x : q.P.W α, ⟦q.P.W_map g x⟧)
(λ a b h, quot.sound (Wequiv_map _ _ _ h))
instance fix.mvfunctor : mvfunctor (fix F) :=
{ map := @fix.map _ _ _ _}
variable {α : typevec.{u} n}
/-- Recursor for `fix F` -/
def fix.rec {β : Type u} (g : F (α ::: β) → β) : fix F α → β :=
quot.lift (recF g) (recF_eq_of_Wequiv α g)
/-- Access W-type underlying `fix F` -/
def fix_to_W : fix F α → q.P.W α :=
quotient.lift Wrepr (recF_eq_of_Wequiv α (λ x, q.P.W_mk' (repr x)))
/-- Constructor for `fix F` -/
def fix.mk (x : F (append1 α (fix F α))) : fix F α :=
quot.mk _ (q.P.W_mk' (append_fun id fix_to_W <$$> repr x))
/-- Destructor for `fix F` -/
def fix.dest : fix F α → F (append1 α (fix F α)) :=
fix.rec (mvfunctor.map (append_fun id fix.mk))
theorem fix.rec_eq {β : Type u} (g : F (append1 α β) → β) (x : F (append1 α (fix F α))) :
fix.rec g (fix.mk x) = g (append_fun id (fix.rec g) <$$> x) :=
have recF g ∘ fix_to_W = fix.rec g,
by { apply funext, apply quotient.ind, intro x, apply recF_eq_of_Wequiv,
apply Wrepr_equiv },
begin
conv { to_lhs, rw [fix.rec, fix.mk], dsimp },
cases h : repr x with a f,
rw [mvpfunctor.map_eq, recF_eq', ←mvpfunctor.map_eq, mvpfunctor.W_dest'_W_mk'],
rw [←mvpfunctor.comp_map, abs_map, ←h, abs_repr, ←append_fun_comp, id_comp, this]
end
theorem fix.ind_aux (a : q.P.A) (f' : q.P.drop.B a ⟹ α) (f : q.P.last.B a → q.P.W α) :
fix.mk (abs ⟨a, q.P.append_contents f' (λ x, ⟦f x⟧)⟩) = ⟦q.P.W_mk a f' f⟧ :=
have fix.mk (abs ⟨a, q.P.append_contents f' (λ x, ⟦f x⟧)⟩) = ⟦Wrepr (q.P.W_mk a f' f)⟧,
begin
apply quot.sound, apply Wequiv.abs',
rw [mvpfunctor.W_dest'_W_mk', abs_map, abs_repr, ←abs_map, mvpfunctor.map_eq],
conv { to_rhs, rw [Wrepr_W_mk, q.P.W_dest'_W_mk', abs_repr, mvpfunctor.map_eq] },
congr' 2, rw [mvpfunctor.append_contents, mvpfunctor.append_contents],
rw [append_fun, append_fun, ←split_fun_comp, ←split_fun_comp],
reflexivity
end,
by { rw this, apply quot.sound, apply Wrepr_equiv }
theorem fix.ind_rec {β : Type*} (g₁ g₂ : fix F α → β)
(h : ∀ x : F (append1 α (fix F α)),
(append_fun id g₁) <$$> x = (append_fun id g₂) <$$> x → g₁ (fix.mk x) = g₂ (fix.mk x)) :
∀ x, g₁ x = g₂ x :=
begin
apply quot.ind,
intro x,
apply q.P.W_ind _ x, intros a f' f ih,
show g₁ ⟦q.P.W_mk a f' f⟧ = g₂ ⟦q.P.W_mk a f' f⟧,
rw [←fix.ind_aux a f' f], apply h,
rw [←abs_map, ←abs_map, mvpfunctor.map_eq, mvpfunctor.map_eq],
congr' 2,
rw [mvpfunctor.append_contents, append_fun, append_fun, ←split_fun_comp, ←split_fun_comp],
have : g₁ ∘ (λ x, ⟦f x⟧) = g₂ ∘ (λ x, ⟦f x⟧),
{ ext x, exact ih x },
rw this
end
theorem fix.rec_unique {β : Type*} (g : F (append1 α β) → β) (h : fix F α → β)
(hyp : ∀ x, h (fix.mk x) = g (append_fun id h <$$> x)) :
fix.rec g = h :=
begin
ext x,
apply fix.ind_rec,
intros x hyp',
rw [hyp, ←hyp', fix.rec_eq]
end
theorem fix.mk_dest (x : fix F α) : fix.mk (fix.dest x) = x :=
begin
change (fix.mk ∘ fix.dest) x = x,
apply fix.ind_rec,
intro x, dsimp,
rw [fix.dest, fix.rec_eq, ←comp_map, ←append_fun_comp, id_comp],
intro h, rw h,
show fix.mk (append_fun id id <$$> x) = fix.mk x,
rw [append_fun_id_id, mvfunctor.id_map]
end
theorem fix.dest_mk (x : F (append1 α (fix F α))) : fix.dest (fix.mk x) = x :=
begin
unfold fix.dest, rw [fix.rec_eq, ←fix.dest, ←comp_map],
conv { to_rhs, rw ←(mvfunctor.id_map x) },
rw [←append_fun_comp, id_comp],
have : fix.mk ∘ fix.dest = id, {ext x, apply fix.mk_dest },
rw [this, append_fun_id_id]
end
theorem fix.ind {α : typevec n} (p : fix F α → Prop)
(h : ∀ x : F (α.append1 (fix F α)), liftp (pred_last α p) x → p (fix.mk x)) :
∀ x, p x :=
begin
apply quot.ind,
intro x,
apply q.P.W_ind _ x, intros a f' f ih,
change p ⟦q.P.W_mk a f' f⟧,
rw [←fix.ind_aux a f' f],
apply h,
rw mvqpf.liftp_iff,
refine ⟨_, _, rfl, _⟩,
intros i j,
cases i,
{ apply ih },
{ trivial },
end
instance mvqpf_fix : mvqpf (fix F) :=
{ P := q.P.Wp,
abs := λ α, quot.mk Wequiv,
repr := λ α, fix_to_W,
abs_repr := by { intros α, apply quot.ind, intro a, apply quot.sound, apply Wrepr_equiv },
abs_map :=
begin
intros α β g x, conv { to_rhs, dsimp [mvfunctor.map]},
rw fix.map, apply quot.sound,
apply Wequiv.refl
end }
/-- Dependent recursor for `fix F` -/
def fix.drec {β : fix F α → Type u}
(g : Π x : F (α ::: sigma β), β (fix.mk $ (id ::: sigma.fst) <$$> x)) (x : fix F α) : β x :=
let y := @fix.rec _ F _ _ α (sigma β) (λ i, ⟨_,g i⟩) x in
have x = y.1,
by { symmetry, dsimp [y], apply fix.ind_rec _ id _ x, intros x' ih,
rw fix.rec_eq, dsimp, simp [append_fun_id_id] at ih,
congr, conv { to_rhs, rw [← ih] }, rw [mvfunctor.map_map,← append_fun_comp,id_comp], },
cast (by rw this) y.2
end mvqpf
|
using DiscreteEntropyEstimators
using Base.Test
@test entropy_ml(Frequencies([1, 1, 2])) == entropy(MLEE(), Frequencies([1, 1, 2]))
@test entropy_ml(Frequencies([1, 1, 2]), 2) == 1.5
@test entropy_ml(Frequencies([1, 1, 2]), e) == entropy_ml(Frequencies([1, 1, 2]))
@test entropy_ml(Frequencies([1, 1, 1, 1, 4, 4, 4]), 4) == 1.25
@test entropy_ml(Samples([1, 2, 3, 3])) == entropy_ml(Frequencies([1, 1, 2]))
@test entropy(MMBCEE(), Frequencies([1, 1, 2])) == entropy(MMBCEE(), Samples([1, 2, 3, 3])) |
{-# LANGUAGE FlexibleInstances #-}
{-# OPTIONS_GHC -Wall #-}
-- |
-- Module : Numeric.NLOPT
-- Copyright : (c) Matthew Peddie 2017
-- License : BSD3
-- Maintainer : Matthew Peddie <[email protected]>
-- Stability : provisional
-- Portability : GHC
--
-- This module provides a high-level, @hmatrix@-compatible interface to
-- the <http://ab-initio.mit.edu/wiki/index.php/NLopt NLOPT> library by
-- Steven G. Johnson.
--
-- = Documentation
--
-- Most non-numerical details are documented, but for specific
-- information on what the optimization methods do, how constraints are
-- handled, etc., you should consult:
--
-- * The <http://ab-initio.mit.edu/wiki/index.php/NLopt_Introduction NLOPT introduction>
--
-- * The <http://ab-initio.mit.edu/wiki/index.php/NLopt_Reference NLOPT reference manual>
--
-- * The <http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms NLOPT algorithm manual>
--
-- = Example program
--
-- The following interactive session example uses the Nelder-Mead simplex
-- algorithm, a derivative-free local optimizer, to minimize a trivial
-- function with a minimum of 22.0 at @(0, 0)@.
--
-- >>> import Numeric.LinearAlgebra ( dot, fromList )
-- >>> let objf x = x `dot` x + 22 -- define objective
-- >>> let stop = ObjectiveRelativeTolerance 1e-6 :| [] -- define stopping criterion
-- >>> let algorithm = NELDERMEAD objf [] Nothing -- specify algorithm
-- >>> let problem = LocalProblem 2 stop algorithm -- specify problem
-- >>> let x0 = fromList [5, 10] -- specify initial guess
-- >>> minimizeLocal problem x0
-- Right (Solution {solutionCost = 22.0, solutionParams = [0.0,0.0], solutionResult = FTOL_REACHED})
module Numeric.NLOPT
( -- * Specifying the objective function
Objective,
ObjectiveD,
Preconditioner,
-- * Specifying the constraints
-- ** Bound constraints
Bounds (..),
-- ** Nonlinear constraints
--
-- $nonlinearconstraints
-- *** Constraint functions
ScalarConstraint,
ScalarConstraintD,
VectorConstraint,
VectorConstraintD,
-- *** Constraint types
Constraint (..),
EqualityConstraint (..),
InequalityConstraint (..),
-- *** Collections of constraints
EqualityConstraints,
EqualityConstraintsD,
InequalityConstraints,
InequalityConstraintsD,
-- * Stopping conditions
--
-- $nonempty
StoppingCondition (..),
NonEmpty (..),
-- * Additional configuration
RandomSeed (..),
Population (..),
VectorStorage (..),
InitialStep (..),
-- * Minimization problems
-- ** Local minimization
LocalAlgorithm (..),
LocalProblem (..),
minimizeLocal,
-- ** Global minimization
GlobalAlgorithm (..),
GlobalProblem (..),
minimizeGlobal,
-- ** Minimization by augmented Lagrangian
AugLagAlgorithm (..),
AugLagProblem (..),
minimizeAugLag,
-- ** Results
Solution (..),
N.Result (..),
)
where
import Control.Exception (Exception)
import qualified Control.Exception as Ex
import Data.Foldable (traverse_)
import Data.List.NonEmpty (NonEmpty (..))
import Data.Typeable (Typeable)
import qualified Data.Vector.Storable as V
import Numeric.LinearAlgebra as HM
import qualified Numeric.Optimization.NLOPT.Bindings as N
import System.IO.Unsafe (unsafePerformIO)
{- Function wrapping for the immutable HMatrix interface -}
wrapScalarFunction :: (Vector Double -> Double) -> N.ScalarFunction ()
wrapScalarFunction f params _ _ = return $ f params
wrapScalarFunctionD ::
(Vector Double -> (Double, Vector Double)) ->
N.ScalarFunction ()
wrapScalarFunctionD f params grad _ = do
case grad of
Nothing -> return ()
Just g -> V.copy g usergrad
return result
where
(result, usergrad) = f params
wrapVectorFunction ::
(Vector Double -> Word -> Vector Double) ->
Word ->
N.VectorFunction ()
wrapVectorFunction f n params vout _ _ = V.copy vout $ f params n
wrapVectorFunctionD ::
(Vector Double -> Word -> (Vector Double, Matrix Double)) ->
Word ->
N.VectorFunction ()
wrapVectorFunctionD f n params vout jac _ = do
V.copy vout result
case jac of
Nothing -> return ()
Just j -> V.copy j (HM.flatten userjac)
where
(result, userjac) = f params n
wrapPreconditionerFunction ::
(Vector Double -> Vector Double -> Vector Double) ->
N.PreconditionerFunction ()
wrapPreconditionerFunction f params v vpre _ = V.copy vpre (f params v)
{- Objective functions -}
-- | An objective function that calculates the objective value at the
-- given parameter vector.
type Objective =
-- | Parameter vector
Vector Double ->
-- | Objective function value
Double
-- | An objective function that calculates both the objective value
-- and the gradient of the objective with respect to the input
-- parameter vector, at the given parameter vector.
type ObjectiveD =
-- | Parameter vector
Vector Double ->
-- | (Objective function value, gradient)
(Double, Vector Double)
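-- For illustration (the name @objfD@ is not part of the library), a
-- gradient-supplying version of the objective @x `dot` x + 22@ used in the
-- module examples, whose gradient is @2*x@:
--
-- > objfD :: ObjectiveD
-- > objfD x = (x `dot` x + 22, 2 `scale` x)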
-- | A preconditioner function, which computes @vpre = H(x) v@, where
-- @H@ is the Hessian matrix: the positive semi-definite second
-- derivative at the given parameter vector @x@, or an approximation
-- thereof.
type Preconditioner =
-- | Parameter vector @x@
Vector Double ->
-- | Vector @v@ to precondition at @x@
Vector Double ->
-- | Preconditioned vector @vpre@
Vector Double
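-- For illustration (the name @examplePrecond@ is not part of the library), a
-- preconditioner for the objective @x `dot` x + 22@: its Hessian is the
-- constant matrix @2*I@, so preconditioning simply doubles the vector:
--
-- > examplePrecond :: Preconditioner
-- > examplePrecond _x v = 2 `scale` v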
data ObjectiveFunction f
= MinimumObjective f
| PreconditionedMinimumObjective Preconditioner f
applyObjective :: N.Opt -> ObjectiveFunction Objective -> IO N.Result
applyObjective opt (MinimumObjective f) =
N.set_min_objective opt (wrapScalarFunction f) ()
applyObjective opt (PreconditionedMinimumObjective p f) =
N.set_precond_min_objective
opt
(wrapScalarFunction f)
(wrapPreconditionerFunction p)
()
applyObjectiveD :: N.Opt -> ObjectiveFunction ObjectiveD -> IO N.Result
applyObjectiveD opt (MinimumObjective f) =
N.set_min_objective opt (wrapScalarFunctionD f) ()
applyObjectiveD opt (PreconditionedMinimumObjective p f) =
N.set_precond_min_objective
opt
(wrapScalarFunctionD f)
(wrapPreconditionerFunction p)
()
{- Constraint functions -}
-- | A constraint function which returns @c(x)@ given the parameter
-- vector @x@. The constraint will enforce that @c(x) == 0@ (equality
-- constraint) or @c(x) <= 0@ (inequality constraint).
type ScalarConstraint =
-- | Parameter vector @x@
Vector Double ->
-- | Constraint violation (deviation from 0)
Double
-- | A constraint function which returns @c(x)@ given the parameter
-- vector @x@ along with the gradient of @c(x)@ with respect to @x@ at
-- that point. The constraint will enforce that @c(x) == 0@ (equality
-- constraint) or @c(x) <= 0@ (inequality constraint).
type ScalarConstraintD =
-- | Parameter vector
Vector Double ->
-- | (Constraint violation, constraint gradient)
(Double, Vector Double)
-- | A constraint function which returns a vector @c(x)@ given the
-- parameter vector @x@. The constraint will enforce that @c(x) == 0@
-- (equality constraint) or @c(x) <= 0@ (inequality constraint).
type VectorConstraint =
-- | Parameter vector
Vector Double ->
-- | Constraint vector size
Word ->
-- | Constraint violation vector
Vector Double
-- | A constraint function which returns @c(x)@ given the parameter
-- vector @x@ along with the Jacobian (first derivative) matrix of
-- @c(x)@ with respect to @x@ at that point. The constraint will
-- enforce that @c(x) == 0@ (equality constraint) or @c(x) <= 0@
-- (inequality constraint).
type VectorConstraintD =
-- | Parameter vector
Vector Double ->
-- | Constraint vector size
Word ->
-- | (Constraint violation vector,
-- constraint Jacobian)
(Vector Double, Matrix Double)
-- $nonlinearconstraints
--
-- Note that most NLOPT algorithms do not support nonlinear
-- constraints natively; if you need to enforce nonlinear constraints,
-- you may want to use the 'AugLagAlgorithm' family of solvers, which
-- can add nonlinear constraints to some algorithm that does not
-- support them by a principled modification of the objective
-- function.
--
-- == Example program
--
-- The following interactive session example enforces a scalar
-- constraint on the problem given in the beginning of the module: the
-- parameters must always sum to 1. The minimizer finds a constrained
-- minimum of 22.5 at @(0.5, 0.5)@.
--
-- >>> import Numeric.LinearAlgebra ( dot, fromList, toList )
-- >>> let objf x = x `dot` x + 22
-- >>> let stop = ObjectiveRelativeTolerance 1e-9 :| []
-- >>> -- define constraint function:
-- >>> let constraintf x = sum (toList x) - 1.0
-- >>> -- define constraint object to pass to the algorithm:
-- >>> let constraint = EqualityConstraint (Scalar constraintf) 1e-6
-- >>> let algorithm = COBYLA objf [] [] [constraint] Nothing
-- >>> let problem = LocalProblem 2 stop algorithm
-- >>> let x0 = fromList [5, 10]
-- >>> minimizeLocal problem x0
-- Right (Solution {solutionCost = 22.500000000013028, solutionParams = [0.5000025521533521,0.49999744784664796], solutionResult = FTOL_REACHED})
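-- A vector-valued constraint with derivatives can be supplied in a similar
-- way; the following sketch (illustrative only, the names are not part of the
-- library) pins the two parameters to fixed values and supplies the identity
-- matrix as the Jacobian:
--
-- > vecConstraintD :: VectorConstraintD
-- > vecConstraintD x _size = (fromList [x `atIndex` 0 - 1, x `atIndex` 1 + 1], ident 2)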
data Constraint s v
= -- | A scalar constraint.
Scalar s
| -- | A vector constraint.
Vector Word v
| -- | A scalar constraint with an attached preconditioning function.
Preconditioned Preconditioner s
-- | An equality constraint, comprised of both the constraint function
-- (or functions, if a preconditioner is used) along with the desired
-- tolerance.
data EqualityConstraint s v = EqualityConstraint
{ eqConstraintFunctions :: Constraint s v,
eqConstraintTolerance :: Double
}
-- | An inequality constraint, comprised of both the constraint
-- function (or functions, if a preconditioner is used) along with the
-- desired tolerance.
data InequalityConstraint s v = InequalityConstraint
{ ineqConstraintFunctions :: Constraint s v,
ineqConstraintTolerance :: Double
}
-- | A collection of equality constraints that do not supply
-- constraint derivatives.
type EqualityConstraints =
[EqualityConstraint ScalarConstraint VectorConstraint]
-- | A collection of inequality constraints that do not supply
-- constraint derivatives.
type InequalityConstraints =
[InequalityConstraint ScalarConstraint VectorConstraint]
-- | A collection of equality constraints that supply constraint
-- derivatives.
type EqualityConstraintsD = [EqualityConstraint ScalarConstraintD VectorConstraintD]
-- | A collection of inequality constraints that supply constraint
-- derivatives.
type InequalityConstraintsD = [InequalityConstraint ScalarConstraintD VectorConstraintD]
class ApplyConstraint constraint where
applyConstraint :: N.Opt -> constraint -> IO N.Result
instance ApplyConstraint (EqualityConstraint ScalarConstraint VectorConstraint) where
applyConstraint opt (EqualityConstraint ty tol) = case ty of
Scalar s ->
N.add_equality_constraint opt (wrapScalarFunction s) () tol
Vector n v ->
N.add_equality_mconstraint opt n (wrapVectorFunction v n) () tol
Preconditioned p s ->
N.add_precond_equality_constraint
opt
(wrapScalarFunction s)
(wrapPreconditionerFunction p)
()
tol
instance ApplyConstraint (InequalityConstraint ScalarConstraint VectorConstraint) where
applyConstraint opt (InequalityConstraint ty tol) = case ty of
Scalar s ->
N.add_inequality_constraint opt (wrapScalarFunction s) () tol
Vector n v ->
N.add_inequality_mconstraint opt n (wrapVectorFunction v n) () tol
Preconditioned p s ->
N.add_precond_inequality_constraint
opt
(wrapScalarFunction s)
(wrapPreconditionerFunction p)
()
tol
instance ApplyConstraint (EqualityConstraint ScalarConstraintD VectorConstraintD) where
applyConstraint opt (EqualityConstraint ty tol) = case ty of
Scalar s ->
N.add_equality_constraint opt (wrapScalarFunctionD s) () tol
Vector n v ->
N.add_equality_mconstraint opt n (wrapVectorFunctionD v n) () tol
Preconditioned p s ->
N.add_precond_equality_constraint
opt
(wrapScalarFunctionD s)
(wrapPreconditionerFunction p)
()
tol
instance ApplyConstraint (InequalityConstraint ScalarConstraintD VectorConstraintD) where
applyConstraint opt (InequalityConstraint ty tol) = case ty of
Scalar s ->
N.add_inequality_constraint opt (wrapScalarFunctionD s) () tol
Vector n v ->
N.add_inequality_mconstraint opt n (wrapVectorFunctionD v n) () tol
Preconditioned p s ->
N.add_precond_inequality_constraint
opt
(wrapScalarFunctionD s)
(wrapPreconditionerFunction p)
()
tol
{- Bounds -}
-- | Bound constraints are specified by vectors of the same dimension
-- as the parameter space.
--
-- == Example program
--
-- The following interactive session example enforces lower bounds on
-- the example from the beginning of the module. This prevents the
-- optimizer from locating the true minimum at @(0, 0)@; a slightly
-- higher constrained minimum at @(1, 1)@ is found. Note that the
-- optimizer returns 'N.XTOL_REACHED' rather than 'N.FTOL_REACHED',
-- because the bound constraint is active at the final minimum.
--
-- >>> import Numeric.LinearAlgebra ( dot, fromList )
-- >>> let objf x = x `dot` x + 22 -- define objective
-- >>> let stop = ObjectiveRelativeTolerance 1e-6 :| [] -- define stopping criterion
-- >>> let lowerbound = LowerBounds $ fromList [1, 1] -- specify bounds
-- >>> let algorithm = NELDERMEAD objf [lowerbound] Nothing -- specify algorithm
-- >>> let problem = LocalProblem 2 stop algorithm -- specify problem
-- >>> let x0 = fromList [5, 10] -- specify initial guess
-- >>> minimizeLocal problem x0
-- Right (Solution {solutionCost = 24.0, solutionParams = [1.0,1.0], solutionResult = XTOL_REACHED})
data Bounds
= -- | Lower bound vector @v@ means we want @x >= v@.
LowerBounds (Vector Double)
| -- | Upper bound vector @u@ means we want @x <= u@.
UpperBounds (Vector Double)
deriving (Eq, Show, Read)
applyBounds :: N.Opt -> Bounds -> IO N.Result
applyBounds opt (LowerBounds lbvec) = N.set_lower_bounds opt lbvec
applyBounds opt (UpperBounds ubvec) = N.set_upper_bounds opt ubvec
{- Stopping conditions -}
-- | A 'StoppingCondition' tells NLOPT when to stop working on a
-- minimization problem. When multiple 'StoppingCondition's are
-- provided, the problem will stop when any one condition is met.
data StoppingCondition
= -- | Stop minimizing when an objective value @J@ less than or equal
-- to the provided value is found.
MinimumValue Double
| -- | Stop minimizing when an optimization step changes the objective
-- value @J@ by less than the provided tolerance multiplied by @|J|@.
ObjectiveRelativeTolerance Double
| -- | Stop minimizing when an optimization step changes the objective
-- value by less than the provided tolerance.
ObjectiveAbsoluteTolerance Double
| -- | Stop when an optimization step changes /every element/ of the
-- parameter vector @x@ by less than @x@ scaled by the provided
-- tolerance.
ParameterRelativeTolerance Double
| -- | Stop when an optimization step changes /every element/ of the
-- parameter vector @x@ by less than the corresponding element in
-- the provided vector of tolerance values.
ParameterAbsoluteTolerance (Vector Double)
| -- | Stop when the number of evaluations of the objective function
-- exceeds the provided count.
MaximumEvaluations Word
| -- | Stop when the optimization time exceeds the provided time (in
-- seconds). This is not a precise limit.
MaximumTime Double
deriving (Eq, Show, Read)
-- $nonempty
--
-- The 'NonEmpty' data type from 'Data.List.NonEmpty' is re-exported
-- here, because it is used to ensure that you always specify at least
-- one stopping condition.
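--
-- For example, to stop on a relative objective tolerance, an evaluation
-- budget, or a (rough) time limit, whichever is reached first:
--
-- > stop = ObjectiveRelativeTolerance 1e-6 :| [MaximumEvaluations 1000, MaximumTime 5]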
applyStoppingCondition :: N.Opt -> StoppingCondition -> IO N.Result
applyStoppingCondition opt (MinimumValue x) = N.set_stopval opt x
applyStoppingCondition opt (ObjectiveRelativeTolerance x) = N.set_ftol_rel opt x
applyStoppingCondition opt (ObjectiveAbsoluteTolerance x) = N.set_ftol_abs opt x
applyStoppingCondition opt (ParameterRelativeTolerance x) = N.set_xtol_rel opt x
applyStoppingCondition opt (ParameterAbsoluteTolerance v) = N.set_xtol_abs opt v
applyStoppingCondition opt (MaximumEvaluations n) = N.set_maxeval opt n
applyStoppingCondition opt (MaximumTime deltat) = N.set_maxtime opt deltat
{- Random seed control -}
-- | This specifies how to initialize the random number generator for
-- stochastic algorithms.
data RandomSeed
= -- | Seed the RNG with the provided value.
SeedValue Word
| -- | Seed the RNG using the system clock.
SeedFromTime
| -- | Don't perform any explicit initialization of the RNG.
Don'tSeed
deriving (Eq, Show, Read)
applyRandomSeed :: RandomSeed -> IO ()
applyRandomSeed Don'tSeed = return ()
applyRandomSeed (SeedValue n) = N.srand n
applyRandomSeed SeedFromTime = N.srand_time
{- Random stuff -}
-- | This specifies the population size for algorithms that use a pool
-- of solutions.
newtype Population = Population Word deriving (Eq, Show, Read)
applyPopulation :: N.Opt -> Population -> IO N.Result
applyPopulation opt (Population n) = N.set_population opt n
-- | This specifies the memory size to be used by algorithms like
-- 'LBFGS' which store approximate Hessian or Jacobian matrices.
newtype VectorStorage = VectorStorage Word deriving (Eq, Show, Read)
applyVectorStorage :: N.Opt -> VectorStorage -> IO N.Result
applyVectorStorage opt (VectorStorage n) = N.set_vector_storage opt n
-- | This vector, which must have the same dimension as the parameter vector
-- @x@, specifies the initial step for the optimizer to take. (This
-- applies to local gradient-free algorithms, which cannot use
-- gradients to estimate how big a step to take.)
newtype InitialStep = InitialStep (Vector Double) deriving (Eq, Show, Read)
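-- For example (illustrative only), to ask a gradient-free local algorithm to
-- start with steps of 0.1 in each of two dimensions:
--
-- > step0 = Just (InitialStep (fromList [0.1, 0.1]))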
applyInitialStep :: N.Opt -> InitialStep -> IO N.Result
applyInitialStep opt (InitialStep v) = N.set_initial_step opt v
{- Algorithms -}
data GlobalProblem = GlobalProblem
{ -- | Lower bounds for @x@
lowerBounds :: Vector Double,
-- | Upper bounds for @x@
upperBounds :: Vector Double,
-- | At least one stopping
-- condition
gstop :: NonEmpty StoppingCondition,
-- | Algorithm specification
galgorithm :: GlobalAlgorithm
}
-- | These are the global minimization algorithms provided by NLOPT. Please see
-- <http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms the NLOPT algorithm manual>
-- for more details on how the methods work and how they relate to one another.
--
-- Optional parameters are wrapped in a 'Maybe'; for example, if you
-- see 'Maybe' 'Population', you can simply specify 'Nothing' to use
-- the default behavior.
--
-- N.B.: NLOPT restricts problems in @n@ dimensions to at most @n@ equality
-- constraints. This can be worked around by combining equality constraints,
-- using the fact that @h1(x) == 0 && h2(x) == 0@ holds exactly when
-- @h1(x)^2 + h2(x)^2 == 0@.
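--
-- For example (illustrative only; @hCombined@ is not part of the library), the
-- two scalar equality constraints @h1 x = x0 - 1@ and @h2 x = x1 - 2@ can be
-- folded into the single constraint
--
-- > hCombined :: ScalarConstraint
-- > hCombined x = (x `atIndex` 0 - 1)^2 + (x `atIndex` 1 - 2)^2
--
-- which is zero exactly when both original constraints are satisfied.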
data GlobalAlgorithm
= -- | DIviding RECTangles
DIRECT Objective
| -- | DIviding RECTangles, locally-biased variant
DIRECT_L Objective
| -- | DIviding RECTangles, "slightly randomized"
DIRECT_L_RAND Objective RandomSeed
| -- | DIviding RECTangles, unscaled version
DIRECT_NOSCAL Objective
| -- | DIviding RECTangles, locally-biased and unscaled
DIRECT_L_NOSCAL Objective
| -- | DIviding RECTangles, locally-biased, unscaled and "slightly
-- randomized"
DIRECT_L_RAND_NOSCAL Objective RandomSeed
| -- | DIviding RECTangles, original FORTRAN implementation
ORIG_DIRECT Objective InequalityConstraints
| -- | DIviding RECTangles, locally-biased, original FORTRAN
-- implementation
ORIG_DIRECT_L Objective InequalityConstraints
| -- | Stochastic Global Optimization.
-- __This algorithm is only available if you have linked with @libnlopt_cxx@.__
STOGO ObjectiveD
| -- | Stochastic Global Optimization, randomized variant.
-- __This algorithm is only available if you have linked with @libnlopt_cxx@.__
STOGO_RAND ObjectiveD RandomSeed
| -- | Controlled Random Search with Local Mutation
CRS2_LM Objective RandomSeed (Maybe Population)
| -- | Improved Stochastic Ranking Evolution Strategy; the population defaults to 20×(n+1) in @n@ dimensions
ISRES Objective InequalityConstraints EqualityConstraints RandomSeed (Maybe Population)
| -- | Evolutionary Algorithm
ESCH Objective
| -- | Original Multi-Level Single-Linkage
MLSL Objective LocalProblem (Maybe Population)
| -- | Multi-Level Single-Linkage with Sobol Low-Discrepancy
-- Sequence for starting points
MLSL_LDS Objective LocalProblem (Maybe Population)
algorithmEnumOfGlobal :: GlobalAlgorithm -> N.Algorithm
algorithmEnumOfGlobal (DIRECT _) = N.GN_DIRECT
algorithmEnumOfGlobal (DIRECT_L _) = N.GN_DIRECT_L
algorithmEnumOfGlobal (DIRECT_L_RAND _ _) = N.GN_DIRECT_L_RAND
algorithmEnumOfGlobal (DIRECT_NOSCAL _) = N.GN_DIRECT_NOSCAL
algorithmEnumOfGlobal (DIRECT_L_NOSCAL _) = N.GN_DIRECT_L_NOSCAL
algorithmEnumOfGlobal (DIRECT_L_RAND_NOSCAL _ _) = N.GN_DIRECT_L_RAND_NOSCAL
algorithmEnumOfGlobal (ORIG_DIRECT _ _) = N.GN_ORIG_DIRECT
algorithmEnumOfGlobal (ORIG_DIRECT_L _ _) = N.GN_ORIG_DIRECT_L
algorithmEnumOfGlobal (STOGO _) = N.GD_STOGO
algorithmEnumOfGlobal (STOGO_RAND _ _) = N.GD_STOGO_RAND
algorithmEnumOfGlobal (CRS2_LM _ _ _) = N.GN_CRS2_LM
algorithmEnumOfGlobal (ISRES _ _ _ _ _) = N.GN_ISRES
algorithmEnumOfGlobal (ESCH _) = N.GN_ESCH
algorithmEnumOfGlobal (MLSL _ _ _) = N.G_MLSL
algorithmEnumOfGlobal (MLSL_LDS _ _ _) = N.G_MLSL_LDS
applyGlobalObjective :: N.Opt -> GlobalAlgorithm -> IO ()
applyGlobalObjective opt alg = go alg
where
obj = tryTo . applyObjective opt . MinimumObjective
objD = tryTo . applyObjectiveD opt . MinimumObjective
go (DIRECT o) = obj o
go (DIRECT_L o) = obj o
go (DIRECT_NOSCAL o) = obj o
go (DIRECT_L_NOSCAL o) = obj o
go (ESCH o) = obj o
go (STOGO o) = objD o
go (DIRECT_L_RAND o _) = obj o
go (DIRECT_L_RAND_NOSCAL o _) = obj o
go (ORIG_DIRECT o _) = obj o
go (ORIG_DIRECT_L o _) = obj o
go (STOGO_RAND o _) = objD o
go (CRS2_LM o _ _) = obj o
go (ISRES o _ _ _ _) = obj o
go (MLSL o _ _) = obj o
go (MLSL_LDS o _ _) = obj o
applyGlobalAlgorithm :: N.Opt -> GlobalAlgorithm -> IO ()
applyGlobalAlgorithm opt alg = do
applyGlobalObjective opt alg
go alg
where
seed = applyRandomSeed
pop = maybe (return ()) (tryTo . applyPopulation opt)
ic = traverse_ (tryTo . applyConstraint opt)
ec = traverse_ (tryTo . applyConstraint opt)
local lp = tryTo $ setupLocalProblem lp >>= N.set_local_optimizer opt
go (DIRECT_L_RAND _ s) = seed s
go (DIRECT_L_RAND_NOSCAL _ s) = seed s
go (ORIG_DIRECT _ ineq) = ic ineq
go (ORIG_DIRECT_L _ ineq) = ic ineq
go (STOGO_RAND _ s) = seed s
go (CRS2_LM _ s p) = seed s *> pop p
go (ISRES _ ineq eq s p) = ic ineq *> ec eq *> seed s *> pop p
go (MLSL _ lp p) = local lp *> pop p
go (MLSL_LDS _ lp p) = local lp *> pop p
go _ = return ()
tryTo :: IO N.Result -> IO ()
tryTo act = do
result <- act
if (N.isSuccess result)
then return ()
else Ex.throw $ NloptException result
data NloptException = NloptException N.Result deriving (Show, Typeable)
instance Exception NloptException
-- | Solve the specified global optimization problem.
--
-- = Example program
--
-- The following interactive session example uses the 'ISRES'
-- algorithm, a stochastic, derivative-free global optimizer, to
-- minimize a trivial function with a minimum of 22.0 at @(0, 0)@.
-- The search is conducted within a box from -10 to 10 in each
-- dimension.
--
-- >>> import Numeric.LinearAlgebra ( dot, fromList )
-- >>> let objf x = x `dot` x + 22 -- define objective
-- >>> let stop = ObjectiveRelativeTolerance 1e-12 :| [] -- define stopping criterion
-- >>> let algorithm = ISRES objf [] [] (SeedValue 22) Nothing -- specify algorithm
-- >>> let lowerbounds = fromList [-10, -10] -- specify bounds
-- >>> let upperbounds = fromList [10, 10] -- specify bounds
-- >>> let problem = GlobalProblem lowerbounds upperbounds stop algorithm
-- >>> let x0 = fromList [5, 8] -- specify initial guess
-- >>> minimizeGlobal problem x0
-- Right (Solution {solutionCost = 22.000000000002807, solutionParams = [-1.660591102367038e-6,2.2407062393213684e-7], solutionResult = FTOL_REACHED})
minimizeGlobal ::
-- | Problem specification
GlobalProblem ->
-- | Initial parameter guess
Vector Double ->
-- | Optimization results
Either N.Result Solution
minimizeGlobal prob x0 =
unsafePerformIO $ (Right <$> minimizeGlobal' prob x0) `Ex.catch` handler
where
handler :: NloptException -> IO (Either N.Result a)
handler (NloptException retcode) = return $ Left retcode
applyGlobalProblem :: N.Opt -> GlobalProblem -> IO ()
applyGlobalProblem opt (GlobalProblem lb ub stop alg) = do
tryTo $ applyBounds opt (LowerBounds lb)
tryTo $ applyBounds opt (UpperBounds ub)
traverse_ (tryTo . applyStoppingCondition opt) stop
applyGlobalAlgorithm opt alg
newOpt :: N.Algorithm -> Word -> IO N.Opt
newOpt alg sz = do
opt' <- N.create alg sz
case opt' of
Nothing -> Ex.throw $ NloptException N.FAILURE
Just opt -> return opt
setupGlobalProblem :: GlobalProblem -> IO N.Opt
setupGlobalProblem gp@(GlobalProblem _ _ _ alg) = do
opt <- newOpt (algorithmEnumOfGlobal alg) (problemSize gp)
applyGlobalProblem opt gp
return opt
solveProblem :: N.Opt -> Vector Double -> IO Solution
solveProblem opt x0 = do
(N.Output outret outcost outx) <- N.optimize opt x0
if (N.isSuccess outret)
then return $ Solution outcost outx outret
else Ex.throw $ NloptException outret
minimizeGlobal' :: GlobalProblem -> Vector Double -> IO Solution
minimizeGlobal' gp x0 = do
opt <- setupGlobalProblem gp
solveProblem opt x0
data LocalProblem = LocalProblem
{ -- | The dimension of the
-- parameter vector.
lsize :: Word,
-- | At least one stopping
-- condition
lstop :: NonEmpty StoppingCondition,
-- | Algorithm specification
lalgorithm :: LocalAlgorithm
}
-- | These are the local minimization algorithms provided by NLOPT. Please see
-- <http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms the NLOPT algorithm manual>
-- for more details on how the methods work and how they relate to one
-- another. Note that some local methods require you provide
-- derivatives (gradients or Jacobians) for your objective function
-- and constraint functions.
--
-- Optional parameters are wrapped in a 'Maybe'; for example, if you
-- see 'Maybe' 'VectorStorage', you can simply specify 'Nothing' to
-- use the default behavior.
data LocalAlgorithm
= -- | Limited-memory BFGS
LBFGS_NOCEDAL ObjectiveD (Maybe VectorStorage)
| -- | Limited-memory BFGS
LBFGS ObjectiveD (Maybe VectorStorage)
| -- | Shifted limited-memory variable-metric, rank-2
VAR2 ObjectiveD (Maybe VectorStorage)
| -- | Shifted limited-memory variable-metric, rank-1
VAR1 ObjectiveD (Maybe VectorStorage)
| -- | Truncated Newton's method
TNEWTON ObjectiveD (Maybe VectorStorage)
| -- | Truncated Newton's method with automatic restarting
TNEWTON_RESTART ObjectiveD (Maybe VectorStorage)
| -- | Preconditioned truncated Newton's method
TNEWTON_PRECOND ObjectiveD (Maybe VectorStorage)
| -- | Preconditioned truncated Newton's method with automatic
-- restarting
TNEWTON_PRECOND_RESTART ObjectiveD (Maybe VectorStorage)
| -- | Method of Moving Asymptotes
MMA ObjectiveD InequalityConstraintsD
| -- | Sequential Least-Squares Quadratic Programming
SLSQP ObjectiveD [Bounds] InequalityConstraintsD EqualityConstraintsD
| -- | Conservative Convex Separable Approximation
CCSAQ ObjectiveD Preconditioner
| -- | PRincipal AXIS gradient-free local optimization
PRAXIS Objective [Bounds] (Maybe InitialStep)
| -- | Constrained Optimization BY Linear Approximations
COBYLA
Objective
[Bounds]
InequalityConstraints
EqualityConstraints
(Maybe InitialStep)
| -- | Powell's NEWUOA algorithm
NEWUOA Objective (Maybe InitialStep)
| -- | Powell's NEWUOA algorithm with bounds by SGJ
NEWUOA_BOUND Objective [Bounds] (Maybe InitialStep)
| -- | Nelder-Mead Simplex gradient-free method
NELDERMEAD Objective [Bounds] (Maybe InitialStep)
| -- | NLOPT implementation of Rowan's Subplex algorithm
SBPLX Objective [Bounds] (Maybe InitialStep)
| -- | Bounded Optimization BY Quadratic Approximations
BOBYQA Objective [Bounds] (Maybe InitialStep)
algorithmEnumOfLocal :: LocalAlgorithm -> N.Algorithm
algorithmEnumOfLocal (LBFGS_NOCEDAL _ _) = N.LD_LBFGS_NOCEDAL
algorithmEnumOfLocal (LBFGS _ _) = N.LD_LBFGS
algorithmEnumOfLocal (VAR2 _ _) = N.LD_VAR2
algorithmEnumOfLocal (VAR1 _ _) = N.LD_VAR1
algorithmEnumOfLocal (TNEWTON _ _) = N.LD_TNEWTON
algorithmEnumOfLocal (TNEWTON_RESTART _ _) = N.LD_TNEWTON_RESTART
algorithmEnumOfLocal (TNEWTON_PRECOND _ _) = N.LD_TNEWTON_PRECOND
algorithmEnumOfLocal (TNEWTON_PRECOND_RESTART _ _) = N.LD_TNEWTON_PRECOND_RESTART
algorithmEnumOfLocal (MMA _ _) = N.LD_MMA
algorithmEnumOfLocal (SLSQP _ _ _ _) = N.LD_SLSQP
algorithmEnumOfLocal (CCSAQ _ _) = N.LD_CCSAQ
algorithmEnumOfLocal (PRAXIS _ _ _) = N.LN_PRAXIS
algorithmEnumOfLocal (COBYLA _ _ _ _ _) = N.LN_COBYLA
algorithmEnumOfLocal (NEWUOA _ _) = N.LN_NEWUOA
algorithmEnumOfLocal (NEWUOA_BOUND _ _ _) = N.LN_NEWUOA_BOUND
algorithmEnumOfLocal (NELDERMEAD _ _ _) = N.LN_NELDERMEAD
algorithmEnumOfLocal (SBPLX _ _ _) = N.LN_SBPLX
algorithmEnumOfLocal (BOBYQA _ _ _) = N.LN_BOBYQA
applyLocalObjective :: N.Opt -> LocalAlgorithm -> IO ()
applyLocalObjective opt alg = go alg
where
obj = tryTo . applyObjective opt . MinimumObjective
objD = tryTo . applyObjectiveD opt . MinimumObjective
precond p = tryTo . applyObjectiveD opt . PreconditionedMinimumObjective p
go (LBFGS_NOCEDAL o _) = objD o
go (LBFGS o _) = objD o
go (VAR2 o _) = objD o
go (VAR1 o _) = objD o
go (TNEWTON o _) = objD o
go (TNEWTON_RESTART o _) = objD o
go (TNEWTON_PRECOND o _) = objD o
go (TNEWTON_PRECOND_RESTART o _) = objD o
go (MMA o _) = objD o
go (SLSQP o _ _ _) = objD o
go (CCSAQ o prec) = precond prec o
go (PRAXIS o _ _) = obj o
go (COBYLA o _ _ _ _) = obj o
go (NEWUOA o _) = obj o
go (NEWUOA_BOUND o _ _) = obj o
go (NELDERMEAD o _ _) = obj o
go (SBPLX o _ _) = obj o
go (BOBYQA o _ _) = obj o
applyLocalAlgorithm :: N.Opt -> LocalAlgorithm -> IO ()
applyLocalAlgorithm opt alg = do
applyLocalObjective opt alg
go alg
where
ic = traverse_ (tryTo . applyConstraint opt)
icd = traverse_ (tryTo . applyConstraint opt)
ec = traverse_ (tryTo . applyConstraint opt)
ecd = traverse_ (tryTo . applyConstraint opt)
store = maybe (return ()) (tryTo . applyVectorStorage opt)
bound = traverse_ (tryTo . applyBounds opt)
step0 = maybe (return ()) (tryTo . applyInitialStep opt)
go (LBFGS_NOCEDAL _ vs) = store vs
go (LBFGS _ vs) = store vs
go (VAR2 _ vs) = store vs
go (VAR1 _ vs) = store vs
go (TNEWTON _ vs) = store vs
go (TNEWTON_RESTART _ vs) = store vs
go (TNEWTON_PRECOND _ vs) = store vs
go (TNEWTON_PRECOND_RESTART _ vs) = store vs
go (MMA _ ineqd) = icd ineqd
go (SLSQP _ b ineqd eqd) =
bound b *> icd ineqd *> ecd eqd
go (CCSAQ _ _) = return ()
go (PRAXIS _ b s) = bound b *> step0 s
go (COBYLA _ b ineq eq s) =
bound b *> ic ineq *> ec eq *> step0 s
go (NEWUOA _ s) = step0 s
go (NEWUOA_BOUND _ b s) = bound b *> step0 s
go (NELDERMEAD _ b s) = bound b *> step0 s
go (SBPLX _ b s) = bound b *> step0 s
go (BOBYQA _ b s) = bound b *> step0 s
applyLocalProblem :: N.Opt -> LocalProblem -> IO ()
applyLocalProblem opt (LocalProblem _ stop alg) = do
traverse_ (tryTo . applyStoppingCondition opt) stop
applyLocalAlgorithm opt alg
setupLocalProblem :: LocalProblem -> IO N.Opt
setupLocalProblem lp@(LocalProblem sz _ alg) = do
opt <- newOpt (algorithmEnumOfLocal alg) sz
applyLocalProblem opt lp
return opt
minimizeLocal' :: LocalProblem -> Vector Double -> IO Solution
minimizeLocal' lp x0 = do
opt <- setupLocalProblem lp
solveProblem opt x0
-- | Solve the specified local minimization problem.
--
-- == Example program
--
-- The following interactive session example enforces the same scalar
-- constraint as the nonlinear constraint example, but this time it
-- uses the SLSQP solver to find the minimum.
--
-- >>> import Numeric.LinearAlgebra ( dot, fromList, toList, scale )
-- >>> let objf x = (x `dot` x + 22, 2 `scale` x)
-- >>> let stop = ObjectiveRelativeTolerance 1e-9 :| []
-- >>> let constraintf x = (sum (toList x) - 1.0, fromList [1, 1])
-- >>> let constraint = EqualityConstraint (Scalar constraintf) 1e-6
-- >>> let algorithm = SLSQP objf [] [] [constraint]
-- >>> let problem = LocalProblem 2 stop algorithm
-- >>> let x0 = fromList [5, 10]
-- >>> minimizeLocal problem x0
-- Right (Solution {solutionCost = 22.5, solutionParams = [0.4999999999999998,0.5000000000000002], solutionResult = FTOL_REACHED})
minimizeLocal :: LocalProblem -> Vector Double -> Either N.Result Solution
minimizeLocal prob x0 =
unsafePerformIO $ (Right <$> minimizeLocal' prob x0) `Ex.catch` handler
where
handler :: NloptException -> IO (Either N.Result a)
handler (NloptException retcode) = return $ Left retcode
class ProblemSize c where
problemSize :: c -> Word
instance ProblemSize LocalProblem where
problemSize = lsize
instance ProblemSize GlobalProblem where
problemSize = fromIntegral . HM.size . lowerBounds
instance ProblemSize AugLagProblem where
problemSize (AugLagProblem _ _ alg) = case alg of
AUGLAG_LOCAL lp _ _ -> problemSize lp
AUGLAG_EQ_LOCAL lp -> problemSize lp
AUGLAG_GLOBAL gp _ _ -> problemSize gp
AUGLAG_EQ_GLOBAL gp -> problemSize gp
-- | __IMPORTANT NOTE__
--
-- For augmented Lagrangian problems, you, the user, are responsible
-- for providing the appropriate type of constraint. If the
-- subsidiary problem requires an `ObjectiveD`, then you should
-- provide constraint functions with derivatives. If the subsidiary
-- problem requires an `Objective`, you should provide constraint
-- functions without derivatives. If you don't do this, you may get a
-- runtime error.
data AugLagProblem = AugLagProblem
{ -- | Possibly empty set of
-- equality constraints
alEquality :: EqualityConstraints,
-- | Possibly empty set of
-- equality constraints with
-- derivatives
alEqualityD :: EqualityConstraintsD,
-- | Algorithm specification.
alalgorithm :: AugLagAlgorithm
}
-- | The Augmented Lagrangian solvers allow you to enforce nonlinear
-- constraints while using local or global algorithms that don't
-- natively support them. The subsidiary problem is used to do the
-- minimization, but the @AUGLAG@ methods modify the objective to
-- enforce the constraints. Please see
-- <http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms the NLOPT algorithm manual>
-- for more details on how the methods work and how they relate to one another.
--
-- See the documentation for 'AugLagProblem' for an important note
-- about the constraint functions.
data AugLagAlgorithm
= -- | AUGmented LAGrangian with a local subsidiary method
AUGLAG_LOCAL LocalProblem InequalityConstraints InequalityConstraintsD
| -- | AUGmented LAGrangian with a local subsidiary method and with
-- penalty functions only for equality constraints
AUGLAG_EQ_LOCAL LocalProblem
| -- | AUGmented LAGrangian with a global subsidiary method
AUGLAG_GLOBAL GlobalProblem InequalityConstraints InequalityConstraintsD
| -- | AUGmented LAGrangian with a global subsidiary method and with
-- penalty functions only for equality constraints.
AUGLAG_EQ_GLOBAL GlobalProblem
algorithmEnumOfAugLag :: AugLagAlgorithm -> N.Algorithm
algorithmEnumOfAugLag (AUGLAG_LOCAL _ _ _) = N.AUGLAG
algorithmEnumOfAugLag (AUGLAG_EQ_LOCAL _) = N.AUGLAG_EQ
algorithmEnumOfAugLag (AUGLAG_GLOBAL _ _ _) = N.AUGLAG
algorithmEnumOfAugLag (AUGLAG_EQ_GLOBAL _) = N.AUGLAG_EQ
-- | This structure is returned in the event of a successful
-- optimization.
data Solution = Solution
{ -- | The objective function value
-- at the minimum
solutionCost :: Double,
-- | The parameter vector which
-- minimizes the objective
solutionParams :: Vector Double,
-- | Why the optimizer stopped
solutionResult :: N.Result
}
deriving (Eq, Show, Read)
applyAugLagAlgorithm :: N.Opt -> AugLagAlgorithm -> IO ()
applyAugLagAlgorithm opt alg = go alg
where
ic = traverse_ (tryTo . applyConstraint opt)
icd = traverse_ (tryTo . applyConstraint opt)
-- AUGLAG won't work at all if you don't pass it the same
-- objective as the subproblem -- here we pull out the subproblem
-- objectives from the algorithm spec and set the same objective
-- function so the user can't mess it up.
local lp = tryTo $ do
localopt <- setupLocalProblem lp
applyLocalObjective opt (lalgorithm lp)
N.set_local_optimizer opt localopt
global gp = do
tryTo $ setupGlobalProblem gp >>= N.set_local_optimizer opt
applyGlobalObjective opt (galgorithm gp)
go (AUGLAG_LOCAL lp ineq ineqd) = local lp *> ic ineq *> icd ineqd
go (AUGLAG_EQ_LOCAL lp) = local lp
go (AUGLAG_GLOBAL gp ineq ineqd) = global gp *> ic ineq *> icd ineqd
go (AUGLAG_EQ_GLOBAL gp) = global gp
applyAugLagProblem :: N.Opt -> AugLagProblem -> IO ()
applyAugLagProblem opt (AugLagProblem eq eqd alg) = do
traverse_ (tryTo . applyConstraint opt) eq
traverse_ (tryTo . applyConstraint opt) eqd
applyAugLagAlgorithm opt alg
minimizeAugLag' :: AugLagProblem -> Vector Double -> IO Solution
minimizeAugLag' ap@(AugLagProblem _ _ alg) x0 = do
opt <- newOpt (algorithmEnumOfAugLag alg) (problemSize ap)
applyAugLagProblem opt ap
solveProblem opt x0
-- | Solve the specified minimization problem via the augmented Lagrangian method.
--
-- == Example program
--
-- The following interactive session example enforces the same scalar
-- constraint as the nonlinear constraint example, but this time it
-- uses the augmented Lagrangian method to enforce the constraint and
-- the 'SBPLX' algorithm, which does not support nonlinear constraints
-- itself, to perform the minimization. As before, the parameters
-- must always sum to 1, and the minimizer finds the same constrained
-- minimum of 22.5 at @(0.5, 0.5)@.
--
-- >>> import Numeric.LinearAlgebra ( dot, fromList, toList )
-- >>> let objf x = x `dot` x + 22
-- >>> let stop = ObjectiveRelativeTolerance 1e-9 :| []
-- >>> let algorithm = SBPLX objf [] Nothing
-- >>> let subproblem = LocalProblem 2 stop algorithm
-- >>> let x0 = fromList [5, 10]
-- >>> minimizeLocal subproblem x0
-- Right (Solution {solutionCost = 22.0, solutionParams = [0.0,0.0], solutionResult = FTOL_REACHED})
-- >>> -- define constraint function:
-- >>> let constraintf x = sum (toList x) - 1.0
-- >>> -- define constraint object to pass to the algorithm:
-- >>> let constraint = EqualityConstraint (Scalar constraintf) 1e-6
-- >>> let problem = AugLagProblem [constraint] [] (AUGLAG_EQ_LOCAL subproblem)
-- >>> minimizeAugLag problem x0
-- Right (Solution {solutionCost = 22.500000015505844, solutionParams = [0.5000880506776678,0.4999119493223323], solutionResult = FTOL_REACHED})
minimizeAugLag :: AugLagProblem -> Vector Double -> Either N.Result Solution
minimizeAugLag prob x0 =
unsafePerformIO $ (Right <$> minimizeAugLag' prob x0) `Ex.catch` handler
where
handler :: NloptException -> IO (Either N.Result a)
handler (NloptException retcode) = return $ Left retcode
|
/*
* Copyright 2013+ Ruslan Nigmatullin <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "connection_p.hpp"
#include <vector>
#include <boost/bind.hpp>
#include <iostream>
#include "server_p.hpp"
#include "stream_p.hpp"
#include "stockreplies_p.hpp"
namespace ioremap {
namespace thevoid {
#define CONNECTION_LOG(log_level, ...) \
BH_LOG(m_logger, (log_level), __VA_ARGS__)
#define CONNECTION_DEBUG(...) \
CONNECTION_LOG(SWARM_LOG_DEBUG, __VA_ARGS__)
#define CONNECTION_INFO(...) \
CONNECTION_LOG(SWARM_LOG_INFO, __VA_ARGS__)
#define CONNECTION_ERROR(...) \
CONNECTION_LOG(SWARM_LOG_ERROR, __VA_ARGS__)
#define SAFE_SEND_NONE do {} while (0)
#define SAFE_SEND_ERROR \
do { \
boost::system::error_code ignored_ec; \
m_socket.shutdown(boost::asio::socket_base::shutdown_both, ignored_ec); \
m_socket.close(ignored_ec); \
if (m_handler) { \
--m_server->m_data->active_connections_counter; \
m_handler.reset(); \
} \
return; \
} while (0)
#define SAFE_CALL(expr, err_prefix, error_handler) \
do { \
if (m_server->m_data->safe_mode) { \
try { \
expr; \
} catch (const std::exception &ex) { \
CONNECTION_ERROR("uncaught exception") \
("context", (err_prefix)) \
("error", ex.what()); \
m_access_status = 598; \
print_access_log(); \
error_handler; \
} catch (...) { \
CONNECTION_ERROR("uncaught exception") \
("context", (err_prefix)) \
("error", "unknown"); \
m_access_status = 598; \
print_access_log(); \
error_handler; \
} \
} else { \
expr; \
} \
} while (0)
static blackhole::log::attributes_t make_attributes(void *connection)
{
char buffer[128];
snprintf(buffer, sizeof(buffer), "%p", connection);
blackhole::log::attributes_t attributes = {
blackhole::attribute::make(std::string("connection"), std::string(buffer))
};
return std::move(attributes);
}
template <typename T>
connection<T>::connection(base_server *server, boost::asio::io_service &service, size_t buffer_size) :
m_server(server),
m_base_logger(m_server->logger(), make_attributes(this)),
m_logger(m_base_logger, blackhole::log::attributes_t()),
m_socket(service),
m_buffer(buffer_size),
m_content_length(0),
m_access_log_printed(false),
m_close_invoked(false),
m_state(read_headers | waiting_for_first_data),
m_sending(false),
m_keep_alive(false),
m_at_read(false),
m_pause_receive(false)
{
m_unprocessed_begin = m_buffer.data();
m_unprocessed_end = m_buffer.data();
m_access_start.tv_sec = 0;
m_access_start.tv_usec = 0;
m_access_status = 0;
m_access_received = 0;
m_access_sent = 0;
m_request_processing_was_finished = false;
CONNECTION_DEBUG("connection created")
("service", &service);
}
template <typename T>
connection<T>::~connection()
{
if (m_server) {
CONNECTION_INFO("connection to client closed");
--m_server->m_data->connections_counter;
}
if (m_handler) {
m_access_status = 597;
print_access_log();
}
// This isn't actually possible.
// The handler keeps a pointer to the connection; if the connection has a pointer
// to the handler, they prolong each other's lifetime.
/*
if (auto handler = try_handler())
SAFE_CALL(handler->on_close(boost::system::error_code()), "connection::~connection -> on_close", SAFE_SEND_NONE);
*/
CONNECTION_DEBUG("connection destroyed");
}
template <typename T>
typename connection<T>::socket_type &connection<T>::socket()
{
return m_socket;
}
template <typename T>
typename connection<T>::endpoint_type &connection<T>::endpoint()
{
return m_endpoint;
}
template <typename T>
void connection<T>::start(const std::string &local_endpoint)
{
m_access_local = local_endpoint;
m_access_remote = boost::lexical_cast<std::string>(m_endpoint);
++m_server->m_data->connections_counter;
CONNECTION_INFO("connection to client opened")
("local", m_access_local)
("remote", m_access_remote);
async_read();
}
template <typename T>
void connection<T>::send_headers(http_response &&rep,
const boost::asio::const_buffer &content,
std::function<void (const boost::system::error_code &err)> &&handler)
{
m_access_status = rep.code();
if (m_keep_alive) {
rep.headers().set_keep_alive();
}
CONNECTION_DEBUG("handler sends headers to client")
("keep_alive", m_keep_alive)
("status", rep.code())
("state", make_state_attribute());
buffer_info info(
std::move(stock_replies::to_buffers(rep, content)),
std::move(rep),
std::move(handler)
);
send_impl(std::move(info));
}
template <typename T>
void connection<T>::send_data(const boost::asio::const_buffer &buffer,
std::function<void (const boost::system::error_code &)> &&handler)
{
CONNECTION_DEBUG("handler sends data to client")
("size", boost::asio::buffer_size(buffer))
("state", make_state_attribute());
buffer_info info(
std::move(std::vector<boost::asio::const_buffer>(1, buffer)),
boost::none,
std::move(handler)
);
send_impl(std::move(info));
}
template <typename T>
void connection<T>::want_more()
{
// Invoke want_more_impl some time later, so we won't need any mutexes to guard the logic
m_socket.get_io_service().post(std::bind(&connection::want_more_impl, this->shared_from_this()));
}
template <typename T>
void connection<T>::pause_receive()
{
m_pause_receive = true;
}
template <typename T>
void connection<T>::initialize(base_request_stream_data *data)
{
(void) data;
}
template <typename T>
swarm::logger connection<T>::create_logger()
{
return swarm::logger(m_logger, blackhole::log::attributes_t());
}
template <typename T>
void connection<T>::close(const boost::system::error_code &err)
{
m_close_invoked = true;
CONNECTION_DEBUG("handler asks for closing connection")
("error", err.message())
("state", make_state_attribute());
// Invoke close_impl some time later, so we won't need any mutexes to guard the logic
if (err) {
m_socket.get_io_service().dispatch(std::bind(&connection::close_impl, this->shared_from_this(), err));
} else {
send_data(boost::asio::const_buffer(),
std::bind(&connection::close_impl, this->shared_from_this(), std::placeholders::_1));
}
}
template <typename T>
void connection<T>::virtual_hook(reply_stream::reply_stream_hook id, void *data)
{
switch (id) {
case get_logger_attributes_hook: {
auto &attributes_data = *reinterpret_cast<get_logger_attributes_hook_data *>(data);
attributes_data.data = &m_attributes;
break;
}
}
}
template <typename T>
std::shared_ptr<base_request_stream> connection<T>::try_handler()
{
if (!m_close_invoked)
return m_handler;
else
return std::shared_ptr<base_request_stream>();
}
template <typename T>
void connection<T>::want_more_impl()
{
CONNECTION_DEBUG("handler asks for more data from client")
("state", make_state_attribute());
m_pause_receive = false;
if (m_content_length > 0 && m_unprocessed_begin == m_unprocessed_end) {
async_read();
}
else {
process_data();
}
}
template <typename T>
void connection<T>::send_impl(buffer_info &&info)
{
std::lock_guard<std::mutex> lock(m_outgoing_mutex);
m_outgoing.emplace_back(std::move(info));
if (!m_sending) {
m_sending = true;
send_nolock();
}
}
template <typename T>
void connection<T>::write_finished(const boost::system::error_code &err, size_t bytes_written)
{
m_access_sent += bytes_written;
CONNECTION_LOG(err ? SWARM_LOG_ERROR : SWARM_LOG_DEBUG, "write to client finished")
("error", err.message())
("size", bytes_written);
if (err) {
decltype(m_outgoing) outgoing;
{
std::lock_guard<std::mutex> lock(m_outgoing_mutex);
outgoing = std::move(m_outgoing);
}
for (auto it = outgoing.begin(); it != outgoing.end(); ++it) {
if (it->handler)
it->handler(err);
}
if (auto handler = try_handler()) {
SAFE_CALL(handler->on_close(err), "connection::write_finished -> on_close", SAFE_SEND_NONE);
}
if (m_handler) {
--m_server->m_data->active_connections_counter;
m_handler.reset();
}
m_access_status = 499;
close_impl(err);
}
else {
do {
std::unique_lock<std::mutex> lock(m_outgoing_mutex);
if (m_outgoing.empty()) {
CONNECTION_ERROR("wrote extra bytes")
("size", bytes_written)
("state", make_state_attribute());
break;
}
auto &buffers = m_outgoing.front().buffer;
auto it = buffers.begin();
for (; it != buffers.end(); ++it) {
const size_t size = boost::asio::buffer_size(*it);
if (size <= bytes_written) {
bytes_written -= size;
} else {
*it = bytes_written + *it;
bytes_written = 0;
break;
}
}
if (it == buffers.end()) {
const auto handler = std::move(m_outgoing.front().handler);
m_outgoing.pop_front();
if (handler) {
lock.unlock();
handler(err);
lock.lock();
}
} else {
buffers.erase(buffers.begin(), it);
}
} while (bytes_written);
}
std::unique_lock<std::mutex> lock(m_outgoing_mutex);
if (m_outgoing.empty()) {
m_sending = false;
return;
}
send_nolock();
}
class buffers_array
{
private:
enum {
buffers_count = 32
};
public:
typedef boost::asio::const_buffer value_type;
typedef const value_type * const_iterator;
template <typename Iterator>
buffers_array(Iterator begin, Iterator end) :
m_size(0)
{
for (auto it = begin; it != end && m_size < buffers_count; ++it) {
for (auto jt = it->buffer.begin(); jt != it->buffer.end() && m_size < buffers_count; ++jt) {
m_data[m_size++] = *jt;
}
}
}
const_iterator begin() const
{
return m_data;
}
const_iterator end() const
{
return &m_data[m_size];
}
private:
value_type m_data[buffers_count];
size_t m_size;
};
template <typename T>
void connection<T>::send_nolock()
{
buffers_array data(m_outgoing.begin(), m_outgoing.end());
m_socket.async_write_some(data, detail::attributes_bind(m_logger, m_attributes, std::bind(
&connection::write_finished, this->shared_from_this(),
std::placeholders::_1, std::placeholders::_2)));
}
template <typename T>
void connection<T>::close_impl(const boost::system::error_code &err)
{
CONNECTION_DEBUG("handler closes connection")
("error", err.message())
("keep_alive", m_keep_alive)
("unreceived_size", m_content_length)
("state", make_state_attribute());
if (m_handler) {
--m_server->m_data->active_connections_counter;
m_handler.reset();
}
m_request_processing_was_finished = true;
if (err) {
// If access status is set to 499 there was an error during writing the data,
// so it looks like the client is already dead.
if (m_access_status != 499)
m_access_status = 599;
print_access_log();
boost::system::error_code ignored_ec;
// If there was any error - close the connection, it's broken
m_socket.shutdown(boost::asio::socket_base::shutdown_both, ignored_ec);
m_socket.close(ignored_ec);
return;
}
// If the request data is not fully received yet, receive it
if (m_state != processing_request) {
m_state |= request_processed;
m_pause_receive = false;
if (m_unprocessed_begin != m_unprocessed_end) {
process_data();
} else {
async_read();
}
return;
}
if (!m_keep_alive) {
print_access_log();
boost::system::error_code ignored_ec;
m_socket.shutdown(boost::asio::socket_base::shutdown_both, ignored_ec);
m_socket.close(ignored_ec);
return;
}
process_next();
}
template <typename T>
void connection<T>::process_next()
{
print_access_log();
// Start waiting for new HTTP requests on this socket, as allowed by HTTP/1.1 keep-alive
m_state = read_headers | waiting_for_first_data;
m_access_method.clear();
m_access_url.clear();
m_access_start.tv_sec = 0;
m_access_start.tv_usec = 0;
m_access_status = 0;
m_access_received = 0;
m_access_sent = 0;
m_request_processing_was_finished = false;
m_request_parser.reset();
m_access_log_printed = false;
m_close_invoked = false;
m_content_length = 0;
m_pause_receive = false;
m_attributes.clear();
m_logger = swarm::logger(m_base_logger, m_attributes);
m_request = http_request();
CONNECTION_DEBUG("process next request")
("size", m_unprocessed_end - m_unprocessed_begin);
if (m_unprocessed_begin != m_unprocessed_end) {
process_data();
} else {
async_read();
}
}
template <typename T>
void connection<T>::print_access_log()
{
if (m_state & waiting_for_first_data)
return;
if (m_access_log_printed)
return;
m_access_log_printed = true;
timeval end;
gettimeofday(&end, NULL);
unsigned long long delta = 1000000ull * (end.tv_sec - m_access_start.tv_sec) + end.tv_usec - m_access_start.tv_usec;
CONNECTION_LOG(SWARM_LOG_INFO, "access_log_entry: method: %s, url: %s, local: %s, remote: %s, status: %d, received: %llu, sent: %llu, time: %llu us",
m_access_method.empty() ? "-" : m_access_method.c_str(),
m_access_url.empty() ? "-" : m_access_url.c_str(),
m_access_local.c_str(),
m_access_remote.c_str(),
m_access_status,
m_access_received,
m_access_sent,
delta);
}
template <typename T>
void connection<T>::handle_read(const boost::system::error_code &err, std::size_t bytes_transferred)
{
m_at_read = false;
// This is not an error in the case of a disconnect between requests
const bool error = err && !((m_state & waiting_for_first_data)
&& err.category() == boost::asio::error::get_misc_category()
&& err.value() == boost::asio::error::eof);
CONNECTION_LOG(error ? SWARM_LOG_ERROR : SWARM_LOG_DEBUG, "received new data")
("error", err.message())
("real_error", error)
("state", make_state_attribute())
("size", bytes_transferred);
if (err) {
if (m_access_status == 0 || !m_request_processing_was_finished) {
m_access_status = 499;
}
print_access_log();
if (auto handler = try_handler()) {
SAFE_CALL(handler->on_close(err), "connection::handle_read -> on_close", SAFE_SEND_NONE);
}
if (m_handler) {
--m_server->m_data->active_connections_counter;
m_handler.reset();
}
close_impl(err);
return;
}
m_unprocessed_begin = m_buffer.data();
m_unprocessed_end = m_buffer.data() + bytes_transferred;
process_data();
// If an error occurs then no new asynchronous operations are started. This
// means that all shared_ptr references to the connection object will
// disappear and the object will be destroyed automatically after this
// handler returns. The connection class's destructor closes the socket.
}
template <typename T>
void connection<T>::process_data()
{
if (m_pause_receive) {
return;
}
const char* begin = m_unprocessed_begin;
const char* end = m_unprocessed_end;
CONNECTION_DEBUG("process data")
("size", end - begin)
("state", make_state_attribute());
if (m_state & read_headers) {
if (m_state & waiting_for_first_data) {
m_state &= ~waiting_for_first_data;
gettimeofday(&m_access_start, NULL);
}
boost::tribool result;
const char *new_begin = NULL;
boost::tie(result, new_begin) = m_request_parser.parse(m_request, begin, end);
CONNECTION_DEBUG("processed headers")
("result", result ? "true" : (!result ? "false" : "unknown_state"))
("raw_data", std::string(begin, new_begin));
m_access_received += (new_begin - begin);
m_unprocessed_begin = new_begin;
if (!result) {
m_keep_alive = false;
m_unprocessed_begin = m_unprocessed_end = 0;
m_state = processing_request;
send_error(http_response::bad_request);
return;
} else if (result) {
m_access_method = m_request.method();
m_access_url = m_request.url().original();
uint64_t request_id = 0;
bool trace_bit = false;
bool failed_to_parse_request_id = true;
const std::string &request_header = m_server->m_data->request_header;
int request_header_err = 0;
if (!request_header.empty()) {
if (auto request_ptr = m_request.headers().get(request_header)) {
std::string tmp = request_ptr->substr(0, 16);
errno = 0;
request_id = strtoull(tmp.c_str(), NULL, 16);
request_header_err = -errno;
if (request_header_err != 0) {
request_id = 0;
} else {
failed_to_parse_request_id = false;
}
}
}
if (failed_to_parse_request_id) {
unsigned char *buffer = reinterpret_cast<unsigned char *>(&request_id);
for (size_t i = 0; i < sizeof(request_id) / sizeof(unsigned char); ++i) {
buffer[i] = std::rand();
}
}
const std::string &trace_header = m_server->m_data->trace_header;
if (!trace_header.empty()) {
if (auto trace_bit_ptr = m_request.headers().get(trace_header)) {
try {
trace_bit = boost::lexical_cast<uint32_t>(*trace_bit_ptr) > 0;
} catch (std::exception &exc) {
CONNECTION_ERROR("failed to parse trace header, must be either 0 or 1")
("url", m_request.url().original())
("header_value", *trace_bit_ptr)
("header_name", trace_header)
("error", exc.what());
}
}
}
m_attributes = blackhole::log::attributes_t({
swarm::keyword::request_id() = request_id,
blackhole::keyword::tracebit() = trace_bit
});
m_logger = swarm::logger(m_base_logger, m_attributes);
blackhole::scoped_attributes_t logger_guard(m_logger, blackhole::log::attributes_t(m_attributes));
if (request_header_err != 0) {
auto request_ptr = m_request.headers().get(request_header);
CONNECTION_ERROR("failed to parse request header")
("url", m_request.url().original())
("header_value", *request_ptr)
("header_name", request_header)
("error", request_header_err);
}
m_request.set_request_id(request_id);
m_request.set_trace_bit(trace_bit);
m_request.set_local_endpoint(m_access_local);
m_request.set_remote_endpoint(m_access_remote);
if (!m_request.url().is_valid()) {
CONNECTION_ERROR("failed to parse invalid url")
("url", m_access_url);
// terminate connection on invalid url
m_keep_alive = false;
m_unprocessed_begin = m_unprocessed_end = 0;
m_state = processing_request;
send_error(http_response::bad_request);
return;
} else {
auto factory = m_server->factory(m_request);
if (auto length = m_request.headers().content_length())
m_content_length = *length;
else
m_content_length = 0;
m_keep_alive = m_request.is_keep_alive();
if (factory) {
++m_server->m_data->active_connections_counter;
m_handler = factory->create();
m_handler->initialize(std::static_pointer_cast<reply_stream>(this->shared_from_this()));
SAFE_CALL(m_handler->on_headers(std::move(m_request)), "connection::process_data -> on_headers", SAFE_SEND_ERROR);
} else {
CONNECTION_ERROR("failed to find handler")
("method", m_access_method)
("url", m_access_url);
// terminate connection if appropriate handler is not found
m_keep_alive = false;
m_unprocessed_begin = m_unprocessed_end = 0;
m_state = processing_request;
send_error(http_response::not_found);
return;
}
}
m_state &= ~read_headers;
m_state |= read_data;
process_data();
// async_read is called by process_data
return;
} else {
// need more data for request processing
async_read();
}
} else if (m_state & read_data) {
size_t data_from_body = std::min<size_t>(m_content_length, end - begin);
size_t processed_size = data_from_body;
if (data_from_body) {
if (auto handler = try_handler()) {
SAFE_CALL(processed_size = handler->on_data(boost::asio::buffer(begin, data_from_body)),
"connection::process_data -> on_data", SAFE_SEND_ERROR);
}
}
if (processed_size > data_from_body) {
processed_size = data_from_body;
}
m_content_length -= processed_size;
m_access_received += processed_size;
m_unprocessed_begin = begin + processed_size;
CONNECTION_DEBUG("processed body")
("size", processed_size)
("total_size", data_from_body)
("need_size", m_content_length)
("unprocesed_size", m_unprocessed_end - m_unprocessed_begin)
("state", make_state_attribute());
if (m_pause_receive) {
// The handler doesn't want to receive more data (or callbacks);
// wait until the want_more method is called
return;
}
if (data_from_body != processed_size) {
// The handler can't process all the data; wait until the want_more method is called
return;
} else if (m_content_length > 0) {
async_read();
} else {
m_state &= ~read_data;
if (auto handler = try_handler()) {
SAFE_CALL(handler->on_close(boost::system::error_code()), "connection::process_data -> on_close", SAFE_SEND_ERROR);
}
if (m_handler) {
--m_server->m_data->active_connections_counter;
m_handler.reset();
}
if (m_state & request_processed) {
process_next();
}
}
}
}
template <typename T>
void connection<T>::async_read()
{
// here m_pause_receive is false
if (m_at_read)
return;
m_at_read = true;
m_unprocessed_begin = NULL;
m_unprocessed_end = NULL;
CONNECTION_DEBUG("request read from client")
("state", make_state_attribute());
m_socket.async_read_some(boost::asio::buffer(m_buffer),
detail::attributes_bind(m_logger, m_attributes,
std::bind(&connection::handle_read, this->shared_from_this(),
std::placeholders::_1,
std::placeholders::_2)));
}
template <typename T>
void connection<T>::send_error(http_response::status_type type)
{
CONNECTION_DEBUG("handler sends error to client")
("status", type)
("state", make_state_attribute());
send_headers(stock_replies::stock_reply(type),
boost::asio::const_buffer(),
std::bind(&connection::close, this->shared_from_this(), std::placeholders::_1));
}
template class connection<boost::asio::local::stream_protocol::socket>;
template class connection<boost::asio::ip::tcp::socket>;
} // namespace thevoid
} // namespace ioremap
|
= = Æsthetic Club = =
|
/-
Copyright (c) 2021 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import Mathlib.Tactic.Basic
import Mathlib.Data.Array.Basic
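/-- A model of a union-find structure on `n` elements: a parent function on
indices, a rank for each index, and a proof that following a nontrivial parent
link strictly increases the rank (so parent chains cannot cycle). -/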
structure UFModel (n) where
parent : Fin n → Fin n
rank : Nat → Nat
rank_lt : ∀ i, (parent i).1 ≠ i → rank i < rank (parent i)
namespace UFModel
def empty : UFModel 0 where
parent i := i.elim0
rank i := 0
rank_lt i := i.elim0
def push {n} (m : UFModel n) (k) (le : n ≤ k) : UFModel k where
parent i :=
if h : i < n then
let ⟨a, h'⟩ := m.parent ⟨i, h⟩
⟨a, lt_of_lt_of_le h' le⟩
else i
rank i := if h : i < n then m.rank i else 0
rank_lt i := by
simp; split <;> rename_i h
· simp [(m.parent ⟨i, h⟩).2, h]; exact m.rank_lt _
· intro.
def setParent {n} (m : UFModel n) (x y : Fin n) (h : m.rank x < m.rank y) : UFModel n where
parent i := if x.1 = i then y else m.parent i
rank := m.rank
rank_lt i := by
simp; split <;> rename_i h'
· rw [← h']; exact fun _ => h
· exact m.rank_lt i
def setParentBump {n} (m : UFModel n) (x y : Fin n)
(ne : x.1 ≠ y) (H : m.rank x ≤ m.rank y) (hroot : (m.parent y).1 = y) : UFModel n where
parent i := if x.1 = i then y else m.parent i
rank i := if y.1 = i ∧ m.rank x = m.rank y then m.rank y + 1 else m.rank i
rank_lt i := by
simp; split <;>
(rename_i h₁; simp [h₁]; split <;> rename_i h₂ <;>
(intro h; simp [h] at h₂ <;> simp [h₁, h₂, h]))
· simp [← h₁]; split <;> rename_i h₃
· rw [h₃]; apply Nat.lt_succ_self
· exact lt_of_le_of_ne H h₃
· have := Fin.eq_of_val_eq h₂.1; subst this
simp [hroot] at h
· have := m.rank_lt i h
split <;> rename_i h₃
· rw [h₃.1]; exact Nat.lt_succ_of_lt this
· exact this
end UFModel
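/-- A single union-find node as stored in the backing array: the index of its
parent, the value carried by this element, and its rank. -/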
structure UFNode (α : Type _) where
parent : Nat
value : α
rank : Nat
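/-- `Agrees arr f g` states that `g` is exactly the function sending `i` to
`f (arr.get i)`; in particular the index bound must be `arr.size`. -/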
inductive UFModel.Agrees (arr : Array α) (f : α → β) : ∀ {n}, (Fin n → β) → Prop
| mk : Agrees arr f fun i => f (arr.get i)
namespace UFModel.Agrees
theorem mk' {arr : Array α} {f : α → β} {n} {g : Fin n → β}
(e : n = arr.size)
(H : ∀ i h₁ h₂, f (arr.get ⟨i, h₁⟩) = g ⟨i, h₂⟩) :
Agrees arr f g := by
cases e
have : (fun i => f (arr.get i)) = g := by funext ⟨i, h⟩; apply H
cases this; constructor
theorem get_eq {arr : Array α} {n} {m : Fin n → β} (H : Agrees arr f m) :
∀ i h₁ h₂, f (arr.get ⟨i, h₁⟩) = m ⟨i, h₂⟩ := by
cases H; exact fun i h _ => rfl
theorem get_eq' {arr : Array α} {m : Fin arr.size → β} (H : Agrees arr f m)
(i) : f (arr.get i) = m i := H.get_eq ..
theorem empty {f : α → β} {g : Fin 0 → β} : Agrees #[] f g := mk' rfl λ.
theorem push {arr : Array α} {n} {m : Fin n → β} (H : Agrees arr f m)
(k) (hk : k = n + 1) (x) (m' : Fin k → β)
(hm₁ : ∀ (i : Fin k) (h : i < n), m' i = m ⟨i, h⟩)
(hm₂ : ∀ (h : n < k), f x = m' ⟨n, h⟩) : Agrees (arr.push x) f m' := by
cases H
have : k = (arr.push x).size := by simp [hk]
refine mk' this fun i h₁ h₂ => ?_
simp [Array.get_push]; split <;> (rename_i h; simp at hm₁ ⊢)
· exact (hm₁ ⟨i, h₂⟩ _).symm
· simp at h₁
cases le_antisymm (le_of_not_lt h) (Nat.le_of_lt_succ h₁); apply hm₂
theorem set {arr : Array α} {n} {m : Fin n → β} (H : Agrees arr f m)
{i : Fin arr.size} {x} {m' : Fin n → β}
(hm₁ : ∀ (j : Fin n) (h : j.1 ≠ i), m' j = m j)
(hm₂ : ∀ (h : i < n), f x = m' ⟨i, h⟩) : Agrees (arr.set i x) f m' := by
cases H
refine mk' (by simp) fun j hj₁ hj₂ => ?_
have := arr.get?_set i j x
rw [Array.get?_eq_get _ _ hj₁, Array.get?_eq_get _ _ hj₂] at this
revert this; split <;> (rename_i h; simp; intro h'; rw [h'])
· cases h; apply hm₂
· rw [hm₁]; exact Ne.symm h
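/-- The index bound of an `Agrees` instance coincides with the array's size
(needed by `Models.size_eq` below). -/
theorem size_eq {arr : Array α} {n} {m : Fin n → β} (H : Agrees arr f m) :
    n = arr.size := by cases H; rfl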
end UFModel.Agrees
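/-- An array of `UFNode`s models a `UFModel` when the stored parent indices and
ranks agree with the model's `parent` and `rank` functions. -/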
def UFModel.Models (arr : Array (UFNode α)) {n} (m : UFModel n) :=
UFModel.Agrees arr (·.parent) (fun i => m.parent i) ∧
UFModel.Agrees arr (·.rank) (fun i : Fin n => m.rank i)
namespace UFModel.Models
theorem size_eq {arr : Array (UFNode α)} {n} {m : UFModel n} (H : m.Models arr) :
n = arr.size := H.1.size_eq
theorem parent_eq {arr : Array (UFNode α)} {n} {m : UFModel n} (H : m.Models arr)
(i h₁ h₂) : (arr.get ⟨i, h₁⟩).parent = m.parent ⟨i, h₂⟩ := H.1.get_eq ..
theorem parent_eq' {arr : Array (UFNode α)} {m : UFModel arr.size} (H : m.Models arr)
(i) : (arr.get i).parent = m.parent i := H.parent_eq ..
theorem rank_eq {arr : Array (UFNode α)} {n} {m : UFModel n} (H : m.Models arr)
(i) : (arr.get i).rank = m.rank i := H.2.get_eq _ _ (by rw [H.size_eq]; exact i.2)
theorem empty : UFModel.empty.Models (α := α) #[] := ⟨Agrees.empty, Agrees.empty⟩
theorem push {arr : Array (UFNode α)} {n} {m : UFModel n} (H : m.Models arr)
(k) (hk : k = n + 1) (x) :
(m.push k (hk ▸ Nat.le_add_right ..)).Models (arr.push ⟨n, x, 0⟩) := by
apply H.imp <;>
· intro H
refine H.push _ hk _ _ (fun i h => ?_) (fun h => ?_) <;>
simp [UFModel.push, h, lt_irrefl]
theorem setParent {arr : Array (UFNode α)} {n} {m : UFModel n} (hm : m.Models arr)
(i j H hi x) (hp : x.parent = j.1) (hrk : x.rank = (arr.get ⟨i.1, hi⟩).rank) :
(m.setParent i j H).Models (arr.set ⟨i.1, hi⟩ x) :=
⟨hm.1.set
(fun k h => by simp [UFModel.setParent, h.symm])
(fun h => by simp [UFModel.setParent, hp]),
hm.2.set (fun _ _ => rfl) (fun _ => hrk.trans $ hm.2.get_eq ..)⟩
end UFModel.Models
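/-- A union-find data structure over values of type `α`: the backing array of
nodes together with a proof that it is modelled by some `UFModel`. -/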
structure UnionFind (α) where
arr : Array (UFNode α)
model : ∃ (n : _) (m : UFModel n), m.Models arr
namespace UnionFind
def size (self : UnionFind α) := self.arr.size
theorem model' (self : UnionFind α) : ∃ (m : UFModel self.arr.size), m.Models self.arr := by
let ⟨n, m, hm⟩ := self.model; cases hm.size_eq; exact ⟨m, hm⟩
def empty : UnionFind α where
arr := #[]
model := ⟨_, _, UFModel.Models.empty⟩
def mkEmpty (c : Nat) : UnionFind α where
arr := Array.mkEmpty c
model := ⟨_, _, UFModel.Models.empty⟩
def rank (self : UnionFind α) (i : Nat) : Nat :=
if h : i < self.size then (self.arr.get ⟨i, h⟩).rank else 0
def rankMaxAux (self : UnionFind α) : ∀ (i : Nat),
{k : Nat // ∀ j < i, ∀ h, (self.arr.get ⟨j, h⟩).rank ≤ k}
| 0 => ⟨0, λ.⟩
| i+1 => by
let ⟨k, H⟩ := rankMaxAux self i
refine ⟨max k (if h : _ then (self.arr.get ⟨i, h⟩).rank else 0), fun j hj h => ?_⟩
match j, lt_or_eq_of_le (Nat.le_of_lt_succ hj) with
| j, Or.inl hj => exact le_trans (H _ hj h) (le_max_left _ _)
| _, Or.inr rfl => simp [h, le_max_right]
def rankMax (self : UnionFind α) := (rankMaxAux self self.size).1 + 1
theorem lt_rankMax' (self : UnionFind α) (i : Fin self.size) :
(self.arr.get i).rank < self.rankMax :=
Nat.lt_succ_iff.2 $ (rankMaxAux self self.size).2 _ i.2 _
theorem lt_rankMax (self : UnionFind α) (i : Nat) : self.rank i < self.rankMax := by
simp [rank]; split; {apply lt_rankMax'}; apply Nat.succ_pos
theorem rank_eq (self : UnionFind α) {n} {m : UFModel n} (H : m.Models self.arr)
{i} (h : i < self.size) : self.rank i = m.rank i := by
simp [rank, h, H.rank_eq]
theorem rank_lt (self : UnionFind α) {i} : (self.arr.get i).parent ≠ i →
self.rank i < self.rank (self.arr.get i).parent := by
let ⟨m, hm⟩ := self.model'
simp [hm.parent_eq', hm.rank_eq, rank, size, i.2, (m.parent i).2]
exact m.rank_lt i
theorem parent_lt (self : UnionFind α) (i) : (self.arr.get i).parent < self.size := by
let ⟨m, hm⟩ := self.model'
simp [hm.parent_eq', size, (m.parent i).2]
def push (self : UnionFind α) (x : α) : UnionFind α where
arr := self.arr.push ⟨self.arr.size, x, 0⟩
model := let ⟨m, hm⟩ := self.model'; ⟨_, _, hm.push _ rfl _⟩
def findAux (self : UnionFind α) (x : Fin self.size) :
(s : Array (UFNode α)) ×' (root : Fin s.size) ×'
∃ n, ∃ (m : UFModel n) (m' : UFModel n),
m.Models self.arr ∧ m'.Models s ∧ m'.rank = m.rank ∧
(∃ hr, (m'.parent ⟨root, hr⟩).1 = root) ∧
m.rank x ≤ m.rank root := by
let y := (self.arr.get x).parent
refine if h : y = x then ⟨self.arr, x, ?a⟩ else
have := Nat.sub_lt_sub_left (self.lt_rankMax x) (self.rank_lt h)
let ⟨arr₁, root, H⟩ := self.findAux ⟨y, self.parent_lt x⟩
have hx := ?hx
let arr₂ := arr₁.set ⟨x, hx⟩ {arr₁.get ⟨x, hx⟩ with parent := root}
⟨arr₂, ⟨root, by simp [root.2]⟩, ?b⟩
-- start proof
case a =>
let ⟨m, hm⟩ := self.model'
exact ⟨_, m, m, hm, hm, rfl, ⟨x.2, by rwa [← hm.parent_eq']⟩, le_refl _⟩
all_goals let ⟨n, m, m', hm, hm', e, ⟨_, hr⟩, le⟩ := H
case hx => exact hm'.size_eq ▸ hm.size_eq.symm ▸ x.2
case b =>
let x' : Fin n := ⟨x, hm.size_eq ▸ x.2⟩
let root : Fin n := ⟨root, hm'.size_eq.symm ▸ root.2⟩
have hy : (UFModel.parent m x').1 = y := by rw [← hm.parent_eq x x.2 x'.2]; rfl
have := m.rank_lt x'; rw [hy] at this
have := lt_of_lt_of_le (this h) le
refine ⟨n, m, _, hm,
hm'.setParent x' root (by rw [e]; exact this) hx _ rfl rfl, e,
⟨root.2, ?_⟩, le_of_lt this⟩
have := show x.1 ≠ root from mt (congrArg _) (ne_of_lt this)
simp [UFModel.setParent, this, hr]
termination_by _ α self x => self.rankMax - self.rank x
def find (self : UnionFind α) (x : Fin self.size) :
(s : UnionFind α) × (root : Fin s.size) ×'
s.size = self.size ∧ (s.arr.get root).parent = root :=
let ⟨s, root, H⟩ := self.findAux x
have : _ ∧ s.size = self.size ∧ (s.get root).parent = root :=
let ⟨n, _, m', hm, hm', _, ⟨_, hr⟩, _⟩ := H
⟨⟨n, m', hm'⟩, hm'.size_eq.symm.trans hm.size_eq, by rwa [hm'.parent_eq]⟩
⟨⟨s, this.1⟩, root, this.2⟩
def link (self : UnionFind α) (x y : Fin self.size)
(yroot : (self.arr.get y).parent = y) : UnionFind α := by
refine if ne : x.1 = y then self else
let nx := self.arr.get x
let ny := self.arr.get y
if h : ny.rank < nx.rank then
⟨self.arr.set y {ny with parent := x}, ?a⟩
else
let arr₁ := self.arr.set x {nx with parent := y}
let arr₂ := if e : nx.rank = ny.rank then
arr₁.set ⟨y, by simp; exact y.2⟩ {ny with rank := ny.rank + 1}
else arr₁
⟨arr₂, ?b⟩
-- start proof
case a =>
let ⟨m, hm⟩ := self.model'
simp [hm.rank_eq] at h
exact ⟨_, _, hm.setParent y x h _ _ rfl rfl⟩
case b =>
let ⟨m, hm⟩ := self.model'; let n := self.size
simp [hm.rank_eq] at h; simp [hm.parent_eq'] at yroot
refine ⟨_, m.setParentBump x y ne h yroot, ?_⟩
let parent (i : Fin n) := (if x.1 = i then y else m.parent i).1
have : UFModel.Agrees arr₁ (·.parent) parent :=
hm.1.set (fun i h => by simp; rw [if_neg h.symm]) (fun h => by simp)
have H1 : UFModel.Agrees arr₂ (·.parent) parent := by
simp; split
· exact this.set (fun i h => by simp [h.symm]) (fun h => by simp [ne, hm.parent_eq'])
· exact this
have : UFModel.Agrees arr₁ (·.rank) (fun i : Fin n => m.rank i) :=
hm.2.set (fun i h => by simp) (fun h => by simp [hm.rank_eq])
let rank (i : Fin n) := if y.1 = i ∧ m.rank x = m.rank y then m.rank y + 1 else m.rank i
have H2 : UFModel.Agrees arr₂ (·.rank) rank := by
simp; split <;> (rename_i xy; simp [hm.rank_eq] at xy; simp [xy])
· exact this.set (fun i h => by rw [if_neg h.symm]) (fun h => by simp [hm.rank_eq])
· exact this
exact ⟨H1, H2⟩
def union (self : UnionFind α) (x y : Fin self.size) : UnionFind α :=
let ⟨self₁, rx, e, _⟩ := self.find x
let ⟨self₂, ry, e, hry⟩ := self₁.find ⟨y, by rw [e]; exact y.2⟩
self₂.link ⟨rx, by rw [e]; exact rx.2⟩ ry hry
|
From Hammer Require Import Hammer.
Require Import NZAxioms NZMulOrder.
Module Type Pow (Import A : Typ).
Parameters Inline pow : t -> t -> t.
End Pow.
Module Type PowNotation (A : Typ)(Import B : Pow A).
Infix "^" := pow.
End PowNotation.
Module Type Pow' (A : Typ) := Pow A <+ PowNotation A.
Module Type NZPowSpec (Import A : NZOrdAxiomsSig')(Import B : Pow' A).
Declare Instance pow_wd : Proper (eq==>eq==>eq) pow.
Axiom pow_0_r : forall a, a^0 == 1.
Axiom pow_succ_r : forall a b, 0<=b -> a^(succ b) == a * a^b.
Axiom pow_neg_r : forall a b, b<0 -> a^b == 0.
End NZPowSpec.
Module Type NZPow (A : NZOrdAxiomsSig) := Pow A <+ NZPowSpec A.
Module Type NZPow' (A : NZOrdAxiomsSig) := Pow' A <+ NZPowSpec A.
Module Type NZPowProp
(Import A : NZOrdAxiomsSig')
(Import B : NZPow' A)
(Import C : NZMulOrderProp A).
Hint Rewrite pow_0_r pow_succ_r : nz.
Lemma pow_0_l : forall a, 0<a -> 0^a == 0.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_0_l".
intros a Ha.
destruct (lt_exists_pred _ _ Ha) as (a' & EQ & Ha').
rewrite EQ. now nzsimpl.
Qed.
Lemma pow_0_l' : forall a, a~=0 -> 0^a == 0.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_0_l'".
intros a Ha.
destruct (lt_trichotomy a 0) as [LT|[EQ|GT]]; try order.
now rewrite pow_neg_r.
now apply pow_0_l.
Qed.
Lemma pow_1_r : forall a, a^1 == a.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_1_r".
intros. now nzsimpl'.
Qed.
Lemma pow_1_l : forall a, 0<=a -> 1^a == 1.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_1_l".
apply le_ind; intros. solve_proper.
now nzsimpl.
now nzsimpl.
Qed.
Hint Rewrite pow_1_r pow_1_l : nz.
Lemma pow_2_r : forall a, a^2 == a*a.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_2_r".
intros. rewrite two_succ. nzsimpl; order'.
Qed.
Hint Rewrite pow_2_r : nz.
Lemma pow_eq_0 : forall a b, 0<=b -> a^b == 0 -> a == 0.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_eq_0".
intros a b Hb. apply le_ind with (4:=Hb).
solve_proper.
rewrite pow_0_r. order'.
clear b Hb. intros b Hb IH.
rewrite pow_succ_r by trivial.
intros H. apply eq_mul_0 in H. destruct H; trivial.
now apply IH.
Qed.
Lemma pow_nonzero : forall a b, a~=0 -> 0<=b -> a^b ~= 0.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_nonzero".
intros a b Ha Hb. contradict Ha. now apply pow_eq_0 with b.
Qed.
Lemma pow_eq_0_iff : forall a b, a^b == 0 <-> b<0 \/ (0<b /\ a==0).
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_eq_0_iff".
intros a b. split.
intros H.
destruct (lt_trichotomy b 0) as [Hb|[Hb|Hb]].
now left.
rewrite Hb, pow_0_r in H; order'.
right. split; trivial. apply pow_eq_0 with b; order.
intros [Hb|[Hb Ha]]. now rewrite pow_neg_r.
rewrite Ha. apply pow_0_l'. order.
Qed.
Lemma pow_add_r : forall a b c, 0<=b -> 0<=c ->
a^(b+c) == a^b * a^c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_add_r".
intros a b c Hb. apply le_ind with (4:=Hb). solve_proper.
now nzsimpl.
clear b Hb. intros b Hb IH Hc.
nzsimpl; trivial.
rewrite IH; trivial. apply mul_assoc.
now apply add_nonneg_nonneg.
Qed.
Lemma pow_mul_l : forall a b c,
(a*b)^c == a^c * b^c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_mul_l".
intros a b c.
destruct (lt_ge_cases c 0) as [Hc|Hc].
rewrite !(pow_neg_r _ _ Hc). now nzsimpl.
apply le_ind with (4:=Hc). solve_proper.
now nzsimpl.
clear c Hc. intros c Hc IH.
nzsimpl; trivial.
rewrite IH; trivial. apply mul_shuffle1.
Qed.
Lemma pow_mul_r : forall a b c, 0<=b -> 0<=c ->
a^(b*c) == (a^b)^c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_mul_r".
intros a b c Hb. apply le_ind with (4:=Hb). solve_proper.
intros. now nzsimpl.
clear b Hb. intros b Hb IH Hc.
nzsimpl; trivial.
rewrite pow_add_r, IH, pow_mul_l; trivial. apply mul_comm.
now apply mul_nonneg_nonneg.
Qed.
Lemma pow_nonneg : forall a b, 0<=a -> 0<=a^b.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_nonneg".
intros a b Ha.
destruct (lt_ge_cases b 0) as [Hb|Hb].
now rewrite !(pow_neg_r _ _ Hb).
apply le_ind with (4:=Hb). solve_proper.
nzsimpl; order'.
clear b Hb. intros b Hb IH.
nzsimpl; trivial. now apply mul_nonneg_nonneg.
Qed.
Lemma pow_pos_nonneg : forall a b, 0<a -> 0<=b -> 0<a^b.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_pos_nonneg".
intros a b Ha Hb. apply le_ind with (4:=Hb). solve_proper.
nzsimpl; order'.
clear b Hb. intros b Hb IH.
nzsimpl; trivial. now apply mul_pos_pos.
Qed.
Lemma pow_lt_mono_l : forall a b c, 0<c -> 0<=a<b -> a^c < b^c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_lt_mono_l".
intros a b c Hc. apply lt_ind with (4:=Hc). solve_proper.
intros (Ha,H). nzsimpl; trivial; order.
clear c Hc. intros c Hc IH (Ha,H).
nzsimpl; try order.
apply mul_lt_mono_nonneg; trivial.
apply pow_nonneg; try order.
apply IH. now split.
Qed.
Lemma pow_le_mono_l : forall a b c, 0<=a<=b -> a^c <= b^c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_le_mono_l".
intros a b c (Ha,H).
destruct (lt_trichotomy c 0) as [Hc|[Hc|Hc]].
rewrite !(pow_neg_r _ _ Hc); now nzsimpl.
rewrite Hc; now nzsimpl.
apply lt_eq_cases in H. destruct H as [H|H]; [|now rewrite <- H].
apply lt_le_incl, pow_lt_mono_l; now try split.
Qed.
Lemma pow_gt_1 : forall a b, 1<a -> (0<b <-> 1<a^b).
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_gt_1".
intros a b Ha. split; intros Hb.
rewrite <- (pow_1_l b) by order.
apply pow_lt_mono_l; try split; order'.
destruct (lt_trichotomy b 0) as [H|[H|H]]; trivial.
rewrite pow_neg_r in Hb; order'.
rewrite H, pow_0_r in Hb. order.
Qed.
Lemma pow_lt_mono_r : forall a b c, 1<a -> 0<=c -> b<c -> a^b < a^c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_lt_mono_r".
intros a b c Ha Hc H.
destruct (lt_ge_cases b 0) as [Hb|Hb].
rewrite pow_neg_r by trivial. apply pow_pos_nonneg; order'.
assert (H' : b<=c) by order.
destruct (le_exists_sub _ _ H') as (d & EQ & Hd).
rewrite EQ, pow_add_r; trivial. rewrite <- (mul_1_l (a^b)) at 1.
apply mul_lt_mono_pos_r.
apply pow_pos_nonneg; order'.
apply pow_gt_1; trivial.
apply lt_eq_cases in Hd; destruct Hd as [LT|EQ']; trivial.
rewrite <- EQ' in *. rewrite add_0_l in EQ. order.
Qed.
Lemma pow_le_mono_r : forall a b c, 0<a -> b<=c -> a^b <= a^c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_le_mono_r".
intros a b c Ha H.
destruct (lt_ge_cases b 0) as [Hb|Hb].
rewrite (pow_neg_r _ _ Hb). apply pow_nonneg; order.
apply le_succ_l in Ha; rewrite <- one_succ in Ha.
apply lt_eq_cases in Ha; destruct Ha as [Ha|Ha]; [|rewrite <- Ha].
apply lt_eq_cases in H; destruct H as [H|H]; [|now rewrite <- H].
apply lt_le_incl, pow_lt_mono_r; order.
nzsimpl; order.
Qed.
Lemma pow_le_mono : forall a b c d, 0<a<=c -> b<=d ->
a^b <= c^d.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_le_mono".
intros. transitivity (a^d).
apply pow_le_mono_r; intuition order.
apply pow_le_mono_l; intuition order.
Qed.
Lemma pow_lt_mono : forall a b c d, 0<a<c -> 0<b<d ->
a^b < c^d.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_lt_mono".
intros a b c d (Ha,Hac) (Hb,Hbd).
apply le_succ_l in Ha; rewrite <- one_succ in Ha.
apply lt_eq_cases in Ha; destruct Ha as [Ha|Ha]; [|rewrite <- Ha].
transitivity (a^d).
apply pow_lt_mono_r; intuition order.
apply pow_lt_mono_l; try split; order'.
nzsimpl; try order. apply pow_gt_1; order.
Qed.
Lemma pow_inj_l : forall a b c, 0<=a -> 0<=b -> 0<c ->
a^c == b^c -> a == b.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_inj_l".
intros a b c Ha Hb Hc EQ.
destruct (lt_trichotomy a b) as [LT|[EQ'|GT]]; trivial.
assert (a^c < b^c) by (apply pow_lt_mono_l; try split; trivial).
order.
assert (b^c < a^c) by (apply pow_lt_mono_l; try split; trivial).
order.
Qed.
Lemma pow_inj_r : forall a b c, 1<a -> 0<=b -> 0<=c ->
a^b == a^c -> b == c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_inj_r".
intros a b c Ha Hb Hc EQ.
destruct (lt_trichotomy b c) as [LT|[EQ'|GT]]; trivial.
assert (a^b < a^c) by (apply pow_lt_mono_r; try split; trivial).
order.
assert (a^c < a^b) by (apply pow_lt_mono_r; try split; trivial).
order.
Qed.
Lemma pow_lt_mono_l_iff : forall a b c, 0<=a -> 0<=b -> 0<c ->
(a<b <-> a^c < b^c).
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_lt_mono_l_iff".
intros a b c Ha Hb Hc.
split; intro LT.
apply pow_lt_mono_l; try split; trivial.
destruct (le_gt_cases b a) as [LE|GT]; trivial.
assert (b^c <= a^c) by (apply pow_le_mono_l; try split; order).
order.
Qed.
Lemma pow_le_mono_l_iff : forall a b c, 0<=a -> 0<=b -> 0<c ->
(a<=b <-> a^c <= b^c).
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_le_mono_l_iff".
intros a b c Ha Hb Hc.
split; intro LE.
apply pow_le_mono_l; try split; trivial.
destruct (le_gt_cases a b) as [LE'|GT]; trivial.
assert (b^c < a^c) by (apply pow_lt_mono_l; try split; trivial).
order.
Qed.
Lemma pow_lt_mono_r_iff : forall a b c, 1<a -> 0<=c ->
(b<c <-> a^b < a^c).
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_lt_mono_r_iff".
intros a b c Ha Hc.
split; intro LT.
now apply pow_lt_mono_r.
destruct (le_gt_cases c b) as [LE|GT]; trivial.
assert (a^c <= a^b) by (apply pow_le_mono_r; order').
order.
Qed.
Lemma pow_le_mono_r_iff : forall a b c, 1<a -> 0<=c ->
(b<=c <-> a^b <= a^c).
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_le_mono_r_iff".
intros a b c Ha Hc.
split; intro LE.
apply pow_le_mono_r; order'.
destruct (le_gt_cases b c) as [LE'|GT]; trivial.
assert (a^c < a^b) by (apply pow_lt_mono_r; order').
order.
Qed.
Lemma pow_gt_lin_r : forall a b, 1<a -> 0<=b -> b < a^b.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_gt_lin_r".
intros a b Ha Hb. apply le_ind with (4:=Hb). solve_proper.
nzsimpl. order'.
clear b Hb. intros b Hb IH. nzsimpl; trivial.
rewrite <- !le_succ_l in *. rewrite <- two_succ in Ha.
transitivity (2*(S b)).
nzsimpl'. rewrite <- 2 succ_le_mono.
rewrite <- (add_0_l b) at 1. apply add_le_mono; order.
apply mul_le_mono_nonneg; trivial.
order'.
now apply lt_le_incl, lt_succ_r.
Qed.
Lemma pow_add_lower : forall a b c, 0<=a -> 0<=b -> 0<c ->
a^c + b^c <= (a+b)^c.
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_add_lower".
intros a b c Ha Hb Hc. apply lt_ind with (4:=Hc). solve_proper.
nzsimpl; order.
clear c Hc. intros c Hc IH.
assert (0<=c) by order'.
nzsimpl; trivial.
transitivity ((a+b)*(a^c + b^c)).
rewrite mul_add_distr_r, !mul_add_distr_l.
apply add_le_mono.
rewrite <- add_0_r at 1. apply add_le_mono_l.
apply mul_nonneg_nonneg; trivial.
apply pow_nonneg; trivial.
rewrite <- add_0_l at 1. apply add_le_mono_r.
apply mul_nonneg_nonneg; trivial.
apply pow_nonneg; trivial.
apply mul_le_mono_nonneg_l; trivial.
now apply add_nonneg_nonneg.
Qed.
Lemma pow_add_upper : forall a b c, 0<=a -> 0<=b -> 0<c ->
(a+b)^c <= 2^(pred c) * (a^c + b^c).
Proof. hammer_hook "NZPow" "NZPow.NZPowProp.pow_add_upper".
assert (aux : forall a b c, 0<=a<=b -> 0<c ->
(a + b) * (a ^ c + b ^ c) <= 2 * (a * a ^ c + b * b ^ c)).
intros a b c (Ha,H) Hc.
rewrite !mul_add_distr_l, !mul_add_distr_r. nzsimpl'.
rewrite <- !add_assoc. apply add_le_mono_l.
rewrite !add_assoc. apply add_le_mono_r.
destruct (le_exists_sub _ _ H) as (d & EQ & Hd).
rewrite EQ.
rewrite 2 mul_add_distr_r.
rewrite !add_assoc. apply add_le_mono_r.
rewrite add_comm. apply add_le_mono_l.
apply mul_le_mono_nonneg_l; trivial.
apply pow_le_mono_l; try split; order.
intros a b c Ha Hb Hc. apply lt_ind with (4:=Hc). solve_proper.
nzsimpl; order.
clear c Hc. intros c Hc IH.
assert (0<=c) by order.
nzsimpl; trivial.
transitivity ((a+b)*(2^(pred c) * (a^c + b^c))).
apply mul_le_mono_nonneg_l; trivial.
now apply add_nonneg_nonneg.
rewrite mul_assoc. rewrite (mul_comm (a+b)).
assert (EQ : S (P c) == c) by (apply lt_succ_pred with 0; order').
assert (LE : 0 <= P c) by (now rewrite succ_le_mono, EQ, le_succ_l).
assert (EQ' : 2^c == 2^(P c) * 2) by (rewrite <- EQ at 1; nzsimpl'; order).
rewrite EQ', <- !mul_assoc.
apply mul_le_mono_nonneg_l.
apply pow_nonneg; order'.
destruct (le_gt_cases a b).
apply aux; try split; order'.
rewrite (add_comm a), (add_comm (a^c)), (add_comm (a*a^c)).
apply aux; try split; order'.
Qed.
End NZPowProp.
|
/*
* Copyright (C) 2014-2015 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <iostream>
#include <boost/shared_ptr.hpp>
#include "gazebo/test/ServerFixture.hh"
#include "gazebo/msgs/msgs.hh"
#include "gazebo/common/common.hh"
using namespace std;
using namespace gazebo;
class ImageConvertStressTest : public ServerFixture
{
/////////////////////////////////////////////////
public: double virtMemory()
{
double resident, share;
GetMemInfo(resident, share);
return resident + share;
}
public: static void delete_many(unsigned char* ptr)
{
delete[] ptr;
}
};
/////////////////////////////////////////////////
TEST_F(ImageConvertStressTest, ManyConversions)
{
boost::shared_ptr<unsigned char> u(new unsigned char[400*400*3], delete_many);
double memBefore = virtMemory();
for (int i = 0; i < 1000; i++)
{
common::Image image;
msgs::Image msg;
image.SetFromData(u.get(), 400, 400, common::Image::RGB_INT8);
msgs::Set(&msg, image);
}
double memAfter = virtMemory();
// Without the fix in pull request #1057, the difference is over 470000
EXPECT_LE(memAfter - memBefore, 2000);
}
/////////////////////////////////////////////////
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
{-# LANGUAGE FlexibleContexts #-}
-- | Mixed-radix FFT calculation.
--
-- Arbitrary input vector lengths are handled using a mixed-radix
-- Cooley-Tukey decimation in time algorithm with residual prime
-- length vectors being treated using Rader's algorithm or hand-coded
-- codelets for small primes.
module Numeric.FFT
( fft, ifft, fftWith, ifftWith
, plan, planFromFactors, execute
, Plan (..), Direction (..), BaseTransform (..)
) where
import Prelude hiding (length, map, sum, zipWith)
import Data.Vector.Generic
import Data.Complex
import Numeric.FFT.Types
import Numeric.FFT.Plan
import Numeric.FFT.Execute
-- | Forward FFT with embedded plan calculation. For an input vector
-- /h/ of length /N/, with entries numbered from 0 to /N - 1/,
-- calculates the entries in /H/, the discrete Fourier transform of
-- /h/, as:
--
-- <<doc-formulae/fft-formula.svg>>
fft :: Vector v (Complex Double) =>
v (Complex Double) -> IO (v (Complex Double))
fft xs = do
p <- plan $ length xs
return $ fftWith p xs
-- | Inverse FFT with embedded plan calculation. For an input vector
-- /H/ of length /N/, with entries numbered from 0 to /N - 1/,
-- representing Fourier amplitudes of a signal, calculates the entries
-- in /h/, the inverse discrete Fourier transform of /H/, as:
--
-- <<doc-formulae/ifft-formula.svg>>
ifft :: Vector v (Complex Double) =>
v (Complex Double) -> IO (v (Complex Double))
ifft xs = do
p <- plan $ length xs
return $ ifftWith p xs
-- | Forward FFT with pre-computed plan.
fftWith :: Vector v (Complex Double) =>
Plan -> v (Complex Double) -> v (Complex Double)
fftWith p = convert . execute p Forward . convert
-- | Inverse FFT with pre-computed plan.
ifftWith :: Vector v (Complex Double) =>
Plan -> v (Complex Double) -> v (Complex Double)
ifftWith p = convert . execute p Inverse . convert
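-- A minimal usage sketch (assuming the @vector@ package's boxed vectors for
-- the concrete vector instance):
--
-- > import qualified Data.Vector as V
-- > import Data.Complex (Complex(..))
-- > import Numeric.FFT (fft, ifft)
-- >
-- > main :: IO ()
-- > main = do
-- >   let xs = V.fromList [1 :+ 0, 2 :+ 0, 3 :+ 0, 4 :+ 0 :: Complex Double]
-- >   hs  <- fft xs          -- forward transform
-- >   xs' <- ifft hs         -- should recover xs up to rounding error
-- >   print hs
-- >   print xs'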
|
/*
 * This file is part of the Visual Computing Library (VCL), released under the
 * MIT license.
*
* Copyright (c) 2017 Basil Fierz
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
// VCL configuration
#include <vcl/config/global.h>
// C++ Standard library
#include <memory>
#include <vector>
// GSL
#include <gsl/gsl>
// VCL
#include <vcl/hid/windows/hid.h>
#include <vcl/hid/spacenavigator.h>
namespace Vcl { namespace HID { namespace Windows
{
//! Implementation of a 3Dconnexion space mouse
class SpaceNavigatorHID : public AbstractHID, public SpaceNavigator
{
private:
struct InputData
{
//! Current time to live for telling
//! if the device was unplugged while sending data
int timeToLive;
//! Indicate if the data is dirty
bool isDirty;
//! Axis data
std::array<float,6> axes;
//! Check if the data is zero
bool isZero()
{
return (0 == axes[0] && 0 == axes[1] && 0 == axes[2] &&
0 == axes[3] && 0 == axes[4] && 0 == axes[5] );
}
//! Maximum time to live
static const int MaxTimeToLive = 5;
};
public:
SpaceNavigatorHID(std::unique_ptr<GenericHID> dev, bool poll_3d_mouse = false);
//! Reset device when activating the program
void onActivateApp(BOOL active, DWORD dwThreadID);
//! Handle device input
bool processInput(HWND window_handle, UINT input_code, PRAWINPUT raw_input) override;
private:
/*!
* \brief Does all the preprocessing of the rawinput device data before
* finally calling the move3D method.
*
* If polling is enabled (_poll3DMouse == true) this method is called
* from the windows timer message handler
* If polling is not enabled (_poll3DMouse == false) this method is
* called directly from the WM_INPUT handler
*/
void on3DMouseInput();
/*!
* \brief onSpaceMouseMove is invoked when new 3d mouse data is
* available.
*
* \param motion_data Contains the displacement data, using a
* right-handed coordinate system with z down.
* See 'Programing for the 3dmouse' document
* available at www.3dconnexion.com.
* Entries 0, 1, 2 is the incremental pan zoom
* displacement vector (x,y,z).
* Entries 3, 4, 5 is the incremental rotation vector
* (NOT Euler angles).
*/
void onSpaceMouseMove(std::array<float, 6> motion_data);
/*!
* \brief onSpaceMouseKeyDown processes the 3d mouse key presses
*
* \param virtual_key 3d mouse key code
*/
void onSpaceMouseKeyDown(UINT virtual_key);
/*!
* \brief onSpaceMouseKeyUp processes the 3d mouse key releases
*
* \param virtual_key 3d mouse key code
*/
void onSpaceMouseKeyUp(UINT virtual_key);
private:
//! Process a raw input message
bool translateRawInputData(UINT input_code, PRAWINPUT raw_input);
//! Axis input data
InputData _deviceData;
//! Button input data
uint32_t _keystate{ 0 };
//! Last time the data was updated.
//! Use to calculate distance traveled since last event
DWORD _last3DMouseInputTime{ 0 };
private: // Polling support methods
//! Start the timer
void startTimer(HWND hwnd);
//! Timer callback
void onTimer(UINT_PTR event_id);
//! Kill the currently running timer
void killPollingTimer();
private: // Polling support
//! 3D mouse is in polling mode
bool _poll3DMouse{ false };
//! Polling period in milliseconds; the default of 20 ms corresponds to 50 Hz
UINT _pollingPeriod3DMouse{ 20 };
//! 3DMouse data polling timer
//! Only used if _poll3DMouse == true
UINT_PTR _timer3DMouse{ 0 };
};
}}}
|
Literary Nonfiction. One of the most versatile writers of his generation, Stephen Kessler has distinguished himself over the last forty years as a poet, critic ("certainly the best poetry critic in sight," according to Lawrence Ferlinghetti), translator, novelist, and a wide-ranging journalist. As editor, he founded the international journal Alcatraz (1979-1985) and the Santa Cruz newsweekly The Sun (1986-1989), as well as The Redwood Coast Review (1999-2014). Following his earlier two books of essays, organized around themes of poetry and cultural criticism, in this third collection he gets more personal and political. Kessler's keen eye, sharp wit and readable style—whether reflecting on Viagra, multilingualism, Miss America, fatherhood, Gertrude Stein, anarchism, Robinson Jeffers, the pleasures of gossip, a trip to Cuba, Steve Jobs, Charles Bukowski, or getting mugged in New York—keep his writings vividly alive. The eclectic essays in NEED I SAY MORE? have both the immediacy of the present moment and the lasting value of literature. |
[STATEMENT]
lemma rbl_succ: "rbl_succ (rev (bin_to_bl n bin)) = rev (bin_to_bl n (bin + 1))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rbl_succ (rev (bin_to_bl n bin)) = rev (bin_to_bl n (bin + 1))
[PROOF STEP]
apply (unfold bin_to_bl_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rbl_succ (rev (bin_to_bl_aux n bin [])) = rev (bin_to_bl_aux n (bin + 1) [])
[PROOF STEP]
apply (induction n arbitrary: bin)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>bin. rbl_succ (rev (bin_to_bl_aux 0 bin [])) = rev (bin_to_bl_aux 0 (bin + 1) [])
2. \<And>n bin. (\<And>bin. rbl_succ (rev (bin_to_bl_aux n bin [])) = rev (bin_to_bl_aux n (bin + 1) [])) \<Longrightarrow> rbl_succ (rev (bin_to_bl_aux (Suc n) bin [])) = rev (bin_to_bl_aux (Suc n) (bin + 1) [])
[PROOF STEP]
apply simp_all
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n bin. (\<And>bin. rbl_succ (rev (bin_to_bl_aux n bin [])) = rev (bin_to_bl_aux n (bin + 1) [])) \<Longrightarrow> rbl_succ (rev (bin_to_bl_aux n (bin div 2) [odd bin])) = rev (bin_to_bl_aux n ((bin + 1) div 2) [even bin])
[PROOF STEP]
apply (case_tac bin rule: bin_exhaust)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n bin x b. \<lbrakk>\<And>bin. rbl_succ (rev (bin_to_bl_aux n bin [])) = rev (bin_to_bl_aux n (bin + 1) []); bin = of_bool b + 2 * x\<rbrakk> \<Longrightarrow> rbl_succ (rev (bin_to_bl_aux n (bin div 2) [odd bin])) = rev (bin_to_bl_aux n ((bin + 1) div 2) [even bin])
[PROOF STEP]
apply (simp_all add: bin_to_bl_aux_alt ac_simps)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
theory ShowTypes_Test
imports
Lib.ShowTypes
CLib.LemmaBucket_C
CParser.CTranslation
begin
text \<open>
Simple demo and test. The main HOL theories don't have that much
hidden polymorphism, so we use l4.verified.
\<close>
experiment
(* The point of this test is to confirm that the generic 'show types' feature
shows enough type information to fully reconstruct a term; the pointer type
feature does something similar so we disable it here. *)
notes [[show_ptr_types = false]]
begin
lemma c_guard_cast_byte: "c_guard (x :: ('a :: {mem_type}) ptr) \<Longrightarrow> c_guard (ptr_coerce x :: 8 word ptr)"
goal_show_types 0
using [[show_sorts]]
goal_show_types 0
apply (case_tac x)
apply (fastforce intro!: byte_ptr_guarded simp: c_guard_def dest: c_null_guard)
done
thm c_guard_cast_byte[where x = "Ptr (ucast (0 :: 8 word))"]
thm_show_types c_guard_cast_byte[where x = "Ptr (ucast (0 :: 8 word))"]
(* Round-trip test *)
ML \<open>
let val ctxt = Config.put show_sorts true @{context}
(* NB: this test fails if we leave some polymorphism in the term *)
val term = @{thm c_guard_cast_byte[where x = "Ptr (ucast (0 :: 8 word)) :: unit ptr"]} |> Thm.prop_of
val string_no_types = Syntax.pretty_term ctxt term
|> Pretty.string_of |> YXML.content_of
val string_show_types = Show_Types.term_show_types true ctxt term
val _ = assert (Syntax.read_term ctxt string_no_types <> term) "Show_Types test (baseline)"
val _ = assert (Syntax.read_term ctxt string_show_types = term) "Show_Types test"
in () end
\<close>
end
end |