[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a + a = 0
[PROOFSTEP]
have : a + a = a + a + (a + a) :=
calc
a + a = (a + a) * (a + a) := by rw [mul_self]
_ = a * a + a * a + (a * a + a * a) := by rw [add_mul, mul_add]
_ = a + a + (a + a) := by rw [mul_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a + a = (a + a) * (a + a)
[PROOFSTEP]
rw [mul_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ (a + a) * (a + a) = a * a + a * a + (a * a + a * a)
[PROOFSTEP]
rw [add_mul, mul_add]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a * a + a * a + (a * a + a * a) = a + a + (a + a)
[PROOFSTEP]
rw [mul_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
this : a + a = a + a + (a + a)
⊢ a + a = 0
[PROOFSTEP]
rwa [self_eq_add_left] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ -a = -a + 0
[PROOFSTEP]
rw [add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ -a + 0 = -a + -a + a
[PROOFSTEP]
rw [← neg_add_self, add_assoc]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ -a + -a + a = a
[PROOFSTEP]
rw [add_self, zero_add]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a = -b ↔ a = b
[PROOFSTEP]
rw [neg_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a * b + b * a = 0
[PROOFSTEP]
have : a + b = a + b + (a * b + b * a) :=
calc
a + b = (a + b) * (a + b) := by rw [mul_self]
_ = a * a + a * b + (b * a + b * b) := by rw [add_mul, mul_add, mul_add]
_ = a + a * b + (b * a + b) := by simp only [mul_self]
_ = a + b + (a * b + b * a) := by abel
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a + b = (a + b) * (a + b)
[PROOFSTEP]
rw [mul_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ (a + b) * (a + b) = a * a + a * b + (b * a + b * b)
[PROOFSTEP]
rw [add_mul, mul_add, mul_add]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a * a + a * b + (b * a + b * b) = a + a * b + (b * a + b)
[PROOFSTEP]
simp only [mul_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a + a * b + (b * a + b) = a + b + (a * b + b * a)
[PROOFSTEP]
abel
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a + a * b + (b * a + b) = a + b + (a * b + b * a)
[PROOFSTEP]
abel
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
this : a + b = a + b + (a * b + b * a)
⊢ a * b + b * a = 0
[PROOFSTEP]
rwa [self_eq_add_right] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a - b = a + b
[PROOFSTEP]
rw [sub_eq_add_neg, add_right_inj, neg_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a b : α
⊢ a * (1 + a) = 0
[PROOFSTEP]
rw [mul_add, mul_one, mul_self, add_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝ : BooleanRing α
a✝ b✝ : α
src✝ : BooleanRing α := inferInstance
a b : α
⊢ a * b = b * a
[PROOFSTEP]
rw [← add_eq_zero', mul_add_mul]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a ⊔ b = b ⊔ a
[PROOFSTEP]
dsimp only [(· ⊔ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a + b + a * b = b + a + b * a
[PROOFSTEP]
ring
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a ⊓ b = b ⊓ a
[PROOFSTEP]
dsimp only [(· ⊓ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a * b = b * a
[PROOFSTEP]
ring
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b c : α
⊢ a ⊔ b ⊔ c = a ⊔ (b ⊔ c)
[PROOFSTEP]
dsimp only [(· ⊔ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b c : α
⊢ a + b + a * b + c + (a + b + a * b) * c = a + (b + c + b * c) + a * (b + c + b * c)
[PROOFSTEP]
ring
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b c : α
⊢ a ⊓ b ⊓ c = a ⊓ (b ⊓ c)
[PROOFSTEP]
dsimp only [(· ⊓ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b c : α
⊢ a * b * c = a * (b * c)
[PROOFSTEP]
ring
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a ⊔ a ⊓ b = a
[PROOFSTEP]
dsimp only [(· ⊔ ·), (· ⊓ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a + a * b + a * (a * b) = a
[PROOFSTEP]
rw [← mul_assoc, mul_self, add_assoc, add_self, add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a ⊓ (a ⊔ b) = a
[PROOFSTEP]
dsimp only [(· ⊔ ·), (· ⊓ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a * (a + b + a * b) = a
[PROOFSTEP]
rw [mul_add, mul_add, mul_self, ← mul_assoc, mul_self, add_assoc, add_self, add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b c : α
⊢ (a + b + a * b) * (a + c + a * c) =
a * a + b * c + a * (b * c) + (a * b + a * a * b) + (a * c + a * a * c) + (a * b * c + a * a * b * c)
[PROOFSTEP]
ring
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b c : α
⊢ a * a + b * c + a * (b * c) + (a * b + a * a * b) + (a * c + a * a * c) + (a * b * c + a * a * b * c) =
a + b * c + a * (b * c)
[PROOFSTEP]
simp only [mul_self, add_self, add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b c : α
⊢ (a ⊔ b) ⊓ (a ⊔ c) ⊔ (a ⊔ b ⊓ c) = a ⊔ b ⊓ c
[PROOFSTEP]
dsimp only [(· ⊔ ·), (· ⊓ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b c : α
⊢ (a + b + a * b) * (a + c + a * c) + (a + b * c + a * (b * c)) +
(a + b + a * b) * (a + c + a * c) * (a + b * c + a * (b * c)) =
a + b * c + a * (b * c)
[PROOFSTEP]
rw [le_sup_inf_aux, add_self, mul_self, zero_add]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
src✝ : Lattice α :=
Lattice.mk' (_ : ∀ (a b : α), a ⊔ b = b ⊔ a) (_ : ∀ (a b c : α), a ⊔ b ⊔ c = a ⊔ (b ⊔ c))
(_ : ∀ (a b : α), a ⊓ b = b ⊓ a) (_ : ∀ (a b c : α), a ⊓ b ⊓ c = a ⊓ (b ⊓ c)) (_ : ∀ (a b : α), a ⊔ a ⊓ b = a)
(_ : ∀ (a b : α), a ⊓ (a ⊔ b) = a)
a : α
⊢ a * (1 + a) + 0 + a * (1 + a) * 0 = 0
[PROOFSTEP]
norm_num [mul_add, mul_self, add_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
src✝ : Lattice α :=
Lattice.mk' (_ : ∀ (a b : α), a ⊔ b = b ⊔ a) (_ : ∀ (a b c : α), a ⊔ b ⊔ c = a ⊔ (b ⊔ c))
(_ : ∀ (a b : α), a ⊓ b = b ⊓ a) (_ : ∀ (a b c : α), a ⊓ b ⊓ c = a ⊓ (b ⊓ c)) (_ : ∀ (a b : α), a ⊔ a ⊓ b = a)
(_ : ∀ (a b : α), a ⊓ (a ⊔ b) = a)
a : α
⊢ ⊤ ≤ a ⊔ aᶜ
[PROOFSTEP]
change 1 + (a + (1 + a) + a * (1 + a)) + 1 * (a + (1 + a) + a * (1 + a)) = a + (1 + a) + a * (1 + a)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
src✝ : Lattice α :=
Lattice.mk' (_ : ∀ (a b : α), a ⊔ b = b ⊔ a) (_ : ∀ (a b c : α), a ⊔ b ⊔ c = a ⊔ (b ⊔ c))
(_ : ∀ (a b : α), a ⊓ b = b ⊓ a) (_ : ∀ (a b c : α), a ⊓ b ⊓ c = a ⊓ (b ⊓ c)) (_ : ∀ (a b : α), a ⊔ a ⊓ b = a)
(_ : ∀ (a b : α), a ⊓ (a ⊔ b) = a)
a : α
⊢ 1 + (a + (1 + a) + a * (1 + a)) + 1 * (a + (1 + a) + a * (1 + a)) = a + (1 + a) + a * (1 + a)
[PROOFSTEP]
norm_num [mul_add, mul_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
src✝ : Lattice α :=
Lattice.mk' (_ : ∀ (a b : α), a ⊔ b = b ⊔ a) (_ : ∀ (a b c : α), a ⊔ b ⊔ c = a ⊔ (b ⊔ c))
(_ : ∀ (a b : α), a ⊓ b = b ⊓ a) (_ : ∀ (a b c : α), a ⊓ b ⊓ c = a ⊓ (b ⊓ c)) (_ : ∀ (a b : α), a ⊔ a ⊓ b = a)
(_ : ∀ (a b : α), a ⊓ (a ⊔ b) = a)
a : α
⊢ 1 + (a + (1 + a)) = 0
[PROOFSTEP]
rw [← add_assoc, add_self]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
src✝ : Lattice α :=
Lattice.mk' (_ : ∀ (a b : α), a ⊔ b = b ⊔ a) (_ : ∀ (a b c : α), a ⊔ b ⊔ c = a ⊔ (b ⊔ c))
(_ : ∀ (a b : α), a ⊓ b = b ⊓ a) (_ : ∀ (a b c : α), a ⊓ b ⊓ c = a ⊓ (b ⊓ c)) (_ : ∀ (a b : α), a ⊔ a ⊓ b = a)
(_ : ∀ (a b : α), a ⊓ (a ⊔ b) = a)
a : α
⊢ a + 1 + a * 1 = 1
[PROOFSTEP]
rw [mul_one, (add_comm a 1), add_assoc, add_self, add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
src✝ : Lattice α :=
Lattice.mk' (_ : ∀ (a b : α), a ⊔ b = b ⊔ a) (_ : ∀ (a b c : α), a ⊔ b ⊔ c = a ⊔ (b ⊔ c))
(_ : ∀ (a b : α), a ⊓ b = b ⊓ a) (_ : ∀ (a b c : α), a ⊓ b ⊓ c = a ⊓ (b ⊓ c)) (_ : ∀ (a b : α), a ⊔ a ⊓ b = a)
(_ : ∀ (a b : α), a ⊓ (a ⊔ b) = a)
a : α
⊢ 0 + a + 0 * a = a
[PROOFSTEP]
rw [zero_mul, zero_add, add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ (a + b + a * b) * (1 + a * b) = a + b + (a * b + a * b * (a * b)) + (a * (b * b) + a * a * b)
[PROOFSTEP]
ring
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : α
⊢ a + b + (a * b + a * b * (a * b)) + (a * (b * b) + a * a * b) = a + b
[PROOFSTEP]
simp only [mul_self, add_self, add_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : AsBoolAlg α
⊢ ↑ofBoolAlg (a ∆ b) = ↑ofBoolAlg a + ↑ofBoolAlg b
[PROOFSTEP]
rw [symmDiff_eq_sup_sdiff_inf]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
a b : AsBoolAlg α
⊢ ↑ofBoolAlg ((a ⊔ b) \ (a ⊓ b)) = ↑ofBoolAlg a + ↑ofBoolAlg b
[PROOFSTEP]
exact of_boolalg_symmDiff_aux _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
f : α →+* β
a b : AsBoolAlg α
⊢ (↑toBoolAlg ∘ ↑f ∘ ↑ofBoolAlg) (a ⊔ b) = (↑toBoolAlg ∘ ↑f ∘ ↑ofBoolAlg) a ⊔ (↑toBoolAlg ∘ ↑f ∘ ↑ofBoolAlg) b
[PROOFSTEP]
dsimp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
inst✝² : BooleanRing α
inst✝¹ : BooleanRing β
inst✝ : BooleanRing γ
f : α →+* β
a b : AsBoolAlg α
⊢ ↑toBoolAlg (↑f (↑ofBoolAlg a + ↑ofBoolAlg b + ↑ofBoolAlg a * ↑ofBoolAlg b)) =
↑toBoolAlg (↑f (↑ofBoolAlg a)) ⊔ ↑toBoolAlg (↑f (↑ofBoolAlg b))
[PROOFSTEP]
simp_rw [map_add f, map_mul f, toBoolAlg_add_add_mul]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
a : Bool
⊢ a * 0 = 0
[PROOFSTEP]
cases a
[GOAL]
case false
α : Type u_1
β : Type u_2
γ : Type u_3
⊢ false * 0 = 0
[PROOFSTEP]
rfl
[GOAL]
case true
α : Type u_1
β : Type u_2
γ : Type u_3
⊢ true * 0 = 0
[PROOFSTEP]
rfl
|
\chapter{$h$-characterization}
\label{ch:h:characterization}
In this chapter we develop a new characterization of a given Riordan array
$\mathcal{R}(d(t),h(t))$ in which only the function $h$ appears: the
characterization is obtained by combining the function $d$ with the
compositional inverse $\hat{h}$ of $h$.
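Recall that the compositional inverse is the formal power series satisfying
\[
    h\bigl(\hat{h}(t)\bigr) = \hat{h}\bigl(h(t)\bigr) = t,
\]
which exists whenever $h(0)=0$ and $h'(0)\neq 0$.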
\input{Chapters/h-characterization/main-idea}
\input{Chapters/h-characterization/group-operations}
\input{Chapters/h-characterization/sequences-connection}
\input{Chapters/h-characterization/open-questions}
|
Formal statement is: lemma open_ball: "open {y. dist x y < d}" Informal statement is: The open ball of radius $d$ around $x$ is open. |
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
! This file was ported from Lean 3 source module order.category.FinBoolAlg
! leanprover-community/mathlib commit e8ac6315bcfcbaf2d19a046719c3b553206dac75
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Fintype.Powerset
import Mathbin.Order.Category.BoolAlg
import Mathbin.Order.Category.FinPartOrd
import Mathbin.Order.Hom.CompleteLattice
/-!
# The category of finite boolean algebras
This file defines `FinBoolAlg`, the category of finite boolean algebras.
## TODO
Birkhoff's representation for finite Boolean algebras.
`Fintype_to_FinBoolAlg_op.left_op ⋙ FinBoolAlg.dual ≅ Fintype_to_FinBoolAlg_op.left_op`
`FinBoolAlg` is essentially small.
-/
universe u
open CategoryTheory OrderDual Opposite
/-- The category of finite boolean algebras with bounded lattice morphisms. -/
structure FinBoolAlg where
toBoolAlg : BoolAlg
  [isFintype : Fintype toBoolAlg]
#align FinBoolAlg FinBoolAlg
namespace FinBoolAlg
instance : CoeSort FinBoolAlg (Type _) :=
⟨fun X => X.toBoolAlg⟩
instance (X : FinBoolAlg) : BooleanAlgebra X :=
X.toBoolAlg.str
attribute [instance] FinBoolAlg.isFintype
@[simp]
theorem coe_toBoolAlg (X : FinBoolAlg) : ↥X.toBoolAlg = ↥X :=
rfl
#align FinBoolAlg.coe_to_BoolAlg FinBoolAlg.coe_toBoolAlg
/-- Construct a bundled `FinBoolAlg` from `boolean_algebra` + `fintype`. -/
def of (α : Type _) [BooleanAlgebra α] [Fintype α] : FinBoolAlg :=
⟨⟨α⟩⟩
#align FinBoolAlg.of FinBoolAlg.of
@[simp]
theorem coe_of (α : Type _) [BooleanAlgebra α] [Fintype α] : ↥(of α) = α :=
rfl
#align FinBoolAlg.coe_of FinBoolAlg.coe_of
instance : Inhabited FinBoolAlg :=
⟨of PUnit⟩
instance largeCategory : LargeCategory FinBoolAlg :=
InducedCategory.category FinBoolAlg.toBoolAlg
#align FinBoolAlg.large_category FinBoolAlg.largeCategory
instance concreteCategory : ConcreteCategory FinBoolAlg :=
InducedCategory.concreteCategory FinBoolAlg.toBoolAlg
#align FinBoolAlg.concrete_category FinBoolAlg.concreteCategory
instance hasForgetToBoolAlg : HasForget₂ FinBoolAlg BoolAlg :=
InducedCategory.hasForget₂ FinBoolAlg.toBoolAlg
#align FinBoolAlg.has_forget_to_BoolAlg FinBoolAlg.hasForgetToBoolAlg
instance forgetToBoolAlgFull : Full (forget₂ FinBoolAlg BoolAlg) :=
InducedCategory.full _
#align FinBoolAlg.forget_to_BoolAlg_full FinBoolAlg.forgetToBoolAlgFull
instance forget_to_boolAlg_faithful : Faithful (forget₂ FinBoolAlg BoolAlg) :=
InducedCategory.faithful _
#align FinBoolAlg.forget_to_BoolAlg_faithful FinBoolAlg.forget_to_boolAlg_faithful
@[simps]
instance hasForgetToFinPartOrd : HasForget₂ FinBoolAlg FinPartOrd
where forget₂ :=
{ obj := fun X => FinPartOrd.of X
map := fun X Y f => show OrderHom X Y from ↑(show BoundedLatticeHom X Y from f) }
#align FinBoolAlg.has_forget_to_FinPartOrd FinBoolAlg.hasForgetToFinPartOrd
instance forget_to_finPartOrd_faithful : Faithful (forget₂ FinBoolAlg FinPartOrd) :=
⟨fun X Y f g h =>
haveI := congr_arg (coeFn : _ → X → Y) h
FunLike.coe_injective this⟩
#align FinBoolAlg.forget_to_FinPartOrd_faithful FinBoolAlg.forget_to_finPartOrd_faithful
/-- Constructs an equivalence between finite Boolean algebras from an order isomorphism between
them. -/
@[simps]
def Iso.mk {α β : FinBoolAlg.{u}} (e : α ≃o β) : α ≅ β
where
hom := (e : BoundedLatticeHom α β)
inv := (e.symm : BoundedLatticeHom β α)
hom_inv_id' := by
ext
exact e.symm_apply_apply _
inv_hom_id' := by
ext
exact e.apply_symm_apply _
#align FinBoolAlg.iso.mk FinBoolAlg.Iso.mk
/-- `order_dual` as a functor. -/
@[simps]
def dual : FinBoolAlg ⥤ FinBoolAlg where
obj X := of Xᵒᵈ
map X Y := BoundedLatticeHom.dual
#align FinBoolAlg.dual FinBoolAlg.dual
/-- The equivalence between `FinBoolAlg` and itself induced by `order_dual` both ways. -/
@[simps Functor inverse]
def dualEquiv : FinBoolAlg ≌ FinBoolAlg :=
Equivalence.mk dual dual
(NatIso.ofComponents (fun X => Iso.mk <| OrderIso.dualDual X) fun X Y f => rfl)
(NatIso.ofComponents (fun X => Iso.mk <| OrderIso.dualDual X) fun X Y f => rfl)
#align FinBoolAlg.dual_equiv FinBoolAlg.dualEquiv
end FinBoolAlg
/-- The powerset functor. `set` as a functor. -/
@[simps]
def fintypeToFinBoolAlgOp : FintypeCat ⥤ FinBoolAlgᵒᵖ
where
obj X := op <| FinBoolAlg.of (Set X)
map X Y f :=
Quiver.Hom.op <| (CompleteLatticeHom.setPreimage f : BoundedLatticeHom (Set Y) (Set X))
#align Fintype_to_FinBoolAlg_op fintypeToFinBoolAlgOp
|
State Before: α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₁ c₂ c₃ s t : Set α
a b x y : α
hc₁ : ChainClosure r c₁
hc₂ : ChainClosure r c₂
h : c₁ ⊆ c₂
⊢ c₂ = c₁ ∨ SuccChain r c₁ ⊆ c₂ State After: case succ
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s t : Set α
a b x y : α
s✝ : Set α
a✝ : ChainClosure r s✝
a_ih✝ : ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ s✝ → s✝ = c₁ ∨ SuccChain r c₁ ⊆ s✝
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ SuccChain r s✝
⊢ SuccChain r s✝ = c₁ ∨ SuccChain r c₁ ⊆ SuccChain r s✝
case union
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s t : Set α
a b x y : α
s✝ : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s✝ → ChainClosure r a
a_ih✝ : ∀ (a : Set α), a ∈ s✝ → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s✝
⊢ ⋃₀ s✝ = c₁ ∨ SuccChain r c₁ ⊆ ⋃₀ s✝ Tactic: induction hc₂ generalizing c₁ hc₁ State Before: α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂✝ c₃ s t : Set α
a b x y : α
c₂ : Set α
a✝ : ChainClosure r c₂
ih : ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ c₂ → c₂ = c₁ ∨ SuccChain r c₁ ⊆ c₂
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ SuccChain r c₂
⊢ SuccChain r c₂ = c₁ ∨ SuccChain r c₁ ⊆ SuccChain r c₂ State After: α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂✝ c₃ s t : Set α
a b x y : α
c₂ : Set α
a✝ : ChainClosure r c₂
ih : ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ c₂ → c₂ = c₁ ∨ SuccChain r c₁ ⊆ c₂
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ SuccChain r c₂
h₁ : c₁ ⊆ c₂
⊢ SuccChain r c₁ ⊆ SuccChain r c₂ Tactic: refine' ((chainClosure_succ_total_aux hc₁) fun c₁ => ih).imp h.antisymm' fun h₁ => _ State Before: α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂✝ c₃ s t : Set α
a b x y : α
c₂ : Set α
a✝ : ChainClosure r c₂
ih : ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ c₂ → c₂ = c₁ ∨ SuccChain r c₁ ⊆ c₂
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ SuccChain r c₂
h₁ : c₁ ⊆ c₂
⊢ SuccChain r c₁ ⊆ SuccChain r c₂ State After: case inl
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂✝ c₃ s t : Set α
a b x y : α
c₂ : Set α
a✝ : ChainClosure r c₂
ih : ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ c₂ → c₂ = c₁ ∨ SuccChain r c₁ ⊆ c₂
hc₁ : ChainClosure r c₂
h : c₂ ⊆ SuccChain r c₂
h₁ : c₂ ⊆ c₂
⊢ SuccChain r c₂ ⊆ SuccChain r c₂
case inr
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂✝ c₃ s t : Set α
a b x y : α
c₂ : Set α
a✝ : ChainClosure r c₂
ih : ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ c₂ → c₂ = c₁ ∨ SuccChain r c₁ ⊆ c₂
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ SuccChain r c₂
h₁ : c₁ ⊆ c₂
h₂ : SuccChain r c₁ ⊆ c₂
⊢ SuccChain r c₁ ⊆ SuccChain r c₂ Tactic: obtain rfl | h₂ := ih hc₁ h₁ State Before: case inl
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂✝ c₃ s t : Set α
a b x y : α
c₂ : Set α
a✝ : ChainClosure r c₂
ih : ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ c₂ → c₂ = c₁ ∨ SuccChain r c₁ ⊆ c₂
hc₁ : ChainClosure r c₂
h : c₂ ⊆ SuccChain r c₂
h₁ : c₂ ⊆ c₂
⊢ SuccChain r c₂ ⊆ SuccChain r c₂ State After: no goals Tactic: exact Subset.rfl State Before: case inr
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂✝ c₃ s t : Set α
a b x y : α
c₂ : Set α
a✝ : ChainClosure r c₂
ih : ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ c₂ → c₂ = c₁ ∨ SuccChain r c₁ ⊆ c₂
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ SuccChain r c₂
h₁ : c₁ ⊆ c₂
h₂ : SuccChain r c₁ ⊆ c₂
⊢ SuccChain r c₁ ⊆ SuccChain r c₂ State After: no goals Tactic: exact h₂.trans subset_succChain State Before: α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
⊢ ⋃₀ s = c₁ ∨ SuccChain r c₁ ⊆ ⋃₀ s State After: α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
⊢ ⋃₀ s ⊆ c₁ ∨ SuccChain r c₁ ⊆ ⋃₀ s Tactic: apply Or.imp_left h.antisymm' State Before: α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
⊢ ⋃₀ s ⊆ c₁ ∨ SuccChain r c₁ ⊆ ⋃₀ s State After: case a
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
⊢ ¬(⋃₀ s ⊆ c₁ ∨ SuccChain r c₁ ⊆ ⋃₀ s) → False Tactic: apply by_contradiction State Before: case a
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
⊢ ¬(⋃₀ s ⊆ c₁ ∨ SuccChain r c₁ ⊆ ⋃₀ s) → False State After: case a
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
⊢ ∀ (x : Set α), x ∈ s → ¬x ⊆ c₁ → ¬SuccChain r c₁ ⊆ ⋃₀ s → False Tactic: simp only [sUnion_subset_iff, not_or, not_forall, exists_prop, and_imp, forall_exists_index] State Before: case a
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
⊢ ∀ (x : Set α), x ∈ s → ¬x ⊆ c₁ → ¬SuccChain r c₁ ⊆ ⋃₀ s → False State After: case a
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
⊢ False Tactic: intro c₃ hc₃ h₁ h₂ State Before: case a
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
⊢ False State After: case a.inl
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h✝ : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
h : SuccChain r c₃ ⊆ c₁
⊢ False
case a.inr
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h✝ : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
h : c₁ ⊆ c₃
⊢ False Tactic: obtain h | h := chainClosure_succ_total_aux hc₁ fun c₄ => ih _ hc₃ State Before: case a.inr
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h✝ : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
h : c₁ ⊆ c₃
⊢ False State After: case a.inr.inl
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h✝ : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
h : c₁ ⊆ c₃
h' : c₃ = c₁
⊢ False
case a.inr.inr
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h✝ : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
h : c₁ ⊆ c₃
h' : SuccChain r c₁ ⊆ c₃
⊢ False Tactic: obtain h' | h' := ih c₃ hc₃ hc₁ h State Before: case a.inl
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h✝ : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
h : SuccChain r c₃ ⊆ c₁
⊢ False State After: no goals Tactic: exact h₁ (subset_succChain.trans h) State Before: case a.inr.inl
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h✝ : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
h : c₁ ⊆ c₃
h' : c₃ = c₁
⊢ False State After: no goals Tactic: exact h₁ h'.subset State Before: case a.inr.inr
α : Type u_1
β : Type ?u.11677
r : α → α → Prop
c c₂ c₃✝ s✝ t : Set α
a b x y : α
s : Set (Set α)
a✝ : ∀ (a : Set α), a ∈ s → ChainClosure r a
ih : ∀ (a : Set α), a ∈ s → ∀ {c₁ : Set α}, ChainClosure r c₁ → c₁ ⊆ a → a = c₁ ∨ SuccChain r c₁ ⊆ a
c₁ : Set α
hc₁ : ChainClosure r c₁
h✝ : c₁ ⊆ ⋃₀ s
c₃ : Set α
hc₃ : c₃ ∈ s
h₁ : ¬c₃ ⊆ c₁
h₂ : ¬SuccChain r c₁ ⊆ ⋃₀ s
h : c₁ ⊆ c₃
h' : SuccChain r c₁ ⊆ c₃
⊢ False State After: no goals Tactic: exact h₂ (h'.trans <| subset_sUnion_of_mem hc₃) |
State Before: α : Type u_2
β✝ : Type ?u.393825
γ : Type u_3
ι : Type ?u.393831
inst✝² : Countable ι
m : MeasurableSpace α
μ : Measure α
inst✝¹ : TopologicalSpace β✝
inst✝ : TopologicalSpace γ
f✝ g✝ : α → β✝
β : Type u_1
f : α → β
mα : MeasurableSpace α
μa : Measure α
mβ : MeasurableSpace β
μb : Measure β
hf : MeasurePreserving f
h₂ : MeasurableEmbedding f
g : β → γ
⊢ AEStronglyMeasurable (g ∘ f) μa ↔ AEStronglyMeasurable g μb State After: no goals Tactic: rw [← hf.map_eq, h₂.aestronglyMeasurable_map_iff] |
Require Import Fiat.Common Fiat.Computation
Fiat.ADT.ADTSig Fiat.ADT.Core
Fiat.ADTRefinement.Core Fiat.ADTRefinement.SetoidMorphisms.
Section SimplifyRep.
(* If a representation has extraneous information (perhaps intermediate
data introduced during refinement), simplifying the representation
is a valid refinement. *)
Variable oldRep : Type. (* The old representation type. *)
Variable newRep : Type. (* The new representation type. *)
Variable simplifyf : oldRep -> newRep. (* The simplification function. *)
Variable concretize : newRep -> oldRep. (* A map to the enriched representation. *)
(* The abstraction relation between old and new representations. *)
Variable AbsR : oldRep -> newRep -> Prop.
Notation "ro ≃ rn" := (AbsR ro rn) (at level 70).
(*Definition simplifyMethod
(Dom : list Type)
(Cod : Type)
(oldMeth : methodType oldRep Dom Cod)
r_n n : Comp (newRep * Cod) :=
(r_o' <- (oldMeth (concretize r_n) n);
ret (simplifyf (fst r_o'), snd r_o'))%comp.
Definition simplifyConstructor
(Dom : Type)
(oldConstr : constructorType oldRep Dom)
n : Comp newRep :=
(or <- oldConstr n;
ret (simplifyf or))%comp.
Variable Sig : ADTSig. (* The signature of the ADT being simplified. *)
Definition simplifyRep oldConstr oldMeths :
(forall r_o, r_o ≃ simplifyf r_o) ->
(forall r_n r_o,
(r_o ≃ r_n) ->
forall idx n,
refineEquiv (r_o'' <- oldMeths idx r_o n;
r_n' <- {r_n' | fst r_o'' ≃ r_n'};
ret (r_n', snd r_o''))
(r_o'' <- oldMeths idx (concretize r_n) n;
ret (simplifyf (fst r_o''), snd r_o''))) ->
refineADT
(@Build_ADT Sig oldRep oldConstr oldMeths)
(@Build_ADT Sig newRep
(fun idx => simplifyConstructor (oldConstr idx))
(fun idx => simplifyMethod (oldMeths idx))).
Proof.
econstructor 1 with
(AbsR := AbsR); simpl; eauto.
- unfold simplifyConstructor, refine; intros;
computes_to_inv; repeat computes_to_econstructor; try subst; eauto.
- unfold simplifyMethod; intros.
eapply H0; eauto.
Qed. *)
End SimplifyRep.
|
module Pkg3Tests
include("pkg.jl")
include("resolve.jl")
end # module
|
#!/usr/bin/env python
import cv2
import rospy
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import imutils
class MotionTracker:
def __init__(self):
self.bridge = CvBridge()
self.min_area = 50
self.max_area = 50
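        # Note: min_area/max_area are not used below; the contour filter uses hard-coded thresholds (40 and 70).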
self.last_frame = None
self.kernel = np.ones((5, 5), np.uint8)
rospy.Subscriber('/usb_cam/image_raw', Image, self._callback)
    def _callback(self, msg):
        image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
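        # Binarize the grayscale frame (inverted threshold) and denoise it with a morphological opening.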
thresh = cv2.threshold(gray,
100,
255,
cv2.THRESH_BINARY_INV)[1]
thresh = cv2.morphologyEx(thresh,
cv2.MORPH_OPEN,
self.kernel)
if self.last_frame is None:
self.last_frame = thresh
return
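        # Motion is estimated as the absolute difference between consecutive binarized frames.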
frame_delta = cv2.absdiff(self.last_frame, thresh)
self.last_frame = thresh
cnts = cv2.findContours(frame_delta,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
if cv2.contourArea(c) < 40:
continue
(x, y, w, h) = cv2.boundingRect(c)
solidity = cv2.contourArea(c) / (w * h)
if solidity < 0.5:
continue
if w > 70 and h > 70:
continue
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("frame_delta", frame_delta)
cv2.imshow("Motion-Tracker", image)
cv2.waitKey(1)
if __name__ == "__main__":
rospy.init_node('motion_tracker_node')
motion = MotionTracker()
rospy.spin()
|
--
-- The Java KeyWord Lexer
--
%Options fp=JavaKWLexer,states
%options template=KeywordTemplateF.gi
%Include
KWLexerMapF.gi
%End
%Export
abstract
assert
boolean
break
byte
case
catch
char
class
const
continue
default
do
double
enum
else
extends
false
final
finally
float
for
goto
if
implements
import
instanceof
int
interface
long
native
new
null
package
private
protected
public
return
short
static
strictfp
super
switch
synchronized
this
throw
throws
transient
true
try
void
volatile
while
BeginAction
BeginJava
EndAction
EndJava
NoAction
NullAction
BadAction
%End
%Terminals
a b c d e f g h i j k l m
n o p q r s t u v w x y z
%End
%Start
KeyWord
%End
%Notice
/.
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2007 IBM Corporation.
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// which accompanies this distribution, and is available at
// http://www.eclipse.org/legal/epl-v10.html
//
//Contributors:
// Philippe Charles ([email protected]) - initial API and implementation
////////////////////////////////////////////////////////////////////////////////
./
%End
%Globals
/.
#include "$sym_type.h"
#include "$prs_type.h"
./
%End
%Rules
-- The Goal for the parser is a single Keyword
KeyWord ::= a b s t r a c t
/.$BeginAction
$setResult($_abstract);
$EndAction
./
| a s s e r t
/.$BeginAction
$setResult($_assert);
$EndAction
./
| b o o l e a n
/.$BeginAction
$setResult($_boolean);
$EndAction
./
| b r e a k
/.$BeginAction
$setResult($_break);
$EndAction
./
| b y t e
/.$BeginAction
$setResult($_byte);
$EndAction
./
| c a s e
/.$BeginAction
$setResult($_case);
$EndAction
./
| c a t c h
/.$BeginAction
$setResult($_catch);
$EndAction
./
| c h a r
/.$BeginAction
$setResult($_char);
$EndAction
./
| c l a s s
/.$BeginAction
$setResult($_class);
$EndAction
./
| c o n s t
/.$BeginAction
$setResult($_const);
$EndAction
./
| c o n t i n u e
/.$BeginAction
$setResult($_continue);
$EndAction
./
| d e f a u l t
/.$BeginAction
$setResult($_default);
$EndAction
./
| d o
/.$BeginAction
$setResult($_do);
$EndAction
./
| d o u b l e
/.$BeginAction
$setResult($_double);
$EndAction
./
| e l s e
/.$BeginAction
$setResult($_else);
$EndAction
./
| e n u m
/.$BeginAction
$setResult($_enum);
$EndAction
./
| e x t e n d s
/.$BeginAction
$setResult($_extends);
$EndAction
./
| f a l s e
/.$BeginAction
$setResult($_false);
$EndAction
./
| f i n a l
/.$BeginAction
$setResult($_final);
$EndAction
./
| f i n a l l y
/.$BeginAction
$setResult($_finally);
$EndAction
./
| f l o a t
/.$BeginAction
$setResult($_float);
$EndAction
./
| f o r
/.$BeginAction
$setResult($_for);
$EndAction
./
| g o t o
/.$BeginAction
$setResult($_goto);
$EndAction
./
| i f
/.$BeginAction
$setResult($_if);
$EndAction
./
| i m p l e m e n t s
/.$BeginAction
$setResult($_implements);
$EndAction
./
| i m p o r t
/.$BeginAction
$setResult($_import);
$EndAction
./
| i n s t a n c e o f
/.$BeginAction
$setResult($_instanceof);
$EndAction
./
| i n t
/.$BeginAction
$setResult($_int);
$EndAction
./
| i n t e r f a c e
/.$BeginAction
$setResult($_interface);
$EndAction
./
| l o n g
/.$BeginAction
$setResult($_long);
$EndAction
./
| n a t i v e
/.$BeginAction
$setResult($_native);
$EndAction
./
| n e w
/.$BeginAction
$setResult($_new);
$EndAction
./
| n u l l
/.$BeginAction
$setResult($_null);
$EndAction
./
| p a c k a g e
/.$BeginAction
$setResult($_package);
$EndAction
./
| p r i v a t e
/.$BeginAction
$setResult($_private);
$EndAction
./
| p r o t e c t e d
/.$BeginAction
$setResult($_protected);
$EndAction
./
| p u b l i c
/.$BeginAction
$setResult($_public);
$EndAction
./
| r e t u r n
/.$BeginAction
$setResult($_return);
$EndAction
./
| s h o r t
/.$BeginAction
$setResult($_short);
$EndAction
./
| s t a t i c
/.$BeginAction
$setResult($_static);
$EndAction
./
| s t r i c t f p
/.$BeginAction
$setResult($_strictfp);
$EndAction
./
| s u p e r
/.$BeginAction
$setResult($_super);
$EndAction
./
| s w i t c h
/.$BeginAction
$setResult($_switch);
$EndAction
./
| s y n c h r o n i z e d
/.$BeginAction
$setResult($_synchronized);
$EndAction
./
| t h i s
/.$BeginAction
$setResult($_this);
$EndAction
./
| t h r o w
/.$BeginAction
$setResult($_throw);
$EndAction
./
| t h r o w s
/.$BeginAction
$setResult($_throws);
$EndAction
./
| t r a n s i e n t
/.$BeginAction
$setResult($_transient);
$EndAction
./
| t r u e
/.$BeginAction
$setResult($_true);
$EndAction
./
| t r y
/.$BeginAction
$setResult($_try);
$EndAction
./
| v o i d
/.$BeginAction
$setResult($_void);
$EndAction
./
| v o l a t i l e
/.$BeginAction
$setResult($_volatile);
$EndAction
./
| w h i l e
/.$BeginAction
$setResult($_while);
$EndAction
./
KeyWord ::= '$' bB eE gG iI nN aA cC tT iI oO nN
/.$BeginAction
$setResult($_BeginAction);
$EndAction
./
| '$' bB eE gG iI nN jJ aA vV aA
/.$BeginAction
$setResult($_BeginJava);
$EndAction
./
KeyWord ::= '$' eE nN dD aA cC tT iI oO nN
/.$BeginAction
$setResult($_EndAction);
$EndAction
./
| '$' eE nN dD jJ aA vV aA
/.$BeginAction
$setResult($_EndJava);
$EndAction
./
KeyWord ::= '$' nN oO aA cC tT iI oO nN
/.$BeginAction
$setResult($_NoAction);
$EndAction
./
KeyWord ::= '$' nN uU lL lL aA cC tT iI oO nN
/.$BeginAction
$setResult($_NullAction);
$EndAction
./
KeyWord ::= '$' bB aA dD aA cC tT iI oO nN
/.$BeginAction
$setResult($_BadAction);
$EndAction
./
aA -> a | A
bB -> b | B
cC -> c | C
dD -> d | D
eE -> e | E
gG -> g | G
iI -> i | I
jJ -> j | J
lL -> l | L
nN -> n | N
oO -> o | O
tT -> t | T
uU -> u | U
vV -> v | V
%End |
Formal statement is: lemma holomorphic_on_power [holomorphic_intros]: "f holomorphic_on s \<Longrightarrow> (\<lambda>z. (f z)^n) holomorphic_on s" Informal statement is: If $f$ is holomorphic on a set $S$, then $f^n$ is holomorphic on $S$. |
module Minecraft.Base.PreClassic.GrassBlock.Item.Export
import public Minecraft.Core.Entity.Pickup.Export
import public Minecraft.Base.PreClassic.GrassBlock.Block
import public Minecraft.Base.PreClassic.GrassBlock.Item
import public Minecraft.Base.PreClassic.GrassBlock.ItemEntity
%default total
[grassBlockItem']
Item GrassBlock.Item where
id = "minecraft:grass_block"
stackable = Just 64
givenName = \x => x.base.givenName
proj = \x => new GrassBlock.ItemEntity GrassBlock.MkItemEntity
[putGrassBlock']
Put GrassBlock.Item where
putItem self = new GrassBlock.Block GrassBlock.MkBlock
[grassBlockVtbl']
Vtbl GrassBlock.Item where
vtable (Item GrassBlock.Item) = Just grassBlockItem
vtable (Put GrassBlock.Item) = Just putGrassBlock
vtable _ = Nothing
grassBlockItem = grassBlockItem'
putGrassBlock = putGrassBlock'
grassBlockVtbl = grassBlockVtbl'
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e189m25_8limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
Formal statement is: lemma power: "f \<in> R \<Longrightarrow> (\<lambda>x. f x^n) \<in> R" Informal statement is: If $f$ belongs to $R$, then the function $x \mapsto f(x)^n$ also belongs to $R$. |
library(methods)
{{rimport}}('__init__.r', 'plot.r')
indir = {{i.indir | R}}
outdir = {{o.outdir | R}}
params = {{args.params | R}}
cutoff = {{args.cutoff | R}}
plots = {{args.plot | R}}
devpars = {{args.devpars | R}}
plink = {{args.plink | quote}}
nthread = {{args.nthread | R}}
bedfile = Sys.glob(file.path(indir, '*.bed'))
input = tools::file_path_sans_ext(bedfile)
output = file.path(outdir, basename(input))
shell$load_config(plink = plink)
params$bfile = input
params$out = output
params$threads = nthread
shell$plink(params, .raise = TRUE, .report = TRUE, .fg = TRUE)$reset()
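# Sex check (if plink produced a .sexcheck file): samples flagged PROBLEM are written to *.sex.fail.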
sexcheck_result = paste0(output, '.sexcheck')
if (file.exists(sexcheck_result)) {
sexcheck = read.table(sexcheck_result, header = T, row.names = NULL, check.names = F)
sex.sample.fail = sexcheck[which(sexcheck$STATUS == 'PROBLEM'), c('FID', 'IID'), drop=F]
write.table(sex.sample.fail, paste0(output, '.sex.fail'), col.names = F, row.names = F, sep = "\t", quote = F)
}
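# Hardy-Weinberg: SNPs with HWE p-value below cutoff$hardy.hwe fail; the GENO column is also used to derive minimum genotype-count (MINGT) metrics.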
hwe_result = paste0(output, '.hwe')
if (file.exists(hwe_result)) {
hardy = read.table(paste0(output, '.hwe'), header = T, row.names = NULL, check.names = F)
if (!is.null(cutoff$hardy.hwe)) {
hardy.fail = hardy[which(hardy$P < cutoff$hardy.hwe), 'SNP', drop = F]
write.table(hardy.fail, paste0(output, '.hardy.fail'), col.names = F, row.names = F, sep = "\t", quote = F)
}
if (is.true(plots$hardy.hwe)) {
hardy$Pval = -log10(hardy$P)
hardy$Status = "Pass"
ggs = list()
if (!is.null(cutoff$hardy.hwe)) {
hardy[which(hardy$SNP %in% hardy.fail$SNP), "Status"] = "Fail"
ggs$geom_vline = list(xintercept = -log10(cutoff$hardy.hwe), color = "red", linetype="dashed")
ggs$geom_text = list(
aes(x = -log10(cutoff$hardy.hwe), y = Inf, label = cutoff$hardy.hwe),
colour="red", angle=90, vjust = 1.2, hjust = 1.2
)
}
ggs$xlab = list("-log10(HWE p-value)")
ggs$ylab = list("Count")
ggs$theme = list(legend.position = "none")
plot.histo(
data = hardy,
x = 'Pval',
plotfile = paste0(output, '.hardy.png'),
params = list(aes(fill=Status), bins = 50),
ggs = ggs,
devpars = devpars
)
}
if (!is.null(cutoff$hardy.mingt) || is.true(plots$hardy.mingt)) {
mingt = data.frame(SNP = hardy$SNP)
gts = t(as.data.frame(lapply(hardy$GENO, function(x) as.numeric(unlist(strsplit(as.character(x), "/", fixed = TRUE))))))
mingt$MINGT_NO = apply(gts, 1, function(x) min(x))
mingt$MINGT = apply(gts, 1, function(x) min(x)/sum(x))
if (!is.null(cutoff$hardy.mingt)) {
if (cutoff$hardy.mingt < 1) {
mingt.fail = mingt[which(mingt$MINGT < cutoff$hardy.mingt), 'SNP', drop = F]
write.table(mingt.fail, paste0(output, '.mingt.fail'), col.names = F, row.names = F, sep = "\t", quote = F)
} else {
mingt.fail = mingt[which(mingt$MINGT_NO < cutoff$hardy.mingt), 'SNP', drop = F]
write.table(mingt.fail, paste0(output, '.mingt.fail'), col.names = F, row.names = F, sep = "\t", quote = F)
}
}
if (is.true(plots$hardy.mingt)) {
mingt$Status = "Pass"
ggs = list()
if (!is.null(cutoff$hardy.mingt)) {
mingt[which(mingt$SNP %in% mingt.fail$SNP), "Status"] = "Fail"
ggs$geom_vline = list(xintercept = cutoff$hardy.mingt, color = "red", linetype="dashed")
ggs$geom_text = list(
aes(x = cutoff$hardy.mingt, y = Inf, label = cutoff$hardy.mingt),
colour="red", angle=90, vjust = 1.2, hjust = 1.2
)
}
ggs$xlab = list("Min_GT")
ggs$ylab = list("Count")
ggs$theme = list(legend.position = "none")
plot.histo(
data = mingt,
x = 'MINGT',
plotfile = paste0(output, '.mingt_rate.png'),
params = list(aes(fill=Status), bins = 50),
ggs = ggs,
devpars = devpars
)
plot.histo(
data = mingt,
x = 'MINGT_NO',
plotfile = paste0(output, '.mingt_no.png'),
params = list(aes(fill=Status), bins = 50),
ggs = ggs,
devpars = devpars
)
}
}
}
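# Sample heterozygosity: het = 1 - O(HOM)/N(NM); samples more than cutoff$het SDs from the mean fail.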
het_result = paste0(output, '.het')
if (file.exists(het_result)) {
phet = read.table(het_result, header = T, row.names = NULL, check.names = F)
het = data.frame(Het = 1 - phet[, "O(HOM)"]/phet[, "N(NM)"])
rownames(het) = paste(phet$FID, phet$IID, sep = "\t")
if (!is.null(cutoff$het)) {
het.mean = mean(het$Het, na.rm = T)
het.sd = sd(het$Het, na.rm = T)
het.fail = rownames(het[!is.na(het$Het) & (het$Het < het.mean-cutoff$het*het.sd | het$Het > het.mean+cutoff$het*het.sd),, drop = F])
writeLines(het.fail, con = file(paste0(output, '.het.fail')))
}
if (is.true(plots$het)) {
het$Status = "Pass"
ggs = list()
if (!is.null(cutoff$het)) {
het[het.fail, "Status"] = "Fail"
ggs$geom_vline = list(xintercept = c(het.mean-cutoff$het*het.sd, het.mean+cutoff$het*het.sd), color = "red", linetype="dashed")
ggs$geom_text = list(
aes(x = het.mean-cutoff$het*het.sd, y = Inf, label = sprintf('mean - %ssd (%.3f)', cutoff$het, het.mean - cutoff$het*het.sd)),
colour="red", angle=90, vjust = 1.2, hjust = 1.2
)
ggs$geom_text = list(
aes(x = het.mean+cutoff$het*het.sd, y = Inf, label = sprintf('mean + %ssd (%.3f)', cutoff$het, het.mean + cutoff$het*het.sd)),
colour="red", angle=90, vjust = 1.2, hjust = 1.2
)
}
ggs$xlab = list("Sample Heterozygosity")
ggs$ylab = list("Count")
ggs$geom_vline = list(xintercept = het.mean, color = "blue", linetype="dashed")
ggs$theme = list(legend.position = "none")
ggs$geom_text = list(
aes(x = het.mean, y = Inf, label = sprintf('mean (%.3f)', het.mean)),
colour="blue", vjust = 1.5, hjust = -.1
)
plot.histo(
data = het,
plotfile = paste0(output, '.het.png'),
params = list(aes(fill=Status), bins = 50),
ggs = ggs,
devpars = devpars
)
}
}
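# Minor allele frequency: SNPs with MAF below cutoff$freq fail.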
freq_result = paste0(output, '.frq')
if (file.exists(freq_result)) {
freq = read.table(freq_result, header = T, row.names = NULL, check.names = F)
if (!is.null(cutoff$freq)) {
freq.fail = freq[which(freq$MAF < cutoff$freq), 'SNP', drop = F]
write.table(freq.fail, paste0(output, '.freq.fail'), col.names = F, row.names = F, sep = "\t", quote = F)
}
if (is.true(plots$freq)) {
freq$Status = "Pass"
ggs = list()
if (!is.null(cutoff$freq)) {
freq[which(freq$SNP %in% freq.fail$SNP), "Status"] = "Fail"
ggs$geom_vline = list(xintercept = cutoff$freq, color = "red", linetype="dashed")
ggs$geom_text = list(
aes(x = cutoff$freq, y = Inf, label = cutoff$freq),
colour="red", angle=90, vjust = 1.2, hjust = 1.2
)
}
ggs$xlab = list("MAF")
ggs$ylab = list("Count")
ggs$theme = list(legend.position = "none")
plot.histo(
data = freq,
x = 'MAF',
plotfile = paste0(output, '.freq.png'),
params = list(aes(fill=Status), bins = 50),
ggs = ggs,
devpars = devpars
)
}
}
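# Sample call rate: 1 - F_MISS from the .imiss file; samples below cutoff$missing.sample fail.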
imiss_result = paste0(output, '.imiss')
if (file.exists(imiss_result)) {
imiss = read.table(imiss_result, header = T, row.names = NULL, check.names = F)
callrate.sample = data.frame(Callrate = 1-imiss$F_MISS)
rownames(callrate.sample) = paste(imiss$FID, imiss$IID, sep = "\t")
if (!is.null(cutoff$missing.sample)) {
callrate.sample.fail = rownames(callrate.sample[callrate.sample$Callrate < cutoff$missing.sample, , drop = F])
writeLines(callrate.sample.fail, con = file(paste0(output, '.samplecr.fail')))
}
if (is.true(plots$missing.sample)) {
callrate.sample$Status = "Pass"
ggs = list()
if (!is.null(cutoff$missing.sample)) {
callrate.sample[callrate.sample.fail, "Status"] = "Fail"
ggs$geom_vline = list(xintercept = cutoff$missing.sample, color = "red", linetype="dashed")
ggs$geom_text = list(
aes(x = cutoff$missing.sample, y = Inf, label = cutoff$missing.sample),
colour="red", angle=90, vjust = 1.2, hjust = 1.2
)
}
ggs$xlab = list("Sample Call Rate")
ggs$ylab = list("Count")
ggs$theme = list(legend.position = "none")
plot.histo(
data = callrate.sample,
plotfile = paste0(output, '.samplecr.png'),
params = list(aes(fill=Status), bins = 50),
ggs = ggs
)
}
}
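# SNP call rate: 1 - F_MISS from the .lmiss file; SNPs below cutoff$missing.snp fail.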
lmiss_result = paste0(output, '.lmiss')
if (file.exists(lmiss_result)) {
lmiss = read.table(lmiss_result, header = T, row.names = NULL, check.names = F)
lmiss$Callrate = 1-lmiss$F_MISS
if (!is.null(cutoff$missing.snp)) {
callrate.snp.fail = lmiss[which(lmiss$Callrate < cutoff$missing.snp), 'SNP', drop = F]
write.table(callrate.snp.fail, paste0(output, '.snpcr.fail'), row.names = F, col.names = F, sep = "\t", quote = F)
}
if (is.true(plots$missing.snp)) {
lmiss$Status = "Pass"
ggs = list()
if (!is.null(cutoff$missing.snp)) {
lmiss[which(lmiss$Callrate < cutoff$missing.snp), "Status"] = "Fail"
ggs$geom_vline = list(xintercept = cutoff$missing.snp, color = "red", linetype="dashed")
ggs$geom_text = list(
aes(x = cutoff$missing.snp, y = Inf, label = cutoff$missing.snp),
colour="red", angle=90, vjust = 1.2, hjust = 1.2
)
}
ggs$xlab = list("SNP Call Rate")
ggs$ylab = list("Count")
ggs$theme = list(legend.position = "none")
plot.histo(
data = lmiss,
plotfile = paste0(output, '.snpcr.png'),
x = 'Callrate',
params = list(aes(fill=Status), bins = 50),
ggs = ggs,
devpars = devpars
)
}
}
|
From Undecidability.L.Tactics Require Import LTactics.
From Undecidability.L.Computability Require Import Enum.
From Undecidability.L.Functions Require Import Encoding Equality.
From Undecidability.L.Datatypes Require Import LNat Lists LProd.
Require Import Undecidability.Shared.Libs.PSL.Base Nat List Datatypes.
Set Default Proof Using "Type".
Import Nat.
(* ** Enumerability of L-terms *)
#[global]
Instance term_appCross : computableTime' appCross (fun A _ => (5,fun B _ => (length A * length B * 29 + length A * 46 + 4,tt))).
Proof.
extract. solverec. fold appCross;rewrite map_time_const,map_length.
unfold c__map, c__app. Lia.nia.
Qed.
#[global]
Instance term_exh_size : computable exh_size.
Proof.
extract.
Qed.
Definition T_nondec_helper A x : bool
:= negb (inb term_eqb x A) .
Fixpoint T_nondec (n : nat) : list term :=
match n with
| 0 => [# n]
| S n0 =>
T_nondec n0 ++
[# (S n0)] ++
filter (T_nondec_helper (T_nondec n0)) (map lam (T_nondec n0) ++ appCross (T_nondec n0) (T_nondec n0))
end.
Lemma T_nondec_correct : forall n, T n = T_nondec n.
Proof.
induction n. reflexivity.
simpl. unfold T_nondec_helper. do 2 f_equal. rewrite <-IHn.
generalize ((map lam (T n) ++ appCross (T n) (T n)): list term). generalize (T n).
induction l0. simpl. reflexivity.
simpl.
rewrite IHl0.
edestruct (inb_spec term_eqb_spec a l);dec;try (exfalso;tauto);reflexivity.
Qed.
Local Instance term_T_nondec : computable T_nondec.
Proof.
assert (computable T_nondec_helper).
extract.
extract.
Qed.
#[global]
Instance term_T : computable T.
Proof.
eapply computableExt with (x:= T_nondec). 2:exact _.
repeat intro. symmetry. apply T_nondec_correct.
Qed.
#[global]
Instance term_g_inv : computable g_inv.
Proof. unfold g_inv.
extract.
Qed.
Definition g_nondec s :=
match pos_nondec term_eqb s (T (exh_size s)) with
| Some n => n
| None => 0
end.
Lemma g_nondec_correct : forall n, g n = g_nondec n.
Proof.
unfold g, g_nondec.
setoid_rewrite pos_nondec_spec. reflexivity. apply term_eqb_spec.
Qed.
Local Instance term_g_nondec : computable g_nondec.
Proof.
unfold g_nondec.
extract.
Qed.
#[global]
Instance term_g : computable g.
Proof.
eapply computableExt with (x:= g_nondec). 2:exact _.
repeat intro. symmetry. apply g_nondec_correct.
Qed.
Local Definition f_filter A x := negb (inb (prod_eqb Nat.eqb Nat.eqb) x A).
Local Definition f_map (p : nat * nat) := let (p1, p2) := p in (p1, S p2).
Fixpoint C_nondec (n : nat) : list (nat * nat) :=
match n with
| 0 => [(0, 0)]
| S n0 => let C' := C_nondec n0 in
C' ++
(S n0, 0)
:: filter (f_filter C')
(map f_map C')
end.
Lemma C_nondec_correct : forall n, C n = C_nondec n.
Proof.
induction n. reflexivity.
simpl. do 2 f_equal. rewrite <-IHn. fold f_map.
generalize ((map f_map (C n))). generalize (C n).
induction l0;cbn. reflexivity.
rewrite IHl0. unfold f_filter at 3.
edestruct (inb_spec (prod_eqb_spec Nat.eqb_spec Nat.eqb_spec) a l); decide (~ a el l); try (exfalso;tauto);reflexivity.
Qed.
Local Instance term_C_nondec : computable C_nondec.
Proof.
assert (computable f_filter) by extract.
assert (computable f_map) by extract.
extract.
Qed.
#[global]
Instance term_C : computable C.
Proof.
eapply computableExt with (x:= C_nondec). 2:exact _.
repeat intro. symmetry. apply C_nondec_correct.
Qed.
#[global]
Instance term_eSize : computable eSize.
Proof.
extract.
Qed.
#[global]
Instance term_c : computable c.
Proof.
extract.
Qed.
|
lemma uniformly_continuous_on_norm[continuous_intros]: fixes f :: "'a :: metric_space \<Rightarrow> 'b :: real_normed_vector" assumes "uniformly_continuous_on s f" shows "uniformly_continuous_on s (\<lambda>x. norm (f x))" |
real function zeroin(ax,bx,f,tol)
real ax,bx,f,tol
c
c a zero of the function f(x) is computed in the interval ax,bx .
c
c input..
c
c ax left endpoint of initial interval
c bx right endpoint of initial interval
c f function subprogram which evaluates f(x) for any x in
c the interval ax,bx
c tol desired length of the interval of uncertainty of the
c final result ( .ge. 0.0)
c
c
c output..
c
c zeroin abscissa approximating a zero of f in the interval ax,bx
c
c
c it is assumed that f(ax) and f(bx) have opposite signs
c without a check. zeroin returns a zero x in the given interval
c ax,bx to within a tolerance 4*macheps*abs(x) + tol, where macheps
c is the relative machine precision.
c this function subprogram is a slightly modified translation of
c the algol 60 procedure zero given in richard brent, algorithms for
c minimization without derivatives, prentice - hall, inc. (1973).
c
c
real a,b,c,d,e,eps,fa,fb,fc,tol1,xm,p,q,r,s
c
c compute eps, the relative machine precision
c
eps = 1.0
10 eps = eps/2.0
tol1 = 1.0 + eps
if (tol1 .gt. 1.0) go to 10
c
c initialization
c
a = ax
b = bx
fa = f(a)
fb = f(b)
c
c begin step
c
20 c = a
fc = fa
d = b - a
e = d
30 if (abs(fc) .ge. abs(fb)) go to 40
a = b
b = c
c = a
fa = fb
fb = fc
fc = fa
c
c convergence test
c
40 tol1 = 2.0*eps*abs(b) + 0.5*tol
xm = .5*(c - b)
if (abs(xm) .le. tol1) go to 90
if (fb .eq. 0.0) go to 90
c
c is bisection necessary
c
if (abs(e) .lt. tol1) go to 70
if (abs(fa) .le. abs(fb)) go to 70
c
c is quadratic interpolation possible
c
if (a .ne. c) go to 50
c
c linear interpolation
c
s = fb/fa
p = 2.0*xm*s
q = 1.0 - s
go to 60
c
c inverse quadratic interpolation
c
50 q = fa/fc
r = fb/fc
s = fb/fa
p = s*(2.0*xm*q*(q - r) - (b - a)*(r - 1.0))
q = (q - 1.0)*(r - 1.0)*(s - 1.0)
c
c adjust signs
c
60 if (p .gt. 0.0) q = -q
p = abs(p)
c
c is interpolation acceptable
c
if ((2.0*p) .ge. (3.0*xm*q - abs(tol1*q))) go to 70
if (p .ge. abs(0.5*e*q)) go to 70
e = d
d = p/q
go to 80
c
c bisection
c
70 d = xm
e = d
c
c complete step
c
80 a = b
fa = fb
if (abs(d) .gt. tol1) b = b + d
if (abs(d) .le. tol1) b = b + sign(tol1, xm)
fb = f(b)
if ((fb*(fc/abs(fc))) .gt. 0.0) go to 20
go to 30
c
c done
c
90 zeroin = b
return
end
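c
c     illustrative usage (not part of the original routine): a minimal
c     driver sketch; the names zdemo and quad below are made up for this
c     example, which locates the root of x**2 - 2 in the interval (1,2).
c
      program zdemo
      real quad, zeroin, root
      external quad, zeroin
      root = zeroin(1.0, 2.0, quad, 1.0e-6)
      print *, 'approximate root of x**2 - 2 is ', root
      end
      real function quad(x)
      real x
      quad = x*x - 2.0
      return
      end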
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the GNU General Public License version 2. Note that NO WARRANTY is provided.
* See "LICENSE_GPLv2.txt" for details.
*
* @TAG(NICTA_GPL)
*)
theory RWHelper_DP
imports ProofHelpers_DP KHeap_DP
begin
definition eq_on :: "'a set \<Rightarrow> ('a \<Rightarrow> 'b option) \<Rightarrow> ('a \<Rightarrow> 'b option) \<Rightarrow> bool"
where "eq_on m s s' \<equiv> \<forall>ptr\<in> m. s ptr = s' ptr"
lemma eq_on_subset:
"\<lbrakk>B \<subseteq> A ; eq_on A s s' \<rbrakk> \<Longrightarrow> eq_on B s s'"
by (auto simp:eq_on_def)
definition WritingOf :: "(('a \<Rightarrow>'b option) \<Rightarrow> ('a \<Rightarrow> 'b option)) \<Rightarrow> 'a set"
where "WritingOf f \<equiv> SUP s:UNIV. {ptr. (f s) ptr \<noteq> s ptr} "
definition IsReadingEstimateOf :: "'a set \<Rightarrow> (('a \<Rightarrow> 'b option) \<Rightarrow> ('a \<Rightarrow> 'b option)) \<Rightarrow> 'a set \<Rightarrow> bool"
where "IsReadingEstimateOf m f estimate \<equiv> (\<forall>s s'. (eq_on m s s') \<longrightarrow> (eq_on estimate (f s) (f s')))"
definition ReadingEstimateOf :: "(('a \<Rightarrow>'b option) \<Rightarrow> ('a \<Rightarrow> 'b option)) \<Rightarrow> ('a set) \<Rightarrow> ('a set)"
where "ReadingEstimateOf f estimate \<equiv> Inter {m. IsReadingEstimateOf m f estimate }"
abbreviation ReadingOf :: "(('a \<Rightarrow>'b option) \<Rightarrow> ('a \<Rightarrow> 'b option)) \<Rightarrow> 'a set"
where "ReadingOf f \<equiv> ReadingEstimateOf f (WritingOf f)"
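(* Intuitively: WritingOf f collects the addresses at which f may change the
   state; IsReadingEstimateOf m f e says that states agreeing on m are mapped
   by f to states agreeing on e; ReadingOf f is the intersection of all such
   read estimates taken with e = WritingOf f. *)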
lemma eq_on_trans:
"\<lbrakk>eq_on a s sa ; eq_on a sa sb\<rbrakk> \<Longrightarrow> eq_on a s sb"
by (simp add:eq_on_def)
lemma ReadingEstimateOf_inter:
"\<lbrakk>IsReadingEstimateOf a f r; IsReadingEstimateOf b f r \<rbrakk> \<Longrightarrow> IsReadingEstimateOf (a \<inter> b) f r"
apply (clarsimp simp:IsReadingEstimateOf_def)
apply (drule_tac x = s in spec)
apply (drule_tac x = "(\<lambda>ptr. if ptr\<in>(b - a) then (s' ptr) else (s ptr))" in spec)
apply (drule_tac x = "(\<lambda>ptr. if ptr\<in>(b - a) then (s' ptr) else (s ptr))" in spec)
apply (drule_tac x = s' in spec)
apply (erule impE)
apply (simp add:eq_on_def)
apply (erule impE)
apply (simp add:eq_on_def)
apply (rule eq_on_trans)
apply simp+
done
lemma ReadingEstimateOf_read_subset:
"\<lbrakk>IsReadingEstimateOf a f w; a \<subseteq> b\<rbrakk> \<Longrightarrow> IsReadingEstimateOf b f w"
by (auto simp add:IsReadingEstimateOf_def eq_on_def)
lemma ReadingEstimateOf_write_subset:
"\<lbrakk>IsReadingEstimateOf a f w; w' \<subseteq> w\<rbrakk> \<Longrightarrow> IsReadingEstimateOf a f w'"
by (auto simp add:IsReadingEstimateOf_def eq_on_def)
lemma reading_estimateD:
"\<lbrakk>IsReadingEstimateOf a f w; eq_on a s s'\<rbrakk> \<Longrightarrow> eq_on w (f s) (f s')"
by (auto simp:IsReadingEstimateOf_def)
lemma not_writingD:
"ptr \<notin> WritingOf f \<Longrightarrow> (f s) ptr = s ptr"
by (auto simp:WritingOf_def)
lemma well_ordered_estimate:
"WritingOf f \<subseteq> writing_estimate \<Longrightarrow>
ReadingOf f \<subseteq> ReadingEstimateOf f writing_estimate"
by (auto simp add:IsReadingEstimateOf_def ReadingEstimateOf_def eq_on_subset)
lemma writing_estimate_pipe:
"\<lbrakk>WritingOf f \<subseteq> Q; WritingOf g \<subseteq> Q\<rbrakk> \<Longrightarrow> WritingOf (f\<circ>g) \<subseteq> Q"
apply (subst WritingOf_def)
apply clarsimp
apply (rule ccontr)
apply (drule(1) contra_subsetD)+
apply (drule_tac s = "g xa" in not_writingD)
apply (drule_tac s = xa in not_writingD)
apply simp
done
lemma reading_writing_estimate:
"\<lbrakk>eq_on R s s'; IsReadingEstimateOf R g (WritingOf g)\<rbrakk> \<Longrightarrow> eq_on R (g s) (g s')"
apply (subst eq_on_def)
apply clarsimp
apply (case_tac "ptr \<in> WritingOf g")
apply (clarsimp simp:IsReadingEstimateOf_def)
apply (elim impE allE)
apply simp
apply (clarsimp simp:eq_on_def)
apply (clarsimp simp:WritingOf_def eq_on_def)
done
lemma reading_estimate_pipe:
assumes reg: "IsReadingEstimateOf R g M"
and ref: " IsReadingEstimateOf R f M"
and wg: "WritingOf g \<subseteq> M"
and wf: "WritingOf f \<subseteq> M"
shows "IsReadingEstimateOf R (f \<circ> g) M"
apply (clarsimp simp: IsReadingEstimateOf_def)
apply (cut_tac ReadingEstimateOf_write_subset[OF reg wg])
apply (drule(1) reading_writing_estimate[rotated])
apply (erule reading_estimateD[OF ref])
done
definition
"IsSepWritingEstimateOf f P proj m \<equiv> \<forall>ptr v.
\<lbrace>\<lambda>s. (proj s) ptr = v \<and> P s \<and> ptr \<in> (UNIV - m) \<rbrace> f \<lbrace>\<lambda>r s. (proj s) ptr = v \<rbrace>"
definition
"IsStrongSepWritingEstimateOf f P proj m g \<equiv> \<forall>state.
\<lbrace>\<lambda>s. (proj s) = state \<and> P s \<rbrace> f
\<lbrace>\<lambda>r s. (proj s) |` m = (g state) \<and> (proj s) |` (UNIV - m) = state |` (UNIV - m)\<rbrace>"
definition
"IsSepReadingEstimateOf r f P proj m \<equiv> \<forall>substate. \<exists>g.
\<lbrace>\<lambda>s. (proj s) |` r = substate \<and> P s \<rbrace> f \<lbrace>\<lambda>r s. (proj s) |` m = g substate\<rbrace>"
lemma sep_writing_estimateD:
"\<lbrakk>IsSepWritingEstimateOf f P proj m; (r, s') \<in> fst (f s);P s \<rbrakk>
\<Longrightarrow> proj s |` (UNIV - m) = proj s' |` (UNIV - m)"
apply (rule ext)
apply (clarsimp simp: restrict_map_def
IsSepWritingEstimateOf_def)
apply (drule_tac x = x in spec)
apply (drule_tac x = "proj s x" in spec)
apply (drule(1) use_valid)
apply simp+
done
lemma sep_writing_estimate_imp:
"\<lbrakk>IsSepWritingEstimateOf f P' proj m; \<And>s. P s \<Longrightarrow> P' s\<rbrakk>
\<Longrightarrow> IsSepWritingEstimateOf f P proj m"
apply (clarsimp simp:IsSepWritingEstimateOf_def)
apply (drule_tac x = ptr in spec)
apply (drule_tac x = v in spec)
apply (erule hoare_pre)
apply clarsimp
done
lemma sep_strong_writing_estimateD:
"\<lbrakk>IsStrongSepWritingEstimateOf f P proj m g; (r, s') \<in> fst (f s);P s \<rbrakk>
\<Longrightarrow> proj s' |` m = g (proj s) \<and> proj s' |` (UNIV - m) = proj s |` (UNIV - m)"
apply (simp add:
IsStrongSepWritingEstimateOf_def)
apply (drule_tac x = "proj s " in spec)
apply (drule use_valid)
apply assumption
apply simp+
done
lemma intent_reset_twice[simp]:
"intent_reset (intent_reset z) = intent_reset z"
apply (case_tac z)
apply (simp_all add:intent_reset_def)
done
lemma largest_set:
"UNIV \<subseteq> cmps \<Longrightarrow> cmps = UNIV"
by auto
definition "sep_map_predicate p P cmps \<equiv> \<lambda>s. \<exists>obj. (sep_map_general p obj cmps s \<and> P obj)"
definition "sep_heap_dom P m = (\<forall>s. P s \<longrightarrow> dom (sep_heap s) = m)"
definition "sep_irq_node_dom P m = (\<forall>s. P s \<longrightarrow> dom (sep_irq_node s) = m)"
definition "sep_map_spec P s = (\<forall>s'. P s' \<longrightarrow> s' = s)"
lemma sep_heap_domD:
"\<lbrakk>sep_heap_dom P m; P s ; p \<notin> m\<rbrakk>
\<Longrightarrow> p \<notin> dom (sep_heap s)"
by (fastforce simp:sep_heap_dom_def)
lemma sep_heap_domD':
"\<lbrakk>sep_heap_dom P m;P s\<rbrakk>
\<Longrightarrow> m = dom (sep_heap s)"
by (fastforce simp:sep_heap_dom_def)
lemma sep_irq_node_domD':
"\<lbrakk>sep_irq_node_dom P m; P s\<rbrakk>
\<Longrightarrow> m = dom (sep_irq_node s)"
by (fastforce simp: sep_irq_node_dom_def)
lemma sep_specD:
"\<lbrakk>sep_map_spec P s; P s'\<rbrakk> \<Longrightarrow> s = s'"
by (clarsimp simp: sep_map_spec_def)
lemma sep_heap_dom_sep_map_predicate:
"m = {ptr}\<times> cmps \<Longrightarrow>
sep_heap_dom (sep_map_predicate ptr P cmps) m"
apply (clarsimp simp: sep_map_general_def
object_to_sep_state_def
sep_heap_dom_def sep_map_predicate_def
split:sep_state.splits if_splits)
apply (rule set_eqI)
apply (clarsimp simp:dom_def object_project_def split:cdl_component_id.splits)
done
lemma sep_irq_node_dom_sep_map_predicate:
"sep_irq_node_dom (sep_map_predicate ptr P cmps) {}"
apply (clarsimp simp: sep_map_general_def object_to_sep_state_def
sep_irq_node_dom_def sep_map_predicate_def
split:sep_state.splits if_split_asm)
done
lemma sep_map_rewrite_spec:
"sep_map_general = (\<lambda>p obj cmps. sep_map_predicate p (op = obj) cmps)"
"sep_map_o = (\<lambda>p obj. sep_map_predicate p (op = obj) UNIV)"
"sep_map_f = (\<lambda>p obj. sep_map_predicate p (op = obj) {Fields})"
"sep_map_c = (\<lambda>p cap. let (ptr,slot) = p in
sep_map_predicate ptr (\<lambda>obj. object_slots obj = [ slot \<mapsto> cap]) {Slot slot})"
by (fastforce simp: sep_map_predicate_def sep_any_def sep_map_general_def
sep_map_o_def sep_map_f_def sep_map_c_def split_def
split: sep_state.splits)+
lemma sep_map_rewrite_any:
"sep_any_map_c = (\<lambda>ptr state.
sep_map_predicate (fst ptr) (\<lambda>obj. \<exists>cap. object_slots obj = [(snd ptr) \<mapsto> cap]) {Slot (snd ptr)} state)"
by (fastforce simp: sep_map_predicate_def sep_map_general_def sep_any_def
sep_map_o_def sep_map_f_def sep_map_c_def split_def
split: sep_state.splits)
lemma sep_heap_dom_conj:
"\<lbrakk>sep_heap_dom P m;sep_heap_dom P' m'\<rbrakk> \<Longrightarrow> sep_heap_dom (P \<and>* P') (m \<union> m')"
apply (clarsimp simp: sep_heap_dom_def sep_conj_def
sep_disj_sep_state_def sep_state_disj_def)
apply (auto simp: map_disj_def plus_sep_state_def sep_state_add_def)
done
lemma sep_heap_dom_simps:
"sep_heap_dom (slot \<mapsto>c -) ({(fst slot,Slot (snd slot))})"
"sep_heap_dom (slot \<mapsto>c cap) ({(fst slot,Slot (snd slot))})"
apply (simp add:sep_map_rewrite_any sep_heap_dom_sep_map_predicate)
apply (simp add:sep_map_rewrite_spec sep_heap_dom_sep_map_predicate split_def)
done
lemma sep_irq_node_dom_simps:
"sep_irq_node_dom (slot \<mapsto>c -) {}"
"sep_irq_node_dom (slot \<mapsto>c cap) {}"
apply (simp add:sep_map_rewrite_any sep_irq_node_dom_sep_map_predicate)
apply (simp add:sep_map_rewrite_spec sep_irq_node_dom_sep_map_predicate split_def)
done
lemma sep_map_spec_conj:
"\<lbrakk>sep_map_spec P s; sep_map_spec P' s'\<rbrakk>
\<Longrightarrow> sep_map_spec (P \<and>* P')
(SepState (sep_heap s ++ sep_heap s')
(sep_irq_node s ++ sep_irq_node s'))"
by (clarsimp simp: sep_map_spec_def sep_conj_def
plus_sep_state_def sep_state_add_def)
lemma sep_spec_simps:
"sep_map_spec (slot \<mapsto>c cap)
(SepState [(fst slot,Slot (snd slot)) \<mapsto> (CDL_Cap (Some (reset_cap_asid cap)))]
empty)"
apply (clarsimp simp:sep_map_spec_def sep_map_c_def sep_map_general_def)
apply (case_tac s')
apply (clarsimp simp:object_to_sep_state_def)
apply (rule ext)
apply (clarsimp simp: object_project_def object_slots_object_clean
split: if_split_asm)
done
lemma sep_conj_spec:
"\<lbrakk> < P \<and>* Q > s\<rbrakk>
\<Longrightarrow> \<exists>s'. < P \<and>* op = s' > s"
by (auto simp:sep_state_projection_def sep_conj_def
sep_disj_sep_state_def sep_state_disj_def)
lemma sep_conj_spec_value:
"\<lbrakk> < P \<and>* op = s' > s; sep_heap_dom P m; p \<notin> m\<rbrakk>
\<Longrightarrow> (sep_heap s') p = (sep_heap (sep_state_projection s) |` (UNIV - m)) p"
apply (clarsimp simp:sep_state_projection_def sep_conj_def
sep_disj_sep_state_def sep_state_disj_def)
apply (drule(2) sep_heap_domD)
apply (simp add: plus_sep_state_def sep_state_add_def
split: sep_state.splits)
apply (clarsimp simp: map_add_def split:option.splits)
done
lemma write_estimate_via_sep:
assumes sep_valid: "\<And>obj Q. \<lbrace>\<lambda>s. < P \<and>* Q > s \<rbrace>
f \<lbrace>\<lambda>r s. < P' \<and>* Q > s \<rbrace>"
and sep_heap_dom: "sep_heap_dom P m"
and sep_heap_dom': "sep_heap_dom P' m"
shows "IsSepWritingEstimateOf f (\<lambda>s. < P \<and>* Q> s)
(\<lambda>s. sep_heap (sep_state_projection s)) m"
apply (clarsimp simp: valid_def IsSepWritingEstimateOf_def)
apply (drule sep_conj_spec)
apply clarsimp
apply (drule use_valid[OF _ sep_valid])
apply simp
apply (drule(1) sep_conj_spec_value[OF _ sep_heap_dom])
apply (drule(1) sep_conj_spec_value[OF _ sep_heap_dom'])
apply simp
done
lemma sep_map_dom_predicate:
"\<lbrakk>sep_heap_dom P m; sep_irq_node_dom P m';
<P \<and>* P'> b\<rbrakk>
\<Longrightarrow> P (SepState (sep_heap (sep_state_projection b) |` m)
(sep_irq_node (sep_state_projection b) |` m'))"
apply (clarsimp simp: sep_state_projection_def sep_conj_def
plus_sep_state_def sep_state_add_def)
apply (drule(1) sep_heap_domD')
apply (drule(1) sep_irq_node_domD')
apply simp
apply (case_tac x,case_tac y)
apply (clarsimp simp: sep_state_projection_def sep_conj_def
sep_disj_sep_state_def sep_state_disj_def)
apply (simp add: map_add_restrict_dom_left)
done
lemma strong_write_estimate_via_sep:
assumes sep_valid: "\<And>obj Q. \<lbrace>\<lambda>s. < P \<and>* Q > s \<rbrace>
f \<lbrace>\<lambda>r s. < P' \<and>* Q > s \<rbrace>"
and sep_heap_dom: "sep_heap_dom P m"
and sep_heap_dom': "sep_heap_dom P' m"
and sep_irq_node_dom' : "sep_irq_node_dom P' m'"
and sep_spec: "sep_map_spec P' state"
shows "IsStrongSepWritingEstimateOf f (\<lambda>s. < P \<and>* Q> s)
(\<lambda>s. sep_heap (sep_state_projection s)) m (\<lambda>s. sep_heap state)"
apply (clarsimp simp: valid_def IsStrongSepWritingEstimateOf_def)
apply (drule sep_conj_spec)
apply clarsimp
apply (drule use_valid[OF _ sep_valid])
apply simp
apply (rule conjI)
apply simp
apply (drule sep_map_dom_predicate[OF sep_heap_dom' sep_irq_node_dom'])
apply (drule sep_specD[OF sep_spec])
apply (case_tac state,simp)
apply (rule ext,clarsimp simp:restrict_map_def)
apply (drule sep_conj_spec_value)
apply (rule sep_heap_dom)
apply simp
apply (drule(1) sep_conj_spec_value[OF _ sep_heap_dom'])
apply simp
done
lemma map_eqI:
"\<lbrakk>a |` m = b |` m;a |` (UNIV - m) = b |` (UNIV - m)\<rbrakk> \<Longrightarrow> a = b"
apply (rule ext)
apply (drule_tac x = x in fun_cong)+
apply (auto simp:restrict_map_def split:if_splits)
done
lemma using_writing_estimate:
assumes we: "IsSepWritingEstimateOf f P proj m"
shows "\<lbrace>\<lambda>s. P s \<and> Q ((proj s) |` (UNIV - m)) \<rbrace> f \<lbrace>\<lambda>r s. Q ((proj s) |` (UNIV - m))\<rbrace>"
apply (clarsimp simp:valid_def)
apply (erule arg_cong[where f = Q,THEN iffD1,rotated])
apply (erule sep_writing_estimateD[OF we])
apply simp
done
lemma using_strong_writing_estimate:
assumes we: "IsStrongSepWritingEstimateOf f P proj m g"
shows
"\<lbrace>\<lambda>s. P s \<and> Q ((proj s) |` (UNIV - m) ++ g (proj s)) \<rbrace> f \<lbrace>\<lambda>r s. Q (proj s)\<rbrace>"
apply (clarsimp simp:valid_def)
apply (erule arg_cong[where f = Q,THEN iffD1,rotated])
apply (rule map_eqI[where m = m])
apply (drule(1) sep_strong_writing_estimateD[OF we,THEN conjunct1,symmetric])
apply (rule ext)
apply (clarsimp simp:restrict_map_def
map_add_def split:option.splits)
apply (frule(1) sep_strong_writing_estimateD[OF we,THEN conjunct1,symmetric])
apply (drule(1) sep_strong_writing_estimateD[OF we,THEN conjunct2,symmetric])
apply (rule ext)
apply simp
done
(* Here are some examples showing that we can derive valid rules from existing sep_logic rules *)
(* 1. We need some predicates in the future to make sure schedule will do the right thing *)
definition "scheduable_cap cap \<equiv> case cap of
RunningCap \<Rightarrow> True | RestartCap \<Rightarrow> True | _ \<Rightarrow> False"
definition tcb_scheduable :: "cdl_tcb \<Rightarrow> bool"
where "tcb_scheduable \<equiv> \<lambda>tcb. (cdl_tcb_caps tcb) tcb_pending_op_slot
= Some RunningCap \<or> (cdl_tcb_caps tcb) tcb_pending_op_slot = Some RestartCap"
abbreviation "tcb_at_heap \<equiv> \<lambda>P ptr heap.
object_at_heap (\<lambda>obj. \<exists>tcb. obj = Tcb tcb \<and> P tcb) ptr heap"
definition all_scheduable_tcbs :: "(word32 \<Rightarrow> cdl_object option) \<Rightarrow> cdl_object_id set"
where "all_scheduable_tcbs \<equiv> \<lambda>m. {ptr. tcb_at_heap tcb_scheduable ptr m}"
definition sep_all_scheduable_tcbs :: "(32 word \<times> cdl_component_id \<Rightarrow> cdl_component option) \<Rightarrow> cdl_object_id set"
where "sep_all_scheduable_tcbs m \<equiv> {ptr. \<exists>obj cap. m (ptr,Fields) = Some (CDL_Object obj) \<and> is_tcb obj
\<and> m (ptr,Slot tcb_pending_op_slot) = Some (CDL_Cap (Some cap)) \<and> scheduable_cap cap}"
lemma is_tcb_obj_type:
"is_tcb = (\<lambda>x. object_type x = TcbType)"
by (auto simp:is_tcb_def object_type_def split:cdl_object.splits)
lemma all_scheduable_tcbs_rewrite:
"all_scheduable_tcbs (cdl_objects s) =
sep_all_scheduable_tcbs (sep_heap (sep_state_projection s))"
apply (intro set_eqI iffI)
apply (clarsimp simp:all_scheduable_tcbs_def sep_state_projection_def
sep_all_scheduable_tcbs_def object_at_heap_def object_project_def
is_tcb_obj_type)
apply (clarsimp simp:object_type_def object_slots_object_clean
tcb_scheduable_def object_slots_def scheduable_cap_def)
apply (fastforce simp:object_clean_def asid_reset_def update_slots_def
reset_cap_asid_def intent_reset_def object_slots_def
split:if_splits)
apply (clarsimp simp:all_scheduable_tcbs_def sep_state_projection_def
sep_all_scheduable_tcbs_def object_at_heap_def object_project_def
is_tcb_obj_type split:option.splits)
apply (clarsimp simp:object_type_def tcb_scheduable_def
scheduable_cap_def object_slots_def object_clean_def asid_reset_def
update_slots_def intent_reset_def reset_cap_asid_def
split:cdl_object.splits cdl_cap.splits option.splits)
done
lemma update_slots_rev:
"update_slots slots obj = obj' \<Longrightarrow>
obj = update_slots (object_slots obj) obj'"
by (clarsimp simp:update_slots_def object_slots_def
split:cdl_object.splits)
lemma all_scheduable_tcbsD:
"ptr \<in> all_scheduable_tcbs (cdl_objects s)
\<Longrightarrow> tcb_at_heap tcb_scheduable ptr (cdl_objects s)"
by (simp add:all_scheduable_tcbs_def)
lemma all_scheduable_tcbsD':
"ptr \<notin> all_scheduable_tcbs (cdl_objects s)
\<Longrightarrow> \<not> tcb_at_heap tcb_scheduable ptr (cdl_objects s)"
by (simp add:all_scheduable_tcbs_def)
lemma scheduable_cap_reset_cap_asid[simp]:
"scheduable_cap (reset_cap_asid cap) = scheduable_cap cap"
by (case_tac cap,simp_all add: reset_cap_asid_def scheduable_cap_def)
lemma set_cap_all_scheduable_tcbs:
"\<lbrace>\<lambda>s. all_scheduable_tcbs (cdl_objects s) = {cur_thread} \<and> (cap = RunningCap \<or> cap = RestartCap) \<rbrace>
set_cap (cur_thread,tcb_pending_op_slot) cap
\<lbrace>\<lambda>rv s. all_scheduable_tcbs (cdl_objects s) = {cur_thread} \<rbrace>"
apply (rule hoare_name_pre_state)
apply (cut_tac all_scheduable_tcbsD[where ptr = cur_thread])
prefer 2
apply fastforce
apply (clarsimp simp:all_scheduable_tcbs_rewrite)
apply (rule hoare_pre)
apply (rule using_strong_writing_estimate
[where proj = "(\<lambda>a. sep_heap (sep_state_projection a))"])
apply (rule strong_write_estimate_via_sep[OF set_cap_wp])
apply (rule sep_heap_dom_simps sep_irq_node_dom_simps sep_spec_simps)+
apply (rule conjI)
apply (clarsimp simp:sep_map_c_conj
Let_def sep_any_exist all_scheduable_tcbs_rewrite[symmetric]
dest!:in_singleton)
apply (clarsimp simp:object_at_heap_def tcb_scheduable_def
sep_state_projection_def object_project_def)
apply (rule conjI)
apply (clarsimp simp:object_slots_def object_clean_def
update_slots_def intent_reset_def asid_reset_def
split:option.splits)
apply fastforce+
apply (rule subst,assumption)
apply (drule in_singleton)
apply (intro set_eqI iffI)
apply (clarsimp simp: sep_all_scheduable_tcbs_def sep_state_projection_def
split: if_split_asm option.splits)
apply (fastforce simp: sep_all_scheduable_tcbs_def map_add_def
sep_state_projection_def scheduable_cap_def
split: option.splits)
done
lemma sep_inv_to_all_scheduable_tcbs:
assumes sep: "\<And>P. \<lbrace><P>\<rbrace> f \<lbrace>\<lambda>r. <P>\<rbrace>"
shows "\<lbrace>\<lambda>s. P (all_scheduable_tcbs (cdl_objects s))\<rbrace> f
\<lbrace>\<lambda>r s. P (all_scheduable_tcbs (cdl_objects s))\<rbrace>"
apply (clarsimp simp:valid_def all_scheduable_tcbs_rewrite)
apply (erule use_valid)
apply (rule hoare_strengthen_post)
apply (rule sep)
apply assumption
apply simp
done
lemma validE_to_valid:
assumes validE:"\<And>E. \<lbrace>P\<rbrace>f\<lbrace>\<lambda>r. Q\<rbrace>,\<lbrace>\<lambda>r s. E\<rbrace>"
shows "\<lbrace>P\<rbrace>f\<lbrace>\<lambda>r. Q\<rbrace>"
using validE[where E = False]
apply (clarsimp simp:validE_def valid_def)
apply (drule_tac spec)
apply (erule(1) impE)
apply (drule_tac bspec)
apply assumption
apply (auto split:sum.splits)
done
end
|
[STATEMENT]
lemma odd_le_div2_imp_le_times_2: "(m+1) div 2 < (Suc n) \<and> ((m::nat) mod 2 \<noteq> 0) \<Longrightarrow> m \<le> 2*n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (m + 1) div 2 < Suc n \<and> m mod 2 \<noteq> 0 \<Longrightarrow> m \<le> 2 * n
[PROOF STEP]
by arith |
Require Import List.
Inductive emptiness := NonEmpty : emptiness | MaybeEmpty : emptiness.
Inductive char : Set := mk_char : nat -> char.
Definition string := list char.
Definition parser_guts (a : Set) := string -> list (a * nat).
Definition non_empty {a} (p : parser_guts a) : Prop :=
p nil = nil.
Definition satisfies_emptiness e {a} (p : parser_guts a) :=
match e with
| MaybeEmpty => True
| NonEmpty => non_empty p
end.
Definition parser (e : emptiness) (a : Set) : Set :=
{ p : parser_guts a | satisfies_emptiness e p }.
SearchAbout sig.
Definition returnP {a : Set} (x : a) : parser MaybeEmpty a.
refine (exist _ (fun _ => cons (x, 0) nil) _).
simpl; auto.
Defined.
Definition append_emptiness e1 e2 :=
match e1, e2 with
| MaybeEmpty, MaybeEmpty => MaybeEmpty
| _, _ => NonEmpty
end.
Definition bind_list {a b} (xs : list a) (k : a -> list b) : list b := concat (map k xs).
Definition bindP_guts {a b : Set} (p : parser_guts a) (k : a -> parser_guts b) : parser_guts b :=
fun input =>
bind_list (p input) (fun result =>
let (x, n) := result in
k x (skipn n input)).
Theorem bind_nonempty_nonempty : forall {a b} (p : parser_guts a) (k : a -> parser_guts b),
non_empty p ->
non_empty (bindP_guts p k).
Proof.
unfold non_empty in *. intros a b p k Hp.
unfold bindP_guts. rewrite Hp.
unfold bind_list. unfold map. reflexivity.
Qed.
Lemma skipn_nil_nil : forall {A} n, skipn n (@nil A) = (@nil A).
Proof.
intros.
destruct n; auto.
Qed.
Lemma bind_nil_nil : forall {a b} (l : list a) (k : a -> list b), (forall x, k x = nil) -> bind_list l k = nil.
Proof.
intros.
unfold bind_list.
induction l.
- auto.
- simpl. rewrite H. simpl. assumption.
Qed.
Theorem bind_nonempty_nonempty_2 : forall {a b} (p : parser_guts a) (k : a -> parser_guts b),
(forall x, non_empty (k x)) ->
non_empty (bindP_guts p k).
Proof.
unfold non_empty in *. intros a b p k Hk.
unfold bindP_guts.
destruct (p nil).
- auto.
- apply bind_nil_nil.
intros. destruct x. rewrite skipn_nil_nil. apply Hk.
Qed.
Definition bindP {a b : Set} {e1 e2} (p : parser e1 a) (k : a -> parser e2 b) : parser (append_emptiness e1 e2) b.
destruct p as [p_guts Hp].
exists (bindP_guts p_guts (fun x => proj1_sig (k x))).
unfold append_emptiness in *. unfold satisfies_emptiness in *.
destruct e1; destruct e2; try auto.
- apply bind_nonempty_nonempty. assumption.
- apply bind_nonempty_nonempty. assumption.
- apply bind_nonempty_nonempty_2.
intros.
destruct (k x). simpl.
apply s.
Defined.
Definition type_of {a} (x : a) := a.
Definition satisfy (p : char -> bool) : parser NonEmpty char.
exists (fun input =>
match input with
| nil => nil
| x :: xs =>
if p x
then (x, 1) :: nil
else nil
end).
reflexivity.
Defined.
Definition non_empty_str : Set := char * string.
Scheme Equality for char.
Definition mapP {a b : Set} {e} (f : a -> b) (p : parser e a) : parser e b.
destruct p as [p_guts Hp].
exists (fun input => map (fun result => match result with (x, n) => (f x, n) end) (p_guts input)).
unfold satisfies_emptiness in *.
destruct e; auto.
unfold non_empty in *.
rewrite Hp.
reflexivity.
Defined.
Fixpoint stringP' c s : parser NonEmpty unit :=
match s with
| c' :: s'' =>
bindP
(satisfy (char_beq c))
(fun _ => stringP' c' s'')
| nil =>
mapP (fun _ => tt) (satisfy (char_beq c))
end.
Definition stringP (s : non_empty_str) : parser NonEmpty unit :=
let (c, s') := s in stringP' c s'.
Definition alt_emptiness e1 e2 :=
match e1, e2 with
| NonEmpty, NonEmpty => NonEmpty
| _, _ => MaybeEmpty
end.
Definition fail {a} : parser NonEmpty a.
exists (fun _ => nil). reflexivity.
Defined.
Definition altP {e1 e2 a} (p1 : parser e1 a) (p2 : parser e2 a) : parser (alt_emptiness e1 e2) a.
Admitted.
Fixpoint manyP {a} (p : parser NonEmpty a) : parser MaybeEmpty (list a) :=
altP
(bindP p (fun x =>
bindP (manyP p) (fun xs =>
returnP (cons x xs))))
(returnP nil). |
\subsection{Aim and Objectives}
%The aim of the project is to investigate the current capabilities and future potential of artificial models for associative memory.
The aim of the project is to investigate the current capabilities of artificial models for associative memory.
To achieve this aim, the following objectives have been identified.
\begin{enumerate}
\item Outline key models for associative memory from different fields of research (e.g. Hopfield, memristors, ...).
% TODO: Remove in favour of "... to that of human potential" objective.
\item Evaluate the current capabilities of the models for associative memory both in terms of their capacity to store correlated information and in terms of the applications in which they've been used.
%\item Compare the capabilities of the ``state of the art'' models for associative memory to that of human potential.
%\item Discuss future possibilities if models for associative memory were to reach human or post-human potential.
\end{enumerate}
|
## AST1420 ``Galactic Structure and Dynamics'' Problem Set 3
#### Due on Nov. 19 at the start of class
#### Problem 1: Gravitational collapse in one dimension.
``[One] grows stale if he works all the time on insoluble problems, and a trip to the beautiful world of one dimension will refresh his imagination better than a dose of LSD.''
Gravitational $N$-body simulations of structure formation in the Universe are complex and computationally demanding.
Fortunately, some of the physics of gravitational collapse can be understood by simulating one-dimensional systems, where it is easy to reach high resolution and where gravity is simpler. Let's explore the formation of dark matter halos using gravitational $N$-body simulations in one
dimension.
(a) The Poisson equation is as usual
\begin{equation}
\nabla^2 \Phi = 4\pi G \rho\,.
\end{equation}
An important aspect of understanding gravitation is the determination of the Green's function, that is, the solution of this equation
for a density $\rho(x) \propto \delta(x)$.
In three dimensions, we showed in the notes that the gravitational potential for $\rho(\vec{x}) = M\,\delta(\vec{x})$ is the familiar $\Phi = -GM/r$ with $r = |\vec{x}|$.
- Show that the solution for $\rho(x) = A\,\delta(x)$ in one dimension is given by $\Phi(x) = 2\pi G\,A\,|x|$.
- What are the units of $A$?
\begin{equation}
\nabla^2 (2\pi G\,A\,|x|) = \Big(\frac{\partial^2 }{\partial x^2} + \frac{\partial^2 }{\partial y^2}\Big) (2\pi G\,A\,|x|)
\end{equation}
Since we're only considering one dimension
\begin{equation}
\frac{\partial^2 }{\partial x^2} (2\pi G\,A\,|x|)
\end{equation}
\begin{equation}
\frac{\partial }{\partial x} (2\pi G\,A\,\frac{x}{|x|})
\end{equation}
The derivative of $\frac{x}{|x|} = \mathrm{sign}(x)$ is $2\delta(x)$; the factor of 2 arises because the sign function jumps from $-1$ to $+1$.
\begin{equation}
2\pi G\,A\ 2 \delta(x)
\end{equation}
Setting this equal to the right hand side and solving for $\rho (x)$:
\begin{equation}
4\pi G\,A\ \delta(x) = \Big(\frac{\partial^2 }{\partial x^2} + \frac{\partial^2 }{\partial y^2}\Big) (2\pi G\,A\,|x|)
\end{equation}
\begin{equation}
4\pi G\,A\ \delta(x) = 4\pi G \rho
\end{equation}
\begin{equation}
\rho = \,A\ \delta(x)
\end{equation}
If $x$ is in units of meters then $\delta(x)$ has units of $\frac{1}{m}$. This follows from the scaling property of Dirac delta functions, $\delta(\alpha x) = \frac{1}{|\alpha|}\delta(x)$. So by dimensional analysis
\begin{equation}
\big[\frac{kg}{m^3}\big] = \,A\ \big[\frac{1}{m}\big]
\end{equation}
\begin{equation}
\,A\ = \big[\frac{kg}{m^2}\big]
\end{equation}
(b) The gravitational force corresponding to $\Phi(x) = 2\pi G\,A\,|x|$ is $F = -\mathrm{d} \Phi / \mathrm{d} x = -2\pi G\,A\,\mathrm{sign}(x)$, where $\mathrm{sign}(x)$ is the sign function that is equal to one for $x > 0$, equal to minus 1 for $x < 0$ and equal to zero for $x=0$.
Therefore, very unlike what happens in three dimensions, the gravitational force is constant as a function of distance!
In the next part, we will run an $N$-body simulation for $N$ equal-mass particles (so all $A$ are equal; you can also assume that $2\pi G = 1$). Because the gravitational force is constant with distance, the total force on any given particle $i$ in the sequence is therefore given by $A\times\,(N^+_i-N^-_i)$, where $N^+$ is the number of particles with $x > x_i$ and $N^-_i$ is the number of particles with $x < x_i$.
Write a function that for a given array of positions $x_i$ computes the total force on each particle.
Test this function by applying it to a large number of particles uniformly distributed between $-1/2$ and $1/2$, for which you should compute the analytical solution.
```python
import numpy as np
import matplotlib.pyplot as plt
import time
import seaborn as sns
sns.set()
```
```python
N = 10001
xs = np.linspace(-0.5,0.5,N)
def force_per_particle(xs):
F = np.zeros(len(xs))
for i,x in enumerate(xs):
N_plus = len(xs[xs>x])
N_minus = len(xs[xs<x])
F[i] = N_plus - N_minus
return F
s = time.time()
F = force_per_particle(xs)
print(f"Time taken: {time.time()-s:.2} s")
plt.plot(xs,F)
plt.xlabel("x")
plt.ylabel("F(x)")
plt.title("Force on each Particle (Loop Method)");
```
```python
N = 10001
xs = np.linspace(-0.5,0.5,N)
def force_per_particle_matrix(xs):
x1,x2 = np.meshgrid(xs,xs,copy=False)
x = x1 - x2
d = np.sign(x)
F = - np.sum(d,axis=0)
return F
s = time.time()
F = force_per_particle_matrix(xs)
print(f"Time taken: {time.time()-s:.2} s")
plt.plot(xs,F)
plt.xlabel("x")
plt.ylabel("F(x)")
plt.title("Force on each Particle (Matrix Method)");
```
```python
N = 10001
xs = np.linspace(-0.5,0.5,N)
def force_per_particle_sort(xs):
F = np.zeros(len(xs))
sortedIndices = np.argsort(xs)
for newIndex,originalIndex in enumerate(sortedIndices):
F[originalIndex] = (len(xs) - newIndex -1) - (newIndex)
return F
s=time.time()
F = force_per_particle_sort(xs)
print(f"Time taken: {time.time()-s:.2} s")
plt.plot(xs,F)
plt.xlabel("x")
plt.ylabel("F(x)")
plt.title("Force on each Particle (Sorting Method)");
```
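The sorting idea above can also be written without the Python loop: once each particle's rank in the sorted order is known, $N^+_i - N^-_i = N - 1 - 2\,\mathrm{rank}_i$. The block below is an optional fully vectorized sketch of that variant (it assumes distinct positions and the same $A = 1$ convention used above).
```python
import numpy as np

def force_per_particle_vec(xs):
    # Rank of each particle in the sorted order (0-based); assumes distinct positions.
    ranks = np.argsort(np.argsort(xs))
    # N+ = N - 1 - rank and N- = rank, so F = N+ - N- = N - 1 - 2*rank (with A = 1).
    return len(xs) - 1 - 2 * ranks

# Quick check against the analytic result F(x) = -2 N x for the uniform distribution:
xs = np.linspace(-0.5, 0.5, 10001)
F = force_per_particle_vec(xs)
```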
```python
plt.figure(figsize=(5,2))
plt.hlines(1,xmin=0,xmax=1,lw=5,alpha=0.8,color="blue")
plt.vlines([0.005,0.5,0.99999],0.8,1,color="blue")
plt.vlines(0.35,0.75,1,color='r')
plt.text(-.06,0.5,r'$- \frac{1}{2}$',fontsize=16,color="blue")
plt.text(1-.02,0.5,r'$\frac{1}{2}$',fontsize=16,color="blue")
plt.text(0.48,0.5,r'$0$',fontsize=16,color="blue")
plt.text(0.33,0.5,r'$x_i$',fontsize=16,color="red")
plt.ylim(0,2)
plt.axis('off');
```
The analytic solution:
$N_i^+ = \#\{x : x > x_i\} = \big(\tfrac{1}{2} - x_i\big) \, N $
$N_i^- = \#\{x : x < x_i\} = \big(x_i - (-\tfrac{1}{2} )\big) \, N $
$F(x) = A\times\,(N^+_i-N^-_i)$
$F(x) = A\times\,((\frac{1}{2} - x_i) \; N - \big(x_i - (-\frac{1}{2} )\big) \; N)$
$F(x) = A N (\frac{1}{2} - x_i - x_i -\frac{1}{2} )$
$F(x) = A N (-2 x_i )$
```python
N = 10001
xs = np.linspace(-0.5,0.5,N)
s = time.time()
analytic_F = (- 2 * xs) * len(xs)
print(f"Time taken: {time.time()-s:.2} s")
plt.plot(xs,analytic_F)
plt.xlabel("x")
plt.ylabel("F(x)")
plt.title("Force per Particle (Analytic Solution)");
```
(c) Now write the second part of the $N$-body code by writing a leapfrog integrator that integrates all $N$ particles forward for a time step $\delta t$ using the force function that you wrote in (b).
Use this $N$-body code to integrate the following system of initial conditions
\begin{align}
x & \in [-\pi/2,\pi/2]\\
v &= -0.001\,\sin(x)\\
N & = 10,001\,,
\end{align}
where the $x$ are evenly spaced in the interval given. These initial conditions are similar to those of a dark matter halo that has just started collapsing after decoupling from the Hubble expansion.
- Integrate this system forward for a total time of $t = 200$ with $\delta t = 0.0005$.
- Plot the phase-space distribution $(x,v)$ at times $t=0, 18, 25, 40, 132$, and $200$.
- Describe what you see happening.
```python
import numpy as np
import time
# This function took the shortest amount of time, altogether about 30 minutes.
def force_per_particle_sort(xs):
# Container
F = np.zeros(len(xs))
# Get the indices that would sort an array
sortedIndices = np.argsort(xs)
# Fill the container
for newIndex,originalIndex in enumerate(sortedIndices):
# Force = N+ - N-
F[originalIndex] = (len(xs) - newIndex -1) - (newIndex)
return F
# This fn allows running everything at once or in segments.
def run(t_start,t_stop,xs,As,save_option):
# Time Keeper
print(f"Running {t_start} to {t_stop}")
start = time.time()
# Time interval
dt = 0.0005
ts = np.arange(t_start/dt,t_stop/dt + dt)
# Initial Condition
vs = -0.001 * np.sin(xs)
# Containers
x_ = []
v_ = []
# Leap Frog Integrator
for i,t in enumerate(ts):
if i%1000 == 0:
print(t/ts[-1]*100," complete")
xs = xs + vs * dt + .5 * As * dt **2
As_new = force_per_particle_sort(xs)
vs = vs + .5 * (As + As_new) * dt
As = As_new
# Save specific time steps
if np.isin(t*dt,save_option):
x_.append(xs)
v_.append(vs)
# Save everything out to a text file so it doesn't need to take the whole run time every time.
np.savetxt(f"x_/x_tstart_{t_start}_tstop_{t_stop}.txt",x_)
np.savetxt(f"v_/v_tstart_{t_start}_tstop_{t_stop}.txt",v_)
print(f"Time {time.time() - start}")
# Total Time
t = 200
N = 10001
xs = np.linspace(-np.pi/2,np.pi/2,N)
As = force_per_particle_sort(xs)
go = False
if go:
assignment_list = [0,18,25,40,132,200]
my_list = list(np.arange(10,35,.5))
run(0,36,xs,As,my_list)
```
```python
x = np.loadtxt('assign_x_tstart_0_tstop_200.txt')
v = np.loadtxt('assign_v_tstart_0_tstop_200.txt')
save_times = [0,18,25,40,132,200]
f, axes = plt.subplots(2,3,figsize=(20,15))
for i, ax in enumerate(axes.flatten()):
ax.scatter(x[i],v[i],s=5)
ax.set_title(save_times[i])
ax.set_xlim(-2,2)
ax.set_ylim(-200,200)
ax.set_xlabel('X')
ax.set_ylabel('V')
```
```python
my_list = list(np.arange(10,35,.5))
x = np.loadtxt('x_/x_tstart_0_tstop_36.txt')
v = np.loadtxt('v_/v_tstart_0_tstop_36.txt')
save_times = my_list
f, axes = plt.subplots(len(x),1,figsize=(5,20))
for i, ax in enumerate(axes):
ax.scatter(x[i],v[i],s=5)
ax.set_title(save_times[i])
ax.set_xlim(-2,2)
ax.set_ylim(-200,200)
ax.set_xlabel('X')
ax.set_ylabel('V')
```
```python
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import numpy as np
x = np.loadtxt('x_/x_tstart_0_tstop_36.txt')
v = np.loadtxt('v_/v_tstart_0_tstop_36.txt')
my_list = list(np.arange(10,35,.5))
f = plt.figure(figsize=(10,5))
scat = plt.scatter(x[0],v[0],s=5)
text = plt.text(-2.5, 175, str(my_list[0]))
plt.xlim(-3,3)
plt.ylim(-200,200)
def update(i):
scat.set_offsets(np.c_[x[i],v[i]])
text.set_text(str(my_list[i]))
return scat,
anim = FuncAnimation(f,update,blit=True,frames=49)
anim.save('SuperFunExploreTime.gif', writer='imagemagick', fps=10);
```
(d) The simulation that you ran is the same as that shown in the GIF at the start of chapter 6 in the notes, but the simulation in the notes solves the $N$-body problem exactly (which is possible in 1D, because the force does not depend on distance).
Compare your simulation's output to the GIF and discuss why your simulation might (dis)agree with the GIF.
What I find surprising is how quickly it explodes. The GIF in the notes takes a lot longer to come apart and looks less chaotic when it does. At some point the GIF in the notes appears to stop moving, which motivates the approximation that it is static at later times; my simulation shows a lot more motion, although it might have settled down if I had let it run longer (I kept the runs short so that the output files were small enough to upload to GitHub).
#### Problem 2: Recycling in chemical evolution and the abundance of deuterium.
In the class notes, we ignored the effect of recycling of unprocessed material by stars back into the ISM. That is, we assumed that gas consumed by star formation was fully lost from the ISM, except for the enriched ejecta that we described using the yield parameter $p$. However, in reality, winds from massive stars return a significant amount of mass to the ISM that was not changed by the star and thus returns mass to the ISM at the star's birth abundance $Z$.
Recycling is especially important to consider if we want to investigate the abundance of deuterium in the ISM. Deuterium is an interesting element because it is only destroyed by stars without being created (to a good approximation, deuterium is only produced during Big Bang nucleosynthesis [BBN]). As such, deuterium is a good tracer of whether or not gas has ever been in a star. For example, if most of the gas in the present-day Milky Way ISM was previously processed in stars, then the deuterium abundance in the ISM should be very small, because all of the deuterium should have been destroyed. The deuterium abundance of the ISM can be determined using UV spectroscopy and it is found to be approximately $90\%$ of the primordial BBN abundance. Let's see what we can learn about chemical evolution from this basic observation!
In this problem, we will denote the mass of the ISM in deuterium as $M_D$ and the fraction of the ISM in deuterium as $X_D = M_D/M_g$. The primordial deuterium abundance is $X_D^P$ (you don't need to know the actual value, but it is $\approx 2.6\times 10^{-5}$). The observations of deuterium of the ISM show that today $X_D/X_D^P \approx 0.9$. All of the following questions can be solved analytically using the same techniques as used in the notes for the closed/leaky/accreting box models.
(a) Extend the closed box model to include the effect of recycling, which we will model as happening instantaneously. Assume that a fraction $r$ of the mass turned into stars is returned to ISM at the ISM's abundance at the time of the formation of the star. Specifically, derive the relation between $Z$, $p$, and the gas fraction as in Equation (13.7) in the presence of recycling. Discuss.
Some gas is locked up in stars, and some is returned to the ISM by winds.
\begin{equation}
\dot{M}_g=-\dot{M}_* + r\dot{M}_*=-(1-r)\dot{M}_*
\end{equation}
The change in the metal mass splits into the metals trapped in stars and the processed, enriched gas returned by supernovae.
\begin{equation}
\dot{M}_z = \dot{M}_{*,-} + \dot{M}_{*,+}
\end{equation}
The first term on the right accounts for the metals locked up in stars along with the consumed gas
\begin{equation}
\dot{M}_{*,-} = Z\dot{M}_g = -Z(1-r)\dot{M}_*
\end{equation}
The second term on the right is the newly produced metals, where $p$ is the yield of enriched material returned per unit mass of stars formed.
\begin{equation}
\dot{M}_{*,+} = p \dot{M}_*
\end{equation}
Plugging these two terms back in
\begin{equation}
\dot{M}_z = -Z(1-r)\dot{M}_* + p \dot{M}_* = \big( -Z(1-r) + p\big) \dot{M}_*
\end{equation}
Solving the first equation for $\dot{M}_*$
\begin{equation}
\dot{M}_*=-\frac{1}{(1-r)}\dot{M}_g
\end{equation}
\begin{equation}
\dot{M}_z = \big( -Z(1-r) + p\big) \big(-\frac{1}{(1-r)}\dot{M}_g\big)
\end{equation}
\begin{equation}
\dot{M}_z = \big(Z - \frac{p}{1-r}\big) \dot{M}_g
\end{equation}
Metallicity is defined as the mass of metals divided by the mass of gas
\begin{equation}
Z=\frac{M_Z}{M_g}
\end{equation}
Taking the derivative
\begin{equation}
\dot{Z}=\frac{\dot{M_Z}}{M_g}-Z\frac{\dot{M_g}}{M_g}
\end{equation}
Plugging in our equation for $\dot{M}_z$
\begin{equation}
\dot{Z}=\big(Z - \frac{p}{1-r}\big)\frac{\dot{M_g}}{M_g}-Z\frac{\dot{M_g}}{M_g}
\end{equation}
\begin{equation}
\dot{Z}=- \frac{p}{1-r}\frac{\dot{M_g}}{M_g}
\end{equation}
The integral of which is
\begin{equation}
\boxed{Z(t)=- \frac{p}{1-r}\ln\frac{M_g(t)}{M_g(0)}}
\end{equation}
- Metallicity still increases as the gas is consumed, but recycling reduces the net rate at which gas is locked up in stars, so at a given gas fraction $Z(t)$ is larger: the effective yield is $p/(1-r) > p$.
- If r = 1, the result is formally undefined ($\infty \times 0$): everything that goes into stars is immediately put back into the gas, so the gas mass never decreases and the relation breaks down.
- If r = 0, winds are simply not considered and you recover the original expression from Equation (13.7).
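A minimal sketch of the boxed relation, plotting $Z$ against the gas fraction for a few recycling fractions; the yield value $p$ used here is an arbitrary placeholder for illustration, not a number taken from the notes.
```python
import numpy as np
import matplotlib.pyplot as plt

# Closed-box relation with recycling: Z = -p/(1-r) * ln(mu), mu = M_g(t)/M_g(0).
p = 0.01            # placeholder yield, for illustration only
mu = np.linspace(0.05, 1.0, 200)
for r in [0.0, 0.2, 0.4]:
    plt.plot(mu, -p / (1 - r) * np.log(mu), label=f"r = {r}")
plt.xlabel(r"gas fraction $M_g(t)/M_g(0)$")
plt.ylabel("Z")
plt.legend();
```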
(b) Now work out the closed box model with recycling for deuterium. Remember that no deuterium is created by stars. What is the relation between $X_D/X_D^P$, $r$, and the gas fraction? For the current ISM's values of the deuterium abundance and the gas fraction, what recycling fraction do you need to match the two?
The definition of $X_D$ is given by
\begin{equation}
X_D = \frac{M_D}{M_g}
\end{equation}
Taking the derivative using the quotient rule
\begin{equation}
\dot{X}_D = \frac{\dot{M}_D}{M_g} - X_D \frac{\dot{M}_g}{M_g}
\end{equation}
The rate of change of the deuterium mass is governed by the deuterium locked up in stars. No deuterium is returned in the supernova yields, and (unlike the metals) none survives in the returned winds either, since any deuterium that enters a star is burned; hence
\begin{equation}
\dot{M}_D = -X_D\dot{M}_{*}
\end{equation}
In the above problem we found:
\begin{equation}
\dot{M}_*=-\frac{1}{(1-r)}\dot{M}_g
\end{equation}
\begin{equation}
\dot{M}_D = X_D\frac{1}{(1-r)}\dot{M}_g
\end{equation}
\begin{equation}
\dot{X}_D = (\frac{1}{(1-r)} - 1)X_D \frac{\dot{M}_g}{M_g}
\end{equation}
\begin{equation}
\dot{X}_D = \frac{r}{(1-r)}X_D \frac{\dot{M}_g}{M_g}
\end{equation}
\begin{equation}
\frac{\dot{X}_D}{X_D} = \frac{r}{(1-r)} \frac{\dot{M}_g}{M_g}
\end{equation}
Taking the integral as we did before
\begin{equation}
ln \frac{X_D(t)}{X_D(0)} = \frac{r}{(1-r)} \ln\frac{M_g(t)}{M_g(0)}
\end{equation}
$X_D(0)= X_D^P$
\begin{equation}
\frac{X_D(t)}{X_D^P} = \Big(\frac{M_g(t)}{M_g(0)}\Big)^{r/(1-r)}
\end{equation}
\begin{equation}
\boxed{X_D(t) = X_D^P \Big(\frac{M_g(t)}{M_g(0)}\Big)^{r/(1-r)}}
\end{equation}
As recycling increases, the exponent $r/(1-r)$ increases; since the gas fraction is always less than 1, $X_D$ therefore decreases as recycling increases. In the limiting case $r=1$ you have complete recycling: all of the mass that forms stars is immediately returned, so the gas is never depleted and $X_D(t) = X_D^P \Big(\frac{M_g(0)}{M_g(0)}\Big)^{r/(1-r)}=X_D^P$
Solving for $r$
\begin{equation}
\frac{X_D(t_{now})}{X_D^P} = \Big(\frac{M_g(t_{now})}{M_g(0)}\Big)^{r/(1-r)}
\end{equation}
\begin{equation}
ln \frac{X_D(t_{now})}{X_D^P} = {r/(1-r)} ln \frac{M_g(t_{now})}{M_g(0)}
\end{equation}
\begin{equation}
r = \frac{ln \Big( \frac{X_D(t_{now})}{X_D^P}\Big)/ ln\Big(\frac{M_g(t_{now})}{M_g(0)}\Big)}{ln \Big( \frac{X_D(t_{now})}{X_D^P}\Big)/ ln\Big(\frac{M_g(t_{now})}{M_g(0)}\Big) + 1}
\end{equation}
The problem gives us the term $\frac{X_D(t_{now})}{X_D^P}=0.9$.
From the notes we know that the current gas fraction in the Milky Way is $\frac{M_g(t_{now})}{M_g(0)} = 0.1$
\begin{equation}
\boxed{r = \frac{\ln 0.9 / \ln 0.1}{\ln 0.9 / \ln 0.1 + 1} \approx 0.0437 }
\end{equation}
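A quick numerical check of the boxed value (a sketch, using only the two observed ratios quoted above):
```python
import numpy as np

# r = q / (q + 1), with q = ln(X_D/X_D^P) / ln(M_g(t_now)/M_g(0)).
q = np.log(0.9) / np.log(0.1)
r = q / (q + 1)
print(r)   # approximately 0.0437
```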
(c) Stellar evolution tells us that $r \approx 0.4$. Compare this to the value you found in (b). If they are significantly different, explain the physical reason for this in the context of the closed box model.
The value we found in (b) is $r\approx 0.0437$, an 89% difference from the given value of $r \approx 0.4$, similar to the 83% difference found for $p$ in the notes. By assumption, the closed-box model neglects galactic outflows, which prove to have a non-negligible effect when comparing the model to observations.
(d) In class, we discussed how the accreting box model is successful at explaining the absence of a large number of very metal poor stars in the solar neighborhood (the G dwarf problem). Work out the evolution of the deuterium abundance $X_D/X_D^P$ in the accreting box model (with $\eta=0$), remembering that the specific model we looked at has a constant gas mass and that any inflowing gas has the primordial deuterium abundance. What recycling fraction $r$ do you need now to match $X_D/X_D^P$ given the observed gas fraction
With the assumption that $\dot{M}_g=0$ it must also be true that $\dot{M}_{inflow}=r\dot{M}_{*}$. That is, the amount of material being accreted is equal to the material that remains in stars.
From that, we can say that the change in the mass of deuterium depends on what deuterium is being removed into stars and what deuterium is being added back into the system at the primordial abundance.
\begin{equation}
\dot{M}_{D} = -\dot{M}_{*} X_D + \dot{M}_{inflow} X_D^P = -\dot{M}_{*} X_D + r\dot{M}_{*} X_D^P
\end{equation}
\begin{equation}
\dot{M}_{D} = (r X_D^P - X_D )\dot{M}_{*}
\end{equation}
Again taking the derivative of $X_D=M_D/M_G$
\begin{equation}
\dot{X}_D = \frac{\dot{M}_D}{M_g} - X_D \frac{\dot{M}_g}{M_g}
\end{equation}
Since we've assumed $\dot{M}_g=0$
\begin{equation}
\dot{X}_D = \frac{\dot{M}_D}{M_g} = \frac{\dot{M}_{*}}{M_g} (r X_D^P - X_D )
\end{equation}
If I did our typical maneuver of replacing $\dot{M}_{*}$ with an expression involving $\dot{M}_{g}$, I'd have to set that again to 0.
Instead, inspired by the notes, I change variables and work with $\frac{\dot{X}_D}{\dot{M}}$, where $M$ is simply $M=M_g+M_*+M_{inflow}$. Its derivative is $\dot{M}=\dot{M_g}+\dot{M_*}+\dot{M_{inflow}}$; the first term on the right is again $0$, so $\dot{M}=\dot{M_*}+r\dot{M}_{*}=(1+r)\dot{M}_{*}$.
\begin{equation}
\frac{\dot{X}_D}{\dot{M}} = \frac{\dot{M}_D/M_g}{(1+r)\dot{M}_{*}} = \frac{ (r X_D^P - X_D )\dot{M}_{*} /M_g}{(1+r)\dot{M}_{*}}
\end{equation}
\begin{equation}
\frac{\dot{X}_D}{\dot{M}} = \frac{ (r X_D^P - X_D )}{(1+r)M_g}
\end{equation}
\begin{equation}
\frac{\dot{X}_D}{(r X_D^P - X_D )} = \frac{ \dot{M}}{(1+r)M_g}
\end{equation}
Taking the integral and moving the sign
\begin{equation}
\ln (X_D - r X_D^P) = - \frac{ M}{(1+r)M_g}
\end{equation}
\begin{equation}
X_D = r X_D^P + C e^{- \frac{ M}{(1+r)M_g}}
\end{equation}
To determine the constant we'll use the initial conditions: at the start of this process all we had was gas, so $ M=M_g$ and $X_D=X_D^P$
\begin{equation}
C = X_D^P(1-r) e^{\frac{ 1}{(1+r)}}
\end{equation}
\begin{equation}
X_D = r X_D^P + X_D^P(1-r) e^{\frac{ 1}{(1+r)}} e^{- \frac{ M}{(1+r)M_g}}
\end{equation}
\begin{equation}
X_D = X_D^P(r + (1-r) e^{\frac{ 1}{(1+r)}} e^{- \frac{ M}{(1+r)M_g}})
\end{equation}
\begin{equation}
\boxed{\frac{X_D}{X_D^P} = r + (1-r) e^{\frac{ 1}{(1+r)}(1-\frac{M}{M_g})} }
\end{equation}
Using $M/M_g\approx 10$ and $\frac{X_D}{X_D^P} = 0.9$ and solving numerically in Mathematica we get:
Comparing to $r\approx 0.4$, in the closed box model we were a factor of 10 wrong, in the accreting model where we assume the amount accreted is the amount consumed: we're a factor of 2 wrong.
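For reference, the numerical solve can also be done in Python instead of Mathematica; the sketch below applies SciPy's `brentq` root finder to the boxed relation from (d) with the same inputs ($M/M_g \approx 10$, $X_D/X_D^P = 0.9$).
```python
import numpy as np
from scipy.optimize import brentq

# Accreting-box relation from (d): X_D/X_D^P = r + (1 - r) * exp((1 - M/M_g)/(1 + r)).
def residual(r, ratio=0.9, M_over_Mg=10.0):
    return r + (1 - r) * np.exp((1 - M_over_Mg) / (1 + r)) - ratio

r_accreting = brentq(residual, 0.0, 1.0)
print(r_accreting)
```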
(e) Write the relation that you found in (d) in terms of the metallicity $Z$ of the ISM rather than the total-to-gas ratio $M/M_g$. Note that for this you will need to adjust the notes' accreting box model to take into account recycling (again use the $\eta=0$ version to keep things simpler). Try to obtain a very simple relation using the fact that $Z_\odot/p \ll 1$.
Fun fact: the relation that you derive actually holds very generally, so there is a direct relation between the metallicity of the ISM and the ratio $X_D/X_D^P$, regardless of the details of inflow and outflow and the star formation history.
Since the accreting box has $\dot{M}_g=0$,
\begin{equation}
\dot{X}_D=\frac{\dot{M}_D}{M_g} \;\;\;\; \dot{Z}=\frac{\dot{M}_Z}{M_g}
\end{equation}
\begin{equation}
\frac{\dot{X}_D}{\dot{Z}} = \frac{\dot{M}_D}{\dot{M}_Z}
\end{equation}
I derived an expression for $\dot{M}_D$ in the last part. $\dot{M}_Z$ is the sum of the metals locked into stars, the metals returned at the birth metallicity, and the newly produced metals that are returned:
\begin{equation}
\dot{M}_Z=-Z\dot{M}_*+rZ\dot{M}_*+p\dot{M}_* = (rZ-Z+p)\dot{M}_*
\end{equation}
Plugging this in
\begin{equation}
\frac{\dot{X}_D}{\dot{Z}} = \frac{-\dot{M}_{*} X_D + r\dot{M}_{*} X_D^P}{(rZ-Z+p)\dot{M}_*}
\end{equation}
Reducing and rearranging
\begin{equation}
\frac{\dot{X}_D}{-X_D + r X_D^P} = \frac{\dot{Z}}{(rZ-Z+p)}
\end{equation}
\begin{equation}
-\ln(r X_D^P-X_D)=\frac{1}{r-1}\ln \Big((r-1)Z+p\Big)
\end{equation}
\begin{equation}
\ln(r X_D^P-X_D)=\frac{1}{1-r}\ln \Big((r-1)Z+p\Big)
\end{equation}
\begin{equation}
r X_D^P-X_D=C\Big((r-1)Z+p\Big)^{\frac{1}{1-r}}
\end{equation}
To find the integration constant $C$, use the initial condition $X_D=X_D^P$ at $Z=0$:
\begin{equation}
r X_D^P-X_D^P=Cp^{\frac{1}{1-r}}
\end{equation}
\begin{equation}
X_D^P(r-1)=C p^{\frac{1}{1-r}}
\end{equation}
\begin{equation}
C = p^{\frac{1}{r-1}} X_D^P(r-1)
\end{equation}
Plugging $C$ back in
\begin{equation}
r X_D^P-X_D=p^{\frac{1}{r-1}} X_D^P(r-1)\Big((r-1)Z+p\Big)^{\frac{1}{1-r}}
\end{equation}
\begin{equation}
X_D=r X_D^P-p^{\frac{1}{r-1}} X_D^P(r-1)\Big((r-1)Z+p\Big)^{\frac{1}{1-r}}
\end{equation}
\begin{equation}
\frac{X_D}{X_D^P}=r-p^{\frac{1}{r-1}} (r-1)\Big((r-1)Z+p\Big)^{\frac{1}{1-r}}
\end{equation}
Pulling a $p$ out from the parentheses
\begin{equation}
\frac{X_D}{X_D^P}=r-p^{\frac{1}{r-1}} (r-1)\Big((r-1)\frac{Z}{p}+1\Big)^{\frac{1}{1-r}} p^{\frac{1}{1-r}}
\end{equation}
\begin{equation}
\frac{X_D}{X_D^P}=r- (r-1)\Big((r-1)\frac{Z}{p}+1\Big)^{\frac{1}{1-r}}
\end{equation}
For small $Z/p$ we can use the taylor expansion $(1+x)^n=1+nx$
\begin{equation}
\frac{X_D}{X_D^P}=r- (r-1)(1+\frac{1}{1-r}(r-1)\frac{Z}{p})
\end{equation}
\begin{equation}
\frac{X_D}{X_D^P}=r- (r-1)(1-\frac{1}{r-1}(r-1)\frac{Z}{p})
\end{equation}
\begin{equation}
\frac{X_D}{X_D^P}=r- (r-1)(1-\frac{Z}{p})
\end{equation}
\begin{equation}
\frac{X_D}{X_D^P}=r-(r-1)+(r-1)\frac{Z}{p}=1-(1-r)\frac{Z}{p}
\end{equation}
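A short sketch comparing the exact relation with this small-$Z/p$ linearization; $r = 0.4$ is the stellar-evolution value quoted in (c), and the grid of $Z/p$ values is purely illustrative.
```python
import numpy as np

r = 0.4
Z_over_p = np.linspace(0.0, 0.2, 5)                        # illustrative values, Z/p << 1
exact = r - (r - 1) * ((r - 1) * Z_over_p + 1) ** (1 / (1 - r))
linear = 1 - (1 - r) * Z_over_p
print(np.c_[Z_over_p, exact, linear])                      # the two columns agree closely
```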
One take-away from this problem should be that most of the hydrogen in
the disk of the Milky Way has never been inside of a
star. While all of the heavy elements in your body were produced in
stars, the hydrogen atoms that make up the $70\%$ or so of water in
your body were actually created in the Big Bang!
|
lemma has_contour_integral_shiftpath_D: assumes "(f has_contour_integral i) (shiftpath a g)" "valid_path g" "pathfinish g = pathstart g" "a \<in> {0..1}" shows "(f has_contour_integral i) g" |
(* Title: HOL/HOLCF/IOA/Storage/Action.thy
Author: Olaf Müller
*)
section \<open>The set of all actions of the system\<close>
theory Action
imports Main
begin
datatype action = New | Loc nat | Free nat
lemma [cong]: "\<And>x. x = y \<Longrightarrow> case_action a b c x = case_action a b c y"
by simp
end
|
lemma bounded_plus_comp: fixes f g::"'a \<Rightarrow> 'b::real_normed_vector" assumes "bounded (f ` S)" assumes "bounded (g ` S)" shows "bounded ((\<lambda>x. f x + g x) ` S)" |
[STATEMENT]
lemma not_\<omega>\<^sub>z_lt_of_nat[simp]: "\<not> \<omega>\<^sub>z < of_nat n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> \<omega>\<^sub>z < of_nat n
[PROOF STEP]
by (simp add: not_less) |
# Portfolio Selection, part 1
After the discussion of the agents' preferences, we now turn to their consumption/investment portfolio. Let the security market have a **payoff matrix** $X\DeclareMathOperator*{\argmin}{argmin}
\DeclareMathOperator*{\argmax}{argmax}
\DeclareMathOperator*{\plim}{plim}
\newcommand{\using}[1]{\stackrel{\mathrm{#1}}{=}}
\newcommand{\ffrac}{\displaystyle \frac}
\newcommand{\asim}{\overset{\text{a}}{\sim}}
\newcommand{\space}{\text{ }}
\newcommand{\bspace}{\;\;\;\;}
\newcommand{\QQQ}{\boxed{?\:}}
\newcommand{\void}{\left.\right.}
\newcommand{\Tran}[1]{{#1}^{\mathrm{T}}}
\newcommand{\d}[1]{\displaystyle{#1}}
\newcommand{\CB}[1]{\left\{ #1 \right\}}
\newcommand{\SB}[1]{\left[ #1 \right]}
\newcommand{\P}[1]{\left( #1 \right)}
\newcommand{\abs}[1]{\left| #1 \right|}
\newcommand{\norm}[1]{\left\| #1 \right\|}
\newcommand{\dd}{\mathrm{d}}
\newcommand{\Exp}{\mathrm{E}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\EE}{\mathbb{E}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\ZZ}{\mathbb{Z}}
\newcommand{\QQ}{\mathbb{Q}}
\newcommand{\PP}{\mathbb{P}}
\newcommand{\AcA}{\mathcal{A}}
\newcommand{\FcF}{\mathcal{F}}
\newcommand{\AsA}{\mathscr{A}}
\newcommand{\FsF}{\mathscr{F}}
\newcommand{\Var}[2][\,\!]{\mathrm{Var}_{#1}\left[#2\right]}
\newcommand{\Avar}[2][\,\!]{\mathrm{Avar}_{#1}\left[#2\right]}
\newcommand{\Cov}[2][\,\!]{\mathrm{Cov}_{#1}\left(#2\right)}
\newcommand{\Corr}[2][\,\!]{\mathrm{Corr}_{#1}\left(#2\right)}
\newcommand{\I}[1]{\mathrm{I}\left( #1 \right)}
\newcommand{\N}[1]{\mathcal{N} \left( #1 \right)}
\newcommand{\ow}{\text{otherwise}}$ with **rank** $N$, and the corresponding **price vector** is $S$. Then for each agent, the optimization problem can be rewritten as
$$\begin{align}
\max_\theta & \bspace U\P{c} = u_0 \P{c_0} + \sum_{\omega = 1}^{\Omega} \pi_\omega u_1\P{c_{1\omega}} \\
s.t. &\bspace c_0 = e_0 - \Tran S \theta \\
&\bspace c_1 = e_1 + X \theta \\
&\bspace \text{where } c_0, c_1 \geq 0
\end{align}$$
here we omit the subscript for agent $k$.
## The Existence of The Solution and its Features
Actually in Chapter $2$ we have a more general form of the preceding:
$$\bbox[9px, border:2px solid #880015]{\begin{array}{ll}
\max\limits_{\theta_k} & U_k\P{c_k} \\
s.t. & c_{k,0} = e_{k,0} -\Tran{S} \theta_k , c_{k,1} = e_{k,1} + X \theta_k \\
& c_0, c_1 \geq 0
\end{array}}$$
so we only need to prove the existence of the solution of this one.
$Theorem.1$
Denote the security market as $\CB{X,S}$ and we assert that there's a solution for this $\iff$ "*no-arbitrage*".
$Proof$
$\bspace$Necessity$\Rightarrow)$
>Suppose there is a solution $\theta$, with consumption $c = \SB{c_0;c_1} = \SB{e_0-\Tran S\theta; e_1 + X\theta}$. We prove by contradiction. If there were an arbitrage opportunity with portfolio $\eta$, let $d \equiv \SB{-\Tran S \eta ; X\eta}$ be its consumption. By the definition of arbitrage, $d>0$, so $c+d > c$. By insatiability, $c+d \succ c$, i.e. $c+d$ strictly dominates $c$, so $c$ cannot be the optimal solution.
$\bspace$Sufficiency$\Leftarrow)$
>Suppose there is no arbitrage opportunity. By the fundamental theorem of asset pricing, there exists a *positive* **state price vector** $\phi$ such that $S = \Tran\phi X$. Then the consumption generated by portfolio $\theta$ is $\SB{-\Tran\phi X\theta;X\theta}$.
>
>Recall that we've defined the **budget plan set** $B\P{e,\CB{X,S}}$ as
>
>$$\CB{c \geq 0 : c = e + \SB{-\Tran\phi X\theta;X\theta} , \theta\in \RR^N}$$
>
>We need to prove that this set is **bounded** and **closed**. In order to do so we define another set $\hat B\P{e,\phi}$
>
>$$\hat B\P{e,\phi} \equiv \CB{c \geq 0 : c = e + \SB{-\Tran\phi d;d}, d \in \RR^{\Omega}}$$
>
>Obviously we have $B \subseteq \hat B$. When the market is **complete**, $N = \Omega$ and $B = \hat B$. In chapter $3$, we've already seen that for $\phi \gg 0$, $\hat{B}$ is **bounded** (ref $Theorem3.1$). Meanwhile, $B$ is also a **closed** set, which can be proved as before (take a convergent sequence of consumption plans in $B$ and check that its limit is still in $B$). Thus $B$ is a **bounded** and **closed** set. Given continuous $u_0$ and $u_1$, we have continuous $U$.
>
>A continuous function attains its maximum on a **bounded** and **closed** set. That's it.
$Remark$
>Arbitrage: a portfolio $\eta$ whose consumption $d\equiv\SB{-\Tran S \eta;X\eta}$ satisfies $d>0$. In the budget set, $e$ is the endowment, $\phi$ is the state price vector, $X$ is the market structure, and $S = \Tran\phi X$ is the price vector
Then with this we have the optimization rewritten as
$$\max_\theta \bspace U\P c = u_0\P{e_0 - \Tran S\theta } + \sum_{\omega \in \Omega} \pi_\omega u_1\P{e_{1\omega} + X_\omega\theta}$$
Taking the derivative on $\theta$ and let it be $0$, we obtain the first order condition:
$$u'_0\P{c_0}S_n = \sum_{\omega \in \Omega} \pi_\omega u'_1\P{c_{1\omega}} X_{\omega,n} = \Exp\SB{u'_1\P{\tilde c_1}\tilde X_n}$$
Here $S_n$ is the $n$th entry of $S$, for $n$ from $1$ to $N$; $\tilde c_1$ denotes $c_1$ regarded as a random variable, and likewise for $\tilde X_n$.
And we call these $N$ equations the ***Euler Equations***. The left-hand side is the marginal utility lost because paying the price $S_n$ for the $n$th security reduces current consumption, and the right-hand side is the marginal utility gained because investing in one unit of security $n$ in period $0$ increases period-$1$ consumption by $\tilde X_n$.
Or equivalently, the agent is indifferent between consuming the last $1$ dollar today and investing it to obtain consumption tomorrow.
And another form of the **Euler equation**: $\bbox[9px, border:2px solid #880015]{\Exp\SB{\ffrac{u'_1\P{\tilde c_1}} {u'_0\P{c_0}}\cdot\ffrac{\tilde X_n} {S_n}} = 1}$, for $n = 1,\dots,N$.
***
The first-order condition alone may not guarantee optimality. A sufficient additional requirement is the second-order condition, obtained by differentiating twice with respect to $\theta$:
$$S_n^2 u''_0\P{c_0} + \Exp\SB{u''_1\P{\tilde c_1}\tilde X_n^2} \leq 0, n=1,2,\dots,N$$
Since $u_0$ and $u_1$ are **concave**, the second-order condition holds automatically, so we only need to check the first-order one; for a concave expected utility function the first-order condition is also sufficient.
Thus forget about the second one.<span style="font-size:50px;">😆😆😆</span>
***
**e.g.1**
Consider an economy with two equally probable states in period $1$. There is only one security, with price and payoff
$$S \to \begin{cases}
x \P{\geq 1} \\[0.5em]
1
\end{cases}$$
Consider an agent with endowment $w_0$ at period $0$ and $0$ at period $1$. His utility function is $\log\P{c_0}+\ffrac{1}{2}\rho\P{\log\P{c_{1a}} +\log\P{c_{1b}}}$. Let $\theta$ be the number of units of the security he holds. Then his optimization problem is:
$$\d{\max_\theta} \bspace \log\P{w_0-\theta S} +\ffrac{1} {2} \rho\SB{\log\P{\theta x} + \log\P{\theta}}
=\log\P{w_0-\theta S} +\rho\log\P{\theta} + \ffrac{1} {2}\rho\log\P{x}$$
To find the **Eular equation** we take the derivative on $\theta$, let it be $0$ and obtain:
$$\ffrac{S} {w_0-\theta S} =\ffrac{\rho} {\theta} \Longrightarrow \theta = \ffrac{\rho} {1+\rho} \ffrac{w_0} {S}$$
So that $c_0 = w_0 -\theta S = \ffrac{1} {1+\rho} w_0$, $c_{1a} = \theta x = \ffrac{\rho} {1+\rho} \ffrac{w_0} {S}x$, $c_{1b} =\theta = \ffrac{\rho} {1+\rho} \ffrac{w_0} {S}$
Here $\theta = \ffrac{\rho} {1+\rho} \ffrac{w_0} {S}$ gives his investment at period $0$. At the optimum, as described before, the following two quantities are equal:
$$\begin{align}
u_0'\P{c_0} =\ffrac{1} {c_0} &= \ffrac{1+\rho} {w_0}\\
\Exp\SB{u_1'\P{\tilde c_1} \ffrac{\tilde X} {S}} &= \ffrac{1} {2} \rho \SB{\ffrac{1} {c_{1a}}\ffrac{x} {S} + \ffrac{1} {c_{1b}} \ffrac{1} {S}}\\
&= \ffrac{1} {2} \rho \SB{\ffrac{1+\rho} {\rho} \ffrac{S} {w_0 x}\ffrac{x} {S} + \ffrac{1+\rho} {\rho} \ffrac{S} {w_0 } \ffrac{1} {S}}\\
&= \ffrac{1+\rho} {w_0} = u_0'\P{c_0}
\end{align}$$
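A minimal numerical check of this example (the parameter values below are arbitrary test numbers, not part of the example):
```python
import numpy as np

# Optimal holding theta = rho*w0/((1+rho)*S); check that both sides of the Euler equation agree.
w0, S, x, rho = 10.0, 1.5, 2.0, 0.9
theta = rho / (1 + rho) * w0 / S
c0, c1a, c1b = w0 - theta * S, theta * x, theta
lhs = 1 / c0                                               # u_0'(c_0)
rhs = 0.5 * rho * (x / (c1a * S) + 1 / (c1b * S))          # E[u_1'(c_1) * X/S]
print(lhs, rhs)                                            # both equal (1+rho)/w0
```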
***
## Portfolio Selection
Denote the value of portfolio $\theta$: $w\equiv e_0 - c_0 = \Tran S \theta$. We can determine the ***optimal portfolio*** by considering the following problem:
$$\bbox[9px, border:2px solid #880015]{v_1\P{w} = \max_{\CB{\theta: \Tran S\theta = w}} \Exp\SB{u_1\P{\tilde e_1 + \tilde X\theta}}}$$
Here $\tilde X \equiv \SB{\tilde X_1,\dots,\tilde X_N}$ is the random payoff vector of the $N$ securities. We call $v_1\P{w}$ the ***indirect utility function***: the *expected utility* obtained from the **optimal portfolio**. With this, we rewrite the optimization problem:
$$\max_\theta \bspace U\P{c} = u_0 \P{c_0} + \sum_{\omega = 1}^{\Omega} \pi_\omega u_1\P{c_{1\omega}}\\
\Downarrow \\
\max_\theta \bspace u_0\P{e_0 - \Tran S\theta } + \sum_{\omega \in \Omega} \pi_\omega u_1\P{e_{1\omega} + X_\omega\theta}\\
\Downarrow \\
\max_{w} \bspace u_0\P{e_0 - w } + \max_{\CB{\theta: \Tran S\theta = w}} \Exp\SB{u_1\P{\tilde e_1 + \tilde X\theta}}\\
\Downarrow \\
\bbox[9px, border:2px solid #880015]{\max_{w} \bspace u_0\P{e_0 - w } + v_1\P{w}}
$$
To simplify, we assume $\myEmphy{\tilde e_1=0}$, which excludes the effect of future endowment. Consequently we write $\myEmphy{v\P{w} = \d{\max_{\CB{\theta: \Tran S\theta = w}}} \Exp\SB{u_1\P{\tilde X\theta}}}$ instead of $v_1\P{w}$. To further simplify this issue 😂, we assume that there is a riskless security in the market; suppose it is the $N$th, with riskfree rate $r_F$. This implies that all prices $S_n>0$ for $n = 1,2,\dots,N$. Define $a_n = \theta_n S_n$ as the investment in security $n$. The ***gross rate of return*** is defined as:
$$
\myEmphy{\tilde x_n = \ffrac{\tilde X_n}{S_n}}$$
and the ***rate of return*** of security $n$, $\myEmphy{\tilde r_n \equiv \ffrac{\tilde X_n-S_n} {S_n} = \tilde x_n-1}$ with $n=1,2,\dots,N$. Then we have the equation for $\tilde w$:
$$\myBox{\begin{align}
\tilde w &\equiv \sum_{n=1}^{N} \theta_n \tilde X_n\\
&= \sum_{n=1}^{N} a_n \tilde x_n = \sum_{n=1}^{N} a_n \cdot\P{1+\tilde r_n}\\
&= w\P{1+r_F} + \sum_{n=1}^{N-1} a_n\P{\tilde r_n - r_F}
\end{align}}$$
where $\color{#880015}{w = \sum\limits_{n=1}^{N} a_n}$. The quantity $\tilde r_n - r_F$ is called the ***excess return***. Writing the vector $\tilde r = \SB{\tilde r_1;\cdots;\tilde r_{N-1}}$ of returns on the risky securities and $ a = \SB{ a_1;\cdots; a_{N-1}}$ of the investments in them, we can finally rewrite the optimization problem as
$$\myBox{\begin{align}
\max_{a} \bspace \Exp\SB{u\P{\tilde w}}&= \Exp\SB{u\P{w\P{1+r_F} + \sum_{n=1}^{N-1} a_n\P{\tilde r_n - r_F}}}\\
&= \Exp\SB{u\P{w\P{1+r_F} +\Tran a\P{\tilde r - r_F\iota}}}
\end{align}}$$
Its first-order condition with respect to $a_i$ is $\myEmphy{\Exp\SB{u'\P{\tilde w} \P{\tilde r_i - r_F}} = 0}$, where $\tilde w = w\P{1+r_F} +\Tran a\P{\tilde r - r_F\iota}$, for all $i$ from $1$ to $N-1$. These $N-1$ equations determine the vector $a$.
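As a sketch of how these conditions can be used in practice, the snippet below maximizes expected utility directly for a small made-up example (CRRA utility, two risky assets, three states); the utility choice, return table, and parameter values are assumptions for illustration only, and the first-order conditions are checked at the reported optimum.
```python
import numpy as np
from scipy.optimize import minimize

gamma, w, r_F = 3.0, 1.0, 0.02                     # illustrative risk aversion, wealth, riskfree rate
probs = np.array([0.3, 0.4, 0.3])                  # state probabilities
r = np.array([[-0.10, 0.05, 0.20],                 # returns of risky asset 1 in each state
              [-0.05, 0.02, 0.12]])                # returns of risky asset 2 in each state

def neg_expected_utility(a):
    wealth = w * (1 + r_F) + a @ (r - r_F)
    if np.any(wealth <= 0):
        return np.inf                              # keep CRRA utility well defined
    return -np.sum(probs * wealth ** (1 - gamma) / (1 - gamma))

a_star = minimize(neg_expected_utility, x0=np.zeros(2), method="Nelder-Mead").x
wealth = w * (1 + r_F) + a_star @ (r - r_F)
foc = (r - r_F) @ (probs * wealth ** (-gamma))     # E[u'(w~)(r_i - r_F)], ~ 0 at the optimum
print(a_star, foc)
```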
## Properties of Optimal Portfolio
### Market with One Riskfree Security and One Risky Security
Now the optimization problem changes to
$$\myBox{
\max_{a} \bspace \Exp\SB{u\P{\tilde w}} = \Exp\SB{u\P{w\P{1+r_F} + a\P{\tilde r - r_F}}}
}$$
$Theorem.2$
If the agent is risk averse, with $\tilde r$ the rate of return of the single risky security, then
- $a=0\iff \Exp\SB{\tilde r} =r_F$
- $a>0\iff \Exp\SB{\tilde r} >r_F$
- $a<0\iff \Exp\SB{\tilde r} <r_F$
$Proof$
>Let $\bar u\P{a} \equiv \Exp\SB{u\P{\tilde w}} = \Exp\SB{u\P{w\P{1+r_F}+a\P{\tilde r - r_F}}}$. Since $u$ is strictly concave, $\bar u''\P{a} = \Exp\SB{u''\P{\tilde w} \P{\tilde r-r_F}^2} \leq 0$, so $\bar u$ is concave and attains its maximum where $\bar u'\P{a} = 0$. Besides,
>
>$$\begin{align}
\bar u'\P{0} &= \Exp\SB{u'\P{w\P{1+r_F}+0 }\P{\tilde r - r_F}}\\
&=u'\P{w\P{1+r_F}+0}\cdot\P{\Exp\SB{\tilde r}-r_F}
\end{align}$$
>
>Since $u'>0$ and $w\P{1+r_F}$ is non-random, $\bar u'\P{0}$ has the same sign as $\Exp\SB{\tilde r}-r_F$ (in particular, $\Exp\SB{\tilde r}-r_F = 0 \iff \bar u'\P{0} = 0$). Because $\bar u'$ is decreasing, we have
>
>$$\myBox{\Exp\SB{\tilde r}-r_F \lesseqgtr 0 \iff \bar u'\P{0} \lesseqgtr 0 \iff a\lesseqgtr0}$$
We call $\Exp\SB{\tilde r} -r_F$ the ***risk premium***. The preceding result says that
$\bspace$risk averse as the agent is, he takes a positive position in the risky security only when he is compensated by a positive **risk premium**, i.e. a positive *expected excess return*.
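A rough numerical check of this sign pattern (same hypothetical CRRA, two-state setup as the sketch above; every number is made up for illustration): shifting the expected risky return above, equal to, and below $r_F$ flips the sign of the optimal $a$ accordingly.
```python
# Sketch (hypothetical numbers): the sign of the optimal a matches the sign of E[r] - r_F.
import numpy as np
from scipy.optimize import brentq

gamma, w, r_F = 3.0, 1.0, 0.02
probs = np.array([0.5, 0.5])

def optimal_a(r_states):
    r = np.asarray(r_states)
    def foc(a):
        w_tilde = w * (1 + r_F) + a * (r - r_F)
        return np.sum(probs * w_tilde ** (-gamma) * (r - r_F))
    return brentq(foc, -5.0 * w, 5.0 * w)   # foc changes sign on this bracket

print(optimal_a([0.20, -0.10]))   # E[r] = 0.05 > r_F  ->  a > 0
print(optimal_a([0.17, -0.13]))   # E[r] = 0.02 = r_F  ->  a = 0 (up to numerical error)
print(optimal_a([0.14, -0.16]))   # E[r] = -0.01 < r_F ->  a < 0
```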
Now we focus on the relation between portfolio selection and the agent's preferences, i.e. his risk aversion. Let $A\P{w}$ be the **absolute risk aversion**.
To be specific, we suppose $\Exp\SB{\tilde r}> r_F$ and consequently $a > 0$. Then define
$$a\P{w} = \argmax_a \Exp\SB{u\P{w\P{1+r_F}+a\P{\tilde r - r_F}}}$$
$Theorem.3$
- $a'\P{w} = 0 \iff A'\P{w} = 0$, **CARA**
- $a'\P{w} > 0 \iff A'\P{w} < 0$, **DARA**
- $a'\P{w} < 0 \iff A'\P{w} > 0$, **IARA**
$Proof$
> A kind reminder: $\myEmphy{A\P{w} \equiv -\ffrac{u''\P{w}} {u'\P{w}}} \geq 0$. The first order condition is
>
>$$\Exp\SB{u'\P{\tilde w}\P{\tilde r-r_F}}=0,\bspace \tilde w = w\P{1+r_F} + a\P{\tilde r - r_F}$$
>
>Differentiate the first order condition with respect to $w$; since the condition holds identically in $w$, its derivative is $0$:
>
>$$\Exp\SB{u''\P{\tilde w} \P{\tilde r-r_F}\P{\P{1+r_F}+\P{\tilde r-r_F}\ffrac{\dd a}{\dd w}}}=0 \\
\ffrac{\dd a}{\dd w} = -\ffrac{\P{1+r_F}\Exp\SB{u''\P{\tilde w}\P{\tilde r-r_F}}}{\Exp\SB{u''\P{\tilde w} \P{\tilde r-r_F}^2}}$$
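A hedged numerical illustration of the DARA case: CRRA utility has $A\P{w}=\gamma/w$, which is decreasing in $w$, so $Theorem.3$ predicts $a'\P{w}>0$. The sketch below (same hypothetical two-state market as before; all parameter values are assumptions) recomputes the optimal $a$ at two wealth levels and shows that it increases.
```python
# Sketch (hypothetical numbers): under CRRA, a DARA utility, the optimal risky
# investment a(w) increases with wealth, consistent with Theorem 3.
import numpy as np
from scipy.optimize import brentq

gamma, r_F = 3.0, 0.02
probs = np.array([0.5, 0.5])
r_risky = np.array([0.20, -0.10])     # E[r] = 0.05 > r_F, so a(w) > 0

def optimal_a(w):
    def foc(a):
        w_tilde = w * (1 + r_F) + a * (r_risky - r_F)
        return np.sum(probs * w_tilde ** (-gamma) * (r_risky - r_F))
    return brentq(foc, 0.0, 5.0 * w)

print(optimal_a(1.0), optimal_a(2.0))   # the second value is larger (with CRRA, about twice as large)
```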
|
lemma LIMSEQ_divide_realpow_zero: "1 < x \<Longrightarrow> (\<lambda>n. a / (x ^ n) :: real) \<longlonglongrightarrow> 0" |
// ------------------------------------------------------------------------
// Copyright (C)
// ETH Zurich - Switzerland
//
// Kevis-Kokitsi Maninis <[email protected]>
// Jordi Pont-Tuset <[email protected]>
// July 2016
// ------------------------------------------------------------------------
// This file is part of the COB package presented in:
// K.K. Maninis, J. Pont-Tuset, P. Arbelaez and L. Van Gool
// Convolutional Oriented Boundaries
// European Conference on Computer Vision (ECCV), 2016
// Please consider citing the paper if you use this code.
// ------------------------------------------------------------------------
#ifndef PART2CONT_HPP
#define PART2CONT_HPP
#include <iostream>
#include "containers.hpp"
#include <Eigen/Dense>
contour_container part2cont(const Eigen::Array<label_type,Eigen::Dynamic,Eigen::Dynamic>& lp)
{
/* Sizes */
std::size_t sx = lp.rows();
std::size_t sy = lp.cols();
/*-------------------------------------------------------------*/
/* Create LUT of from pairs of regions to */
/* list of contours and UCM threshold */
/*-------------------------------------------------------------*/
contour_container pairs_contours;
/* Scan horizontal contours*/
for (std::size_t xx=1; xx<sx; ++xx)
{
for (std::size_t yy=0; yy<sy; ++yy)
{
if (lp(xx,yy)!=lp(xx-1,yy))
{
/* Build a contour element and the two labels 'touching' it*/
label_type lab1 = std::min(lp(xx,yy),lp(xx-1,yy));
label_type lab2 = std::max(lp(xx,yy),lp(xx-1,yy));
                /* Recover the element or build an empty one  */
/* and push the contour piece */
pairs_contours[std::make_pair(lab1,lab2)].push_back(cont_elem(2*xx,2*yy+1));
}
}
}
/* Scan vertical contours*/
for (std::size_t xx=0; xx<sx; ++xx)
{
for (std::size_t yy=1; yy<sy; ++yy)
{
if (lp(xx,yy)!=lp(xx,yy-1))
{
/* Build a contour element and the two labels 'touching' it*/
label_type lab1 = std::min(lp(xx,yy),lp(xx,yy-1));
label_type lab2 = std::max(lp(xx,yy),lp(xx,yy-1));
                /* Recover the element or build an empty one  */
/* and push the contour piece */
pairs_contours[std::make_pair(lab1,lab2)].push_back(cont_elem(2*xx+1,2*yy));
}
}
}
return pairs_contours;
}
#endif
|
from torch.nn.modules.module import Module
import torch
import numpy as np
from torch.autograd import Variable
from ..functions import *
from ..functions.GANet import MyLossFunction
from ..functions.GANet import SgaFunction
from ..functions.GANet import LgaFunction
from ..functions.GANet import Lga2Function
from ..functions.GANet import Lga3Function
from ..functions.GANet import Lga3dFunction
from ..functions.GANet import Lga3d2Function
from ..functions.GANet import Lga3d3Function
from ..functions.GANet import MyLoss2Function
""" accuracy with threshold = 3 """
def valid_accu3(y_true_valid, y_pred_valid, thred = 3.0):
#epsilon = 0.0000000001
right_guess = torch.le(torch.abs(y_true_valid - y_pred_valid), thred).float()
accu3 = torch.mean(right_guess)
return accu3
class MyNormalize(Module):
def __init__(self, dim):
self.dim = dim
super(MyNormalize, self).__init__()
def forward(self, x):
# assert(x.is_contiguous() == True)
with torch.cuda.device_of(x):
norm = torch.sum(torch.abs(x),self.dim)
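            # Nudge the L1 norm away from zero (in both directions) so the division
            # below can never divide by exactly zero.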
norm[norm <= 0] = norm[norm <= 0] - 1e-6
norm[norm >= 0] = norm[norm >= 0] + 1e-6
norm = torch.unsqueeze(norm, self.dim)
size = np.ones(x.dim(), dtype='int')
size[self.dim] = x.size()[self.dim]
norm = norm.repeat(*size)
x = torch.div(x, norm)
return x
class MyLoss2(Module):
def __init__(self, thresh=1, alpha=2):
super(MyLoss2, self).__init__()
self.thresh = thresh
self.alpha = alpha
def forward(self, input1, input2):
#result = MyLoss2Function(self.thresh, self.alpha)(input1, input2)
#added by CCJ: updated for applying "new style" static functions via ".apply"
result = MyLoss2Function.apply(input1, input2, self.thresh, self.alpha)
return result
class MyLoss(Module):
def __init__(self, upper_thresh=5, lower_thresh=1):
super(MyLoss, self).__init__()
        self.upper_thresh = upper_thresh
        self.lower_thresh = lower_thresh
def forward(self, input1, input2):
#result = MyLossFunction(self.upper_thresh, self.lower_thresh)(input1, input2)
#added by CCJ: updated for applying "new style" static functions via ".apply"
result = MyLossFunction.apply(input1, input2, self.upper_thresh, self.lower_thresh)
return result
class SGA(Module):
def __init__(self):
super(SGA, self).__init__()
def forward(self, input, g0, g1, g2, g3):
"""
> see: Difference between apply an call for an autograd function,
at https://discuss.pytorch.org/t/difference-between-apply-an-call-for-an-autograd-function/13845
This difference is that instantiating + calling the Function works with
“old style” functions (which are going to be deprecated in the future).
Using .apply is for the “new style” functions. You can differentiate the
two easily: new style functions are defined with only @staticmethod,
while old style ones have an __init__.
"""
#result = SgaFunction()(input, g0, g1, g2, g3)
#added by CCJ on 20191010;
result = SgaFunction.apply(input, g0, g1, g2, g3)
        # or: you can call the Function.apply method and alias it as 'sga'
#sga = SgaFunction.apply
#result = sga(g0,g1,g2,g3)
return result
class LGA3D3(Module):
def __init__(self, radius=2):
super(LGA3D3, self).__init__()
self.radius = radius
def forward(self, input1, input2):
#result = Lga3d3Function(self.radius)(input1, input2)
#added by CCJ: updated for applying "new style" static functions via ".apply"
result = Lga3d3Function.apply(input1, input2,self.radius)
return result
class LGA3D2(Module):
def __init__(self, radius=2):
super(LGA3D2, self).__init__()
self.radius = radius
def forward(self, input1, input2):
#result = Lga3d2Function(self.radius)(input1, input2)
#added by CCJ: updated for applying "new style" static functions via ".apply"
result = Lga3d2Function.apply(input1, input2,self.radius)
return result
class LGA3D(Module):
def __init__(self, radius=2):
super(LGA3D, self).__init__()
self.radius = radius
def forward(self, input1, input2):
#result = Lga3dFunction(self.radius)(input1, input2)
#added by CCJ: updated for applying "new style" static functions via ".apply"
result = Lga3dFunction.apply(input1, input2, self.radius)
return result
class LGA3(Module):
def __init__(self, radius=2):
super(LGA3, self).__init__()
self.radius = radius
def forward(self, input1, input2):
        #result = Lga3Function(self.radius)(input1, input2)
#added by CCJ: updated for applying "new style" static functions via ".apply"
result = Lga3Function.apply(input1, input2,self.radius)
return result
class LGA2(Module):
def __init__(self, radius=2):
super(LGA2, self).__init__()
self.radius = radius
def forward(self, input1, input2):
#result = Lga2Function(self.radius)(input1, input2)
#added by CCJ: updated for applying "new style" static functions via ".apply"
result = Lga2Function.apply(input1, input2, self.radius)
return result
class LGA(Module):
def __init__(self, radius=2):
super(LGA, self).__init__()
self.radius = radius
def forward(self, input1, input2):
#result = LgaFunction(self.radius)(input1, input2)
#added by CCJ: updated for applying "new style" static functions via ".apply"
#NOTE: radius = 2, means window size = 5;
# see the "GANet_kernel.cu" file for : around line 1147: wsize = 2 * radius + 1
result = LgaFunction.apply(input1, input2,self.radius)
return result
class GetCostVolume(Module):
def __init__(self, maxdisp):
super(GetCostVolume, self).__init__()
self.maxdisp = maxdisp + 1
def forward(self, x, y):
"""
args:
x : left feature, in size [N,C,H,W]
y : right feature, in size [N,C,H,W]
return:
cost: cost volume in size [N,C,D,H,W]
"""
assert(x.is_contiguous() == True)
with torch.cuda.device_of(x):
num, channels, height, width = x.size()
cost = x.new().resize_(num, channels * 2, self.maxdisp, height, width).zero_()
#cost = Variable(torch.FloatTensor(x.size()[0], x.size()[1]*2, self.maxdisp, x.size()[2], x.size()[3]).zero_(), volatile= not self.training).cuda()
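            # For disparity i, the left feature at column j is paired with the right
            # feature at column j - i; columns j < i have no valid match and stay zero.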
for i in range(self.maxdisp):
if i > 0 :
cost[:, :x.size()[1], i, :,i:] = x[:,:,:,i:]
cost[:, x.size()[1]:, i, :,i:] = y[:,:,:,:-i]
else:
cost[:, :x.size()[1], i, :,:] = x
cost[:, x.size()[1]:, i, :,:] = y
cost = cost.contiguous()
return cost
class DisparityRegression(Module):
def __init__(self, maxdisp):
super(DisparityRegression, self).__init__()
self.maxdisp = maxdisp + 1
# self.disp = Variable(torch.Tensor(np.reshape(np.array(range(self.maxdisp)),[1,self.maxdisp,1,1])).cuda(), requires_grad=False)
def forward(self, x):
"""
args:
x : probability, in size [N,D+1,H,W] or write it as [N,D,H,W] if ignoring the small difference between D+1 and D;
"""
assert(x.is_contiguous() == True)
with torch.cuda.device_of(x):
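            # Soft-argmin: build the candidate disparities [0, 1, ..., maxdisp], broadcast
            # them over batch and spatial dims, and take the probability-weighted sum.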
disp = Variable(torch.Tensor(
np.reshape(np.array(range(self.maxdisp)),[1, self.maxdisp, 1, 1])
).cuda(), requires_grad=False)
disp = disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
out = torch.sum(x * disp, 1) # disparity, in size [N,H,W]
        return out
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
rge2_kappa := 0.8040:
rge2_den := s -> rge2_kappa + 1*MU_GE*s^2 + MU_GE^2*s^4/rge2_kappa:
rge2_f0 := s -> 1 + rge2_kappa * (1 - rge2_kappa/rge2_den(s)):
rge2_f := x -> rge2_f0(X2S * x):
f := (rs, zeta, xt, xs0, xs1) -> gga_exchange(rge2_f, rs, zeta, xs0, xs1):
|
function VRes = spm_write_residuals(SPM,Ic)
% Write residual images
% FORMAT Vres = spm_write_residuals(SPM,Ic)
% SPM - structure containing generic analysis details
% Ic - contrast index used to adjust data (0: no adjustment)
% (NaN: adjust for everything)
%
% VRes - struct array of residual image handles
%__________________________________________________________________________
% Copyright (C) 2012-2013 Wellcome Trust Centre for Neuroimaging
% Guillaume Flandin
% $Id: spm_write_residuals.m 6656 2015-12-24 16:49:52Z guillaume $
%-Get SPM.mat
%--------------------------------------------------------------------------
if ~nargin || isempty(SPM)
[SPM,sts] = spm_select(1,'^SPM\.mat$','Select SPM.mat');
if ~sts, VRes = ''; return; end
end
if ~isstruct(SPM)
swd = spm_file(SPM,'fpath');
try
load(fullfile(swd,'SPM.mat'));
SPM.swd = swd;
catch
error(['Cannot read ' fullfile(swd,'SPM.mat')]);
end
end
try, SPM.swd; catch, SPM.swd = pwd; end
cwd = pwd; cd(SPM.swd);
%-Get contrast used to adjust data
%--------------------------------------------------------------------------
if nargin<2 || isempty(Ic)
q(1) = 0;
Con = {'<don''t adjust>'};
q(2) = NaN;
Con{2} = '<adjust for everything>';
for i = 1:length(SPM.xCon)
if strcmp(SPM.xCon(i).STAT,'F')
q(end + 1) = i;
Con{end + 1} = SPM.xCon(i).name;
end
end
i = spm_input('adjust data for (select contrast)','!+1','m',Con);
Ic = q(i);
end
%-Compute and write residuals
%==========================================================================
spm('Pointer','Watch')
M = SPM.xY.VY(1).mat;
DIM = SPM.xY.VY(1).dim(1:min(numel(SPM.xY.VY(1).dim),3));
[nScan, nBeta] = size(SPM.xX.X);
if spm_mesh_detect(SPM.xY.VY)
file_ext = '.gii';
else
file_ext = spm_file_ext;
end
%-Initialise residual images
%--------------------------------------------------------------------------
VRes(1:nScan) = deal(struct(...
'fname', [],...
'dim', DIM,...
'dt', [spm_type('float64') spm_platform('bigend')],...
'mat', M,...
'pinfo', [1 0 0]',...
'descrip', 'Residuals'));
for i = 1:nScan
VRes(i).fname = [sprintf('Res_%04d', i) file_ext];
VRes(i).descrip = sprintf('Residuals (%04d)', i);
end
VRes = spm_data_hdr_write(VRes);
%-Loop over chunks
%--------------------------------------------------------------------------
chunksize = floor(spm_get_defaults('stats.maxmem') / 8 / nScan);
nbchunks = ceil(prod(DIM) / chunksize);
chunks = min(cumsum([1 repmat(chunksize,1,nbchunks)]),prod(DIM)+1);
spm_progress_bar('Init',nbchunks,'Writing residuals','Chunks');
for i=1:nbchunks
chunk = chunks(i):chunks(i+1)-1;
%-Get mask
%----------------------------------------------------------------------
m = spm_data_read(SPM.VM,chunk) > 0;
m = m(:)';
%-Get raw data, whiten and filter
%----------------------------------------------------------------------
y = zeros(nScan,numel(chunk));
for j=1:nScan
y(j,:) = spm_data_read(SPM.xY.VY(j),chunk);
end
y(:,~m) = [];
y = spm_filter(SPM.xX.K,SPM.xX.W*y);
if Ic ~= 0
%-Parameter estimates: beta = xX.pKX*xX.K*y
%------------------------------------------------------------------
beta = zeros(nBeta,numel(chunk));
for j=1:nBeta
beta(j,:) = spm_data_read(SPM.Vbeta(j),chunk);
end
beta(:,~m) = [];
        %-Subtract Y0 = X0*beta, Y = Yc + Y0 + e
%------------------------------------------------------------------
if ~isnan(Ic)
y = y - spm_FcUtil('Y0',SPM.xCon(Ic),SPM.xX.xKXs,beta);
else
y = y - SPM.xX.xKXs.X * beta;
end
end
%-Write residuals
%----------------------------------------------------------------------
yy = NaN(numel(chunk),1);
for j=1:nScan
yy(m) = y(j,:);
VRes(j) = spm_data_write(VRes(j), yy, chunk);
end
spm_progress_bar('Set',i)
end
cd(cwd);
spm_progress_bar('Clear')
spm('Pointer','Arrow')
|
[STATEMENT]
lemma symrun_interp_set_lifting:
assumes \<open>set \<Gamma> = set \<Gamma>'\<close>
shows \<open>\<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
have \<open>set (remdups \<Gamma>) = set (remdups \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (remdups \<Gamma>) = set (remdups \<Gamma>')
[PROOF STEP]
by (simp add: assms)
[PROOF STATE]
proof (state)
this:
set (remdups \<Gamma>) = set (remdups \<Gamma>')
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
set (remdups \<Gamma>) = set (remdups \<Gamma>')
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
have fxpnt\<Gamma>: \<open>\<Inter> ((\<lambda>\<gamma>. \<lbrakk> \<gamma> \<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m) ` set \<Gamma>) = \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
by (simp add: symrun_interp_fixpoint)
[PROOF STATE]
proof (state)
this:
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
have fxpnt\<Gamma>': \<open>\<Inter> ((\<lambda>\<gamma>. \<lbrakk> \<gamma> \<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m) ` set \<Gamma>') = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>') = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
by (simp add: symrun_interp_fixpoint)
[PROOF STATE]
proof (state)
this:
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>') = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>') = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
have \<open>\<Inter> ((\<lambda>\<gamma>. \<lbrakk> \<gamma> \<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m) ` set \<Gamma>) = \<Inter> ((\<lambda>\<gamma>. \<lbrakk> \<gamma> \<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m) ` set \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>')
[PROOF STEP]
by (simp add: assms)
[PROOF STATE]
proof (state)
this:
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>')
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
set (remdups \<Gamma>) = set (remdups \<Gamma>')
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>') = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
set (remdups \<Gamma>) = set (remdups \<Gamma>')
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>') = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>')
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
using symrun_interp_remdups_absorb
[PROOF STATE]
proof (prove)
using this:
set (remdups \<Gamma>) = set (remdups \<Gamma>')
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>') = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
\<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>) = \<Inter> (symbolic_run_interpretation_primitive ` set \<Gamma>')
\<lbrakk>\<lbrakk> ?\<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> remdups ?\<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
goal (1 subgoal):
1. \<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<lbrakk>\<lbrakk> \<Gamma> \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m = \<lbrakk>\<lbrakk> \<Gamma>' \<rbrakk>\<rbrakk>\<^sub>p\<^sub>r\<^sub>i\<^sub>m
goal:
No subgoals!
[PROOF STEP]
qed
|
integrate.rect <- function(f, a, b, n, k=0) {
#k = 0 for left, 1 for right, 0.5 for midpoint
h <- (b-a)/n
x <- seq(a, b, len=n+1)
sum(f(x[-1]-h*(1-k)))*h
}
integrate.trapezoid <- function(f, a, b, n) {
h <- (b-a)/n
x <- seq(a, b, len=n+1)
fx <- f(x)
sum(fx[-1] + fx[-length(x)])*h/2
}
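# Composite Simpson's rule: each subinterval [x_i, x_{i+1}] contributes
# h/6 * (f(x_i) + 4*f(midpoint) + f(x_{i+1})); the sums below vectorise this over all subintervals.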
integrate.simpsons <- function(f, a, b, n) {
h <- (b-a)/n
x <- seq(a, b, len=n+1)
fx <- f(x)
sum(fx[-length(x)] + 4*f(x[-1]-h/2) + fx[-1]) * h/6
}
f1 <- (function(x) {x^3})
f2 <- (function(x) {1/x})
f3 <- (function(x) {x})
f4 <- (function(x) {x})
integrate.simpsons(f1,0,1,100) #0.25
integrate.simpsons(f2,1,100,1000) # 4.60517
integrate.simpsons(f3,0,5000,5000000) # 12500000
integrate.simpsons(f4,0,6000,6000000) # 1.8e+07
integrate.rect(f1,0,1,100,0) #TopLeft 0.245025
integrate.rect(f1,0,1,100,0.5) #Mid 0.2499875
integrate.rect(f1,0,1,100,1) #TopRight 0.255025
integrate.trapezoid(f1,0,1,100) # 0.250025
|
[STATEMENT]
lemma quadratic_shape1b:
fixes a b c x y::"real"
assumes agt: "a > 0"
assumes xy_roots: "x < y \<and> a*x^2 + b*x + c = 0 \<and> a*y^2 + b*y + c = 0"
shows "\<And>z. (z > y \<Longrightarrow> a*z^2 + b*z + c > 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
fix z
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
assume z_gt :"z > y"
[PROOF STATE]
proof (state)
this:
y < z
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
have nogt: "\<not>(\<exists>w. w > y \<and> a*w^2 + b*w + c = 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
[PROOF STEP]
using xy_roots discriminant_iff
[PROOF STATE]
proof (prove)
using this:
x < y \<and> a * x\<^sup>2 + b * x + c = 0 \<and> a * y\<^sup>2 + b * y + c = 0
?a \<noteq> 0 \<Longrightarrow> (?a * ?x\<^sup>2 + ?b * ?x + ?c = 0) = (0 \<le> discrim ?a ?b ?c \<and> (?x = (- ?b + sqrt (discrim ?a ?b ?c)) / (2 * ?a) \<or> ?x = (- ?b - sqrt (discrim ?a ?b ?c)) / (2 * ?a)))
goal (1 subgoal):
1. \<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
[PROOF STEP]
by (metis agt less_eq_real_def linorder_not_less)
[PROOF STATE]
proof (state)
this:
\<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
have "\<exists>(w::real). \<forall>(y::real). (y > w \<longrightarrow> a*y^2 + b*y + c > 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>w. \<forall>y>w. 0 < a * y\<^sup>2 + b * y + c
[PROOF STEP]
using agt pos_lc_dom_quad
[PROOF STATE]
proof (prove)
using this:
0 < a
0 < ?a \<Longrightarrow> \<exists>w. \<forall>y>w. 0 < ?a * y\<^sup>2 + ?b * y + ?c
goal (1 subgoal):
1. \<exists>w. \<forall>y>w. 0 < a * y\<^sup>2 + b * y + c
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>w. \<forall>y>w. 0 < a * y\<^sup>2 + b * y + c
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>w. \<forall>y>w. 0 < a * y\<^sup>2 + b * y + c
[PROOF STEP]
have "\<exists>k > y. a*k^2 + b*k + c > 0"
[PROOF STATE]
proof (prove)
using this:
\<exists>w. \<forall>y>w. 0 < a * y\<^sup>2 + b * y + c
goal (1 subgoal):
1. \<exists>k>y. 0 < a * k\<^sup>2 + b * k + c
[PROOF STEP]
by (metis add.commute agt less_add_same_cancel1 linorder_neqE_linordered_idom pos_add_strict)
[PROOF STATE]
proof (state)
this:
\<exists>k>y. 0 < a * k\<^sup>2 + b * k + c
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>k>y. 0 < a * k\<^sup>2 + b * k + c
[PROOF STEP]
obtain k where k_prop: "k > y \<and> a*k^2 + b*k + c > 0"
[PROOF STATE]
proof (prove)
using this:
\<exists>k>y. 0 < a * k\<^sup>2 + b * k + c
goal (1 subgoal):
1. (\<And>k. y < k \<and> 0 < a * k\<^sup>2 + b * k + c \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y < k \<and> 0 < a * k\<^sup>2 + b * k + c
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
have kgt: "k > z \<Longrightarrow> a*z^2 + b*z + c > 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. z < k \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. z < k \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
assume kgt: "k > z"
[PROOF STATE]
proof (state)
this:
z < k
goal (1 subgoal):
1. z < k \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
z < k
[PROOF STEP]
have zneq: "a*z^2 + b*z + c = 0 \<Longrightarrow> False"
[PROOF STATE]
proof (prove)
using this:
z < k
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
[PROOF STEP]
using nogt
[PROOF STATE]
proof (prove)
using this:
z < k
\<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
[PROOF STEP]
using z_gt
[PROOF STATE]
proof (prove)
using this:
z < k
\<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
y < z
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
goal (1 subgoal):
1. z < k \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
have znlt: "a*z^2 + b*z + c < 0 \<Longrightarrow> False"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
[PROOF STEP]
using kgt k_prop quadratic_poly_eval[of c b a] z_gt nogt poly_IVT_pos[where a= "z", where b = "k", where p = "[:c, b, a:]"]
[PROOF STATE]
proof (prove)
using this:
z < k
y < k \<and> 0 < a * k\<^sup>2 + b * k + c
poly [:c, b, a:] ?x = a * ?x\<^sup>2 + b * ?x + c
y < z
\<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
\<lbrakk>z < k; poly [:c, b, a:] z < 0; 0 < poly [:c, b, a:] k\<rbrakk> \<Longrightarrow> \<exists>x>z. x < k \<and> poly [:c, b, a:] x = 0
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
[PROOF STEP]
by (metis less_eq_real_def less_le_trans)
[PROOF STATE]
proof (state)
this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
goal (1 subgoal):
1. z < k \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
[PROOF STEP]
show "a*z^2 + b*z + c > 0"
[PROOF STATE]
proof (prove)
using this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
goal (1 subgoal):
1. 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
using zneq znlt
[PROOF STATE]
proof (prove)
using this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
goal (1 subgoal):
1. 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
using linorder_neqE_linordered_idom
[PROOF STATE]
proof (prove)
using this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
\<lbrakk>?x \<noteq> ?y; ?x < ?y \<Longrightarrow> ?thesis; ?y < ?x \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
0 < a * z\<^sup>2 + b * z + c
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
z < k \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
have klt: "k < z \<Longrightarrow> a*z^2 + b*z + c > 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
assume klt: "k < z"
[PROOF STATE]
proof (state)
this:
k < z
goal (1 subgoal):
1. k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
k < z
[PROOF STEP]
have zneq: "a*z^2 + b*z + c = 0 \<Longrightarrow> False"
[PROOF STATE]
proof (prove)
using this:
k < z
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
[PROOF STEP]
using nogt
[PROOF STATE]
proof (prove)
using this:
k < z
\<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
[PROOF STEP]
using z_gt
[PROOF STATE]
proof (prove)
using this:
k < z
\<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
y < z
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
goal (1 subgoal):
1. k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
have znlt: "a*z^2 + b*z + c < 0 \<Longrightarrow> False"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
[PROOF STEP]
using klt k_prop quadratic_poly_eval[of c b a] z_gt nogt poly_IVT_neg[where a= "k", where b = "z", where p = "[:c, b, a:]"]
[PROOF STATE]
proof (prove)
using this:
k < z
y < k \<and> 0 < a * k\<^sup>2 + b * k + c
poly [:c, b, a:] ?x = a * ?x\<^sup>2 + b * ?x + c
y < z
\<not> (\<exists>w>y. a * w\<^sup>2 + b * w + c = 0)
\<lbrakk>k < z; 0 < poly [:c, b, a:] k; poly [:c, b, a:] z < 0\<rbrakk> \<Longrightarrow> \<exists>x>k. x < z \<and> poly [:c, b, a:] x = 0
goal (1 subgoal):
1. a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
[PROOF STEP]
by (metis add.commute add_less_cancel_left add_mono_thms_linordered_field(3) less_eq_real_def)
[PROOF STATE]
proof (state)
this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
goal (1 subgoal):
1. k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
[PROOF STEP]
show "a*z^2 + b*z + c > 0"
[PROOF STATE]
proof (prove)
using this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
goal (1 subgoal):
1. 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
using zneq znlt
[PROOF STATE]
proof (prove)
using this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
goal (1 subgoal):
1. 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
using linorder_neqE_linordered_idom
[PROOF STATE]
proof (prove)
using this:
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
a * z\<^sup>2 + b * z + c = 0 \<Longrightarrow> False
a * z\<^sup>2 + b * z + c < 0 \<Longrightarrow> False
\<lbrakk>?x \<noteq> ?y; ?x < ?y \<Longrightarrow> ?thesis; ?y < ?x \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
0 < a * z\<^sup>2 + b * z + c
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
goal (1 subgoal):
1. \<And>z. y < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
show "a*z^2 + b*z + c > 0"
[PROOF STATE]
proof (prove)
using this:
k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
goal (1 subgoal):
1. 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
using k_prop kgt klt
[PROOF STATE]
proof (prove)
using this:
k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
y < k \<and> 0 < a * k\<^sup>2 + b * k + c
z < k \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
k < z \<Longrightarrow> 0 < a * z\<^sup>2 + b * z + c
goal (1 subgoal):
1. 0 < a * z\<^sup>2 + b * z + c
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
0 < a * z\<^sup>2 + b * z + c
goal:
No subgoals!
[PROOF STEP]
qed
|
Require Export GeoCoq.Elements.OriginalProofs.lemma_parallelsymmetric.
Require Export GeoCoq.Elements.OriginalProofs.lemma_paralleldef2B.
Require Export GeoCoq.Elements.OriginalProofs.lemma_parallelNC.
Require Export GeoCoq.Elements.OriginalProofs.lemma_planeseparation.
Section Euclid.
Context `{Ax:euclidean_neutral_ruler_compass}.
Lemma lemma_parallelPasch :
forall A B C D E,
PG A B C D -> BetS A D E ->
exists X, BetS B X E /\ BetS C X D.
Proof.
intros.
assert (Par A B C D) by (conclude_def PG ).
assert (Par A D B C) by (conclude_def PG ).
assert (Par C D A B) by (conclude lemma_parallelsymmetric).
assert (TP C D A B) by (conclude lemma_paralleldef2B).
assert (OS A B C D) by (conclude_def TP ).
assert (OS B A C D) by (forward_using lemma_samesidesymmetric).
assert (eq D D) by (conclude cn_equalityreflexive).
assert (neq A D) by (forward_using lemma_betweennotequal).
assert (Col A D D) by (conclude_def Col ).
assert (Col A D E) by (conclude_def Col ).
assert (Col D D E) by (conclude lemma_collinear4).
assert (Col E D D) by (forward_using lemma_collinearorder).
assert (Col C D D) by (conclude_def Col ).
assert (nCol A C D) by (forward_using lemma_parallelNC).
assert (nCol C D A) by (forward_using lemma_NCorder).
assert (TS A C D E) by (conclude_def TS ).
assert (TS B C D E) by (conclude lemma_planeseparation).
rename_H H;let Tf:=fresh in
assert (Tf:exists H, (BetS B H E /\ Col C D H /\ nCol C D B)) by (conclude_def TS );destruct Tf as [H];spliter.
assert (BetS E H B) by (conclude axiom_betweennesssymmetry).
assert (Col D C H) by (forward_using lemma_collinearorder).
assert (neq A D) by (conclude_def Par ).
assert (~ Meet A D B C) by (conclude_def Par ).
assert (~ Meet E D C B).
{
intro.
let Tf:=fresh in
assert (Tf:exists p, (neq E D /\ neq C B /\ Col E D p /\ Col C B p)) by (conclude_def Meet );destruct Tf as [p];spliter.
assert (neq B C) by (conclude lemma_inequalitysymmetric).
assert (Col B C p) by (forward_using lemma_collinearorder).
assert (Col E D A) by (forward_using lemma_collinearorder).
assert (Col D A p) by (conclude lemma_collinear4).
assert (Col A D p) by (forward_using lemma_collinearorder).
assert (Meet A D B C) by (conclude_def Meet ).
contradict.
}
assert (eq C C) by (conclude cn_equalityreflexive).
assert (neq D E) by (forward_using lemma_betweennotequal).
assert (neq E D) by (conclude lemma_inequalitysymmetric).
assert (neq B C) by (conclude_def Par ).
assert (neq C B) by (conclude lemma_inequalitysymmetric).
assert (Col C C B) by (conclude_def Col ).
assert (BetS D H C) by (conclude lemma_collinearbetween).
assert (BetS C H D) by (conclude axiom_betweennesssymmetry).
close.
Qed.
End Euclid.
|
State Before: C : Type u
inst✝¹ : Category C
D : Type ?u.64328
inst✝ : Category D
X Y Z : C
f : X ⟶ Y
g : Y ⟶ Z
hf : MorphismProperty.epimorphisms C f
hg : MorphismProperty.epimorphisms C g
⊢ MorphismProperty.epimorphisms C (f ≫ g) State After: C : Type u
inst✝¹ : Category C
D : Type ?u.64328
inst✝ : Category D
X Y Z : C
f : X ⟶ Y
g : Y ⟶ Z
hf : Epi f
hg : Epi g
⊢ Epi (f ≫ g) Tactic: rw [epimorphisms.iff] at hf hg⊢ State Before: C : Type u
inst✝¹ : Category C
D : Type ?u.64328
inst✝ : Category D
X Y Z : C
f : X ⟶ Y
g : Y ⟶ Z
hf : Epi f
hg : Epi g
⊢ Epi (f ≫ g) State After: C : Type u
inst✝¹ : Category C
D : Type ?u.64328
inst✝ : Category D
X Y Z : C
f : X ⟶ Y
g : Y ⟶ Z
hf : Epi f
hg : Epi g
this : Epi f
⊢ Epi (f ≫ g) Tactic: haveI := hf State Before: C : Type u
inst✝¹ : Category C
D : Type ?u.64328
inst✝ : Category D
X Y Z : C
f : X ⟶ Y
g : Y ⟶ Z
hf : Epi f
hg : Epi g
this : Epi f
⊢ Epi (f ≫ g) State After: C : Type u
inst✝¹ : Category C
D : Type ?u.64328
inst✝ : Category D
X Y Z : C
f : X ⟶ Y
g : Y ⟶ Z
hf : Epi f
hg : Epi g
this✝ : Epi f
this : Epi g
⊢ Epi (f ≫ g) Tactic: haveI := hg State Before: C : Type u
inst✝¹ : Category C
D : Type ?u.64328
inst✝ : Category D
X Y Z : C
f : X ⟶ Y
g : Y ⟶ Z
hf : Epi f
hg : Epi g
this✝ : Epi f
this : Epi g
⊢ Epi (f ≫ g) State After: no goals Tactic: apply epi_comp
|
{-# OPTIONS --universe-polymorphism #-}
module Categories.Square where
open import Level
open import Function renaming (id to idᶠ; _∘_ to _©_)
open import Categories.Support.PropositionalEquality
open import Categories.Category
import Categories.Morphisms as Mor
open import Relation.Binary hiding (_⇒_)
module GlueSquares {o ℓ e} (C : Category o ℓ e) where
private module C = Category C
open C
open Mor C
module Pulls {X Y Z} {a : Y ⇒ Z} {b : X ⇒ Y} {c : X ⇒ Z} (ab≡c : a ∘ b ≡ c) where
.pullʳ : ∀ {W} {f : Z ⇒ W} → (f ∘ a) ∘ b ≡ f ∘ c
pullʳ {f = f} =
begin
(f ∘ a) ∘ b
↓⟨ assoc ⟩
f ∘ (a ∘ b)
↓⟨ ∘-resp-≡ʳ ab≡c ⟩
f ∘ c
∎
where open HomReasoning
.pullˡ : ∀ {W} {f : W ⇒ X} → a ∘ (b ∘ f) ≡ c ∘ f
pullˡ {f = f} =
begin
a ∘ (b ∘ f)
↑⟨ assoc ⟩
(a ∘ b) ∘ f
↓⟨ ∘-resp-≡ˡ ab≡c ⟩
c ∘ f
∎
where open HomReasoning
open Pulls public
module Pushes {X Y Z} {a : Y ⇒ Z} {b : X ⇒ Y} {c : X ⇒ Z} (c≡ab : c ≡ a ∘ b) where
.pushʳ : ∀ {W} {f : Z ⇒ W} → f ∘ c ≡ (f ∘ a) ∘ b
pushʳ {f = f} =
begin
f ∘ c
↓⟨ ∘-resp-≡ʳ c≡ab ⟩
f ∘ (a ∘ b)
↑⟨ assoc ⟩
(f ∘ a) ∘ b
∎
where open HomReasoning
.pushˡ : ∀ {W} {f : W ⇒ X} → c ∘ f ≡ a ∘ (b ∘ f)
pushˡ {f = f} =
begin
c ∘ f
↓⟨ ∘-resp-≡ˡ c≡ab ⟩
(a ∘ b) ∘ f
↓⟨ assoc ⟩
a ∘ (b ∘ f)
∎
where open HomReasoning
open Pushes public
module IntroElim {X} {a : X ⇒ X} (a≡id : a ≡ id) where
.elimʳ : ∀ {W} {f : X ⇒ W} → (f ∘ a) ≡ f
elimʳ {f = f} =
begin
f ∘ a
↓⟨ ∘-resp-≡ʳ a≡id ⟩
f ∘ id
↓⟨ identityʳ ⟩
f
∎
where
open HomReasoning
.introʳ : ∀ {W} {f : X ⇒ W} → f ≡ f ∘ a
introʳ = Equiv.sym elimʳ
.elimˡ : ∀ {W} {f : W ⇒ X} → (a ∘ f) ≡ f
elimˡ {f = f} =
begin
a ∘ f
↓⟨ ∘-resp-≡ˡ a≡id ⟩
id ∘ f
↓⟨ identityˡ ⟩
f
∎
where
open HomReasoning
.introˡ : ∀ {W} {f : W ⇒ X} → f ≡ a ∘ f
introˡ = Equiv.sym elimˡ
open IntroElim public
module Extends {X Y Z W} {f : X ⇒ Y} {g : X ⇒ Z} {h : Y ⇒ W} {i : Z ⇒ W} (s : CommutativeSquare f g h i) where
.extendˡ : ∀ {A} {a : W ⇒ A} → CommutativeSquare f g (a ∘ h) (a ∘ i)
extendˡ {a = a} =
begin
(a ∘ h) ∘ f
↓⟨ pullʳ s ⟩
a ∘ i ∘ g
↑⟨ assoc ⟩
(a ∘ i) ∘ g
∎
where
open HomReasoning
.extendʳ : ∀ {A} {a : A ⇒ X} → CommutativeSquare (f ∘ a) (g ∘ a) h i
extendʳ {a = a} =
begin
h ∘ (f ∘ a)
↓⟨ pullˡ s ⟩
(i ∘ g) ∘ a
↓⟨ assoc ⟩
i ∘ (g ∘ a)
∎
where
open HomReasoning
.extend² : ∀ {A B} {a : W ⇒ A} {b : B ⇒ X} → CommutativeSquare (f ∘ b) (g ∘ b) (a ∘ h) (a ∘ i)
extend² {a = a} {b} =
begin
(a ∘ h) ∘ (f ∘ b)
↓⟨ pullʳ extendʳ ⟩
a ∘ (i ∘ (g ∘ b))
↑⟨ assoc ⟩
(a ∘ i) ∘ (g ∘ b)
∎
where
open HomReasoning
open Extends public
-- essentially composition in the arrow category
.glue : {X Y Y′ Z Z′ W : Obj} {a : Z ⇒ W} {a′ : Y′ ⇒ Z′} {b : Y ⇒ Z} {b′ : X ⇒ Y′} {c : X ⇒ Y} {c′ : Y′ ⇒ Z} {c″ : Z′ ⇒ W} → CommutativeSquare c′ a′ a c″ → CommutativeSquare c b′ b c′ → CommutativeSquare c (a′ ∘ b′) (a ∘ b) c″
glue {a = a} {a′} {b} {b′} {c} {c′} {c″} sq-a sq-b =
begin
(a ∘ b) ∘ c
↓⟨ pullʳ sq-b ⟩
a ∘ (c′ ∘ b′)
↓⟨ pullˡ sq-a ⟩
(c″ ∘ a′) ∘ b′
↓⟨ assoc ⟩
c″ ∘ (a′ ∘ b′)
∎
where
open HomReasoning
.glue◃◽ : {X Y Y′ Z W : Obj} {a : Z ⇒ W} {b : Y ⇒ Z} {b′ : X ⇒ Y′} {c : X ⇒ Y} {c′ : Y′ ⇒ Z} {c″ : Y′ ⇒ W} → a ∘ c′ ≡ c″ → CommutativeSquare c b′ b c′ → CommutativeSquare c b′ (a ∘ b) c″
glue◃◽ {a = a} {b} {b′} {c} {c′} {c″} tri-a sq-b =
begin
(a ∘ b) ∘ c
↓⟨ pullʳ sq-b ⟩
a ∘ (c′ ∘ b′)
↓⟨ pullˡ tri-a ⟩
c″ ∘ b′
∎
where
open HomReasoning
-- essentially composition in the over category
.glueTrianglesʳ : ∀ {X X′ X″ Y} {a : X ⇒ Y} {b : X′ ⇒ X} {a′ : X′ ⇒ Y} {b′ : X″ ⇒ X′} {a″ : X″ ⇒ Y}
→ a ∘ b ≡ a′ → a′ ∘ b′ ≡ a″ → a ∘ (b ∘ b′) ≡ a″
glueTrianglesʳ {a = a} {b} {a′} {b′} {a″} a∘b≡a′ a′∘b′≡a″ =
begin
a ∘ (b ∘ b′)
↓⟨ pullˡ a∘b≡a′ ⟩
a′ ∘ b′
↓⟨ a′∘b′≡a″ ⟩
a″
∎
where open HomReasoning
-- essentially composition in the under category
.glueTrianglesˡ : ∀ {X Y Y′ Y″} {b : X ⇒ Y} {a : Y ⇒ Y′} {b′ : X ⇒ Y′} {a′ : Y′ ⇒ Y″} {b″ : X ⇒ Y″} → a′ ∘ b′ ≡ b″ → a ∘ b ≡ b′ → (a′ ∘ a) ∘ b ≡ b″
glueTrianglesˡ {b = b} {a} {b′} {a′} {b″} a′∘b′≡b″ a∘b≡b′ =
begin
(a′ ∘ a) ∘ b
↓⟨ pullʳ a∘b≡b′ ⟩
a′ ∘ b′
↓⟨ a′∘b′≡b″ ⟩
b″
∎
where open HomReasoning
module Cancellers {Y Y′ : Obj} {h : Y′ ⇒ Y} {i : Y ⇒ Y′} (inv : h ∘ i ≡ id) where
.cancelRight : ∀ {Z} {f : Y ⇒ Z} → (f ∘ h) ∘ i ≡ f
cancelRight {f = f} =
begin
(f ∘ h) ∘ i
↓⟨ pullʳ inv ⟩
f ∘ id
↓⟨ identityʳ ⟩
f
∎
where open HomReasoning
.cancelLeft : ∀ {X} {f : X ⇒ Y} → h ∘ (i ∘ f) ≡ f
cancelLeft {f = f} =
begin
h ∘ (i ∘ f)
↓⟨ pullˡ inv ⟩
id ∘ f
↓⟨ identityˡ ⟩
f
∎
where open HomReasoning
.cancelInner : ∀ {X Z} {f : Y ⇒ Z} {g : X ⇒ Y} → (f ∘ h) ∘ (i ∘ g) ≡ f ∘ g
cancelInner {f = f} {g} =
begin
(f ∘ h) ∘ (i ∘ g)
↓⟨ pullˡ cancelRight ⟩
f ∘ g
∎
where open HomReasoning
open Cancellers public
module Switch {X Y} (i : X ≅ Y) where
open _≅_ i
.switch-fgˡ : ∀ {W} {h : W ⇒ X} {k : W ⇒ Y} → (f ∘ h ≡ k) → (h ≡ g ∘ k)
switch-fgˡ {h = h} {k} pf =
begin
h
↑⟨ cancelLeft isoˡ ⟩
g ∘ (f ∘ h)
↓⟨ ∘-resp-≡ʳ pf ⟩
g ∘ k
∎
where open HomReasoning
.switch-gfˡ : ∀ {W} {h : W ⇒ Y} {k : W ⇒ X} → (g ∘ h ≡ k) → (h ≡ f ∘ k)
switch-gfˡ {h = h} {k} pf =
begin
h
↑⟨ cancelLeft isoʳ ⟩
f ∘ (g ∘ h)
↓⟨ ∘-resp-≡ʳ pf ⟩
f ∘ k
∎
where open HomReasoning
.switch-fgʳ : ∀ {W} {h : Y ⇒ W} {k : X ⇒ W} → (h ∘ f ≡ k) → (h ≡ k ∘ g)
switch-fgʳ {h = h} {k} pf =
begin
h
↑⟨ cancelRight isoʳ ⟩
(h ∘ f) ∘ g
↓⟨ ∘-resp-≡ˡ pf ⟩
k ∘ g
∎
where open HomReasoning
.switch-gfʳ : ∀ {W} {h : X ⇒ W} {k : Y ⇒ W} → (h ∘ g ≡ k) → (h ≡ k ∘ f)
switch-gfʳ {h = h} {k} pf =
begin
h
↑⟨ cancelRight isoˡ ⟩
(h ∘ g) ∘ f
↓⟨ ∘-resp-≡ˡ pf ⟩
k ∘ f
∎
where open HomReasoning
open Switch public
module Yon-Eda {o ℓ e} (C : Category o ℓ e) where
private module C = Category C
open C
open Equiv
record Yon (X Y : Obj) : Set (o ⊔ ℓ ⊔ e) where
field
arr : X ⇒ Y
fun : ∀ {W} (f : W ⇒ X) → (W ⇒ Y)
.ok : ∀ {W} (f : W ⇒ X) → fun f ≡ arr ∘ f
norm : X ⇒ Y
norm = fun id
.norm≡arr : norm ≡ arr
norm≡arr = trans (ok id) identityʳ
record _≡′_ {X Y : Obj} (f g : Yon X Y) : Set (o ⊔ ℓ ⊔ e) where
constructor yeq
field
arr-≡ : Yon.arr f ≡ Yon.arr g
open _≡′_ public using (arr-≡)
module _ {X Y} where
.Yon-refl : Reflexive (_≡′_ {X} {Y})
Yon-refl = yeq refl
.Yon-sym : Symmetric (_≡′_ {X} {Y})
Yon-sym = yeq © sym © arr-≡
.Yon-trans : Transitive (_≡′_ {X} {Y})
Yon-trans eq eq′ = yeq (trans (arr-≡ eq) (arr-≡ eq′))
Yon-id : ∀ {X} → Yon X X
Yon-id = record
{ arr = id
; fun = idᶠ
; ok = λ _ → sym identityˡ
}
Yon-inject : ∀ {X Y} → (X ⇒ Y) → Yon X Y
Yon-inject f = record { arr = f; fun = _∘_ f; ok = λ _ → refl }
Yon-compose : ∀ {X Y Z} → (Yon Y Z) → (Yon X Y) → (Yon X Z)
Yon-compose g f = record
{ arr = g.fun f.arr
; fun = g.fun © f.fun
; ok = λ h → trans (g.ok (f.fun h)) (trans (∘-resp-≡ʳ (f.ok h)) (trans (sym assoc) (sym (∘-resp-≡ˡ (g.ok f.arr)))))
}
where
module g = Yon g
module f = Yon f
.Yon-assoc : ∀ {X Y Z W} (f : Yon Z W) (g : Yon Y Z) (h : Yon X Y) → Yon-compose f (Yon-compose g h) ≣ Yon-compose (Yon-compose f g) h
Yon-assoc f g h = ≣-refl
.Yon-identityˡ : ∀ {X Y} (f : Yon X Y) → Yon-compose Yon-id f ≣ f
Yon-identityˡ f = ≣-refl
.Yon-identityʳ : ∀ {X Y} (f : Yon X Y) → Yon-compose f Yon-id ≡′ f
Yon-identityʳ f = yeq (Yon.norm≡arr f)
.Yon-compose-resp-≡′ : ∀ {X Y Z} {f f′ : Yon Y Z} {g g′ : Yon X Y}
→ f ≡′ f′ → g ≡′ g′
→ Yon-compose f g ≡′ Yon-compose f′ g′
Yon-compose-resp-≡′ {f = f} {f′} {g} {g′} f≡′f′ g≡′g′
= yeq (trans (Yon.ok f (Yon.arr g))
(trans (∘-resp-≡ (arr-≡ f≡′f′) (arr-≡ g≡′g′))
(sym (Yon.ok f′ (Yon.arr g′)))))
record Eda (X Y : Obj) : Set (o ⊔ ℓ ⊔ e) where
field
yon : Yon X Y
fun : ∀ {Z} (f : Yon Y Z) → Yon X Z
.ok : ∀ {Z} (f : Yon Y Z) → fun f ≡′ Yon-compose f yon
norm : Yon X Y
norm = fun Yon-id
open Yon yon public using (arr)
Eda-id : ∀ {X} → Eda X X
Eda-id = record
{ yon = Yon-id
; fun = idᶠ
; ok = yeq © sym © arr-≡ © Yon-identityʳ
}
Eda-inject : ∀ {X Y} → Yon X Y → Eda X Y
Eda-inject f = record { yon = f; fun = flip Yon-compose f; ok = λ _ → yeq refl }
Eda-compose : ∀ {X Y Z} → (Eda Y Z) → (Eda X Y) → (Eda X Z)
Eda-compose {X} {Y} {Z} g f = record
{ yon = f.fun g.yon
; fun = f.fun © g.fun
; ok = λ {W} h → Yon-trans {X} {W} {f.fun (g.fun h)} (f.ok (g.fun h))
(Yon-trans (Yon-compose-resp-≡′ (g.ok h) (Yon-refl {x = f.yon}))
(Yon-sym (Yon-compose-resp-≡′ (Yon-refl {x = h}) (f.ok g.yon))))
}
where
module g = Eda g
module f = Eda f
.Eda-assoc : ∀ {X Y Z W} (f : Eda Z W) (g : Eda Y Z) (h : Eda X Y) → Eda-compose f (Eda-compose g h) ≣ Eda-compose (Eda-compose f g) h
Eda-assoc f g h = ≣-refl
-- .Eda-identityˡ : ∀ {X Y} (f : Eda X Y) → Eda-compose Eda-id f ≣ f
-- Eda-identityˡ f = {!!}
.Eda-identityʳ : ∀ {X Y} (f : Eda X Y) → Eda-compose f Eda-id ≣ f
Eda-identityʳ f = ≣-refl
record NormReasoning {o ℓ e} (C : Category o ℓ e) (o′ ℓ′ : _) : Set (suc o′ ⊔ o ⊔ ℓ ⊔ e ⊔ suc ℓ′) where
private module C = Category C
field
U : Set o′
T : U -> C.Obj
_#⇒_ : U -> U -> Set ℓ′
eval : ∀ {A B} -> A #⇒ B -> T A C.⇒ T B
norm : ∀ {A B} -> A #⇒ B -> T A C.⇒ T B
.norm≡eval : ∀ {A B} (f : A #⇒ B) -> norm f C.≡ eval f
open C.Equiv
open C
infix 4 _IsRelatedTo_
infix 1 begin_
infixr 2 _≈⟨_⟩_ _↓⟨_⟩_ _↑⟨_⟩_ _↓≡⟨_⟩_ _↑≡⟨_⟩_ _↕_
infix 3 _∎
data _IsRelatedTo_ {X Y} (f g : _#⇒_ X Y) : Set e where
relTo : (f∼g : norm f ≡ norm g) → f IsRelatedTo g
.begin_ : ∀ {X Y} {f g : _#⇒_ X Y} → f IsRelatedTo g → eval f ≡ eval g
begin_ {f = f} {g} (relTo f∼g) = trans (sym (norm≡eval f)) (trans f∼g (norm≡eval g))
._↓⟨_⟩_ : ∀ {X Y} (f : _#⇒_ X Y) {g h} → (norm f ≡ norm g) → g IsRelatedTo h → f IsRelatedTo h
_ ↓⟨ f∼g ⟩ relTo g∼h = relTo (trans f∼g g∼h)
._↑⟨_⟩_ : ∀ {X Y} (f : _#⇒_ X Y) {g h} → (norm g ≡ norm f) → g IsRelatedTo h → f IsRelatedTo h
_ ↑⟨ g∼f ⟩ relTo g∼h = relTo (trans (sym g∼f) g∼h)
-- the syntax of the ancients, for compatibility
._≈⟨_⟩_ : ∀ {X Y} (f : _#⇒_ X Y) {g h} → (norm f ≡ norm g) → g IsRelatedTo h → f IsRelatedTo h
_ ≈⟨ f∼g ⟩ relTo g∼h = relTo (trans f∼g g∼h)
._↓≡⟨_⟩_ : ∀ {X Y} (f : _#⇒_ X Y) {g h} → eval f ≡ eval g → g IsRelatedTo h → f IsRelatedTo h
_↓≡⟨_⟩_ f {g} f∼g (relTo g∼h) = relTo (trans (norm≡eval f) (trans f∼g (trans (sym (norm≡eval g)) g∼h)))
._↑≡⟨_⟩_ : ∀ {X Y} (f : _#⇒_ X Y) {g h} → eval g ≡ eval f → g IsRelatedTo h → f IsRelatedTo h
_↑≡⟨_⟩_ f {g} g∼f (relTo g∼h) = relTo (trans (norm≡eval f) (trans (sym g∼f) (trans (sym (norm≡eval g)) g∼h)))
._↕_ : ∀ {X Y} (f : _#⇒_ X Y) {h} → f IsRelatedTo h → f IsRelatedTo h
_ ↕ f∼h = f∼h
._∎ : ∀ {X Y} (f : _#⇒_ X Y) → f IsRelatedTo f
_∎ _ = relTo refl
.by_ : ∀ {X Y} {f g h : X ⇒ Y} -> ((h ≡ h) -> f ≡ g) -> f ≡ g
by eq = eq refl
.computation : ∀ {X Y} (f g : X #⇒ Y) -> norm f ≡ norm g → eval f ≡ eval g
computation f g eq = begin f ↓⟨ eq ⟩ g ∎
module AUReasoning {o ℓ e} (C : Category o ℓ e) where
private module C = Category C
open C
open Equiv
{-
infix 4 _IsRelatedTo_
infix 2 _∎
infixr 2 _≈⟨_⟩_
infixr 2 _↓⟨_⟩_
infixr 2 _↑⟨_⟩_
infixr 2 _↓≡⟨_⟩_
infixr 2 _↑≡⟨_⟩_
infixr 2 _↕_
infix 1 begin_
-}
infixr 8 _∙_
open Yon-Eda C public
data Climb : Rel Obj (o ⊔ ℓ) where
ID : ∀ {X} → Climb X X
leaf : ∀ {X Y} → (X ⇒ Y) → Climb X Y
_branch_ : ∀ {X Y Z} (l : Climb Y Z) (r : Climb X Y) → Climb X Z
interp : ∀ {p} (P : Rel Obj p)
(f-id : ∀ {X} → P X X)
(f-leaf : ∀ {X Y} → X ⇒ Y → P X Y)
(f-branch : ∀ {X Y Z} → P Y Z → P X Y → P X Z)
→ ∀ {X Y} → Climb X Y → P X Y
interp P f-id f-leaf f-branch ID = f-id
interp P f-id f-leaf f-branch (leaf y) = f-leaf y
interp P f-id f-leaf f-branch (l branch r) = f-branch
(interp P f-id f-leaf f-branch l)
(interp P f-id f-leaf f-branch r)
eval : ∀ {X Y} → Climb X Y → X ⇒ Y
eval = interp _⇒_ id idᶠ _∘_
yeval : ∀ {X Y} → Climb X Y → Yon X Y
yeval = interp Yon Yon-id Yon-inject Yon-compose
.yarr : ∀ {X Y} → (t : Climb X Y) → Yon.arr (yeval t) ≡ eval t
yarr ID = refl
yarr (leaf y) = refl
yarr (t branch t1) = trans (Yon.ok (yeval t) (Yon.arr (yeval t1))) (∘-resp-≡ (yarr t) (yarr t1))
eeval : ∀ {X Y} → Climb X Y → Eda X Y
eeval = interp Eda Eda-id (Eda-inject © Yon-inject) Eda-compose
.eyon : ∀ {X Y} → (t : Climb X Y) → Eda.yon (eeval t) ≡′ yeval t
eyon ID = Yon-refl
eyon (leaf y) = Yon-refl
eyon (t branch t1) = Yon-trans (Eda.ok (eeval t1) (Eda.yon (eeval t)))
(Yon-compose-resp-≡′ (eyon t) (eyon t1))
.earr : ∀ {X Y} → (t : Climb X Y) → Eda.arr (eeval t) ≡ eval t
earr t = trans (arr-≡ (eyon t)) (yarr t)
yyeval : ∀ {X Y} → (t : Climb X Y) → (X ⇒ Y)
yyeval = Eda.arr © eeval
record ClimbBuilder (X Y : Obj) {t} (T : Set t) : Set (o ⊔ ℓ ⊔ t) where
field build : T → Climb X Y
instance
leafBuilder : ∀ {X Y} → ClimbBuilder X Y (X ⇒ Y)
leafBuilder = record { build = leaf }
idBuilder : ∀ {X Y} → ClimbBuilder X Y (Climb X Y)
idBuilder = record { build = idᶠ }
_∙_ : ∀ {X Y Z} {s} {S : Set s} {{Sb : ClimbBuilder Y Z S}} (f : S) {t} {T : Set t} {{Tb : ClimbBuilder X Y T}} (g : T) → Climb X Z
_∙_ {{Sb}} f {{Tb}} g = ClimbBuilder.build Sb f branch ClimbBuilder.build Tb g
aureasoning : NormReasoning C o (ℓ ⊔ o)
aureasoning = record
{ U = Obj
; T = λ A → A
; _#⇒_ = Climb
; eval = eval
; norm = yyeval
; norm≡eval = earr
}
open NormReasoning aureasoning public hiding (eval)
{-
data _IsRelatedTo_ {X Y} (f g : Climb X Y) : Set e where
relTo : (f∼g : yyeval f ≡ yyeval g) → f IsRelatedTo g
.begin_ : ∀ {X Y} {f g : Climb X Y} → f IsRelatedTo g → eval f ≡ eval g
begin_ {f = f} {g} (relTo f∼g) = trans (sym (earr f)) (trans f∼g (earr g))
._↓⟨_⟩_ : ∀ {X Y} (f : Climb X Y) {g h} → (yyeval f ≡ yyeval g) → g IsRelatedTo h → f IsRelatedTo h
_ ↓⟨ f∼g ⟩ relTo g∼h = relTo (trans f∼g g∼h)
._↑⟨_⟩_ : ∀ {X Y} (f : Climb X Y) {g h} → (yyeval g ≡ yyeval f) → g IsRelatedTo h → f IsRelatedTo h
_ ↑⟨ g∼f ⟩ relTo g∼h = relTo (trans (sym g∼f) g∼h)
-- the syntax of the ancients, for compatibility
._≈⟨_⟩_ : ∀ {X Y} (f : Climb X Y) {g h} → (yyeval f ≡ yyeval g) → g IsRelatedTo h → f IsRelatedTo h
_ ≈⟨ f∼g ⟩ relTo g∼h = relTo (trans f∼g g∼h)
._↓≡⟨_⟩_ : ∀ {X Y} (f : Climb X Y) {g h} → eval f ≡ eval g → g IsRelatedTo h → f IsRelatedTo h
_↓≡⟨_⟩_ f {g} f∼g (relTo g∼h) = relTo (trans (earr f) (trans f∼g (trans (sym (earr g)) g∼h)))
._↑≡⟨_⟩_ : ∀ {X Y} (f : Climb X Y) {g h} → eval g ≡ eval f → g IsRelatedTo h → f IsRelatedTo h
_↑≡⟨_⟩_ f {g} g∼f (relTo g∼h) = relTo (trans (earr f) (trans (sym g∼f) (trans (sym (earr g)) g∼h)))
{-
-- XXX i want this to work whenever the Edas are equal -- but that probably
-- requires Climb to be indexed by yyeval! oh, for cheap ornamentation.
._↕_ : ∀ {X Y} (f : Climb X Y) {h} → f IsRelatedTo h → f IsRelatedTo h
_ ↕ f∼h = f∼h
-}
._∎ : ∀ {X Y} (f : Climb X Y) → f IsRelatedTo f
_∎ _ = relTo refl
-}
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2011 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
(*i $Id: Even.v 14641 2011-11-06 11:59:10Z herbelin $ i*)
(** Here we define the predicates [even] and [odd] by mutual induction
and we prove the decidability and the exclusion of those predicates.
The main results about parity are proved in the module Div2. *)
Open Local Scope nat_scope.
Implicit Types m n : nat.
(** * Definition of [even] and [odd], and basic facts *)
Inductive even : nat -> Prop :=
| even_O : even 0
| even_S : forall n, odd n -> even (S n)
with odd : nat -> Prop :=
odd_S : forall n, even n -> odd (S n).
Hint Constructors even: arith.
Hint Constructors odd: arith.
Lemma even_or_odd : forall n, even n \/ odd n.
Proof.
induction n.
auto with arith.
elim IHn; auto with arith.
Qed.
Lemma even_odd_dec : forall n, {even n} + {odd n}.
Proof.
induction n.
auto with arith.
elim IHn; auto with arith.
Defined.
Lemma not_even_and_odd : forall n, even n -> odd n -> False.
Proof.
induction n.
intros even_0 odd_0. inversion odd_0.
intros even_Sn odd_Sn. inversion even_Sn. inversion odd_Sn. auto with arith.
Qed.
(** * Facts about [even] & [odd] wrt. [plus] *)
Lemma even_plus_split : forall n m,
(even (n + m) -> even n /\ even m \/ odd n /\ odd m)
with odd_plus_split : forall n m,
odd (n + m) -> odd n /\ even m \/ even n /\ odd m.
Proof.
intros. clear even_plus_split. destruct n; simpl in *.
auto with arith.
inversion_clear H;
apply odd_plus_split in H0 as [(H0,?)|(H0,?)]; auto with arith.
intros. clear odd_plus_split. destruct n; simpl in *.
auto with arith.
inversion_clear H;
apply even_plus_split in H0 as [(H0,?)|(H0,?)]; auto with arith.
Qed.
Lemma even_even_plus : forall n m, even n -> even m -> even (n + m)
with odd_plus_l : forall n m, odd n -> even m -> odd (n + m).
Proof.
intros n m [|] ?. trivial. apply even_S, odd_plus_l; trivial.
intros n m [] ?. apply odd_S, even_even_plus; trivial.
Qed.
Lemma odd_plus_r : forall n m, even n -> odd m -> odd (n + m)
with odd_even_plus : forall n m, odd n -> odd m -> even (n + m).
Proof.
intros n m [|] ?. trivial. apply odd_S, odd_even_plus; trivial.
intros n m [] ?. apply even_S, odd_plus_r; trivial.
Qed.
Lemma even_plus_aux : forall n m,
(odd (n + m) <-> odd n /\ even m \/ even n /\ odd m) /\
(even (n + m) <-> even n /\ even m \/ odd n /\ odd m).
Proof.
split; split; auto using odd_plus_split, even_plus_split.
intros [[]|[]]; auto using odd_plus_r, odd_plus_l.
intros [[]|[]]; auto using even_even_plus, odd_even_plus.
Qed.
Lemma even_plus_even_inv_r : forall n m, even (n + m) -> even n -> even m.
Proof.
intros n m H; destruct (even_plus_split n m) as [[]|[]]; auto.
intro; destruct (not_even_and_odd n); auto.
Qed.
Lemma even_plus_even_inv_l : forall n m, even (n + m) -> even m -> even n.
Proof.
intros n m H; destruct (even_plus_split n m) as [[]|[]]; auto.
intro; destruct (not_even_and_odd m); auto.
Qed.
Lemma even_plus_odd_inv_r : forall n m, even (n + m) -> odd n -> odd m.
Proof.
intros n m H; destruct (even_plus_split n m) as [[]|[]]; auto.
intro; destruct (not_even_and_odd n); auto.
Qed.
Lemma even_plus_odd_inv_l : forall n m, even (n + m) -> odd m -> odd n.
Proof.
intros n m H; destruct (even_plus_split n m) as [[]|[]]; auto.
intro; destruct (not_even_and_odd m); auto.
Qed.
Hint Resolve even_even_plus odd_even_plus: arith.
Lemma odd_plus_even_inv_l : forall n m, odd (n + m) -> odd m -> even n.
Proof.
intros n m H; destruct (odd_plus_split n m) as [[]|[]]; auto.
intro; destruct (not_even_and_odd m); auto.
Qed.
Lemma odd_plus_even_inv_r : forall n m, odd (n + m) -> odd n -> even m.
Proof.
intros n m H; destruct (odd_plus_split n m) as [[]|[]]; auto.
intro; destruct (not_even_and_odd n); auto.
Qed.
Lemma odd_plus_odd_inv_l : forall n m, odd (n + m) -> even m -> odd n.
Proof.
intros n m H; destruct (odd_plus_split n m) as [[]|[]]; auto.
intro; destruct (not_even_and_odd m); auto.
Qed.
Lemma odd_plus_odd_inv_r : forall n m, odd (n + m) -> even n -> odd m.
Proof.
intros n m H; destruct (odd_plus_split n m) as [[]|[]]; auto.
intro; destruct (not_even_and_odd n); auto.
Qed.
Hint Resolve odd_plus_l odd_plus_r: arith.
(** * Facts about [even] and [odd] wrt. [mult] *)
Lemma even_mult_aux :
forall n m,
(odd (n * m) <-> odd n /\ odd m) /\ (even (n * m) <-> even n \/ even m).
Proof.
intros n; elim n; simpl in |- *; auto with arith.
intros m; split; split; auto with arith.
intros H'; inversion H'.
intros H'; elim H'; auto.
intros n0 H' m; split; split; auto with arith.
intros H'0.
elim (even_plus_aux m (n0 * m)); intros H'3 H'4; case H'3; intros H'1 H'2;
case H'1; auto.
intros H'5; elim H'5; intros H'6 H'7; auto with arith.
split; auto with arith.
case (H' m).
intros H'8 H'9; case H'9.
intros H'10; case H'10; auto with arith.
intros H'11 H'12; case (not_even_and_odd m); auto with arith.
intros H'5; elim H'5; intros H'6 H'7; case (not_even_and_odd (n0 * m)); auto.
case (H' m).
intros H'8 H'9; case H'9; auto.
intros H'0; elim H'0; intros H'1 H'2; clear H'0.
elim (even_plus_aux m (n0 * m)); auto.
intros H'0 H'3.
elim H'0.
intros H'4 H'5; apply H'5; auto.
left; split; auto with arith.
case (H' m).
intros H'6 H'7; elim H'7.
intros H'8 H'9; apply H'9.
left.
inversion H'1; auto.
intros H'0.
elim (even_plus_aux m (n0 * m)); intros H'3 H'4; case H'4.
intros H'1 H'2.
elim H'1; auto.
intros H; case H; auto.
intros H'5; elim H'5; intros H'6 H'7; auto with arith.
left.
case (H' m).
intros H'8; elim H'8.
intros H'9; elim H'9; auto with arith.
intros H'0; elim H'0; intros H'1.
case (even_or_odd m); intros H'2.
apply even_even_plus; auto.
case (H' m).
intros H H0; case H0; auto.
apply odd_even_plus; auto.
inversion H'1; case (H' m); auto.
intros H1; case H1; auto.
apply even_even_plus; auto.
case (H' m).
intros H H0; case H0; auto.
Qed.
Lemma even_mult_l : forall n m, even n -> even (n * m).
Proof.
intros n m; case (even_mult_aux n m); auto.
intros H H0; case H0; auto.
Qed.
Lemma even_mult_r : forall n m, even m -> even (n * m).
Proof.
intros n m; case (even_mult_aux n m); auto.
intros H H0; case H0; auto.
Qed.
Hint Resolve even_mult_l even_mult_r: arith.
Lemma even_mult_inv_r : forall n m, even (n * m) -> odd n -> even m.
Proof.
intros n m H' H'0.
case (even_mult_aux n m).
intros H'1 H'2; elim H'2.
intros H'3; elim H'3; auto.
intros H; case (not_even_and_odd n); auto.
Qed.
Lemma even_mult_inv_l : forall n m, even (n * m) -> odd m -> even n.
Proof.
intros n m H' H'0.
case (even_mult_aux n m).
intros H'1 H'2; elim H'2.
intros H'3; elim H'3; auto.
intros H; case (not_even_and_odd m); auto.
Qed.
Lemma odd_mult : forall n m, odd n -> odd m -> odd (n * m).
Proof.
intros n m; case (even_mult_aux n m); intros H; case H; auto.
Qed.
Hint Resolve even_mult_l even_mult_r odd_mult: arith.
Lemma odd_mult_inv_l : forall n m, odd (n * m) -> odd n.
Proof.
intros n m H'.
case (even_mult_aux n m).
intros H'1 H'2; elim H'1.
intros H'3; elim H'3; auto.
Qed.
Lemma odd_mult_inv_r : forall n m, odd (n * m) -> odd m.
Proof.
intros n m H'.
case (even_mult_aux n m).
intros H'1 H'2; elim H'1.
intros H'3; elim H'3; auto.
Qed.
|
From Coq Require Import String List ZArith.
From compcert Require Import Coqlib Integers Floats AST Ctypes Cop Clight Clightdefs.
Import Clightdefs.ClightNotations.
Local Open Scope Z_scope.
Local Open Scope string_scope.
Local Open Scope clight_scope.
Module Info.
Definition version := "3.10".
Definition build_number := "".
Definition build_tag := "".
Definition build_branch := "".
Definition arch := "x86".
Definition model := "64".
Definition abi := "standard".
Definition bitsize := 64.
Definition big_endian := false.
Definition source_file := "stack.c".
Definition normalized := true.
End Info.
Definition ___builtin_annot : ident := 22%positive.
Definition ___builtin_annot_intval : ident := 23%positive.
Definition ___builtin_bswap : ident := 7%positive.
Definition ___builtin_bswap16 : ident := 9%positive.
Definition ___builtin_bswap32 : ident := 8%positive.
Definition ___builtin_bswap64 : ident := 6%positive.
Definition ___builtin_clz : ident := 10%positive.
Definition ___builtin_clzl : ident := 11%positive.
Definition ___builtin_clzll : ident := 12%positive.
Definition ___builtin_ctz : ident := 13%positive.
Definition ___builtin_ctzl : ident := 14%positive.
Definition ___builtin_ctzll : ident := 15%positive.
Definition ___builtin_debug : ident := 41%positive.
Definition ___builtin_expect : ident := 30%positive.
Definition ___builtin_fabs : ident := 16%positive.
Definition ___builtin_fabsf : ident := 17%positive.
Definition ___builtin_fmadd : ident := 33%positive.
Definition ___builtin_fmax : ident := 31%positive.
Definition ___builtin_fmin : ident := 32%positive.
Definition ___builtin_fmsub : ident := 34%positive.
Definition ___builtin_fnmadd : ident := 35%positive.
Definition ___builtin_fnmsub : ident := 36%positive.
Definition ___builtin_fsqrt : ident := 18%positive.
Definition ___builtin_membar : ident := 24%positive.
Definition ___builtin_memcpy_aligned : ident := 20%positive.
Definition ___builtin_read16_reversed : ident := 37%positive.
Definition ___builtin_read32_reversed : ident := 38%positive.
Definition ___builtin_sel : ident := 21%positive.
Definition ___builtin_sqrt : ident := 19%positive.
Definition ___builtin_unreachable : ident := 29%positive.
Definition ___builtin_va_arg : ident := 26%positive.
Definition ___builtin_va_copy : ident := 27%positive.
Definition ___builtin_va_end : ident := 28%positive.
Definition ___builtin_va_start : ident := 25%positive.
Definition ___builtin_write16_reversed : ident := 39%positive.
Definition ___builtin_write32_reversed : ident := 40%positive.
Definition ___compcert_i64_dtos : ident := 62%positive.
Definition ___compcert_i64_dtou : ident := 63%positive.
Definition ___compcert_i64_sar : ident := 74%positive.
Definition ___compcert_i64_sdiv : ident := 68%positive.
Definition ___compcert_i64_shl : ident := 72%positive.
Definition ___compcert_i64_shr : ident := 73%positive.
Definition ___compcert_i64_smod : ident := 70%positive.
Definition ___compcert_i64_smulh : ident := 75%positive.
Definition ___compcert_i64_stod : ident := 64%positive.
Definition ___compcert_i64_stof : ident := 66%positive.
Definition ___compcert_i64_udiv : ident := 69%positive.
Definition ___compcert_i64_umod : ident := 71%positive.
Definition ___compcert_i64_umulh : ident := 76%positive.
Definition ___compcert_i64_utod : ident := 65%positive.
Definition ___compcert_i64_utof : ident := 67%positive.
Definition ___compcert_va_composite : ident := 61%positive.
Definition ___compcert_va_float64 : ident := 60%positive.
Definition ___compcert_va_int32 : ident := 58%positive.
Definition ___compcert_va_int64 : ident := 59%positive.
Definition _cons : ident := 3%positive.
Definition _exit : ident := 44%positive.
Definition _free : ident := 43%positive.
Definition _i : ident := 47%positive.
Definition _main : ident := 57%positive.
Definition _malloc : ident := 42%positive.
Definition _n : ident := 52%positive.
Definition _newstack : ident := 46%positive.
Definition _next : ident := 2%positive.
Definition _p : ident := 45%positive.
Definition _pop : ident := 50%positive.
Definition _pop_and_add : ident := 56%positive.
Definition _push : ident := 49%positive.
Definition _push_increasing : ident := 53%positive.
Definition _q : ident := 48%positive.
Definition _s : ident := 55%positive.
Definition _st : ident := 51%positive.
Definition _stack : ident := 5%positive.
Definition _t : ident := 54%positive.
Definition _top : ident := 4%positive.
Definition _value : ident := 1%positive.
Definition _t'1 : ident := 77%positive.
Definition _t'2 : ident := 78%positive.
Definition f_newstack := {|
fn_return := (tptr (Tstruct _stack noattr));
fn_callconv := cc_default;
fn_params := nil;
fn_vars := nil;
fn_temps := ((_p, (tptr (Tstruct _stack noattr))) ::
(_t'1, (tptr tvoid)) :: nil);
fn_body :=
(Ssequence
(Ssequence
(Scall (Some _t'1)
(Evar _malloc (Tfunction (Tcons tulong Tnil) (tptr tvoid) cc_default))
((Esizeof (Tstruct _stack noattr) tulong) :: nil))
(Sset _p
(Ecast (Etempvar _t'1 (tptr tvoid)) (tptr (Tstruct _stack noattr)))))
(Ssequence
(Sifthenelse (Eunop Onotbool (Etempvar _p (tptr (Tstruct _stack noattr)))
tint)
(Scall None (Evar _exit (Tfunction (Tcons tint Tnil) tvoid cc_default))
((Econst_int (Int.repr 1) tint) :: nil))
Sskip)
(Ssequence
(Sassign
(Efield
(Ederef (Etempvar _p (tptr (Tstruct _stack noattr)))
(Tstruct _stack noattr)) _top (tptr (Tstruct _cons noattr)))
(Ecast (Econst_int (Int.repr 0) tint) (tptr tvoid)))
(Sreturn (Some (Etempvar _p (tptr (Tstruct _stack noattr))))))))
|}.
Definition f_push := {|
fn_return := tvoid;
fn_callconv := cc_default;
fn_params := ((_p, (tptr (Tstruct _stack noattr))) :: (_i, tint) :: nil);
fn_vars := nil;
fn_temps := ((_q, (tptr (Tstruct _cons noattr))) :: (_t'1, (tptr tvoid)) ::
(_t'2, (tptr (Tstruct _cons noattr))) :: nil);
fn_body :=
(Ssequence
(Ssequence
(Scall (Some _t'1)
(Evar _malloc (Tfunction (Tcons tulong Tnil) (tptr tvoid) cc_default))
((Esizeof (Tstruct _cons noattr) tulong) :: nil))
(Sset _q
(Ecast (Etempvar _t'1 (tptr tvoid)) (tptr (Tstruct _cons noattr)))))
(Ssequence
(Sifthenelse (Eunop Onotbool (Etempvar _q (tptr (Tstruct _cons noattr)))
tint)
(Scall None (Evar _exit (Tfunction (Tcons tint Tnil) tvoid cc_default))
((Econst_int (Int.repr 1) tint) :: nil))
Sskip)
(Ssequence
(Sassign
(Efield
(Ederef (Etempvar _q (tptr (Tstruct _cons noattr)))
(Tstruct _cons noattr)) _value tint) (Etempvar _i tint))
(Ssequence
(Ssequence
(Sset _t'2
(Efield
(Ederef (Etempvar _p (tptr (Tstruct _stack noattr)))
(Tstruct _stack noattr)) _top (tptr (Tstruct _cons noattr))))
(Sassign
(Efield
(Ederef (Etempvar _q (tptr (Tstruct _cons noattr)))
(Tstruct _cons noattr)) _next (tptr (Tstruct _cons noattr)))
(Etempvar _t'2 (tptr (Tstruct _cons noattr)))))
(Sassign
(Efield
(Ederef (Etempvar _p (tptr (Tstruct _stack noattr)))
(Tstruct _stack noattr)) _top (tptr (Tstruct _cons noattr)))
(Etempvar _q (tptr (Tstruct _cons noattr))))))))
|}.
Definition f_pop := {|
fn_return := tint;
fn_callconv := cc_default;
fn_params := ((_p, (tptr (Tstruct _stack noattr))) :: nil);
fn_vars := nil;
fn_temps := ((_q, (tptr (Tstruct _cons noattr))) :: (_i, tint) ::
(_t'1, (tptr (Tstruct _cons noattr))) :: nil);
fn_body :=
(Ssequence
(Sset _q
(Efield
(Ederef (Etempvar _p (tptr (Tstruct _stack noattr)))
(Tstruct _stack noattr)) _top (tptr (Tstruct _cons noattr))))
(Ssequence
(Ssequence
(Sset _t'1
(Efield
(Ederef (Etempvar _q (tptr (Tstruct _cons noattr)))
(Tstruct _cons noattr)) _next (tptr (Tstruct _cons noattr))))
(Sassign
(Efield
(Ederef (Etempvar _p (tptr (Tstruct _stack noattr)))
(Tstruct _stack noattr)) _top (tptr (Tstruct _cons noattr)))
(Etempvar _t'1 (tptr (Tstruct _cons noattr)))))
(Ssequence
(Sset _i
(Efield
(Ederef (Etempvar _q (tptr (Tstruct _cons noattr)))
(Tstruct _cons noattr)) _value tint))
(Ssequence
(Scall None
(Evar _free (Tfunction (Tcons (tptr tvoid) Tnil) tvoid cc_default))
((Etempvar _q (tptr (Tstruct _cons noattr))) :: nil))
(Sreturn (Some (Etempvar _i tint)))))))
|}.
Definition f_push_increasing := {|
fn_return := tvoid;
fn_callconv := cc_default;
fn_params := ((_st, (tptr (Tstruct _stack noattr))) :: (_n, tint) :: nil);
fn_vars := nil;
fn_temps := ((_i, tint) :: nil);
fn_body :=
(Ssequence
(Sset _i (Econst_int (Int.repr 0) tint))
(Swhile
(Ebinop Olt (Etempvar _i tint) (Etempvar _n tint) tint)
(Ssequence
(Sset _i
(Ebinop Oadd (Etempvar _i tint) (Econst_int (Int.repr 1) tint) tint))
(Scall None
(Evar _push (Tfunction
(Tcons (tptr (Tstruct _stack noattr))
(Tcons tint Tnil)) tvoid cc_default))
((Etempvar _st (tptr (Tstruct _stack noattr))) ::
(Etempvar _i tint) :: nil)))))
|}.
Definition f_pop_and_add := {|
fn_return := tint;
fn_callconv := cc_default;
fn_params := ((_st, (tptr (Tstruct _stack noattr))) :: (_n, tint) :: nil);
fn_vars := nil;
fn_temps := ((_i, tint) :: (_t, tint) :: (_s, tint) :: (_t'1, tint) :: nil);
fn_body :=
(Ssequence
(Sset _i (Econst_int (Int.repr 0) tint))
(Ssequence
(Sset _s (Econst_int (Int.repr 0) tint))
(Ssequence
(Swhile
(Ebinop Olt (Etempvar _i tint) (Etempvar _n tint) tint)
(Ssequence
(Ssequence
(Scall (Some _t'1)
(Evar _pop (Tfunction
(Tcons (tptr (Tstruct _stack noattr)) Tnil) tint
cc_default))
((Etempvar _st (tptr (Tstruct _stack noattr))) :: nil))
(Sset _t (Etempvar _t'1 tint)))
(Ssequence
(Sset _s
(Ebinop Oadd (Etempvar _s tint) (Etempvar _t tint) tint))
(Sset _i
(Ebinop Oadd (Etempvar _i tint) (Econst_int (Int.repr 1) tint)
tint)))))
(Sreturn (Some (Etempvar _s tint))))))
|}.
Definition f_main := {|
fn_return := tint;
fn_callconv := cc_default;
fn_params := nil;
fn_vars := nil;
fn_temps := ((_st, (tptr (Tstruct _stack noattr))) :: (_i, tint) ::
(_t, tint) :: (_s, tint) :: (_t'2, tint) ::
(_t'1, (tptr (Tstruct _stack noattr))) :: nil);
fn_body :=
(Ssequence
(Ssequence
(Ssequence
(Scall (Some _t'1)
(Evar _newstack (Tfunction Tnil (tptr (Tstruct _stack noattr))
cc_default)) nil)
(Sset _st (Etempvar _t'1 (tptr (Tstruct _stack noattr)))))
(Ssequence
(Scall None
(Evar _push_increasing (Tfunction
(Tcons (tptr (Tstruct _stack noattr))
(Tcons tint Tnil)) tvoid cc_default))
((Etempvar _st (tptr (Tstruct _stack noattr))) ::
(Econst_int (Int.repr 10) tint) :: nil))
(Ssequence
(Ssequence
(Scall (Some _t'2)
(Evar _pop_and_add (Tfunction
(Tcons (tptr (Tstruct _stack noattr))
(Tcons tint Tnil)) tint cc_default))
((Etempvar _st (tptr (Tstruct _stack noattr))) ::
(Econst_int (Int.repr 10) tint) :: nil))
(Sset _s (Etempvar _t'2 tint)))
(Sreturn (Some (Etempvar _s tint))))))
(Sreturn (Some (Econst_int (Int.repr 0) tint))))
|}.
Definition composites : list composite_definition :=
(Composite _cons Struct
(Member_plain _value tint ::
Member_plain _next (tptr (Tstruct _cons noattr)) :: nil)
noattr ::
Composite _stack Struct
(Member_plain _top (tptr (Tstruct _cons noattr)) :: nil)
noattr :: nil).
Definition global_definitions : list (ident * globdef fundef type) :=
((___compcert_va_int32,
Gfun(External (EF_runtime "__compcert_va_int32"
(mksignature (AST.Tlong :: nil) AST.Tint cc_default))
(Tcons (tptr tvoid) Tnil) tuint cc_default)) ::
(___compcert_va_int64,
Gfun(External (EF_runtime "__compcert_va_int64"
(mksignature (AST.Tlong :: nil) AST.Tlong cc_default))
(Tcons (tptr tvoid) Tnil) tulong cc_default)) ::
(___compcert_va_float64,
Gfun(External (EF_runtime "__compcert_va_float64"
(mksignature (AST.Tlong :: nil) AST.Tfloat cc_default))
(Tcons (tptr tvoid) Tnil) tdouble cc_default)) ::
(___compcert_va_composite,
Gfun(External (EF_runtime "__compcert_va_composite"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong
cc_default)) (Tcons (tptr tvoid) (Tcons tulong Tnil))
(tptr tvoid) cc_default)) ::
(___compcert_i64_dtos,
Gfun(External (EF_runtime "__compcert_i64_dtos"
(mksignature (AST.Tfloat :: nil) AST.Tlong cc_default))
(Tcons tdouble Tnil) tlong cc_default)) ::
(___compcert_i64_dtou,
Gfun(External (EF_runtime "__compcert_i64_dtou"
(mksignature (AST.Tfloat :: nil) AST.Tlong cc_default))
(Tcons tdouble Tnil) tulong cc_default)) ::
(___compcert_i64_stod,
Gfun(External (EF_runtime "__compcert_i64_stod"
(mksignature (AST.Tlong :: nil) AST.Tfloat cc_default))
(Tcons tlong Tnil) tdouble cc_default)) ::
(___compcert_i64_utod,
Gfun(External (EF_runtime "__compcert_i64_utod"
(mksignature (AST.Tlong :: nil) AST.Tfloat cc_default))
(Tcons tulong Tnil) tdouble cc_default)) ::
(___compcert_i64_stof,
Gfun(External (EF_runtime "__compcert_i64_stof"
(mksignature (AST.Tlong :: nil) AST.Tsingle cc_default))
(Tcons tlong Tnil) tfloat cc_default)) ::
(___compcert_i64_utof,
Gfun(External (EF_runtime "__compcert_i64_utof"
(mksignature (AST.Tlong :: nil) AST.Tsingle cc_default))
(Tcons tulong Tnil) tfloat cc_default)) ::
(___compcert_i64_sdiv,
Gfun(External (EF_runtime "__compcert_i64_sdiv"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong
cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong
cc_default)) ::
(___compcert_i64_udiv,
Gfun(External (EF_runtime "__compcert_i64_udiv"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong
cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong
cc_default)) ::
(___compcert_i64_smod,
Gfun(External (EF_runtime "__compcert_i64_smod"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong
cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong
cc_default)) ::
(___compcert_i64_umod,
Gfun(External (EF_runtime "__compcert_i64_umod"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong
cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong
cc_default)) ::
(___compcert_i64_shl,
Gfun(External (EF_runtime "__compcert_i64_shl"
(mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tlong
cc_default)) (Tcons tlong (Tcons tint Tnil)) tlong
cc_default)) ::
(___compcert_i64_shr,
Gfun(External (EF_runtime "__compcert_i64_shr"
(mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tlong
cc_default)) (Tcons tulong (Tcons tint Tnil)) tulong
cc_default)) ::
(___compcert_i64_sar,
Gfun(External (EF_runtime "__compcert_i64_sar"
(mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tlong
cc_default)) (Tcons tlong (Tcons tint Tnil)) tlong
cc_default)) ::
(___compcert_i64_smulh,
Gfun(External (EF_runtime "__compcert_i64_smulh"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong
cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong
cc_default)) ::
(___compcert_i64_umulh,
Gfun(External (EF_runtime "__compcert_i64_umulh"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong
cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong
cc_default)) ::
(___builtin_bswap64,
Gfun(External (EF_builtin "__builtin_bswap64"
(mksignature (AST.Tlong :: nil) AST.Tlong cc_default))
(Tcons tulong Tnil) tulong cc_default)) ::
(___builtin_bswap,
Gfun(External (EF_builtin "__builtin_bswap"
(mksignature (AST.Tint :: nil) AST.Tint cc_default))
(Tcons tuint Tnil) tuint cc_default)) ::
(___builtin_bswap32,
Gfun(External (EF_builtin "__builtin_bswap32"
(mksignature (AST.Tint :: nil) AST.Tint cc_default))
(Tcons tuint Tnil) tuint cc_default)) ::
(___builtin_bswap16,
Gfun(External (EF_builtin "__builtin_bswap16"
(mksignature (AST.Tint :: nil) AST.Tint16unsigned
cc_default)) (Tcons tushort Tnil) tushort cc_default)) ::
(___builtin_clz,
Gfun(External (EF_builtin "__builtin_clz"
(mksignature (AST.Tint :: nil) AST.Tint cc_default))
(Tcons tuint Tnil) tint cc_default)) ::
(___builtin_clzl,
Gfun(External (EF_builtin "__builtin_clzl"
(mksignature (AST.Tlong :: nil) AST.Tint cc_default))
(Tcons tulong Tnil) tint cc_default)) ::
(___builtin_clzll,
Gfun(External (EF_builtin "__builtin_clzll"
(mksignature (AST.Tlong :: nil) AST.Tint cc_default))
(Tcons tulong Tnil) tint cc_default)) ::
(___builtin_ctz,
Gfun(External (EF_builtin "__builtin_ctz"
(mksignature (AST.Tint :: nil) AST.Tint cc_default))
(Tcons tuint Tnil) tint cc_default)) ::
(___builtin_ctzl,
Gfun(External (EF_builtin "__builtin_ctzl"
(mksignature (AST.Tlong :: nil) AST.Tint cc_default))
(Tcons tulong Tnil) tint cc_default)) ::
(___builtin_ctzll,
Gfun(External (EF_builtin "__builtin_ctzll"
(mksignature (AST.Tlong :: nil) AST.Tint cc_default))
(Tcons tulong Tnil) tint cc_default)) ::
(___builtin_fabs,
Gfun(External (EF_builtin "__builtin_fabs"
(mksignature (AST.Tfloat :: nil) AST.Tfloat cc_default))
(Tcons tdouble Tnil) tdouble cc_default)) ::
(___builtin_fabsf,
Gfun(External (EF_builtin "__builtin_fabsf"
(mksignature (AST.Tsingle :: nil) AST.Tsingle cc_default))
(Tcons tfloat Tnil) tfloat cc_default)) ::
(___builtin_fsqrt,
Gfun(External (EF_builtin "__builtin_fsqrt"
(mksignature (AST.Tfloat :: nil) AST.Tfloat cc_default))
(Tcons tdouble Tnil) tdouble cc_default)) ::
(___builtin_sqrt,
Gfun(External (EF_builtin "__builtin_sqrt"
(mksignature (AST.Tfloat :: nil) AST.Tfloat cc_default))
(Tcons tdouble Tnil) tdouble cc_default)) ::
(___builtin_memcpy_aligned,
Gfun(External (EF_builtin "__builtin_memcpy_aligned"
(mksignature
(AST.Tlong :: AST.Tlong :: AST.Tlong :: AST.Tlong ::
nil) AST.Tvoid cc_default))
(Tcons (tptr tvoid)
(Tcons (tptr tvoid) (Tcons tulong (Tcons tulong Tnil)))) tvoid
cc_default)) ::
(___builtin_sel,
Gfun(External (EF_builtin "__builtin_sel"
(mksignature (AST.Tint :: nil) AST.Tvoid
{|cc_vararg:=(Some 1); cc_unproto:=false; cc_structret:=false|}))
(Tcons tbool Tnil) tvoid
{|cc_vararg:=(Some 1); cc_unproto:=false; cc_structret:=false|})) ::
(___builtin_annot,
Gfun(External (EF_builtin "__builtin_annot"
(mksignature (AST.Tlong :: nil) AST.Tvoid
{|cc_vararg:=(Some 1); cc_unproto:=false; cc_structret:=false|}))
(Tcons (tptr tschar) Tnil) tvoid
{|cc_vararg:=(Some 1); cc_unproto:=false; cc_structret:=false|})) ::
(___builtin_annot_intval,
Gfun(External (EF_builtin "__builtin_annot_intval"
(mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tint
cc_default)) (Tcons (tptr tschar) (Tcons tint Tnil))
tint cc_default)) ::
(___builtin_membar,
Gfun(External (EF_builtin "__builtin_membar"
(mksignature nil AST.Tvoid cc_default)) Tnil tvoid
cc_default)) ::
(___builtin_va_start,
Gfun(External (EF_builtin "__builtin_va_start"
(mksignature (AST.Tlong :: nil) AST.Tvoid cc_default))
(Tcons (tptr tvoid) Tnil) tvoid cc_default)) ::
(___builtin_va_arg,
Gfun(External (EF_builtin "__builtin_va_arg"
(mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tvoid
cc_default)) (Tcons (tptr tvoid) (Tcons tuint Tnil))
tvoid cc_default)) ::
(___builtin_va_copy,
Gfun(External (EF_builtin "__builtin_va_copy"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tvoid
cc_default))
(Tcons (tptr tvoid) (Tcons (tptr tvoid) Tnil)) tvoid cc_default)) ::
(___builtin_va_end,
Gfun(External (EF_builtin "__builtin_va_end"
(mksignature (AST.Tlong :: nil) AST.Tvoid cc_default))
(Tcons (tptr tvoid) Tnil) tvoid cc_default)) ::
(___builtin_unreachable,
Gfun(External (EF_builtin "__builtin_unreachable"
(mksignature nil AST.Tvoid cc_default)) Tnil tvoid
cc_default)) ::
(___builtin_expect,
Gfun(External (EF_builtin "__builtin_expect"
(mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong
cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong
cc_default)) ::
(___builtin_fmax,
Gfun(External (EF_builtin "__builtin_fmax"
(mksignature (AST.Tfloat :: AST.Tfloat :: nil) AST.Tfloat
cc_default)) (Tcons tdouble (Tcons tdouble Tnil))
tdouble cc_default)) ::
(___builtin_fmin,
Gfun(External (EF_builtin "__builtin_fmin"
(mksignature (AST.Tfloat :: AST.Tfloat :: nil) AST.Tfloat
cc_default)) (Tcons tdouble (Tcons tdouble Tnil))
tdouble cc_default)) ::
(___builtin_fmadd,
Gfun(External (EF_builtin "__builtin_fmadd"
(mksignature
(AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil)
AST.Tfloat cc_default))
(Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble
cc_default)) ::
(___builtin_fmsub,
Gfun(External (EF_builtin "__builtin_fmsub"
(mksignature
(AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil)
AST.Tfloat cc_default))
(Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble
cc_default)) ::
(___builtin_fnmadd,
Gfun(External (EF_builtin "__builtin_fnmadd"
(mksignature
(AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil)
AST.Tfloat cc_default))
(Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble
cc_default)) ::
(___builtin_fnmsub,
Gfun(External (EF_builtin "__builtin_fnmsub"
(mksignature
(AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil)
AST.Tfloat cc_default))
(Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble
cc_default)) ::
(___builtin_read16_reversed,
Gfun(External (EF_builtin "__builtin_read16_reversed"
(mksignature (AST.Tlong :: nil) AST.Tint16unsigned
cc_default)) (Tcons (tptr tushort) Tnil) tushort
cc_default)) ::
(___builtin_read32_reversed,
Gfun(External (EF_builtin "__builtin_read32_reversed"
(mksignature (AST.Tlong :: nil) AST.Tint cc_default))
(Tcons (tptr tuint) Tnil) tuint cc_default)) ::
(___builtin_write16_reversed,
Gfun(External (EF_builtin "__builtin_write16_reversed"
(mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tvoid
cc_default)) (Tcons (tptr tushort) (Tcons tushort Tnil))
tvoid cc_default)) ::
(___builtin_write32_reversed,
Gfun(External (EF_builtin "__builtin_write32_reversed"
(mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tvoid
cc_default)) (Tcons (tptr tuint) (Tcons tuint Tnil))
tvoid cc_default)) ::
(___builtin_debug,
Gfun(External (EF_external "__builtin_debug"
(mksignature (AST.Tint :: nil) AST.Tvoid
{|cc_vararg:=(Some 1); cc_unproto:=false; cc_structret:=false|}))
(Tcons tint Tnil) tvoid
{|cc_vararg:=(Some 1); cc_unproto:=false; cc_structret:=false|})) ::
(_malloc,
Gfun(External EF_malloc (Tcons tulong Tnil) (tptr tvoid) cc_default)) ::
(_free, Gfun(External EF_free (Tcons (tptr tvoid) Tnil) tvoid cc_default)) ::
(_exit,
Gfun(External (EF_external "exit"
(mksignature (AST.Tint :: nil) AST.Tvoid cc_default))
(Tcons tint Tnil) tvoid cc_default)) ::
(_newstack, Gfun(Internal f_newstack)) :: (_push, Gfun(Internal f_push)) ::
(_pop, Gfun(Internal f_pop)) ::
(_push_increasing, Gfun(Internal f_push_increasing)) ::
(_pop_and_add, Gfun(Internal f_pop_and_add)) ::
(_main, Gfun(Internal f_main)) :: nil).
Definition public_idents : list ident :=
(_main :: _pop_and_add :: _push_increasing :: _pop :: _push :: _newstack ::
_exit :: _free :: _malloc :: ___builtin_debug ::
___builtin_write32_reversed :: ___builtin_write16_reversed ::
___builtin_read32_reversed :: ___builtin_read16_reversed ::
___builtin_fnmsub :: ___builtin_fnmadd :: ___builtin_fmsub ::
___builtin_fmadd :: ___builtin_fmin :: ___builtin_fmax ::
___builtin_expect :: ___builtin_unreachable :: ___builtin_va_end ::
___builtin_va_copy :: ___builtin_va_arg :: ___builtin_va_start ::
___builtin_membar :: ___builtin_annot_intval :: ___builtin_annot ::
___builtin_sel :: ___builtin_memcpy_aligned :: ___builtin_sqrt ::
___builtin_fsqrt :: ___builtin_fabsf :: ___builtin_fabs ::
___builtin_ctzll :: ___builtin_ctzl :: ___builtin_ctz :: ___builtin_clzll ::
___builtin_clzl :: ___builtin_clz :: ___builtin_bswap16 ::
___builtin_bswap32 :: ___builtin_bswap :: ___builtin_bswap64 ::
___compcert_i64_umulh :: ___compcert_i64_smulh :: ___compcert_i64_sar ::
___compcert_i64_shr :: ___compcert_i64_shl :: ___compcert_i64_umod ::
___compcert_i64_smod :: ___compcert_i64_udiv :: ___compcert_i64_sdiv ::
___compcert_i64_utof :: ___compcert_i64_stof :: ___compcert_i64_utod ::
___compcert_i64_stod :: ___compcert_i64_dtou :: ___compcert_i64_dtos ::
___compcert_va_composite :: ___compcert_va_float64 ::
___compcert_va_int64 :: ___compcert_va_int32 :: nil).
Definition prog : Clight.program :=
mkprogram composites global_definitions public_idents _main Logic.I.
|
State Before: M : Type ?u.264602
A✝ : Type ?u.264605
B : Type ?u.264608
A : Type u_1
inst✝ : CommMonoid A
a b c : A
⊢ c ∈ closure {a, b} ↔ ∃ m n, a ^ m * b ^ n = c
State After: M : Type ?u.264602
A✝ : Type ?u.264605
B : Type ?u.264608
A : Type u_1
inst✝ : CommMonoid A
a b c : A
⊢ (∃ y, y ∈ closure {a} ∧ ∃ z, z ∈ closure {b} ∧ y * z = c) ↔ ∃ m n, a ^ m * b ^ n = c
Tactic: rw [← Set.singleton_union, Submonoid.closure_union, mem_sup]
State Before: M : Type ?u.264602
A✝ : Type ?u.264605
B : Type ?u.264608
A : Type u_1
inst✝ : CommMonoid A
a b c : A
⊢ (∃ y, y ∈ closure {a} ∧ ∃ z, z ∈ closure {b} ∧ y * z = c) ↔ ∃ m n, a ^ m * b ^ n = c
State After: no goals
Tactic: simp_rw [mem_closure_singleton, exists_exists_eq_and] |
(* Title: Countable Ordinals
Author: Brian Huffman, 2005
Maintainer: Brian Huffman <brianh at cse.ogi.edu>
*)
header {* Inverse Functions *}
theory OrdinalInverse
imports OrdinalArith
begin
lemma (in normal) oInv_ex:
"F 0 \<le> a \<Longrightarrow> \<exists>q. F q \<le> a \<and> a < F (oSuc q)"
apply (subgoal_tac "\<forall>z. a < F z \<longrightarrow> (\<exists>q<z. F q \<le> a \<and> a < F (oSuc q))")
apply (drule_tac x="oSuc a" in spec, drule mp)
apply (rule_tac y="F a" in order_le_less_trans)
apply (rule increasing)
apply (rule strict_monoD[OF less_oSuc])
apply force
apply (rule allI, rule_tac a=z in oLimit_induct)
apply simp
apply clarsimp
apply (case_tac "a < F x")
apply clarsimp
apply (rule_tac x=q in exI)
apply (simp add: order_less_trans[OF _ less_oSuc])
apply (rule_tac x=x in exI, simp)
apply (clarsimp simp add: oLimit)
apply (drule less_oLimitD, clarify)
apply (drule spec, drule mp, assumption)
apply (clarify, rule_tac x=q in exI)
apply (simp add: order_less_le_trans[OF _ le_oLimit])
done
lemma oInv_uniq:
"\<lbrakk>mono (F::ordinal \<Rightarrow> ordinal);
F x \<le> a \<and> a < F (oSuc x); F y \<le> a \<and> a < F (oSuc y)\<rbrakk>
\<Longrightarrow> x = y"
apply clarify
apply (rule_tac x=x and y=y in linorder_cases)
apply (subgoal_tac "a < a", simp)
apply (erule_tac y="F (oSuc x)" in order_less_le_trans)
apply (rule_tac y="F y" in order_trans)
apply (erule monoD, erule oSuc_leI)
apply assumption
apply assumption
apply (subgoal_tac "a < a", simp)
apply (erule_tac y="F (oSuc y)" in order_less_le_trans)
apply (rule_tac y="F x" in order_trans)
apply (erule monoD, erule oSuc_leI)
apply assumption
done
definition
oInv :: "(ordinal \<Rightarrow> ordinal) \<Rightarrow> ordinal \<Rightarrow> ordinal" where
"oInv F a = (if F 0 \<le> a then (THE x. F x \<le> a \<and> a < F (oSuc x)) else 0)"
lemma (in normal) oInv_bounds:
"F 0 \<le> a \<Longrightarrow> F (oInv F a) \<le> a \<and> a < F (oSuc (oInv F a))"
apply (simp add: oInv_def)
apply (rule theI')
apply (rule ex_ex1I)
apply (simp add: oInv_ex)
apply (simp add: oInv_uniq[OF mono])
done
lemma (in normal) oInv_bound1:
"F 0 \<le> a \<Longrightarrow> F (oInv F a) \<le> a"
by (rule oInv_bounds[THEN conjunct1])
lemma (in normal) oInv_bound2:
"a < F (oSuc (oInv F a))"
apply (case_tac "F 0 \<le> a")
apply (simp only: oInv_bounds[THEN conjunct2])
apply (simp add: oInv_def, simp add: linorder_not_le)
apply (erule order_less_le_trans)
apply (simp add: cancel_le)
done
lemma (in normal) oInv_equality:
"\<lbrakk>F x \<le> a; a < F (oSuc x)\<rbrakk> \<Longrightarrow> oInv F a = x"
apply (subgoal_tac "F 0 \<le> a")
apply (simp add: oInv_def)
apply (rule the_equality)
apply simp
apply (simp add: oInv_uniq[OF mono])
apply (rule_tac y="F x" in order_trans)
apply (simp add: cancel_le)
apply assumption
done
lemma (in normal) oInv_inverse: "oInv F (F x) = x"
by (rule oInv_equality, simp_all add: cancel_less)
lemma (in normal) oInv_equality': "a = F x \<Longrightarrow> oInv F a = x"
by (simp add: oInv_inverse)
lemma (in normal) oInv_eq_0: "a \<le> F 0 \<Longrightarrow> oInv F a = 0"
apply (case_tac "F 0 \<le> a")
apply (rule oInv_equality')
apply (simp only: order_antisym)
apply (simp add: oInv_def)
done
lemma (in normal) oInv_less:
"\<lbrakk>F 0 \<le> a; a < F z\<rbrakk> \<Longrightarrow> oInv F a < z"
apply (subst cancel_less[symmetric])
apply (simp only: order_le_less_trans[OF oInv_bound1])
done
lemma (in normal) le_oInv:
"F z \<le> a \<Longrightarrow> z \<le> oInv F a"
apply (subst less_oSuc_eq_le[symmetric])
apply (subst cancel_less[symmetric])
apply (erule order_le_less_trans)
apply (rule oInv_bound2)
done
lemma (in normal) less_oInvD:
"x < oInv F a \<Longrightarrow> F (oSuc x) \<le> a"
apply (case_tac "F 0 \<le> a")
apply (rule order_trans[OF _ oInv_bound1])
apply (simp add: cancel_le oSuc_leI)
apply assumption
apply (simp add: oInv_def)
done
lemma (in normal) oInv_le:
"a < F (oSuc x) \<Longrightarrow> oInv F a \<le> x"
apply (erule contrapos_pp)
apply (simp add: linorder_not_less linorder_not_le less_oInvD)
done
lemma (in normal) mono_oInv: "mono (oInv F)"
proof
fix x y :: ordinal
assume "x \<le> y"
show "oInv F x \<le> oInv F y"
proof (rule linorder_le_cases [of x "F 0"])
assume "x \<le> F 0" then show ?thesis by (simp add: oInv_eq_0)
next
assume "F 0 \<le> x" show ?thesis
by (rule le_oInv, simp only: `x \<le> y` `F 0 \<le> x` order_trans [OF oInv_bound1])
qed
qed
lemma (in normal) oInv_decreasing:
"F 0 \<le> x \<Longrightarrow> oInv F x \<le> x"
apply (subst cancel_le[symmetric])
apply (rule_tac y=x in order_trans)
apply (erule oInv_bound1)
apply (rule increasing)
done
subsection {* Division *}
instantiation ordinal :: "{Divides.div}"
begin
definition
div_ordinal_def:
"x div y = (if 0 < y then oInv (op * y) x else 0)"
definition
mod_ordinal_def:
"x mod y = ((x::ordinal) - y * (x div y))"
instance ..
end
lemma ordinal_divI: "\<lbrakk>x = y * q + r; r < y\<rbrakk> \<Longrightarrow> x div y = (q::ordinal)"
apply (simp add: div_ordinal_def, safe)
apply (simp add: normal.oInv_equality[OF normal_times])
done
lemma ordinal_times_div_le: "y * (x div y) \<le> (x::ordinal)"
apply (simp add: div_ordinal_def, safe)
apply (erule normal.oInv_bound1[OF normal_times])
apply simp
done
lemma ordinal_less_times_div_plus:
"0 < y \<Longrightarrow> x < y * (x div y) + (y::ordinal)"
apply (simp add: div_ordinal_def)
apply (subst ordinal_times_oSuc[symmetric])
apply (erule normal.oInv_bound2[OF normal_times])
done
lemma ordinal_modI: "\<lbrakk>x = y * q + r; r < y\<rbrakk> \<Longrightarrow> x mod y = (r::ordinal)"
apply (unfold mod_ordinal_def)
apply (rule ordinal_minusI)
apply (simp add: ordinal_divI)
done
lemma ordinal_mod_less: "0 < y \<Longrightarrow> x mod y < (y::ordinal)"
apply (unfold mod_ordinal_def)
apply (simp add: ordinal_times_div_le)
apply (simp add: div_ordinal_def)
apply (subst ordinal_times_oSuc[symmetric])
apply (erule normal.oInv_bound2[OF normal_times])
done
lemma ordinal_div_plus_mod: "y * (x div y) + (x mod y) = (x::ordinal)"
apply (simp add: mod_ordinal_def)
apply (rule ordinal_plus_minus2)
apply (rule ordinal_times_div_le)
done
lemma ordinal_div_less: "x < y * z \<Longrightarrow> x div y < (z::ordinal)"
apply (auto simp add: div_ordinal_def)
apply (simp add: normal.oInv_less[OF normal_times])
done
lemma ordinal_le_div: "\<lbrakk>0 < y; y * z \<le> x\<rbrakk> \<Longrightarrow> (z::ordinal) \<le> x div y"
apply (auto simp add: div_ordinal_def)
apply (simp add: normal.le_oInv[OF normal_times])
done
lemma ordinal_div_monoL: "x \<le> x' \<Longrightarrow> x div y \<le> x' div (y::ordinal)"
by (erule monoD[OF ordinal_mono_div])
lemma ordinal_div_decreasing: "(x::ordinal) div y \<le> x"
apply (auto simp add: div_ordinal_def)
apply (simp add: normal.oInv_decreasing[OF normal_times])
done
lemma ordinal_div_0: "x div 0 = (0::ordinal)"
by (simp add: div_ordinal_def)
lemma ordinal_mod_0: "x mod 0 = (x::ordinal)"
by (simp add: mod_ordinal_def)
subsection {* Derived properties of division *}
lemma ordinal_div_1 [simp]: "x div oSuc 0 = x"
by (rule_tac r=0 in ordinal_divI, simp_all)
lemma ordinal_mod_1 [simp]: "x mod oSuc 0 = 0"
by (rule_tac q=x in ordinal_modI, simp_all)
lemma ordinal_div_self [simp]: "0 < x \<Longrightarrow> x div x = (1::ordinal)"
by (rule_tac r=0 in ordinal_divI, simp_all)
lemma ordinal_mod_self [simp]: "x mod x = (0::ordinal)"
apply (case_tac "x=0", simp add: ordinal_mod_0, simp)
apply (rule_tac q=1 in ordinal_modI, simp_all)
done
lemma ordinal_div_greater [simp]: "x < y \<Longrightarrow> x div y = (0::ordinal)"
by (rule_tac r=x in ordinal_divI, simp_all)
lemma ordinal_mod_greater [simp]: "x < y \<Longrightarrow> x mod y = (x::ordinal)"
by (rule_tac q=0 in ordinal_modI, simp_all)
lemma ordinal_0_div [simp]: "0 div x = (0::ordinal)"
by (case_tac "x=0", simp add: ordinal_div_0, simp)
lemma ordinal_0_mod [simp]: "0 mod x = (0::ordinal)"
by (case_tac "x=0", simp add: ordinal_mod_0, simp)
lemma ordinal_1_dvd [simp]: "oSuc 0 dvd x"
by (rule_tac k=x in dvdI, simp)
lemma ordinal_dvd_mod: "y dvd x = (x mod y = (0::ordinal))"
apply safe
apply (erule dvdE)
apply (case_tac "y=0", simp add: ordinal_mod_0, simp)
apply (rule ordinal_modI, simp, simp)
apply (cut_tac x=x and y=y in ordinal_div_plus_mod)
apply (rule_tac k="x div y" in dvdI, simp)
done
lemma ordinal_dvd_times_div:
"y dvd x \<Longrightarrow> y * (x div y) = (x::ordinal)"
apply (cut_tac x=x and y=y in ordinal_div_plus_mod)
apply (simp add: ordinal_dvd_mod)
done
lemma ordinal_dvd_oLimit: "\<forall>n. x dvd f n \<Longrightarrow> x dvd oLimit f"
apply (rule_tac k="oLimit (\<lambda>n. f n div x)" in dvdI)
apply (simp add: ordinal_dvd_times_div)
done
subsection {* Logarithms *}
definition
oLog :: "ordinal \<Rightarrow> ordinal \<Rightarrow> ordinal" where
"oLog b = (\<lambda>x. if 1 < b then oInv (op ** b) x else 0)"
lemma ordinal_oLogI:
"\<lbrakk>b ** y \<le> x; x < b ** y * b\<rbrakk> \<Longrightarrow> oLog b x = y"
apply (rule_tac x=1 and y=b in linorder_cases, simp_all)
apply (simp add: oLog_def normal.oInv_equality[OF normal_exp])
done
lemma ordinal_exp_oLog_le:
"\<lbrakk>0 < x; oSuc 0 < b\<rbrakk> \<Longrightarrow> b ** (oLog b x) \<le> x"
apply (simp add: oLog_def)
apply (frule_tac order_less_trans[OF less_oSuc])
apply (simp add: normal.oInv_bound1[OF normal_exp] oSuc_leI)
done
lemma ordinal_less_exp_oLog:
"oSuc 0 < b \<Longrightarrow> x < b ** (oLog b x) * b"
apply (simp add: oLog_def)
apply (subst ordinal_exp_oSuc[symmetric])
apply (erule normal.oInv_bound2[OF normal_exp])
done
lemma ordinal_oLog_less:
"\<lbrakk>0 < x; oSuc 0 < b; x < b ** y\<rbrakk> \<Longrightarrow> oLog b x < y"
apply (simp add: oLog_def)
apply (frule_tac order_less_trans[OF less_oSuc])
apply (simp add: normal.oInv_less[OF normal_exp] oSuc_leI)
done
lemma ordinal_le_oLog:
"\<lbrakk>oSuc 0 < b; b ** y \<le> x\<rbrakk> \<Longrightarrow> y \<le> oLog b x"
by (simp add: oLog_def normal.le_oInv[OF normal_exp])
lemma ordinal_oLogI2:
"\<lbrakk>oSuc 0 < b; x = b ** y * q + r; 0 < q; q < b; r < b ** y\<rbrakk> \<Longrightarrow> oLog b x = y"
apply simp
apply (rule ordinal_oLogI)
apply (rule_tac y="b ** y * q" in order_trans, simp, simp)
apply (rule order_less_le_trans)
apply (erule ordinal_plus_strict_monoR)
apply (subst ordinal_times_oSuc[symmetric])
apply (rule ordinal_times_monoR)
apply (erule oSuc_leI)
done
lemma ordinal_div_exp_oLog_less:
"oSuc 0 < b \<Longrightarrow> x div (b ** oLog b x) < b"
apply (frule_tac order_less_trans[OF less_oSuc])
apply (case_tac "x=0", simp_all)
apply (rule ordinal_div_less)
by (rule ordinal_less_exp_oLog)
lemma ordinal_oLog_base_0: "oLog 0 x = 0"
by (simp add: oLog_def)
lemma ordinal_oLog_base_1: "oLog (oSuc 0) x = 0"
by (simp add: oLog_def)
lemma ordinal_oLog_exp: "oSuc 0 < b \<Longrightarrow> oLog b (b ** x) = x"
by (simp add: oLog_def normal.oInv_inverse[OF normal_exp])
lemma ordinal_oLog_self: "oSuc 0 < b \<Longrightarrow> oLog b b = oSuc 0"
apply (subgoal_tac "oLog b (b ** oSuc 0) = oSuc 0")
apply (simp only: ordinal_exp_1)
apply (simp only: ordinal_oLog_exp)
done
lemma ordinal_mono_oLog: "mono (oLog b)"
apply (case_tac "oSuc 0 < b")
apply (simp add: oLog_def normal.mono_oInv[OF normal_exp])
apply (simp add: oLog_def monoI)
done
lemma ordinal_oLog_monoR: "x \<le> y \<Longrightarrow> oLog b x \<le> oLog b y"
by (erule monoD[OF ordinal_mono_oLog])
lemma ordinal_oLog_decreasing: "oLog b x \<le> x"
apply (rule_tac x=b and y=1 in linorder_cases)
apply (simp add: ordinal_oLog_base_0)
apply (simp add: ordinal_oLog_base_1)
apply (case_tac "x = 0")
apply (simp add: ordinal_oLog_0)
apply (simp add: oLog_def)
apply (simp add: normal.oInv_decreasing[OF normal_exp] oSuc_leI)
done
end
|
[STATEMENT]
lemma range_adjoint_isometry:
assumes "isometry U"
shows "U* *\<^sub>S top = top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. U* *\<^sub>S \<top> = \<top>
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. U* *\<^sub>S \<top> = \<top>
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
isometry U
[PROOF STEP]
have "top = U* *\<^sub>S U *\<^sub>S top"
[PROOF STATE]
proof (prove)
using this:
isometry U
goal (1 subgoal):
1. \<top> = U* *\<^sub>S U *\<^sub>S \<top>
[PROOF STEP]
by (simp add: cblinfun_assoc_left(2))
[PROOF STATE]
proof (state)
this:
\<top> = U* *\<^sub>S U *\<^sub>S \<top>
goal (1 subgoal):
1. U* *\<^sub>S \<top> = \<top>
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<top> = U* *\<^sub>S U *\<^sub>S \<top>
goal (1 subgoal):
1. U* *\<^sub>S \<top> = \<top>
[PROOF STEP]
have "\<dots> \<le> U* *\<^sub>S top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. U* *\<^sub>S U *\<^sub>S \<top> \<le> U* *\<^sub>S \<top>
[PROOF STEP]
by (simp add: cblinfun_image_mono)
[PROOF STATE]
proof (state)
this:
U* *\<^sub>S U *\<^sub>S \<top> \<le> U* *\<^sub>S \<top>
goal (1 subgoal):
1. U* *\<^sub>S \<top> = \<top>
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
\<top> \<le> U* *\<^sub>S \<top>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<top> \<le> U* *\<^sub>S \<top>
goal (1 subgoal):
1. U* *\<^sub>S \<top> = \<top>
[PROOF STEP]
using top.extremum_unique
[PROOF STATE]
proof (prove)
using this:
\<top> \<le> U* *\<^sub>S \<top>
(\<top> \<le> ?a) = (?a = \<top>)
goal (1 subgoal):
1. U* *\<^sub>S \<top> = \<top>
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
U* *\<^sub>S \<top> = \<top>
goal:
No subgoals!
[PROOF STEP]
qed |
/**
* @file modalbeamformer.h
* @brief Beamforming in the spherical harmonics domain.
* @author Kenichi Kumatani
*/
#ifndef MODALBEAMFORMER_H
#define MODALBEAMFORMER_H
#include <stdio.h>
#include <assert.h>
#include <float.h>
#include <gsl/gsl_block.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_complex.h>
#include <gsl/gsl_complex_math.h>
#include <gsl/gsl_fft_complex.h>
#include <common/refcount.h>
#include "common/jexception.h"
#include "stream/stream.h"
//#include "stream/pyStream.h"
#include "beamformer/spectralinfoarray.h"
#include "modulated/modulated.h"
#include "beamformer/beamformer.h"
// ----- definition for class `ModeAmplitudeCalculator' -----
//
gsl_complex modeAmplitude(int order, double ka);
class ModeAmplitudeCalculator {
public:
ModeAmplitudeCalculator(int order, float minKa=0.01, float maxKa=20, float wid=0.01);
~ModeAmplitudeCalculator();
gsl_vector_complex *get() const { return mode_amplitude_; }
private:
gsl_vector_complex *mode_amplitude_;
float minKa_;
float maxKa_;
float wid_;
};
typedef refcount_ptr<ModeAmplitudeCalculator> ModeAmplitudeCalculatorPtr;
// ----- definition for class `EigenBeamformer' -----
//
/**
@class EigenBeamformer
@brief This beamformer is implemented based on Meyer and Elko's ICASSP paper.
In Boaz Rafaely's paper, this method is referred to as the phase-mode beamformer
@usage
1) construct this object, bf = EigenBeamformer(...)
2) set the radius of the spherical array with bf.set_eigenmike_geometry() or bf.set_array_geometry(..).
3) set the look direction with bf.set_look_direction(..)
4) process each block with bf.next() until it hits the end (a usage sketch follows this class definition)
*/
class EigenBeamformer : public SubbandDS {
public:
EigenBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "EigenBeamformer");
~EigenBeamformer();
virtual const gsl_vector_complex* next(int frame_no = -5);
virtual void reset();
virtual unsigned dim() const { return dim_;}
void set_sigma2(float sigma2){ sigma2_ = sigma2; }
void set_weight_gain(float wgain){ wgain_ = wgain; }
void set_eigenmike_geometry();
void set_array_geometry(double a, gsl_vector *theta_s, gsl_vector *phi_s);
virtual void set_look_direction(double theta, double phi);
const gsl_matrix_complex *mode_amplitudes();
const gsl_vector *array_geometry(int type); // type==0 -> theta, type==1 -> phi
virtual gsl_matrix *beampattern(unsigned fbinX, double theta = 0, double phi = 0,
double minTheta=-M_PI, double maxTheta=M_PI,
double minPhi=-M_PI, double maxPhi=M_PI,
double widthTheta=0.1, double widthPhi=0.1 );
/**
@brief obtain the spherical transformation coefficients at each frame
@return spherical harmonics transformation coefficients at the current frame
*/
virtual SnapShotArrayPtr snapshot_array() const { return(st_snapshot_array_); }
virtual SnapShotArrayPtr snapshot_array2() const { return(snapshot_array_); }
const gsl_matrix_complex *blocking_matrix(unsigned fbinX, unsigned unitX=0 ) const {
return (bfweight_vec_[unitX]->B())[fbinX];
}
#ifdef ENABLE_LEGACY_BTK_API
void setSigma2(float sigma2){ set_sigma2(sigma2); }
void setWeightGain(float wgain){ set_weight_gain(wgain); }
void setEigenMikeGeometry(){ set_eigenmike_geometry(); }
void setArrayGeometry(double a, gsl_vector *theta_s, gsl_vector *phi_s){ set_array_geometry(a, theta_s, phi_s); }
virtual void setLookDirection(double theta, double phi){ set_look_direction(theta, phi); }
const gsl_matrix_complex *getModeAmplitudes(){ return mode_amplitudes(); }
const gsl_vector *getArrayGeometry(int type){ return array_geometry(type);}
virtual gsl_matrix *getBeamPattern( unsigned fbinX, double theta = 0, double phi = 0,
double minTheta=-M_PI, double maxTheta=M_PI,
double minPhi=-M_PI, double maxPhi=M_PI,
double widthTheta=0.1, double widthPhi=0.1 ){
return beampattern(fbinX, theta, phi, minTheta, maxTheta, minPhi, maxPhi, widthTheta, widthPhi);
}
virtual SnapShotArrayPtr getSnapShotArray(){ return snapshot_array(); }
virtual SnapShotArrayPtr getSnapShotArray2(){ return snapshot_array2(); }
const gsl_matrix_complex *getBlockingMatrix(unsigned fbinX, unsigned unitX=0){
return blocking_matrix(fbinX, unitX);
}
#endif
protected:
virtual void calc_weights_( unsigned fbinX, gsl_vector_complex *weights );
virtual bool calc_spherical_harmonics_at_each_position_( gsl_vector *theta_s, gsl_vector *phi_s ); // need to be tested!!
virtual bool calc_steering_unit_( int unitX=0, bool isGSC=false );
virtual bool alloc_steering_unit_( int unitN=1 );
void alloc_image_( bool flag=true );
bool calc_mode_amplitudes_();
unsigned samplerate_;
unsigned NC_;
unsigned maxOrder_;
unsigned dim_; // the number of the spherical harmonics transformation coefficients
bool weights_normalized_;
gsl_matrix_complex *mode_mplitudes_; // [maxOrder_] the mode amplitudes.
gsl_vector_complex *F_; // Spherical Transform coefficients [dim_]
gsl_vector_complex **sh_s_; // Conjugate of spherical harmonics at each sensor position [dim_][nChan]: Y_n^{m*}
SnapShotArrayPtr st_snapshot_array_; // for compatibility with a post-filtering object
double theta_; // look direction
double phi_; // look direction
double a_; // the radius of the rigid sphere.
gsl_vector *theta_s_; // sensor positions
gsl_vector *phi_s_; // sensor positions
gsl_matrix *beampattern_;
gsl_vector *WNG_; // white noise gain
float wgain_; // weight gain (see set_weight_gain())
float sigma2_; // diagonal loading (see set_sigma2())
};
typedef Inherit<EigenBeamformer, SubbandDSPtr> EigenBeamformerPtr;
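/*
A minimal usage sketch for the EigenBeamformer class above, following steps 1)-4) of its
@usage note. Illustrative only: the sample rate, FFT length, look direction and the loop
condition are assumed values, and the subband analysis channels feeding the beamformer are
assumed to have been attached beforehand (as for any SubbandDS-derived object).

  EigenBeamformerPtr bf = new EigenBeamformer(16000, 512);  // 1) 16 kHz, 512-point FFT (assumed)
  bf->set_eigenmike_geometry();                             // 2) or bf->set_array_geometry(a, theta_s, phi_s)
  bf->set_look_direction(M_PI / 2.0, 0.0);                  // 3) (theta, phi) of the look direction in radians (assumed)
  while (have_more_blocks) {                                // 4) 'have_more_blocks' is a placeholder condition
    const gsl_vector_complex* Y = bf->next();               //    beamformed subband snapshot for the current block
    // ... pass Y to a synthesis filter bank or a post-filter ...
  }
*/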
// ----- definition for class DOAEstimatorSRPEB' -----
//
/**
@class DOAEstimatorSRPEB
@brief estimate the direction of arrival based on the maximum steered response power
@usage
1) construct this object, doaEstimator = DOAEstimatorSRPEB(...)
2) set the radius of the spherical array with doaEstimator.set_eigenmike_geometry() or doaEstimator.set_array_geometry(..).
3) process each block, doaEstimator.next()
4) get the N-best hypotheses at the current instantaneous frame through doaEstimator.nbest_doas()
5) do doaEstimator.getFinalNBestHypotheses() after a static segment is processed.
You can then obtain the averaged N-best hypotheses of the static segment with doaEstimator.nbest_doas() (a sketch of this flow follows the class definition).
*/
class DOAEstimatorSRPEB :
public DOAEstimatorSRPBase, public EigenBeamformer {
public:
DOAEstimatorSRPEB( unsigned nBest, unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "DirectionEstimatorSRPBase");
~DOAEstimatorSRPEB();
const gsl_vector_complex* next(int frame_no = -5);
void reset();
protected:
virtual void calc_steering_unit_table_();
virtual float calc_response_power_( unsigned uttX );
};
typedef Inherit<DOAEstimatorSRPEB, EigenBeamformerPtr> DOAEstimatorSRPEBPtr;
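/*
A sketch of the DOA-estimation flow described in the @usage note above. Illustrative only:
nbest_doas() and getFinalNBestHypotheses() are inherited from DOAEstimatorSRPBase (declared
elsewhere), and the constructor arguments, block loop and segment handling are assumptions.

  DOAEstimatorSRPEBPtr doa = new DOAEstimatorSRPEB(2, 16000, 512);  // 1) keep the 2 best hypotheses (assumed)
  doa->set_eigenmike_geometry();                                    // 2) spherical array geometry
  while (block_available) {                                         // 3) 'block_available' is a placeholder
    doa->next();                                                    //    steer over the search grid and accumulate power
    // 4) instantaneous N-best estimates: doa->nbest_doas()
  }
  // 5) after a static segment: doa->getFinalNBestHypotheses(), then the averaged doa->nbest_doas()
*/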
// ----- definition for class `SphericalDSBeamformer' -----
//
/**
@class SphericalDSBeamformer
@usage
1) construct this object, mb = SphericalDSBeamformer(...)
2) set the radius of the spherical array with mb.set_array_geometry(..) or mb.set_eigenmike_geometry()
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
@note this implementation is based on Boaz Rafaely's letter,
"Phase-Mode versus Delay-and-Sum Spherical Microphone Array", IEEE Signal Processing Letters, vol. 12, Oct. 2005.
*/
class SphericalDSBeamformer : public EigenBeamformer {
public:
SphericalDSBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "SphericalDSBeamformer");
~SphericalDSBeamformer();
virtual gsl_vector *calc_wng();
#ifdef ENABLE_LEGACY_BTK_API
virtual gsl_vector *calcWNG(){ return calc_wng(); }
#endif
protected:
virtual void calc_weights_( unsigned fbinX, gsl_vector_complex *weights );
virtual bool calc_spherical_harmonics_at_each_position_( gsl_vector *theta_s, gsl_vector *phi_s );
};
typedef Inherit<SphericalDSBeamformer, EigenBeamformerPtr> SphericalDSBeamformerPtr;
// ----- definition for class `DualSphericalDSBeamformer' -----
//
/**
@class DualSphericalDSBeamformer
@usage
1) construct this object, mb = DualSphericalDSBeamformer(...)
2) set the radius of the spherical array with mb.set_array_geometry(..) or mb.set_eigenmike_geometry()
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
@note In addition to SphericalDSBeamformer, this class has an object of the *normal* D&S beamformer
*/
class DualSphericalDSBeamformer : public SphericalDSBeamformer {
public:
DualSphericalDSBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "SphericalDSBeamformer");
~DualSphericalDSBeamformer();
virtual SnapShotArrayPtr snapshot_array() const { return snapshot_array_; }
virtual BeamformerWeights* beamformer_weight_object(unsigned srcX=0) const {
return bfweight_vec2_[srcX];
}
#ifdef ENABLE_LEGACY_BTK_API
virtual SnapShotArrayPtr getSnapShotArray(){ return snapshot_array(); }
#endif
protected:
virtual void calc_weights_( unsigned fbinX, gsl_vector_complex *weights );
virtual bool alloc_steering_unit_( int unitN=1 );
vector<BeamformerWeights *> bfweight_vec2_; // weights of a normal D&S beamformer.
};
typedef Inherit<DualSphericalDSBeamformer, SphericalDSBeamformerPtr> DualSphericalDSBeamformerPtr;
// ----- definition for class DOAEstimatorSRPPSphDSB' -----
//
class DOAEstimatorSRPSphDSB : public DOAEstimatorSRPBase, public SphericalDSBeamformer {
public:
DOAEstimatorSRPSphDSB( unsigned nBest, unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "DOAEstimatorSRPPSphDSB" );
~DOAEstimatorSRPSphDSB();
const gsl_vector_complex* next(int frame_no = -5);
void reset();
protected:
virtual void calc_steering_unit_table_();
virtual float calc_response_power_( unsigned uttX );
};
typedef Inherit<DOAEstimatorSRPSphDSB, SphericalDSBeamformerPtr> DOAEstimatorSRPSphDSBPtr;
// ----- definition for class `SphericalHWNCBeamformer' -----
//
/**
@class SphericalHWNCBeamformer
@usage
1) construct this object, mb = SphericalHWNCBeamformer(...)
2) set the radius of the spherical array with mb.set_array_geometry(..) or mb.set_eigenmike_geometry()
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
*/
class SphericalHWNCBeamformer : public EigenBeamformer {
public:
SphericalHWNCBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, float ratio=1.0, const String& nm = "SphericalHWNCBeamformer");
~SphericalHWNCBeamformer();
virtual gsl_vector *calc_wng();
virtual void set_wng( double ratio){ ratio_=ratio; calc_wng();}
#ifdef ENABLE_LEGACY_BTK_API
gsl_vector *calcWNG(){ return calc_wng(); }
void setWNG( double ratio){ set_wng(ratio);}
#endif
protected:
virtual void calc_weights_( unsigned fbinX, gsl_vector_complex *weights );
protected:
float ratio_;
};
typedef Inherit<SphericalHWNCBeamformer, EigenBeamformerPtr> SphericalHWNCBeamformerPtr;
// ----- definition for class `SphericalGSCBeamformer' -----
//
/**
@class SphericalGSCBeamformer
@usage
1) construct this object, mb = SphericalGSCBeamformer(...)
2) set the radius of the spherical array with mb.set_array_geometry(..) or mb.set_eigenmike_geometry()
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
@note this implementation is based on Boaz Rafaely's letter,
"Phase-Mode versus Delay-and-Sum Spherical Microphone Array", IEEE Signal Processing Letters, vol. 12, Oct. 2005.
*/
class SphericalGSCBeamformer : public SphericalDSBeamformer {
public:
SphericalGSCBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "SphericalGSCBeamformer");
~SphericalGSCBeamformer();
virtual const gsl_vector_complex* next(int frame_no = -5);
virtual void reset();
void set_look_direction(double theta, double phi);
void set_active_weights_f(unsigned fbinX, const gsl_vector* packedWeight);
#ifdef ENABLE_LEGACY_BTK_API
void setLookDirection(double theta, double phi){ set_look_direction(theta, phi); }
void setActiveWeights_f( unsigned fbinX, const gsl_vector* packedWeight ){ set_active_weights_f(fbinX, packedWeight); }
#endif
};
typedef Inherit<SphericalGSCBeamformer, SphericalDSBeamformerPtr> SphericalGSCBeamformerPtr;
// ----- definition for class `SphericalHWNCGSCBeamformer' -----
//
/**
@class SphericalHWNCGSCBeamformer
@usage
1) construct this object, mb = SphericalHWNCGSCBeamformer(...)
2) set the radius of the spherical array with mb.set_array_geometry(..) or mb.set_eigenmike_geometry()
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
*/
class SphericalHWNCGSCBeamformer : public SphericalHWNCBeamformer {
public:
SphericalHWNCGSCBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, float ratio=1.0, const String& nm = "SphericalHWNCGSCBeamformer");
~SphericalHWNCGSCBeamformer();
virtual const gsl_vector_complex* next(int frame_no = -5);
virtual void reset();
void set_look_direction(double theta, double phi);
void set_active_weights_f(unsigned fbinX, const gsl_vector* packedWeight);
#ifdef ENABLE_LEGACY_BTK_API
void setLookDirection(double theta, double phi){ set_look_direction(theta, phi); }
void setActiveWeights_f( unsigned fbinX, const gsl_vector* packedWeight ){ set_active_weights_f(fbinX, packedWeight); }
#endif
};
typedef Inherit<SphericalHWNCGSCBeamformer, SphericalHWNCBeamformerPtr> SphericalHWNCGSCBeamformerPtr;
// ----- definition for class `DualSphericalGSCBeamformer' -----
//
/**
@class DualSphericalGSCBeamformer
@usage
1) construct this object, mb = DualSphericalGSCBeamformer(...)
2) set the radius of the spherical array with mb.set_array_geometry(..) or mb.set_eigenmike_geometry()
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
@note In addition to SphericalGSCBeamformer, this class has an object of the *normal* D&S beamformer
*/
class DualSphericalGSCBeamformer : public SphericalGSCBeamformer {
public:
DualSphericalGSCBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "DualSphericalGSCBeamformer");
~DualSphericalGSCBeamformer();
virtual SnapShotArrayPtr snapshot_array() const {return(snapshot_array_);}
virtual BeamformerWeights* beamformer_weight_object(unsigned srcX=0) const {
return bfweight_vec2_[srcX];
}
#ifdef ENABLE_LEGACY_BTK_API
virtual SnapShotArrayPtr getSnapShotArray(){return(snapshot_array_);}
#endif
protected:
virtual void calc_weights_( unsigned fbinX, gsl_vector_complex *weights );
virtual bool alloc_steering_unit_( int unitN=1 );
vector<BeamformerWeights *> bfweight_vec2_; // weights of a normal D&S beamformer.
};
typedef Inherit<DualSphericalGSCBeamformer, SphericalGSCBeamformerPtr> DualSphericalGSCBeamformerPtr;
// ----- definition for class `SphericalMOENBeamformer' -----
//
/**
@class SphericalMOENBeamformer
@usage
1) construct this object, mb = SphericalMOENBeamformer(...)
2) set the radius of the spherical array with mb.set_array_geometry(..) or mb.set_eigenmike_geometry()
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
@note this implementation is based on Z. Li and R. Duraiswami's letter,
"Flexible and Optimal Design of Spherical Microphone Arrays for Beamforming", IEEE Trans. SAP.
*/
class SphericalMOENBeamformer : public SphericalDSBeamformer {
public:
SphericalMOENBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "SphericalMOENBeamformer");
~SphericalMOENBeamformer();
virtual const gsl_vector_complex* next(int frame_no = -5);
virtual void reset();
void fix_terms(bool flag){ is_term_fixed_ = flag; }
void set_diagonal_looading(unsigned fbinX, float diagonalWeight);
virtual SnapShotArrayPtr snapshot_array() const { return snapshot_array_; }
virtual gsl_matrix *beampattern(unsigned fbinX, double theta = 0, double phi = 0,
double minTheta=-M_PI, double maxTheta=M_PI,
double minPhi=-M_PI, double maxPhi=M_PI,
double widthTheta=0.1, double widthPhi=0.1 );
#ifdef ENABLE_LEGACY_BTK_API
void fixTerms( bool flag ){ fix_terms(flag); }
void setLevelOfDiagonalLoading( unsigned fbinX, float diagonalWeight){ set_diagonal_looading(fbinX, diagonalWeight); }
virtual gsl_matrix *getBeamPattern( unsigned fbinX, double theta = 0, double phi = 0,
double minTheta=-M_PI, double maxTheta=M_PI,
double minPhi=-M_PI, double maxPhi=M_PI,
double widthTheta=0.1, double widthPhi=0.1 ){
return beampattern(fbinX, theta, phi, minTheta, maxTheta, minPhi, maxPhi, widthTheta, widthPhi);
}
#endif
protected:
virtual bool alloc_steering_unit_( int unitN=1 );
virtual void calc_weights_( unsigned fbinX, gsl_vector_complex *weights );
bool calc_moen_weights_( unsigned fbinX, gsl_vector_complex *weights, double dThreshold = 1.0E-8, bool calcInverseMatrix = true, unsigned unitX=0 );
private:
  // maxOrder_ == Neff in Li's paper.
  unsigned bf_order_; // N in Li's paper.
float CN_;
  gsl_matrix_complex** A_; /* A_[fftLen2+1][dim_][nChan]; Coefficients of the spherical harmonics expansion; See Eq. (31) & (32) */
gsl_matrix_complex** fixedW_; /* _fixedW[fftLen2+1][nChan][dim_]; [ A^H A + l^2 I ]^{-1} A^H */
gsl_vector_complex** BN_; // _BN[fftLen2+1][dim_]
float* diagonal_weights_;
bool is_term_fixed_;
float dthreshold_;
};
typedef Inherit<SphericalMOENBeamformer, SphericalDSBeamformerPtr> SphericalMOENBeamformerPtr;
// ----- definition for class `SphericalSpatialDSBeamformer' -----
//
/**
@class SphericalSpatialDSBeamformer
@usage
1) construct this object, mb = SphericalSpatialDSBeamformer(...)
   2) set the radius of the spherical array mb.set_array_geometry(..)
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
@note this implementation is based on Boaz Rafaely's letter,
"Phase-Mode versus Delay-and-Sum Spherical Microphone Array", IEEE Signal Processing Letters, vol. 12, Oct. 2005.
*/
class SphericalSpatialDSBeamformer : public SphericalDSBeamformer {
public:
SphericalSpatialDSBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, const String& nm = "SphericalSpatialDSBeamformer");
~SphericalSpatialDSBeamformer();
virtual const gsl_vector_complex* next(int frame_no = -5);
protected:
virtual void calc_weights_( unsigned fbinX, gsl_vector_complex *weights );
virtual bool alloc_steering_unit_( int unitN = 1 );
virtual bool calc_steering_unit_( int unitX = 0, bool isGSC = false );
};
typedef Inherit<SphericalSpatialDSBeamformer, SphericalDSBeamformerPtr> SphericalSpatialDSBeamformerPtr;
// ----- definition for class `SphericalSpatialHWNCBeamformer' -----
//
/**
@class SphericalSpatialHWNCBeamformer
@usage
   1) construct this object, mb = SphericalSpatialHWNCBeamformer(...)
   2) set the radius of the spherical array mb.set_array_geometry(..)
3) set the look direction mb.set_look_direction()
4) process each block mb.next()
*/
class SphericalSpatialHWNCBeamformer : public SphericalHWNCBeamformer {
public:
SphericalSpatialHWNCBeamformer( unsigned sampleRate, unsigned fftLen = 512, bool halfBandShift = false, unsigned NC=1, unsigned maxOrder=4, bool normalizeWeight=false, float ratio=1.0, const String& nm = "SphericalHWNCBeamformer");
~SphericalSpatialHWNCBeamformer();
virtual const gsl_vector_complex* next(int frame_no = -5);
protected:
virtual void calc_weights_( unsigned fbinX, gsl_vector_complex *weights );
virtual bool alloc_steering_unit_( int unitN = 1 );
virtual bool calc_steering_unit_( int unitX = 0, bool isGSC = false );
private:
gsl_matrix_complex *calc_diffuse_noise_model_( unsigned fbinX );
gsl_matrix_complex **SigmaSI_; // SigmaSI_[fftLen/2+1][chanN]
double dthreshold_;
};
typedef Inherit<SphericalSpatialHWNCBeamformer, SphericalHWNCBeamformerPtr> SphericalSpatialHWNCBeamformerPtr;
#endif
|
[STATEMENT]
lemma is_unit_const_poly_iff: "[:c:] dvd 1 \<longleftrightarrow> c dvd 1"
for c :: "'a::{comm_semiring_1,semiring_no_zero_divisors}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ([:c:] dvd 1) = (c dvd (1::'a))
[PROOF STEP]
by (auto simp: one_pCons) |
\chapter{The translation pipeline}
The compiler parses the source code into a general s-expression structure and
transforms it to the Spiral syntax tree. All modules and the main program are
then merged and translated to the first continuation-passing style intermediate
language Spine. The next step converts Spine into the imperative intermediate
language Grit. The optimization passes operate on Grit and then the code
generator emits assembly.
\section{S-expressions}
S-expressions are read using a simple hand-written parser. The resulting data
structure serves as an input for further processing. There is also a
pretty-printer to translate the s-expression back to textual form. Spine and
Grit can be read from s-expressions and written to them for testing and
debugging purposes.
\section{Spiral}
The syntax tree of Spiral is decoded from an s-expression using a simple but tedious
process. If the program contains syntax errors, the compiler detects them in
this phase and rejects the program. Imported modules are collected in a pass
through the tree, loaded and also examined in the same way. Then the compiler
computes a topological ordering of the modules for further processing, reporting
an error if the dependency graph is cyclic.
\section{Spine}
The first intermediate language is Spine. This language is derived from
$\lambda^U_\text{CPS}$ \cite{kennedy2007compiling} and based on
$\lambda$-calculus and continuation passing style.
A continuation is a special $\lambda$-value that cannot escape the local
function and never returns. Calling a continuation is equivalent to jumping to a
basic block in an imperative language or in SSA (static single assignment) form.
Functions take a return continuation as a special argument and return a value to
the caller by jumping to this continuation.
\begin{gram}
<program> = (program <cont-name> <term>)
<term> = (letcont <cont-def>... <term>)
| (letfun <fun-def>... <term>)
| (letobj <obj-def> <term>)
| (call <val> <cont-name> <val>...)
| (extern-call <extern-name> <cont-name> <val>...)
| (cont <cont-name> <val>...)
| (branch <boolval> <cont-name> <cont-name>)
<fun-def> = (<var> <cont-name> (<var>...) (<var>...) <term>)
<cont-def> = (<cont-name> (<var>...) <term>)
<obj-def> = (string <var> <string-literal>)
| (double <var> <double-literal>)
<val> = <var> | <int-literal> | (true) | (false)
<boolval> = (is-true <val>) | (is-false <val>)
\end{gram}
A program is defined by a term and a halting continuation. A jump to the halting
continuation terminates the program.
\begin{description}
\item[\texttt{(cont <cont-name> <arg>...)}] jumps to the given continuation,
    passing the given arguments. The number of arguments at the call site must
match the definition of the continuation.
\item[\texttt{(branch <boolval> <then-name> <else-name>)}] evaluates a boolean
value and jumps to one of the passed continuations. Both continuations must
expect zero arguments.
\item[\texttt{(call <fun-val> <return-cont> <arg>...)}] calls the function with
the given arguments, passing \texttt{<return-cont>} as the return
continuation. This is a tail call if the continuation is also the caller's
return continuation.
\item[\texttt{(extern-call <extern-name> <return-cont> <arg>...)}] calls an
extern function by its name and passes its result to \texttt{<return-cont>}.
Extern calls are never translated to tail calls.
\item[\texttt{(letcont (<cont-name> (<arg>...) <body>)... <term>)}] defines
a group of mutually recursive continuations.
\item[\texttt{(letfun (<fun> <ret-cont> (<capture>...) (<arg>...) <body>)...
<term>)}] defines mutually recursive functions. The functions can use variables
visible at the definition, but must list them in the capture list.
However, no continuations from the outer context are available in the
function.
\item[\texttt{(letobj <obj-def> <term>)}] defines an object (a string or a real
number).
\end{description}
All values (\texttt{<val>}) are atomic (variables or constants) and can be
duplicated without restriction, as their evaluation is free.
\subsection{Translation from Spiral}
Translating expressions from Spiral to Spine requires converting the program
from direct style to continuation-passing style. The translation is driven by a
pair of functions, \texttt{translate-expr :: SpiralExpr -> (Onion, SpineVal)}
and \texttt{translate-expr-tail :: SpineContName -> SpiralExpr -> SpineTerm}.
\texttt{translate-expr} translates the Spiral expression to an
``onion''\footnote{Blame the author for this name.} and a value.
The onion consists of layers of \texttt{letcont}, \texttt{letfun} and
\texttt{letobj} Spine terms. Inside these layers, the value of the Spiral
expression is evaluated into the returned Spine value.
The function \texttt{translate-expr-tail} translates the Spiral expression
into a Spine term that jumps to the passed continuation with the evaluated value
of the expression. As an example, tail-calls are translated this way.
To illustrate these functions, let us consider the translation of a simple snippet
in Spiral:
\begin{spiral}
(fun big-enough? (x)
(if (< x 0)
(println "small")
(println "ok")))
\end{spiral}
First, we generate a name for the return continuation of the function
\texttt{big-enough?}, say \texttt{r}. To ensure that the calls in tail positions
will be translated correctly, returning directly to \texttt{r}, we pass the body
of the function (\texttt{(if (< x 0) (println "small") (println "ok"))}) to function
\texttt{translate-expr-tail "r"}.
To evaluate the \texttt{if} expression, we must first evaluate the condition
\texttt{(< x 0)} using \texttt{translate-expr}. We obtain the onion
\texttt{(letcont (lt-ret (lt-result) ?) (call < lt-ret x 0))}, where the
question mark \texttt{?} represents the ``hole'', the place where we
get the result in variable \texttt{lt-result}.
We translate both arms in the \texttt{if} expression by
\texttt{translate-expr-tail} to preserve the tail calls and get Spine terms
\texttt{(letobj (string s1 "small") (call println r s1))} and \texttt{(letobj
(string s2 "ok") (call println r s2))}.
To generate the \texttt{branch} term, we need two continuations that would serve
as the targets of the conditional jump. We will call these continuations
\texttt{on-true} and \texttt{on-false} and set their bodies to the terms
we have generated from the \texttt{if} arms. The body of the translated function
then looks like this:
\begin{spine}
(letcont (lt-ret (lt-result)
(letcont (on-true ()
(letobj (string s1 "small") (call println r s1)))
(on-false ()
(letobj (string s2 "ok") (call println r s2)))
(branch (is-true lt-result) on-true on-false)))
(call < lt-ret x 0))
\end{spine}
The order of evaluation and the flow of information from the original program
are clearly expressed after the translation to Spine. We must first call
\texttt{<} with \texttt{x} and \texttt{0}. Then we get the result in
\texttt{lt-result} inside \texttt{lt-ret}, where we scrutinize the variable and
decide which branch to take next. In both branches we must first define the
string that we want to print and then pass it to \texttt{println}. Upon
returning, \texttt{println} will pass its result directly to \texttt{r}, the
return continuation of \texttt{big-enough?}, so this call is a tail call.
\section{Grit}
The next stage of the pipeline is the language Grit. It is quite low-level and
close to assembly. All functions and objects are lifted to the top level,
variables are mutable and named by integers. Functions are composed of basic
blocks, which contain a list of operations terminated by a jump.
\begin{gram}
<program> = (program <fun-name> <fun-def>... <obj-def>...)
<fun-def> = (fun <fun-name> <int> <int> <int> <label> <block>...)
<obj-def> = (string <obj-name> <string-literal>)
| (double <obj-name> <double-literal>)
<block> = (<label> <op>... <jump>)
<op> = (call <var> <callee> <val>...)
| (extern-call <var> <extern-name> <val>...)
| (alloc-clos (<var> <fun-name> <val>...)...)
| (assign (<var> <val>)...)
<jump> = (goto <label>)
| (return <val>)
| (tail-call <callee> <val>...)
| (branch <boolval> <label> <label>)
<callee> = (combinator <fun-name>)
| (known-closure <fun-name> <val>)
| (unknown <val>)
<val> = (var <int>)
| (arg <int>)
| (capture <int>)
| (combinator <fun-name>)
| (obj <obj-name>)
| (int <int>)
| (true)
| (false)
| (undefined)
<boolval> = (is-true <val>) | (is-false <val>)
\end{gram}
The operations represent all actions that the program can do:
\begin{description}
\item[\texttt{(call <var> <callee> <arg>...)}] calls the function determined
by \texttt{<callee>} with some arguments and writes the return value to a
variable. \texttt{<callee>} can be:
\begin{description}
\item[\texttt{(combinator <fun-name>)}] calls a combinator, which is a
      function that has no captured variables. This call is very efficient,
because there is no need to store the function value and pass it to the
callee at runtime. Furthermore, we can check the number of arguments
during compilation, so the callee does not have to check them at
runtime.
\item[\texttt{(known-closure <fun-name> <val>)}] calls a closure that is
known statically. To generate such a call, the compiler must be able to
prove that \texttt{<val>} will only ever be a function object of the
function \texttt{<fun-name>}, otherwise the behavior is undefined (and
probably catastrophic). We need to store the function value, but we can
jump directly to the function body and skip the argument check.
\item[\texttt{(unknown <val>)}] is a fully dynamic call. At runtime, we
must first check in the caller whether the value is a function and
report an error otherwise. The callee will then check the number of
arguments and also report an error if it does not match.
\end{description}
\item[\texttt{(extern-call <var> <extern-name> <arg>...)}] calls an external
function and writes the result into the variable \texttt{<var>}.
\item[\texttt{(alloc-clos (<var> <fun-name> <capture>...))}] allocates
    closures for functions with the given names and lists of captured values. The
variables that hold the closure values are initialized before the captures
are evaluated, so the functions can reference each other. As a special case,
if the capture list is empty, no allocation is performed but the static
function value of the combinator is produced.
  \item[\texttt{(assign (<var> <val>)...)}] rewrites the variables with the
    corresponding values. The whole operation is atomic, so a variable assigned
    to on the left-hand side will be evaluated to its former value on the
    right-hand side (see the sketch after this list).
\end{description}
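For instance, because \texttt{assign} is atomic, a single operation can swap two
variables. The indices below are illustrative, and the assigned variables on the
left-hand sides are written in the \texttt{(var <int>)} notation for readability:
\begin{ttcode}
(assign ((var 1) (var 2)) ((var 2) (var 1)))
\end{ttcode}
After this operation, \texttt{(var 1)} holds the former value of \texttt{(var 2)}
and vice versa.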
The jumps are formed as follows:
\begin{description}
\item[\texttt{(goto <label>)}] jumps to the given block.
\item[\texttt{(return <val>)}] returns a value from the function.
\item[\texttt{(tail-call <callee> <arg>...)}] performs a tail-call to
\texttt{<callee>} (with the same semantics as in \texttt{call}). The stack
frame of the current function will be popped before the call.
\item[\texttt{(branch <boolval> <then> <else>)}] jumps to one of the two
blocks depending on the boolean value.
\end{description}
There are more types of values than in Spine, but all values are either constant
or reachable by a single load from memory.
\begin{description}
  \item[\texttt{(var <index>)}] is the value of a variable.
  \item[\texttt{(arg <index>)}] is the value of an argument.
  \item[\texttt{(capture <index>)}] is the value of a captured variable.
\item[\texttt{(combinator <fun-name>)}] is the constant value of a combinator.
\item[\texttt{(obj <obj-name>)}] is the constant value of a statically
allocated object.
\item[\texttt{(int <int>)}] is an integer constant.
\item[\texttt{(true)}, \texttt{(false)}] are boolean constants.
\item[\texttt{(undefined)}] is an undefined value. This special value can be
produced by optimizations, for example as a result of a read from
    an uninitialized variable. During code generation, if an undefined value is
assigned to a register or a memory location, no code is generated.
\end{description}
\subsection{Translation from Spine}
Translation from Spine to Grit is straightforward. A call to a continuation is
translated as an assignment to the variables generated for the continuation's
arguments, followed by a jump to its first basic block. Other structures from
Spine have a direct counterpart in Grit.
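As a sketch (the variable and argument indices and the label are invented, and
the assigned variable is written in the \texttt{(var <int>)} form for
readability), a Spine continuation call such as \texttt{(cont k y)} becomes an
assignment followed by a jump:
\begin{ttcode}
(assign ((var 3) (arg 0)))
(goto k)
\end{ttcode}
Here \texttt{(var 3)} stands for the parameter of the continuation \texttt{k} and
\texttt{(arg 0)} for the Grit value of \texttt{y}.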
\subsection{Optimization}
Because Spiral has very few features, there are not many opportunities for
optimization. The first optimization phases thus operate on the low-level Grit
and usually just simplify the code.
\subsubsection{Known-value optimization}
This whole-program phase first estimates the set of possible values of each
variable, each captured variable and the result of every function. This
information is then used in several places:
\begin{itemize}
  \item Known call optimization, which replaces \texttt{(call (unknown ...)
    ...)} with \texttt{(call (known-closure ...) ...)} or \texttt{(call
    (combinator ...) ...)} (see the sketch after this list).
\item Reduction of branches with a condition that is always true or
always false.
\item Constant propagation removes variables that have a simple constant value
(number, boolean or a statically allocated object).
\end{itemize}
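To give an idea of the first rewrite (the operands and the function name
\texttt{f} are invented for this sketch), a call whose callee the analysis proves
to always be the closure of \texttt{f} can be devirtualized:
\begin{ttcode}
(call <var> (unknown (var 1)) (int 3))
   becomes
(call <var> (known-closure f (var 1)) (int 3))
\end{ttcode}
If \texttt{f} turns out to capture nothing, the callee can even become
\texttt{(combinator f)}, so the function value does not have to be passed at
runtime at all.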
\subsubsection{Dead value elimination}
This phase is also global and its main goal is to remove unused captured
variables, but unnecessary allocations and variables are also discarded. Many
functions lose all their captured variables and become combinators, which
reduces memory consumption and thus the pressure on the garbage collector at
runtime; a small example follows.
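As an illustration (the function name and the operand spelling are invented), if
the capture \texttt{(var 2)} of a function \texttt{f} is never used in its body,
the allocation
\begin{ttcode}
(alloc-clos (<var> f (var 2)))
\end{ttcode}
shrinks to \texttt{(alloc-clos (<var> f))}; with an empty capture list no
allocation is performed at runtime and \texttt{f} becomes a combinator.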
\subsubsection{Function inlining}
Inlining replaces calls to selected functions with the body of the callee,
expanded inside the caller. This is a key optimization for functional programs,
because they are usually composed of many small functions. When functions are
expanded at the call site, we avoid the overhead of manipulating the stack and
two jumps. Further optimization possibilities are also exposed, because the
compiler can gather more information from the local context. The downside is the
possible increase in code size.
We currently inline functions that are small combinators and call no other
functions except external calls. This rule applies to most standard library
functions, as they are usually just thin wrappers around the runtime library.
\subsubsection{Dead code elimination}
This optimization removes the functions and static objects that are not
transitively referenced from the main function. These functions and objects can
never be used, so there is no need to generate any code for them. We observe
that the removed functions usually come from imported modules or are inlined
everywhere.
\subsubsection{The order of optimizations}
The ordering of optimizations is very important. Known value optimization leaves
some variables and captured variables unused, so they can be removed by
dead value elimination. It also increases the number of combinators, so we can
then inline more functions. The unused definitions left after inlining can be
pruned by dead code elimination.
The optimization level can be adjusted from the command line. Level 0 disables
all optimizations. Level 1 runs all phases in the order described above except
inlining, which is enabled from level 2. On level 3, we follow inlining with
another round of known value optimization and dead value elimination.
\section{Slot allocation}
Programs in Grit generally use many variables. It would be wasteful to allocate
a physical location for each variable, because their lifetimes are usually short
and the space would be unused most of the time. We therefore allocate multiple
variables to a single location, but we must be careful not to read from a
variable whose value has been overwritten by a write to a variable assigned to
the same location.
This phase corresponds to register allocation in practical compilers, but our
code generator is greatly simplified and allocates all variables to stack
slots. We use the interference graph coloring approach
\cite{chaitin1981register,chaitin1982register,briggs1994improvements}, but the
number of colors is not limited; we only seek to minimize it. The algorithm
builds the interference graph and then greedily colors the nodes, ordered by
decreasing number of incident edges.
\section{Assembler}
As the last step of the translation pipeline, we generate assembly for the
IA-32 architecture from Grit. The emitted code contains all functions,
statically allocated objects and strings. To support tail calls, the calling
convention used in Spiral differs from the C calling convention.
Arguments for C functions are placed in the caller's frame on the stack, but
during a tail call, the frame of the caller must be discarded, so the callee
must receive the arguments in its own frame.
The stack of a function with \texttt{N} slots is laid out like this (the
addresses are relative to the register \texttt{\%esp}):
\begin{ttcode}
4*N+4 : return address
4*N : slot 0 (argument 0)
4*N-4 : slot 1 (argument 1)
...
8 : slot (N-2)
4 : slot (N-1)
0 : closure value
\end{ttcode}
The callee receives arguments in the first slots, placed right under the return
address (saved by the \texttt{call} instruction). The remaining slots are placed
below. The value of the function (its closure) is saved in \texttt{\%ecx}, and
the number of arguments during unknown calls is placed in \texttt{\%eax}. The
function writes the value of \texttt{\%ecx} to the end of the stack frame, to
help the garbage collector traverse the stack. The return value is passed back
to the caller in \texttt{\%eax}.
To call a function, the caller places the arguments under its stack frame and saves
the return address with the \texttt{call} instruction. A tail call overwrites the
slots of the caller, shifts the stack upward and jumps to the callee with the
\texttt{jmp} instruction. Upon returning with the \texttt{ret} instruction, the
callee thus returns directly to the original caller.
The code generator can use fixed registers for temporary storage, because no
variables are placed in registers. \texttt{\%eax} and \texttt{\%edx} are used
for moving values to and from memory, and \texttt{\%ecx} stores the current
function value, which is used to access the captured variables. In certain
corner cases, the register \texttt{\%ebx} is used, too. The functions also
pass around the register \texttt{\%edi} with a pointer to the runtime background
(\texttt{Bg*}).
|
with(LinearAlgebra):
with(VectorCalculus):
with(plottools);
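# NALU: loss of a two-layer Neural Arithmetic Logic Unit-style cell.
# Each layer blends an additive path (w.x) with a multiplicative path
# exp(w.log(|x| + epsilon)) through a sigmoid gate g = 1/(1 + exp(-ghat));
# the returned value is the sum of squared differences between the output z2
# and the target t. NALUsafe below uses log(|x - 1| + 1) in place of the
# epsilon-shifted logarithm.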
NALU := proc(w1,w2,ghat1,ghat2,x,t,epsilon)
local g1, g2, z1, z2, L, i;
g1 := 1/~(1 +~ exp~(-ghat1));
z1 := g1 *~ (w1.x) +~ (1 -~ g1) *~ exp~(w1.log~(abs(x) +~ epsilon));
g2 := 1/~(1 +~ exp~(-ghat2));
z2 := g2 *~ (w2.z1) +~ (1 -~ g2) *~ exp~(w2.log~(abs(z1) +~ epsilon));
L := (z2 -~ t)^~2;
return add(L[i],i=1..numelems(L));
end proc:
NALUsafe := proc(w1,w2,ghat1,ghat2,x,t)
local g1, g2, z1, z2, L, i;
g1 := 1/~(1 +~ exp~(-ghat1));
z1 := g1 *~ (w1.x) +~ (1 -~ g1) *~ exp~(w1.log~(abs(x -~ 1) +~ 1));
g2 := 1/~(1 +~ exp~(-ghat2));
z2 := g2 *~ (w2.z1) +~ (1 -~ g2) *~ exp~(w2.log~(abs(z1 -~ 1) +~ 1));
L := (z2 -~ t)^~2;
return add(L[i],i=1..numelems(L));
end proc:
NALU(<<w | w>, <w | w>>, <<w | w>>, <g, g>, <g>, <x[1], x[2]>, t, epsilon);
P := plot3d(
NALU(<<w | w>, <w | w>>, <<w | w>>, <g, g>, <g>, <2, 2>, 8, 10^(-8)),
w = -1.5..1.5, g = -3..3,
view = [-1.5..1.5, -3..3, 0..300],
axes=boxed):
P;
P := plot3d(
NALU(<<w | w>, <w | w>>, <<w | w>>, <g, g>, <g>, <2, 2>, 16, 10^(-8)),
w = -1.5..1.5, g = -3..3,
view = [-1.5..1.5, -3..3, 0..300],
axes=boxed):
P;
NALUsafe(<<w | w>, <w | w>>, <<w | w>>, <g, g>, <g>, <x[1], x[2]>, t);
P := plot3d(
NALUsafe(<<w | w>, <w | w>>, <<w | w>>, <g, g>, <g>, <2, 2>, 8),
w = -1.5..1.5, g = -3..3,
view = [-1.5..1.5, -3..3, 0..300],
axes=boxed):
P;
P := plot3d(
NALUsafe(<<w | w>, <w | w>>, <<w | w>>, <g, g>, <g>, <2, 2>, 16),
w = -1.5..1.5, g = -3..3,
view = [-1.5..1.5, -3..3, 0..300],
axes=boxed):
P;
solveNALUsafe := proc(x, t)
local i, v, w, g, eq, sol1, sol2, sols;
eq := NALUsafe(<<w | w>, <w | w>>, <<w | w>>, <g, g>, <g>, x, t);
sols := [];
for v from -3 to 3 by 0.1 do
sol1 := fsolve(eval(eq, g=v) = 0, w);
sol2 := fsolve(eval(eq, g=v) = 0, w, avoid={{w = sol1}});
sols := [op(sols), [sol1, v], [sol2, v]];
end do;
return sols;
end proc:
solveNALU := proc(x, t, epsilon)
local i, v, w, g, eq, sols, sol1, sol2, sol3, sol4;
eq := NALU(<<w | w>, <w | w>>, <<w | w>>, <g, g>, <g>, x, t, epsilon);
sols := [];
for v from -3 to 3 by 0.1 do
sol1 := fsolve(eval(eq, g=v) = 0, w);
sol2 := fsolve(eval(eq, g=v) = 0, w, avoid={{w = sol1}});
if v < 0.9 and v >= 0 then
sol3 := fsolve(eval(eq, g=v) = 0, w, avoid={{w = sol1}, {w = sol2}});
sols := [op(sols), [sol1, v], [sol2, v], [sol3, v]];
elif v < 0 then
sol3 := fsolve(eval(eq, g=v) = 0, w, avoid={{w = sol1}, {w = sol2}});
sol4 := fsolve(eval(eq, g=v) = 0, w, avoid={{w = sol1}, {w = sol2}, {w = sol3}});
sols := [op(sols), [sol1, v], [sol2, v], [sol3, v], [sol4, v]];
else
sols := [op(sols), [sol1, v], [sol2, v]];
end if;
end do;
return sols;
end proc:
NALUsols := solveNALU(<2, 2>, 8, 10^(-8));
P := plot(NALUsols, style = 'point', view = [-1.5..1.5, -3..3]):
P;
NALUsafesols := solveNALUsafe(<2, 2>, 8);
P := plot(NALUsafesols, style = 'point', view = [-1.5..1.5, -3..3]):
P;
|
The interior of a set is a subset of the set. |
/**
* Copyright (c) 2017-20 Melown Technologies SE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <map>
#include <fstream>
#include <boost/format.hpp>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/utility/in_place_factory.hpp>
#include "dbglog/dbglog.hpp"
#include "utility/streams.hpp"
#include "utility/path.hpp"
#include "utility/uncaught-exception.hpp"
#include "jsoncpp/json.hpp"
#include "jsoncpp/as.hpp"
#include "vef.hpp"
namespace fs = boost::filesystem;
namespace vef {
namespace {
namespace constants {
std::string ManifestName(MainFile);
std::string MeshNameFormat("mesh.%s");
std::string MtlExtension("mtl");
std::string TextureNameFormat("texture-%d.%s");
}
std::string asExtension(Mesh::Format format)
{
switch (format) {
case Mesh::Format::obj: return "obj";
case Mesh::Format::gzippedObj: return "obj.gz";
}
throw;
}
std::string asExtension(Texture::Format format)
{
switch (format) {
case Texture::Format::jpg: return "jpg";
case Texture::Format::png: return "png";
case Texture::Format::jpeg2000: return "jp2";
}
throw;
}
void saveTrafo(Json::Value &obj, const math::Matrix4 &trafo)
{
auto &jTrafo(obj["trafo"] = Json::arrayValue);
for (int j(0); j < 3; ++j) {
for (int i(0); i < 4; ++i) {
jTrafo.append(trafo(j, i));
}
}
}
void saveTrafo(Json::Value &obj, const boost::optional<math::Matrix4> &trafo)
{
if (!trafo) { return; }
saveTrafo(obj, *trafo);
}
void saveExtents(Json::Value &obj, const math::Extents3 &extents)
{
if (!math::valid(extents)) { return; }
auto &jExtents(obj["extents"] = Json::arrayValue);
jExtents.append(extents.ll(0));
jExtents.append(extents.ll(1));
jExtents.append(extents.ll(2));
jExtents.append(extents.ur(0));
jExtents.append(extents.ur(1));
jExtents.append(extents.ur(2));
}
void saveManifest(std::ostream &os, const fs::path &path
, const Manifest &manifest, const fs::path &root)
{
Json::Value mf(Json::objectValue);
mf["version"] = 1;
if (manifest.srs) {
mf["srs"] = boost::lexical_cast<std::string>(*manifest.srs);
}
saveTrafo(mf, manifest.trafo);
const auto localPath([](fs::path path, fs::path root)
-> std::string
{
if (path.filename() == ".") { path = path.parent_path(); }
if (root.filename() == ".") { root = root.parent_path(); }
const auto str(utility::lexically_relative(path, root).string());
if (str == ".") { return {}; }
return str;
});
auto &jwindows(mf["windows"] = Json::arrayValue);
for (const auto &window : manifest.windows) {
auto &jwindow(jwindows.append(Json::objectValue));
const auto windowPath(fs::absolute(window.path, root));
jwindow["path"] = localPath(windowPath, root);
saveTrafo(jwindow, window.trafo);
if (window.name) { jwindow["name"] = *window.name; }
saveExtents(jwindow, window.extents);
auto &jlods(jwindow["lods"] = Json::arrayValue);
for (const auto &lod : window.lods) {
auto &jlod(jlods.append(Json::objectValue));
const auto lodPath(fs::absolute(lod.path, windowPath));
jlod["path"] = localPath(lod.path, windowPath);
auto &jmesh(jlod["mesh"] = Json::objectValue);
const auto meshPath(fs::absolute(lod.mesh.path, lodPath));
jmesh["path"] = localPath(meshPath, lodPath);
jmesh["format"]
= boost::lexical_cast<std::string>(lod.mesh.format);
auto &atlas(jlod["atlas"] = Json::arrayValue);
for (const auto &texture : lod.atlas) {
auto &jtexture(atlas.append(Json::objectValue));
const auto texturePath(fs::absolute(texture.path, lodPath));
jtexture["path"] = localPath(texture.path, lodPath);
auto &size(jtexture["size"] = Json::arrayValue);
size.append(texture.size.width);
size.append(texture.size.height);
jtexture["format"]
= boost::lexical_cast<std::string>(texture.format);
}
}
}
{
Json::StreamWriterBuilder wb;
os.precision(15);
std::unique_ptr<Json::StreamWriter> writer(wb.newStreamWriter());
writer->write(mf, &os);
}
(void) path;
}
void saveManifest(const fs::path &path, const Manifest &manifest
, const fs::path &root)
{
std::ofstream f;
f.exceptions(std::ios::badbit | std::ios::failbit);
f.open(path.string(), std::ios_base::out);
saveManifest(f, path, manifest, root);
f.close();
}
} // namespace
ArchiveWriter::ArchiveWriter(const fs::path &root, bool overwrite
, bool flat)
: root_(fs::absolute(root)), changed_(false), flat_(flat)
{
if (!create_directories(root_)) {
// directory already exists -> fail if mode says so
if (!overwrite) {
LOGTHROW(err2, std::runtime_error)
<< "VEF archive at " << root_ << " already exists.";
}
}
}
ArchiveWriter::~ArchiveWriter()
{
if (changed_ && !utility::uncaught_exception()) {
LOG(warn4)
<< "Unflushed VEF archive at " << root_
<< "; all changes made will not be reflected in the storage.";
}
}
fs::path Mesh::mtlPath() const
{
const fs::path p(path);
const auto ext(asExtension(format));
auto fname(p.filename().string());
fname = fname.substr(0, fname.size() - ext.size());
return p.parent_path() / (fname + constants::MtlExtension);
}
void ArchiveWriter::flush()
{
if (!changed_) { return; }
saveManifest(root_ / constants::ManifestName, manifest_, root_);
// write MTL files
for (const auto &window : manifest_.windows) {
for (const auto &lod : window.lods) {
auto path(lod.mesh.mtlPath());
LOG(info1) << "Writing " << path;
std::ofstream f(path.string());
int index(0);
for (const auto &texture : lod.atlas) {
f << "newmtl " << index
<< "\nmap_Kd " << texture.path.filename().string()
<< "\n";
++index;
}
f.close();
}
}
changed_ = false;
}
Id ArchiveWriter::addWindow(const OptionalString &path
, const OptionalMatrix &trafo
, const OptionalString &name)
{
changed_ = true;
auto index(manifest_.windows.size());
if (!flat_) {
fs::path wPath;
if (path) {
wPath = fs::absolute(*path, root_);
} else if (name) {
wPath = fs::absolute(*name, root_);
} else {
wPath = root_ / boost::lexical_cast<std::string>(index);
}
manifest_.windows.emplace_back(wPath);
} else {
// flat structure: just single window rooted in the root
manifest_.windows.emplace_back(root_);
}
auto &window(manifest_.windows.back());
window.trafo = trafo;
create_directories(window.path);
window.name = name;
if (!name) {
// get name from path/index if not specified
if (path) {
window.name = fs::path(*path).filename().string();
} else {
window.name = boost::lexical_cast<std::string>(index);
}
}
return index;
}
void ArchiveWriter::deleteWindow(Id windowId)
{
if (windowId >= manifest_.windows.size()) {
LOGTHROW(err1, std::logic_error)
<< "Cannot remove window: invalid window index " << windowId
<< ".";
}
manifest_.windows.erase(manifest_.windows.begin() + windowId);
}
Id ArchiveWriter::addLod(Id windowId, const OptionalString &path
, Mesh::Format meshFormat)
{
if (windowId >= manifest_.windows.size()) {
LOGTHROW(err1, std::logic_error)
<< "Cannot add LOD to window: invalid window index " << windowId
<< ".";
}
changed_ = true;
auto &window(manifest_.windows[windowId]);
auto index(window.lods.size());
if (path) {
// TODO: convert / to _
window.lods.emplace_back(window.path / *path);
} else {
window.lods.emplace_back
(window.path / boost::lexical_cast<std::string>(index));
}
auto &windowLod(window.lods.back());
create_directories(windowLod.path);
std::string meshPath(str(boost::format(constants::MeshNameFormat)
% asExtension(meshFormat)));
if (flat_) {
if (!window.name) {
LOGTHROW(err1, std::logic_error)
<< "Cannot flatten nameless window " << windowId
<< ".";
}
meshPath = *window.name + "-" + meshPath;
}
windowLod.mesh.format = meshFormat;
windowLod.mesh.path = (windowLod.path / meshPath);
return index;
}
Mesh& ArchiveWriter::mesh(Id windowId, Id lod)
{
if (windowId >= manifest_.windows.size()) {
LOGTHROW(err1, std::logic_error)
<< "Cannot get mesh from window: invalid window index " << windowId
<< ".";
}
changed_ = true;
auto &window(manifest_.windows[windowId]);
if (lod >= window.lods.size()) {
LOGTHROW(err1, std::logic_error)
<< "Cannot get mesh from window lod: invalid lod " << lod
<< ".";
}
return window.lods[lod].mesh;
}
Texture ArchiveWriter::addTexture(Id windowId, Id lod, const Texture &t
, Texture::Format format)
{
if (windowId >= manifest_.windows.size()) {
LOGTHROW(err1, std::logic_error)
<< "Cannot add texture to window: invalid window index "
<< windowId << ".";
}
auto &window(manifest_.windows[windowId]);
if (lod >= window.lods.size()) {
LOGTHROW(err1, std::logic_error)
<< "Cannot add texture to window lod: invalid lod " << lod
<< ".";
}
changed_ = true;
auto &windowLod(window.lods[lod]);
auto &atlas(windowLod.atlas);
auto index(atlas.size());
atlas.push_back(t);
    // set format and path on the stored copy, then return it
auto &tt(atlas.back());
tt.format = format;
std::string texturePath(str(boost::format(constants::TextureNameFormat)
% index % asExtension(format)));
if (flat_) {
if (!window.name) {
LOGTHROW(err1, std::logic_error)
<< "Cannot flatten nameless window " << windowId
<< ".";
}
texturePath = *window.name + "-" + texturePath;
}
tt.path = (windowLod.path / texturePath);
return tt;
}
void ArchiveWriter::setSrs(const geo::SrsDefinition &srs)
{
manifest_.srs = srs;
changed_ = true;
}
boost::optional<geo::SrsDefinition> ArchiveWriter::getSrs() const
{
return manifest_.srs;
}
void ArchiveWriter::setTrafo(const OptionalMatrix &trafo)
{
manifest_.trafo = trafo;
changed_ = true;
}
void ArchiveWriter::setExtents(Id windowId, const math::Extents3 &extents)
{
if (windowId >= manifest_.windows.size()) {
LOGTHROW(err1, std::logic_error)
<< "Cannot add texture to window: invalid window index "
<< windowId << ".";
}
auto &window(manifest_.windows[windowId]);
window.extents = extents;
}
void ArchiveWriter::expectWindows(std::size_t size)
{
manifest_.windows.reserve(std::max(manifest_.windows.capacity(), size));
}
OptionalMatrix windowMatrix(const Manifest &manifest
, const LoddedWindow &window)
{
if (!manifest.trafo) { return window.trafo; }
if (!window.trafo) { return manifest.trafo; }
return math::Matrix4(prod(*manifest.trafo, *window.trafo));
}
} // namespace vef
|
; 2004-07-17 earl
make object! [
handle: func [/local res] [
if not space-exists? "appdata-sticky" [ return "" ]
res: copy ""
foreach s head reverse load space-get "appdata-sticky" [
append res rejoin ["*" s "*<br>"]
]
html-format res
]
]
|
!###############################################################################
! calctraj.wpet.lint_cubep.f90
! Author: Matthew Janiga ([email protected])
! Last Updated: Mar. 25, 2011
!
! Description: Calculation program using linear time and cubic pressure.
! All calctraj programs use bicubic x and y.
!###############################################################################
MODULE mod_calctraj
USE mod_fileio
USE mod_grid
USE netcdf
USE mod_traj
USE mod_checkinp
IMPLICIT NONE
SAVE
! #### Derived types #####
! Contains the 3D wind.
TYPE vec_wind
REAL :: u,v,w,sp
END TYPE vec_wind
! Contains the position as described by the descriptor arrays (time,lev,lat,lon).
! The position in geographic space.
TYPE geo_pos
REAL :: time,lev,lat,lon
END TYPE geo_pos
  ! Interpolation requires two sets of values: the interpolation weighting (iw*)
  ! and the neighborhood to read in (nb*). In addition, real pressure values
  ! are stored for the vertical coordinate to weight closer values more in the
  ! vertical polynomial interpolation.
  ! i, j, k, and l contain the full fractional element position (i.e. 1:nlat)
  ! iwi, iwj, iwk, and iwl contain the fractional position in the neighborhood (-1:2)
  ! p is the current pressure (equivalent to geo_pos%lev)
  ! ploc is the local array of pressures in the neighborhood
TYPE interp_coord
REAL :: i, j, k, l
REAL :: iwi, iwj, iwk, iwl
INTEGER, DIMENSION(4) :: nbj, nbk, nbl
INTEGER, DIMENSION(2) :: nbi
REAL :: p
REAL, DIMENSION(4) :: ploc
END TYPE interp_coord
CONTAINS
!###########################################################################
! calctraj() ####
! Calls individual subroutines which perform calculate the trajectories ####
!###########################################################################
SUBROUTINE calctraj()
IMPLICIT NONE
! Local variables
INTEGER :: i,t ! Time step and trajectory counters
! Open the grid file and auxiliary file.
CALL check( nf90_open( gridfile, nf90_nowrite, gncid) )
CALL check( nf90_open( auxfile, nf90_nowrite, ancid) )
! Initializes the full trajectory arrays with initial positions
c_step = 1
CALL init_traj()
! Begin loop of time steps
DO i=2,nstep
c_step = i
IF( ANY(t_on) )THEN
PRINT *, 'Time step: ',i
DO t=1,ntraj
IF( t_on(t) )THEN
CALL petterson(t)
END IF
END DO
ELSE
        PRINT *, 'All trajectories have left the domain, time stepping halted'
EXIT
END IF
END DO
! Close the grid file.
CALL check( nf90_close(gncid) )
CALL check( nf90_close(ancid) )
END SUBROUTINE calctraj
!###########################################################################
! init_traj() ####
! Initializes the trajectory arrays finds initial values of wind / aux ####
!###########################################################################
SUBROUTINE init_traj()
IMPLICIT NONE
! Local variables
INTEGER :: t,a ! Counters
TYPE(interp_coord) :: ic ! Interpolation coordinates
TYPE(vec_wind) :: wind ! 3D wind vector
TYPE(geo_pos) :: gpos ! Geographic position
LOGICAL :: in_grid,no_miss,no_grnd ! Success flags
! Initialize arrays to missing
t_time = mv
t_lev = mv
t_lat = mv
t_lon = mv
t_u = mv
t_v = mv
t_w = mv
t_sp = mv
! Initialize last valid to last total
last_valid = nstep
PRINT *, "Trajectory arrays initialized to missing."
! Move over initial coordinates
DO t=1,ntraj
IF( t_on(t) )THEN
in_grid = .TRUE.
no_miss = .TRUE.
no_grnd = .TRUE.
t_time(t,c_step) = i_time(t)
t_lev(t,c_step) = i_lev(t)
t_lat(t,c_step) = i_lat(t)
t_lon(t,c_step) = i_lon(t)
gpos%time = i_time(t)
gpos%lev = i_lev(t)
gpos%lat = i_lat(t)
gpos%lon = i_lon(t)
CALL get_int_coords(t,gpos,ic,in_grid)
IF(in_grid)THEN
CALL get_grid_vals(t,ic,wind,no_miss)
IF(no_miss)THEN
t_sp(t,c_step) = wind%sp
! Get Auxiliary values
IF(naux >= 1)THEN
CALL get_aux_vals(t,ic)
END IF
! Underground check
IF(t_sp(t,c_step) <= t_lev(t,c_step))THEN
PRINT *, 'Trajectory encountered ground'
t_on(t) = .FALSE.
no_grnd = .FALSE.
ELSE
t_u(t,c_step) = wind%u
t_v(t,c_step) = wind%v
t_w(t,c_step) = wind%w
! Get Auxiliary values
IF(naux >= 1)THEN
CALL get_aux_vals(t,ic)
END IF
END IF
END IF
END IF
IF(in_grid.EQV..FALSE. .OR. no_grnd.EQV..FALSE. .OR. no_miss.EQV..FALSE.)THEN
t_u(t,c_step) = mv
t_v(t,c_step) = mv
t_w(t,c_step) = mv
! Set Auxiliary values to missing
IF(naux >= 1)THEN
DO a = 1,naux
t_aux(t,c_step,a) = mv
END DO
END IF
last_valid(t) = mvi ! None valid
END IF
END IF
END DO
END SUBROUTINE init_traj
!###########################################################################
! petterson() ####
! Performs iterative time-step following Petterson (1940) ####
!###########################################################################
SUBROUTINE petterson(t)
IMPLICIT NONE
! Input/output variables
INTEGER, INTENT(in) :: t ! Trajectory to compute
! Local variables
TYPE(interp_coord) :: ic ! The output interpolation coordinates
TYPE(geo_pos) :: gpos0,gpos1 ! Geographic position (initial and next)
TYPE(vec_wind) :: wind0, wind1, winda ! 3D Wind (initial = 0, next = 1, average = a)
LOGICAL :: in_grid,no_miss,no_grnd ! Success flags
INTEGER :: it ! For iteration
INTEGER :: ac ! Auxiliary loop
in_grid = .TRUE.
no_miss = .TRUE.
no_grnd = .TRUE.
! First Guess of X(0)
wind0%u = t_u(t,c_step-1)
wind0%v = t_v(t,c_step-1)
wind0%w = t_w(t,c_step-1)
gpos0%time = t_time(t,c_step-1)
gpos0%lev = t_lev(t,c_step-1)
gpos0%lat = t_lat(t,c_step-1)
gpos0%lon = t_lon(t,c_step-1)
CALL get_pos(wind0,gpos0,gpos1) ! Find new gpos given initial vector wind
    CALL get_int_coords(t,gpos1,ic,in_grid)  ! Find fractional element position given gpos1
IF(in_grid)THEN
      CALL get_grid_vals(t,ic,winda,no_miss) ! Given interpolation coordinates get vector wind
IF(no_miss)THEN
IF(winda%sp <= gpos1%lev)THEN
no_grnd = .FALSE.
END IF
END IF
END IF
! Solve for X(1) iteratively
DO it=1,3
IF(in_grid .AND. no_grnd .AND. no_miss)THEN
CALL get_pos(winda,gpos0,gpos1) ! Find new pos given average of initial and guess
CALL get_int_coords(t,gpos1,ic,in_grid) ! Find interpolation coordinates at iteration i
IF(in_grid)THEN
          CALL get_grid_vals(t,ic,wind1,no_miss) ! Given interpolation coordinates get vector wind at i
IF(no_miss)THEN
IF(winda%sp <= gpos1%lev)THEN
no_grnd = .FALSE.
ELSE
! Find average of initial and current guess
winda%u = 0.5*(wind0%u+wind1%u)
winda%v = 0.5*(wind0%v+wind1%v)
winda%w = 0.5*(wind0%w+wind1%w)
END IF
END IF
END IF
! IF(converged)THEN
! EXIT
! END IF
END IF
END DO
! Above steps have been successfully completed. Store values
IF(in_grid .AND. no_grnd .AND. no_miss)THEN
t_time(t,c_step) = gpos1%time
t_lev(t,c_step) = gpos1%lev
t_lat(t,c_step) = gpos1%lat
t_lon(t,c_step) = gpos1%lon
t_u(t,c_step) = wind1%u
t_v(t,c_step) = wind1%v
t_w(t,c_step) = wind1%w
t_sp(t,c_step) = wind1%sp
IF(naux >= 1)THEN
CALL get_aux_vals(t,ic)
END IF
ELSE
t_time(t,c_step) = mv
t_lev(t,c_step) = mv
t_lat(t,c_step) = mv
t_lon(t,c_step) = mv
t_u(t,c_step) = mv
t_v(t,c_step) = mv
t_w(t,c_step) = mv
t_sp(t,c_step) = wind1%sp
IF(naux >= 1)THEN
DO ac = 1,naux
t_aux(t,c_step,ac) = mv
END DO
END IF
last_valid(t) = c_step - 1
END IF
END SUBROUTINE petterson
!###########################################################################
! get_int_coords() ####
  ! Use linear interpolation to convert the geographic positions to values ####
  ! used to read in a neighborhood and the interpolation weighting to get  ####
  ! the interpolated value from this neighborhood of values.
!###########################################################################
SUBROUTINE get_int_coords(t,gpos,ic,valid)
IMPLICIT NONE
! Input/output variables
INTEGER, INTENT(in) :: t ! Trajectory
TYPE(geo_pos), INTENT(in) :: gpos ! Geographic position
TYPE(interp_coord), INTENT(out) :: ic ! The output interpolation coordinates
LOGICAL, INTENT(inout) :: valid ! Success flag
! Local variables
INTEGER :: p ! Counter
    ! Interpolate time to fractional element position
IF( gpos%time < gtime(1) .OR. gpos%time > gtime(gntime) )THEN
valid = .FALSE.
t_on(t) = .FALSE.
ELSE
DO p=1,gntime-1
IF( gpos%time >= gtime(p) .AND. gpos%time <= gtime(p+1)) THEN
ic%i = 1.0*p+(gpos%time-gtime(p))/(gtime(p+1)-gtime(p))
EXIT
END IF
END DO
END IF
    ! Interpolate level to fractional element position
IF( gpos%lev > glev(1) .OR. gpos%lev < glev(gnlev) )THEN
valid = .FALSE.
t_on(t) = .FALSE.
ELSE
DO p=1,gnlev-1
IF( gpos%lev <= glev(p) .AND. gpos%lev >= glev(p+1)) THEN
ic%j = 1.0*p+(glev(p)-gpos%lev)/(glev(p)-glev(p+1))
ic%p = gpos%lev ! Actual pressure level
EXIT
END IF
END DO
END IF
    ! Interpolate latitude to fractional element position
IF( gpos%lat < glat(1) .OR. gpos%lat > glat(gnlat) )THEN
valid = .FALSE.
t_on(t) = .FALSE.
ELSE
DO p=1,gnlat-1
IF( gpos%lat >= glat(p) .AND. gpos%lat <= glat(p+1)) THEN
ic%k = 1.0*p+(gpos%lat-glat(p))/(glat(p+1)-glat(p))
EXIT
END IF
END DO
END IF
    ! Interpolate longitude to fractional element position
IF( gpos%lon < glon(1) .OR. gpos%lon > glon(gnlon) )THEN
valid = .FALSE.
t_on(t) = .FALSE.
ELSE
DO p=1,gnlon-1
IF( gpos%lon >= glon(p) .AND. gpos%lon <= glon(p+1)) THEN
ic%l = 1.0*p+(gpos%lon-glon(p))/(glon(p+1)-glon(p))
EXIT
END IF
END DO
END IF
    ! Determine neighborhood and weighting for time
IF( ic%i < 1.0 .OR. ic%i > gntime )THEN
! Out of bounds
valid = .FALSE.
t_on(t) = .FALSE.
ELSE IF( ic%i >= 1.0 .AND. ic%i <= (gntime) )THEN
! Central
ic%nbi = (/0,1/) + INT(ic%i)
ic%iwi = ic%i-INT(ic%i)
END IF
    ! Determine neighborhood and weighting for level
IF( ic%j < 1.0 .OR. ic%j > gnlev )THEN
! Out of bounds
valid = .FALSE.
t_on(t) = .FALSE.
ELSE IF( ic%j >= 1.0 .AND. ic%j < 2.0)THEN
! Right end
ic%nbj = (/1,2,3,4/)
ic%iwj = ic%j-INT(ic%j)-1.0
ic%ploc = glev( ic%nbj ) ! Actual pressure levels
ELSE IF( ic%j >= (gnlev-1) .AND. ic%j <= gnlev)THEN
! Left end
ic%nbj = (/gnlev-3,gnlev-2,gnlev-1,gnlev/)
ic%iwj = ic%j-INT(ic%j)+1.0
ic%ploc = glev( ic%nbj ) ! Actual pressure levels
ELSE IF( ic%j >= 2.0 .AND. ic%j <= (gnlev-1) )THEN
! Central
ic%nbj = (/-1,0,1,2/) + INT(ic%j)
ic%iwj = ic%j-INT(ic%j)
ic%ploc = glev( ic%nbj ) ! Actual pressure levels
END IF
    ! Determine neighborhood and weighting for latitude
IF( ic%k < 1.0 .OR. ic%k > gnlat )THEN
! Out of bounds
valid = .FALSE.
t_on(t) = .FALSE.
ELSE IF( ic%k >= 1.0 .AND. ic%k < 2.0)THEN
! Right end
ic%nbk = (/1,2,3,4/)
ic%iwk = ic%k-INT(ic%k)-1.0
ELSE IF( ic%k >= (gnlat-1) .AND. ic%k <= gnlat)THEN
! Left end
ic%nbk = (/gnlat-3,gnlat-2,gnlat-1,gnlat/)
ic%iwk = ic%k-INT(ic%k)+1.0
ELSE IF( ic%k >= 2.0 .AND. ic%k <= (gnlat-1) )THEN
! Central
ic%nbk = (/-1,0,1,2/) + INT(ic%k)
ic%iwk = ic%k-INT(ic%k)
END IF
    ! Determine neighborhood and weighting for longitude
IF( ic%l < 1.0 .OR. ic%l > gnlon )THEN
! Out of bounds
valid = .FALSE.
t_on(t) = .FALSE.
ELSE IF( ic%l >= 1.0 .AND. ic%l < 2.0)THEN
! Right end
ic%nbl = (/1,2,3,4/)
ic%iwl = ic%l-INT(ic%l)-1.0
ELSE IF( ic%l >= (gnlon-1) .AND. ic%l <= gnlon)THEN
! Left end
ic%nbl = (/gnlon-3,gnlon-2,gnlon-1,gnlon/)
ic%iwl = ic%l-INT(ic%l)+1.0
ELSE IF( ic%l >= 2.0 .AND. ic%l <= (gnlon-1) )THEN
! Central
ic%nbl = (/-1,0,1,2/) + INT(ic%l)
ic%iwl = ic%l-INT(ic%l)
END IF
END SUBROUTINE get_int_coords
!###########################################################################
! get_pos() ####
! Calculates gpos1 given vector wind and gpos0 using Haversine's ####
! formula. ####
!###########################################################################
SUBROUTINE get_pos(wind,gpos0,gpos1)
IMPLICIT NONE
! Input/output variables
TYPE(geo_pos), INTENT(in) :: gpos0 ! Initial geographic position
TYPE(geo_pos), INTENT(out) :: gpos1 ! Next geographic position
TYPE(vec_wind), INTENT(in) :: wind ! 3D vector wind (initial or average depending on call)
! Local Variables
REAL, PARAMETER :: pi = 3.14159265358979 ! pi
REAL, PARAMETER :: radius = 6371220.0 ! Earth radius (m)
REAL :: latr0, lonr0, latr1, lonr1 ! Initial and guess lat/lon (radians)
REAL :: dir, rdist, adj ! Direction, radial distance, backward/forward switch
! Reverse direction if time_step < 0
IF(time_step < 0)THEN
adj = -1.0
ELSE
adj = 1.0
END IF
! Current lat and lon in radians
latr0 = gpos0%lat*pi/180.0
lonr0 = gpos0%lon*pi/180.0
! Solve for radial distance of the motion over 1 time step and the bearing
! of the wind based on u and v. Bearing is reversed for back trajectories.
rdist = SQRT( wind%u**2.0+wind%v**2.0 )*(adj*time_step/radius)
dir = pi+ATAN2( adj*-1.0*wind%u, adj*-1.0*wind%v )
    ! Calculate new lat and lon using Haversine's formula
latr1 = ASIN( SIN(latr0)*COS(rdist)+COS(latr0)*SIN(rdist)*COS(dir) )
lonr1 = lonr0+ATAN2( SIN(dir)*SIN(rdist)*COS(latr0), COS(rdist)-SIN(latr0)*SIN(latr1) )
! Convert to degrees
gpos1%lat = latr1*180.0/pi
gpos1%lon= lonr1*180.0/pi
! New time
gpos1%time = gpos0%time+time_step
! New vertical position
gpos1%lev = gpos0%lev + wind%w*time_step
END SUBROUTINE get_pos
!###########################################################################
! get_grid_vals() ####
! Given the interpolation coordinates a neighborhood of values is read ####
! in and used to find an interpolated value given the input weightings. ####
!###########################################################################
SUBROUTINE get_grid_vals(t,ic,wind,no_miss)
IMPLICIT NONE
! Input/Output Variables
INTEGER, INTENT(in) :: t ! Trajectory number
TYPE(interp_coord), INTENT(in) :: ic ! Interpolation coordinates
TYPE(vec_wind), INTENT(inout) :: wind ! 3D wind interpolated from wind_nbor and fep
LOGICAL, INTENT(inout) :: no_miss ! Success flag
! Local Variables
    TYPE(vec_wind), DIMENSION(4,4,4,2) :: wind_nbor ! Temporarily stores wind neighborhood to get interpolated wind
REAL, DIMENSION(4,4,2) :: sp_nbor
! Get 4D neighborhood values
wind_nbor%u = get_nbor_xyzt(ic%nbi,ic%nbj,ic%nbk,ic%nbl,gncid,u_varid)
wind_nbor%v = get_nbor_xyzt(ic%nbi,ic%nbj,ic%nbk,ic%nbl,gncid,v_varid)
wind_nbor%w = get_nbor_xyzt(ic%nbi,ic%nbj,ic%nbk,ic%nbl,gncid,w_varid)
sp_nbor = get_nbor_xyt(ic%nbi,ic%nbk,ic%nbl,gncid,sp_varid)
! Check for missing values. If there are missing values interpolation
! is not performed and all values are set to missing. Also the trajectory
! is turned off.
IF( ANY(wind_nbor%u == mv) .OR. ANY(wind_nbor%v == mv) .OR. ANY(wind_nbor%w == mv) .OR. ANY(wind_nbor%sp == mv) )THEN
PRINT *, 'Missing values encountered'
no_miss = .FALSE.
t_on(t) = .FALSE.
ELSE
! Get interpolated value given weightings
wind%u = interp_4d(ic, wind_nbor%u)
wind%v = interp_4d(ic, wind_nbor%v)
wind%w = interp_4d(ic, wind_nbor%w)
wind%sp = interp_3d(ic, sp_nbor)
END IF
END SUBROUTINE get_grid_vals
!###########################################################################
! get_aux_vals() ####
! Given the interpolation coordinates a neighborhood of values is read ####
! in and used to find an interpolated value given the input weightings. ####
!###########################################################################
SUBROUTINE get_aux_vals(t,ic)
IMPLICIT NONE
! Input/Output Variables
INTEGER, INTENT(in) :: t ! Trajectory number
TYPE(interp_coord), INTENT(in) :: ic ! Interpolation coordinates
! Local Variables
REAL, DIMENSION(4,4,4,2) :: a4d_xyzt ! Temporarily stores 4D auxiliary (e.g. pv)
    REAL, DIMENSION(4,4,2) :: a3d_xyt ! Temporarily stores 3D auxiliary (e.g. mslp)
INTEGER :: ac ! Loop
    ! Loop through the auxiliary values
DO ac = 1,naux
! If 4D auxiliary value
IF(aux_dim(ac) == 4)THEN
! Get neighborhood
a4d_xyzt = get_nbor_xyzt(ic%nbi,ic%nbj,ic%nbk,ic%nbl,ancid,aux_varid(ac))
IF( ANY(a4d_xyzt == mv) )THEN
t_aux(t,c_step,ac) = mv
ELSE
! Get interpolated value given weightings
t_aux(t,c_step,ac) = interp_4d(ic,a4d_xyzt)
END IF
END IF
! If 3D auxiliary value
IF(aux_dim(ac) == 3)THEN
! Get neighborhood
a3d_xyt = get_nbor_xyt(ic%nbi,ic%nbk,ic%nbl,ancid,aux_varid(ac))
IF( ANY(a3d_xyt == mv) )THEN
t_aux(t,c_step,ac) = mv
ELSE
! Get interpolated value given weightings
t_aux(t,c_step,ac) = interp_3d(ic,a3d_xyt)
END IF
END IF
END DO
END SUBROUTINE get_aux_vals
!###########################################################################
! get_nbor_xyzt() ####
! Reads in a 4d grid to be used to find var given g* elements. ####
!###########################################################################
FUNCTION get_nbor_xyzt(gis,gjs,gks,gls,ncid,var_varid)
IMPLICIT NONE
! Input Variables
INTEGER, INTENT(IN), DIMENSION(4) :: gjs, gks, gls
INTEGER, INTENT(IN), DIMENSION(2) :: gis
INTEGER, INTENT(IN) :: ncid, var_varid
! Local Variables
INTEGER, PARAMETER :: NDIMS = 4
INTEGER :: start(NDIMS), COUNT(NDIMS)
REAL, DIMENSION(4,4,4,2) :: get_nbor_xyzt
start = (/gls(1),gks(1),gjs(1),gis(1)/) ! Reverse order for NetCDF
count = (/4,4,4,2/) ! Number in each dimension.
! Read in neighborhood from NetCDF
CALL check( nf90_get_var(ncid, var_varid, get_nbor_xyzt, start, count ) )
END FUNCTION get_nbor_xyzt
!###########################################################################
! get_nbor_xyt() ####
! Reads in a 3d grid to be used to find var given g* elements. ####
!###########################################################################
FUNCTION get_nbor_xyt(gis,gks,gls,ncid,var_varid)
IMPLICIT NONE
! Input Variables
INTEGER, INTENT(IN), DIMENSION(4) :: gks,gls
INTEGER, INTENT(IN), DIMENSION(2) :: gis
INTEGER, INTENT(IN) :: ncid, var_varid
! Local Variables
INTEGER, PARAMETER :: NDIMS = 3
INTEGER :: start(NDIMS), COUNT(NDIMS)
REAL, DIMENSION(4,4,2) :: get_nbor_xyt
start = (/gls(1),gks(1),gis(1)/) ! Reverse order for NetCDF
count = (/4,4,2/) ! Number in each dimension.
! Read in neighborhood from NetCDF
CALL check( nf90_get_var(ncid, var_varid, get_nbor_xyt, start, count ) )
END FUNCTION get_nbor_xyt
!###########################################################################
! interp_4d() ####
! Given the g*s arrays and the g* arrays stored in memory the value ####
! at the fractional elements g* are determined. ####
!###########################################################################
FUNCTION interp_4d(ic, var_xyzt)
IMPLICIT NONE
! Input Variables
TYPE(interp_coord), INTENT(in) :: ic ! Interpolation coordinates
REAL, INTENT(IN), DIMENSION(4,4,4,2) :: var_xyzt ! Neighborhood of values
! Local Variables
REAL :: interp_4d, t1, t2
INTEGER :: i
REAL, DIMENSION(4) :: prof1, prof2, psub
! Generate two profiles at t1 and t2 using bicubic interpolation
DO i=1,4
prof1(i) = bicubic_interpolate_xy(var_xyzt(1:4,1:4,i,1), ic%iwl, ic%iwk)
prof2(i) = bicubic_interpolate_xy(var_xyzt(1:4,1:4,i,2), ic%iwl, ic%iwk)
END DO
! Determine the t1 and t2 for respective profiles using Newton polynomials
t1 = neville_interpolate_p(ic%ploc, prof1, ic%p)
t2 = neville_interpolate_p(ic%ploc, prof2, ic%p)
! Linear interpolation for time
interp_4d = t1*(1.0-ic%iwi) + t2*ic%iwi
END FUNCTION interp_4d
!###########################################################################
! interp_3d() ####
! Given the g*s arrays and the g* arrays stored in memory the value ####
! at the fractional elements g* are determined. ####
!###########################################################################
FUNCTION interp_3d(ic, var_xyt)
IMPLICIT NONE
! Input Variables
TYPE(interp_coord), INTENT(in) :: ic ! Interpolation coordinates
REAL, INTENT(IN), DIMENSION(4,4,2) :: var_xyt ! Neighborhood of values
! Local Variables
REAL :: interp_3d, t1, t2
! Generate values at t1 and t2 using bicubic interpolation
t1 = bicubic_interpolate_xy(var_xyt(1:4,1:4,1), ic%iwl, ic%iwk)
t2 = bicubic_interpolate_xy(var_xyt(1:4,1:4,2), ic%iwl, ic%iwk)
! Linear interpolation for time
interp_3d = t1*(1.0-ic%iwi) + t2*ic%iwi
END FUNCTION interp_3d
!###########################################################################
! cubic_interpolate() ####
! Perform cubic interpolation to find value at fractional element ####
!###########################################################################
! In the cubic interpolation functions p is an array of values (e.g. wind)
! w, x, y, and z are fractional elements (/-1:2/) with a center point defined
! as 0.5.
FUNCTION cubic_interpolate(p, w)
IMPLICIT NONE
! Input Variables
REAL, INTENT(IN) :: w
REAL, INTENT(IN), DIMENSION(4) :: p
! Local Variables
REAL :: cubic_interpolate
cubic_interpolate = p(2) + 0.5*w*(p(3) - p(1) + w*(2.0*p(1) - 5.0*p(2) + 4.0*p(3) - p(4) + w*(3.0*(p(2) - p(3)) + p(4) - p(1))))
END FUNCTION cubic_interpolate
!###########################################################################
! bicubic_interpolate_xy() ####
! Perform cubic interpolation to find value at fractional element ####
!###########################################################################
FUNCTION bicubic_interpolate_xy(p, w, x)
IMPLICIT NONE
! Input Variables
REAL, INTENT(IN) :: w, x
REAL, INTENT(IN), DIMENSION(4,4) :: p
! Local Variables
REAL, DIMENSION(4) :: p_temp
REAL :: bicubic_interpolate_xy
p_temp(1) = cubic_interpolate(p(1,:), x)
p_temp(2) = cubic_interpolate(p(2,:), x)
p_temp(3) = cubic_interpolate(p(3,:), x)
p_temp(4) = cubic_interpolate(p(4,:), x)
bicubic_interpolate_xy = cubic_interpolate(p_temp, w)
END FUNCTION bicubic_interpolate_xy
!###########################################################################
! neville_interpolate_p() ####
! Perform 3rd order Newton polynomial interpolation using ####
! Neville's algorithm ####
!###########################################################################
FUNCTION neville_interpolate_p(psub, prof, inp)
IMPLICIT NONE
! Input Variables
REAL, INTENT(IN) :: inp
REAL, INTENT(IN), DIMENSION(4) :: psub, prof
! Local Variables
REAL, DIMENSION(4) :: f
REAL :: neville_interpolate_p
INTEGER :: n, i, j
n = 4 ! Number of values in array
! Solve for the unique polynomial of order n-1
f = prof
DO j=1,n
DO i=n,j+1,-1
f(i) = ( (inp-psub(i-j))*f(i) - (inp-psub(i))*f(i-1) ) / ( psub(i)-psub(i-j) )
END DO
END DO
neville_interpolate_p = f(n)
END FUNCTION neville_interpolate_p
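  ! Illustrative sanity check (comment only): Neville's algorithm evaluates
  ! the unique cubic through the four (psub, prof) pairs, so it is exact for
  ! any polynomial of degree <= 3. For example, with psub = (/1.,2.,3.,4./)
  ! and prof = psub**2, neville_interpolate_p(psub, prof, 2.5) returns 6.25.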
END MODULE mod_calctraj
|
{-# OPTIONS --without-K --safe #-}
open import Axiom.Extensionality.Propositional using (Extensionality)
module Cats.Category.Sets.Facts.Exponential
(funext : ∀ {a b} → Extensionality a b)
where
open import Data.Product using (_×_ ; _,_ ; proj₁ ; proj₂)
open import Relation.Binary.PropositionalEquality using (_≡_ ; refl ; sym)
open import Cats.Category
open import Cats.Category.Sets using (Sets)
open import Cats.Category.Sets.Facts.Product using (hasBinaryProducts)
instance
hasExponentials : ∀ {l} → HasExponentials (Sets l)
hasExponentials .HasExponentials.hasBinaryProducts = hasBinaryProducts
hasExponentials .HasExponentials._↝′_ B C = record
{ Cᴮ = B → C
; eval = λ { (f , x) → f x }
; curry′ = λ f → record
{ arr = λ a b → f (a , b)
; prop = λ x → refl
; unique = λ eq a → funext λ b → sym (eq _)
}
}
|
cc ------------ dpmjet3.4 - authors: S.Roesler, R.Engel, J.Ranft -------
cc -------- phojet1.12-40 - authors: S.Roesler, R.Engel, J.Ranft -------
cc - oct'13 -------
cc ----------- pythia-6.4 - authors: Torbjorn Sjostrand, Lund'10 -------
cc ---------------------------------------------------------------------
cc converted for use with FLUKA -------
cc - oct'13 -------
C...PYSHOW
C...Generates timelike parton showers from given partons.
SUBROUTINE PYSHOW(IP1,IP2,QMAX)
C...Double precision and integer declarations.
IMPLICIT DOUBLE PRECISION(A-H, O-Z)
IMPLICIT INTEGER(I-N)
INTEGER PYCOMP
C...Parameter statement to help give large particle numbers.
PARAMETER (KSUSY1=1000000,KSUSY2=2000000,KTECHN=3000000,
&KEXCIT=4000000,KDIMEN=5000000)
PARAMETER (MAXNUR=1000)
C...Commonblocks.
include 'inc/pypart'
include 'inc/pyjets'
include 'inc/pydat1'
include 'inc/pydat2'
include 'inc/pypars'
include 'inc/pyint1'
C...Local arrays.
DIMENSION PMTH(5,140),PS(5),PMA(100),PMSD(100),IEP(100),IPA(100),
&KFLA(100),KFLD(100),KFL(100),ITRY(100),ISI(100),ISL(100),DP(100),
&DPT(5,4),KSH(0:140),KCII(2),NIIS(2),IIIS(2,2),THEIIS(2,2),
&PHIIIS(2,2),ISII(2),ISSET(2),ISCOL(0:140),ISCHG(0:140),
&IREF(1000)
C...Check that QMAX not too low.
IF(MSTJ(41).LE.0) THEN
RETURN
ELSEIF(MSTJ(41).EQ.1.OR.MSTJ(41).EQ.11) THEN
IF(QMAX.LE.PARJ(82).AND.IP2.GE.-80) RETURN
ELSE
IF(QMAX.LE.MIN(PARJ(82),PARJ(83),PARJ(90)).AND.IP2.GE.-80)
& RETURN
ENDIF
C...Store positions of shower initiating partons.
MPSPD=0
IF(IP1.GT.0.AND.IP1.LE.MIN(N,MSTU(4)-MSTU(32)).AND.IP2.EQ.0) THEN
NPA=1
IPA(1)=IP1
ELSEIF(MIN(IP1,IP2).GT.0.AND.MAX(IP1,IP2).LE.MIN(N,MSTU(4)-
& MSTU(32))) THEN
NPA=2
IPA(1)=IP1
IPA(2)=IP2
ELSEIF(IP1.GT.0.AND.IP1.LE.MIN(N,MSTU(4)-MSTU(32)).AND.IP2.LT.0
& .AND.IP2.GE.-80) THEN
NPA=ABS(IP2)
DO 100 I=1,NPA
IPA(I)=IP1+I-1
100 CONTINUE
ELSEIF(IP1.GT.0.AND.IP1.LE.MIN(N,MSTU(4)-MSTU(32)).AND.
&IP2.EQ.-100) THEN
MPSPD=1
NPA=2
IPA(1)=IP1+6
IPA(2)=IP1+7
ELSE
CALL PYERRM(12,
& '(PYSHOW:) failed to reconstruct showering system')
IF(MSTU(21).GE.1) RETURN
ENDIF
C...Send off to PYPTFS for pT-ordered evolution if requested,
C...if at least 2 partons, and without predefined shower branchings.
IF((MSTJ(41).EQ.11.OR.MSTJ(41).EQ.12).AND.NPA.GE.2.AND.
&MPSPD.EQ.0) THEN
NPART=NPA
DO 110 II=1,NPART
IPART(II)=IPA(II)
PTPART(II)=0.5D0*QMAX
110 CONTINUE
CALL PYPTFS(2,0.5D0*QMAX,0D0,PTGEN)
RETURN
ENDIF
C...Initialization of cutoff masses etc.
DO 120 IFL=0,40
ISCOL(IFL)=0
ISCHG(IFL)=0
KSH(IFL)=0
120 CONTINUE
ISCOL(21)=1
KSH(21)=1
PMTH(1,21)=PYMASS(21)
PMTH(2,21)=SQRT(PMTH(1,21)**2+0.25D0*PARJ(82)**2)
PMTH(3,21)=2D0*PMTH(2,21)
PMTH(4,21)=PMTH(3,21)
PMTH(5,21)=PMTH(3,21)
PMTH(1,22)=PYMASS(22)
PMTH(2,22)=SQRT(PMTH(1,22)**2+0.25D0*PARJ(83)**2)
PMTH(3,22)=2D0*PMTH(2,22)
PMTH(4,22)=PMTH(3,22)
PMTH(5,22)=PMTH(3,22)
PMQTH1=PARJ(82)
IF(MSTJ(41).GE.2) PMQTH1=MIN(PARJ(82),PARJ(83))
PMQT1E=MIN(PMQTH1,PARJ(90))
PMQTH2=PMTH(2,21)
IF(MSTJ(41).GE.2) PMQTH2=MIN(PMTH(2,21),PMTH(2,22))
PMQT2E=MIN(PMQTH2,0.5D0*PARJ(90))
DO 130 IFL=1,5
ISCOL(IFL)=1
IF(MSTJ(41).GE.2) ISCHG(IFL)=1
KSH(IFL)=1
PMTH(1,IFL)=PYMASS(IFL)
PMTH(2,IFL)=SQRT(PMTH(1,IFL)**2+0.25D0*PMQTH1**2)
PMTH(3,IFL)=PMTH(2,IFL)+PMQTH2
PMTH(4,IFL)=SQRT(PMTH(1,IFL)**2+0.25D0*PARJ(82)**2)+PMTH(2,21)
PMTH(5,IFL)=SQRT(PMTH(1,IFL)**2+0.25D0*PARJ(83)**2)+PMTH(2,22)
130 CONTINUE
DO 140 IFL=11,15,2
IF(MSTJ(41).EQ.2.OR.MSTJ(41).GE.4) ISCHG(IFL)=1
IF(MSTJ(41).EQ.2.OR.MSTJ(41).GE.4) KSH(IFL)=1
PMTH(1,IFL)=PYMASS(IFL)
PMTH(2,IFL)=SQRT(PMTH(1,IFL)**2+0.25D0*PARJ(90)**2)
PMTH(3,IFL)=PMTH(2,IFL)+0.5D0*PARJ(90)
PMTH(4,IFL)=PMTH(3,IFL)
PMTH(5,IFL)=PMTH(3,IFL)
140 CONTINUE
PT2MIN=MAX(0.5D0*PARJ(82),1.1D0*PARJ(81))**2
ALAMS=PARJ(81)**2
ALFM=LOG(PT2MIN/ALAMS)
C...Check on phase space available for emission.
IREJ=0
DO 150 J=1,5
PS(J)=0D0
150 CONTINUE
PM=0D0
KFLA(2)=0
DO 170 I=1,NPA
KFLA(I)=ABS(K(IPA(I),2))
PMA(I)=P(IPA(I),5)
C...Special cutoff masses for initial partons (may be a heavy quark,
C...squark, ..., and need not be on the mass shell).
IR=30+I
IF(NPA.LE.1) IREF(I)=IR
IF(NPA.GE.2) IREF(I+1)=IR
ISCOL(IR)=0
ISCHG(IR)=0
KSH(IR)=0
IF(KFLA(I).LE.8) THEN
ISCOL(IR)=1
IF(MSTJ(41).GE.2) ISCHG(IR)=1
ELSEIF(KFLA(I).EQ.11.OR.KFLA(I).EQ.13.OR.KFLA(I).EQ.15.OR.
& KFLA(I).EQ.17) THEN
IF(MSTJ(41).EQ.2.OR.MSTJ(41).GE.4) ISCHG(IR)=1
ELSEIF(KFLA(I).EQ.21) THEN
ISCOL(IR)=1
ELSEIF((KFLA(I).GE.KSUSY1+1.AND.KFLA(I).LE.KSUSY1+8).OR.
& (KFLA(I).GE.KSUSY2+1.AND.KFLA(I).LE.KSUSY2+8)) THEN
ISCOL(IR)=1
ELSEIF(KFLA(I).EQ.KSUSY1+21) THEN
ISCOL(IR)=1
C...QUARKONIA+++
C...same for QQ~[3S18]
ELSEIF(MSTP(148).GE.1.AND.(KFLA(I).EQ.9900443.OR.
& KFLA(I).EQ.9900553)) THEN
ISCOL(IR)=1
C...QUARKONIA---
ENDIF
C...Option to switch off radiation from particle KF = MSTJ(39) entirely
C...(only intended for studying the effects of switching such rad on/off)
IF (MSTJ(39).GT.0.AND.KFLA(I).EQ.MSTJ(39)) THEN
ISCOL(IR)=0
ISCHG(IR)=0
ENDIF
IF(ISCOL(IR).EQ.1.OR.ISCHG(IR).EQ.1) KSH(IR)=1
PMTH(1,IR)=PMA(I)
IF(ISCOL(IR).EQ.1.AND.ISCHG(IR).EQ.1) THEN
PMTH(2,IR)=SQRT(PMTH(1,IR)**2+0.25D0*PMQTH1**2)
PMTH(3,IR)=PMTH(2,IR)+PMQTH2
PMTH(4,IR)=SQRT(PMTH(1,IR)**2+0.25D0*PARJ(82)**2)+PMTH(2,21)
PMTH(5,IR)=SQRT(PMTH(1,IR)**2+0.25D0*PARJ(83)**2)+PMTH(2,22)
ELSEIF(ISCOL(IR).EQ.1) THEN
PMTH(2,IR)=SQRT(PMTH(1,IR)**2+0.25D0*PARJ(82)**2)
PMTH(3,IR)=PMTH(2,IR)+0.5D0*PARJ(82)
PMTH(4,IR)=PMTH(3,IR)
PMTH(5,IR)=PMTH(3,IR)
ELSEIF(ISCHG(IR).EQ.1) THEN
PMTH(2,IR)=SQRT(PMTH(1,IR)**2+0.25D0*PARJ(90)**2)
PMTH(3,IR)=PMTH(2,IR)+0.5D0*PARJ(90)
PMTH(4,IR)=PMTH(3,IR)
PMTH(5,IR)=PMTH(3,IR)
ENDIF
IF(KSH(IR).EQ.1) PMA(I)=PMTH(3,IR)
PM=PM+PMA(I)
IF(KSH(IR).EQ.0.OR.PMA(I).GT.10D0*QMAX) IREJ=IREJ+1
DO 160 J=1,4
PS(J)=PS(J)+P(IPA(I),J)
160 CONTINUE
170 CONTINUE
IF(IREJ.EQ.NPA.AND.IP2.GE.-7) RETURN
PS(5)=SQRT(MAX(0D0,PS(4)**2-PS(1)**2-PS(2)**2-PS(3)**2))
IF(NPA.EQ.1) PS(5)=PS(4)
IF(PS(5).LE.PM+PMQT1E) RETURN
C...Identify source: q(1), ~q(2), V(3), S(4), chi(5), ~g(6), unknown(0).
KFSRCE=0
IF(IP2.LE.0) THEN
ELSEIF(K(IP1,3).EQ.K(IP2,3).AND.K(IP1,3).GT.0) THEN
KFSRCE=ABS(K(K(IP1,3),2))
ELSE
IPAR1=MAX(1,K(IP1,3))
IPAR2=MAX(1,K(IP2,3))
IF(K(IPAR1,3).EQ.K(IPAR2,3).AND.K(IPAR1,3).GT.0)
& KFSRCE=ABS(K(K(IPAR1,3),2))
ENDIF
ITYPES=0
IF(KFSRCE.GE.1.AND.KFSRCE.LE.8) ITYPES=1
IF(KFSRCE.GE.KSUSY1+1.AND.KFSRCE.LE.KSUSY1+8) ITYPES=2
IF(KFSRCE.GE.KSUSY2+1.AND.KFSRCE.LE.KSUSY2+8) ITYPES=2
IF(KFSRCE.GE.21.AND.KFSRCE.LE.24) ITYPES=3
IF(KFSRCE.GE.32.AND.KFSRCE.LE.34) ITYPES=3
IF(KFSRCE.EQ.25.OR.(KFSRCE.GE.35.AND.KFSRCE.LE.37)) ITYPES=4
IF(KFSRCE.GE.KSUSY1+22.AND.KFSRCE.LE.KSUSY1+37) ITYPES=5
IF(KFSRCE.EQ.KSUSY1+21) ITYPES=6
C...Identify two primary showerers.
ITYPE1=0
IF(KFLA(1).GE.1.AND.KFLA(1).LE.8) ITYPE1=1
IF(KFLA(1).GE.KSUSY1+1.AND.KFLA(1).LE.KSUSY1+8) ITYPE1=2
IF(KFLA(1).GE.KSUSY2+1.AND.KFLA(1).LE.KSUSY2+8) ITYPE1=2
IF(KFLA(1).GE.21.AND.KFLA(1).LE.24) ITYPE1=3
IF(KFLA(1).GE.32.AND.KFLA(1).LE.34) ITYPE1=3
IF(KFLA(1).EQ.25.OR.(KFLA(1).GE.35.AND.KFLA(1).LE.37)) ITYPE1=4
IF(KFLA(1).GE.KSUSY1+22.AND.KFLA(1).LE.KSUSY1+37) ITYPE1=5
IF(KFLA(1).EQ.KSUSY1+21) ITYPE1=6
ITYPE2=0
IF(KFLA(2).GE.1.AND.KFLA(2).LE.8) ITYPE2=1
IF(KFLA(2).GE.KSUSY1+1.AND.KFLA(2).LE.KSUSY1+8) ITYPE2=2
IF(KFLA(2).GE.KSUSY2+1.AND.KFLA(2).LE.KSUSY2+8) ITYPE2=2
IF(KFLA(2).GE.21.AND.KFLA(2).LE.24) ITYPE2=3
IF(KFLA(2).GE.32.AND.KFLA(2).LE.34) ITYPE2=3
IF(KFLA(2).EQ.25.OR.(KFLA(2).GE.35.AND.KFLA(2).LE.37)) ITYPE2=4
IF(KFLA(2).GE.KSUSY1+22.AND.KFLA(2).LE.KSUSY1+37) ITYPE2=5
IF(KFLA(2).EQ.KSUSY1+21) ITYPE2=6
C...Order of showerers. Presence of gluino.
ITYPMN=MIN(ITYPE1,ITYPE2)
ITYPMX=MAX(ITYPE1,ITYPE2)
IORD=1
IF(ITYPE1.GT.ITYPE2) IORD=2
IGLUI=0
IF(ITYPE1.EQ.6.OR.ITYPE2.EQ.6) IGLUI=1
C...Check if 3-jet matrix elements to be used.
M3JC=0
ALPHA=0.5D0
IF(NPA.EQ.2.AND.MSTJ(47).GE.1.AND.MPSPD.EQ.0) THEN
IF(MSTJ(38).NE.0) THEN
M3JC=MSTJ(38)
ALPHA=PARJ(80)
MSTJ(38)=0
ELSEIF(MSTJ(47).GE.6) THEN
M3JC=MSTJ(47)
ELSE
ICLASS=1
ICOMBI=4
C...Vector/axial vector -> q + qbar; q -> q + V.
IF(ITYPMN.EQ.1.AND.ITYPMX.EQ.1.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.3)) THEN
ICLASS=2
IF(KFSRCE.EQ.21.OR.KFSRCE.EQ.22) THEN
ICOMBI=1
ELSEIF(KFSRCE.EQ.23.OR.(KFSRCE.EQ.0.AND.
& K(IPA(1),2)+K(IPA(2),2).EQ.0)) THEN
C...gamma*/Z0: assume e+e- initial state if unknown.
EI=-1D0
IF(KFSRCE.EQ.23) THEN
IANNFL=K(K(IP1,3),3)
IF(IANNFL.NE.0) THEN
KANNFL=ABS(K(IANNFL,2))
IF(KANNFL.GE.1.AND.KANNFL.LE.18) EI=KCHG(KANNFL,1)/3D0
ENDIF
ENDIF
AI=SIGN(1D0,EI+0.1D0)
VI=AI-4D0*EI*PARU(102)
EF=KCHG(KFLA(1),1)/3D0
AF=SIGN(1D0,EF+0.1D0)
VF=AF-4D0*EF*PARU(102)
XWC=1D0/(16D0*PARU(102)*(1D0-PARU(102)))
SH=PS(5)**2
SQMZ=PMAS(23,1)**2
SQWZ=PS(5)*PMAS(23,2)
SBWZ=1D0/((SH-SQMZ)**2+SQWZ**2)
VECT=EI**2*EF**2+2D0*EI*VI*EF*VF*XWC*SH*(SH-SQMZ)*SBWZ+
& (VI**2+AI**2)*VF**2*XWC**2*SH**2*SBWZ
AXIV=(VI**2+AI**2)*AF**2*XWC**2*SH**2*SBWZ
ICOMBI=3
ALPHA=VECT/(VECT+AXIV)
ELSEIF(KFSRCE.EQ.24.OR.KFSRCE.EQ.0) THEN
ICOMBI=4
ENDIF
C...For chi -> chi q qbar, use V/A -> q qbar as first approximation.
ELSEIF(ITYPMN.EQ.1.AND.ITYPMX.EQ.1.AND.ITYPES.EQ.5) THEN
ICLASS=2
ELSEIF(ITYPMN.EQ.1.AND.ITYPMX.EQ.3.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.1)) THEN
ICLASS=3
C...Scalar/pseudoscalar -> q + qbar; q -> q + S.
ELSEIF(ITYPMN.EQ.1.AND.ITYPMX.EQ.1.AND.ITYPES.EQ.4) THEN
ICLASS=4
IF(KFSRCE.EQ.25.OR.KFSRCE.EQ.35.OR.KFSRCE.EQ.37) THEN
ICOMBI=1
ELSEIF(KFSRCE.EQ.36) THEN
ICOMBI=2
ENDIF
ELSEIF(ITYPMN.EQ.1.AND.ITYPMX.EQ.4.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.1)) THEN
ICLASS=5
C...V -> ~q + ~qbar; ~q -> ~q + V; S -> ~q + ~qbar; ~q -> ~q + S.
ELSEIF(ITYPMN.EQ.2.AND.ITYPMX.EQ.2.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.3)) THEN
ICLASS=6
ELSEIF(ITYPMN.EQ.2.AND.ITYPMX.EQ.3.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.2)) THEN
ICLASS=7
ELSEIF(ITYPMN.EQ.2.AND.ITYPMX.EQ.2.AND.ITYPES.EQ.4) THEN
ICLASS=8
ELSEIF(ITYPMN.EQ.2.AND.ITYPMX.EQ.4.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.2)) THEN
ICLASS=9
C...chi -> q + ~qbar; ~q -> q + chi; q -> ~q + chi.
ELSEIF(ITYPMN.EQ.1.AND.ITYPMX.EQ.2.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.5)) THEN
ICLASS=10
ELSEIF(ITYPMN.EQ.1.AND.ITYPMX.EQ.5.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.2)) THEN
ICLASS=11
ELSEIF(ITYPMN.EQ.2.AND.ITYPMX.EQ.5.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.1)) THEN
ICLASS=12
C...~g -> q + ~qbar; ~q -> q + ~g; q -> ~q + ~g.
ELSEIF(ITYPMN.EQ.1.AND.ITYPMX.EQ.2.AND.ITYPES.EQ.6) THEN
ICLASS=13
ELSEIF(ITYPMN.EQ.1.AND.ITYPMX.EQ.6.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.2)) THEN
ICLASS=14
ELSEIF(ITYPMN.EQ.2.AND.ITYPMX.EQ.6.AND.(ITYPES.EQ.0.OR.
& ITYPES.EQ.1)) THEN
ICLASS=15
C...g -> ~g + ~g (eikonal approximation).
ELSEIF(ITYPMN.EQ.6.AND.ITYPMX.EQ.6.AND.ITYPES.EQ.0) THEN
ICLASS=16
ENDIF
M3JC=5*ICLASS+ICOMBI
ENDIF
ENDIF
C...Find if interference with initial state partons.
MIIS=0
IF(MSTJ(50).GE.1.AND.MSTJ(50).LE.3.AND.NPA.EQ.2.AND.KFSRCE.EQ.0
&.AND.MPSPD.EQ.0) MIIS=MSTJ(50)
IF(MSTJ(50).GE.4.AND.MSTJ(50).LE.6.AND.NPA.EQ.2.AND.MPSPD.EQ.0)
&MIIS=MSTJ(50)-3
IF(MIIS.NE.0) THEN
DO 190 I=1,2
KCII(I)=0
KCA=PYCOMP(KFLA(I))
IF(KCA.NE.0) KCII(I)=KCHG(KCA,2)*SIGN(1,K(IPA(I),2))
NIIS(I)=0
IF(KCII(I).NE.0) THEN
DO 180 J=1,2
ICSI=MOD(K(IPA(I),3+J)/MSTU(5),MSTU(5))
IF(ICSI.GT.0.AND.ICSI.NE.IPA(1).AND.ICSI.NE.IPA(2).AND.
& (KCII(I).EQ.(-1)**(J+1).OR.KCII(I).EQ.2)) THEN
NIIS(I)=NIIS(I)+1
IIIS(I,NIIS(I))=ICSI
ENDIF
180 CONTINUE
ENDIF
190 CONTINUE
IF(NIIS(1)+NIIS(2).EQ.0) MIIS=0
ENDIF
C...Boost interfering initial partons to rest frame
C...and reconstruct their polar and azimuthal angles.
IF(MIIS.NE.0) THEN
DO 210 I=1,2
DO 200 J=1,5
K(N+I,J)=K(IPA(I),J)
P(N+I,J)=P(IPA(I),J)
V(N+I,J)=0D0
200 CONTINUE
210 CONTINUE
DO 230 I=3,2+NIIS(1)
DO 220 J=1,5
K(N+I,J)=K(IIIS(1,I-2),J)
P(N+I,J)=P(IIIS(1,I-2),J)
V(N+I,J)=0D0
220 CONTINUE
230 CONTINUE
DO 250 I=3+NIIS(1),2+NIIS(1)+NIIS(2)
DO 240 J=1,5
K(N+I,J)=K(IIIS(2,I-2-NIIS(1)),J)
P(N+I,J)=P(IIIS(2,I-2-NIIS(1)),J)
V(N+I,J)=0D0
240 CONTINUE
250 CONTINUE
CALL PYROBO(N+1,N+2+NIIS(1)+NIIS(2),0D0,0D0,-PS(1)/PS(4),
& -PS(2)/PS(4),-PS(3)/PS(4))
PHI=PYANGL(P(N+1,1),P(N+1,2))
CALL PYROBO(N+1,N+2+NIIS(1)+NIIS(2),0D0,-PHI,0D0,0D0,0D0)
THE=PYANGL(P(N+1,3),P(N+1,1))
CALL PYROBO(N+1,N+2+NIIS(1)+NIIS(2),-THE,0D0,0D0,0D0,0D0)
DO 260 I=3,2+NIIS(1)
THEIIS(1,I-2)=PYANGL(P(N+I,3),SQRT(P(N+I,1)**2+P(N+I,2)**2))
PHIIIS(1,I-2)=PYANGL(P(N+I,1),P(N+I,2))
260 CONTINUE
DO 270 I=3+NIIS(1),2+NIIS(1)+NIIS(2)
THEIIS(2,I-2-NIIS(1))=PARU(1)-PYANGL(P(N+I,3),
& SQRT(P(N+I,1)**2+P(N+I,2)**2))
PHIIIS(2,I-2-NIIS(1))=PYANGL(P(N+I,1),P(N+I,2))
270 CONTINUE
ENDIF
C...Boost 3 or more partons to their rest frame.
IF(NPA.GE.3) CALL PYROBO(IPA(1),IPA(NPA),0D0,0D0,-PS(1)/PS(4),
&-PS(2)/PS(4),-PS(3)/PS(4))
C...Define imagined single initiator of shower for parton system.
NS=N
IF(N.GT.MSTU(4)-MSTU(32)-10) THEN
CALL PYERRM(11,'(PYSHOW:) no more memory left in PYJETS')
IF(MSTU(21).GE.1) RETURN
ENDIF
280 N=NS
IF(NPA.GE.2) THEN
K(N+1,1)=11
K(N+1,2)=21
K(N+1,3)=0
K(N+1,4)=0
K(N+1,5)=0
P(N+1,1)=0D0
P(N+1,2)=0D0
P(N+1,3)=0D0
P(N+1,4)=PS(5)
P(N+1,5)=PS(5)
V(N+1,5)=PS(5)**2
N=N+1
IREF(1)=21
ENDIF
C...Loop over partons that may branch.
NEP=NPA
IM=NS
IF(NPA.EQ.1) IM=NS-1
290 IM=IM+1
IF(N.GT.NS) THEN
IF(IM.GT.N) GOTO 600
KFLM=ABS(K(IM,2))
IR=IREF(IM-NS)
IF(KSH(IR).EQ.0) GOTO 290
IF(P(IM,5).LT.PMTH(2,IR)) GOTO 290
IGM=K(IM,3)
ELSE
IGM=-1
ENDIF
IF(N+NEP.GT.MSTU(4)-MSTU(32)-10) THEN
CALL PYERRM(11,'(PYSHOW:) no more memory left in PYJETS')
IF(MSTU(21).GE.1) RETURN
ENDIF
C...Position of aunt (sister to branching parton).
C...Origin and flavour of daughters.
IAU=0
IF(IGM.GT.0) THEN
IF(K(IM-1,3).EQ.IGM) IAU=IM-1
IF(N.GE.IM+1.AND.K(IM+1,3).EQ.IGM) IAU=IM+1
ENDIF
IF(IGM.GE.0) THEN
K(IM,4)=N+1
DO 300 I=1,NEP
K(N+I,3)=IM
300 CONTINUE
ELSE
K(N+1,3)=IPA(1)
ENDIF
IF(IGM.LE.0) THEN
DO 310 I=1,NEP
K(N+I,2)=K(IPA(I),2)
310 CONTINUE
ELSEIF(KFLM.NE.21) THEN
K(N+1,2)=K(IM,2)
K(N+2,2)=K(IM,5)
IREF(N+1-NS)=IREF(IM-NS)
IREF(N+2-NS)=ABS(K(N+2,2))
ELSEIF(K(IM,5).EQ.21) THEN
K(N+1,2)=21
K(N+2,2)=21
IREF(N+1-NS)=21
IREF(N+2-NS)=21
ELSE
K(N+1,2)=K(IM,5)
K(N+2,2)=-K(IM,5)
IREF(N+1-NS)=ABS(K(N+1,2))
IREF(N+2-NS)=ABS(K(N+2,2))
ENDIF
C...Reset flags on daughters and tries made.
DO 320 IP=1,NEP
K(N+IP,1)=3
K(N+IP,4)=0
K(N+IP,5)=0
KFLD(IP)=ABS(K(N+IP,2))
IF(KCHG(PYCOMP(KFLD(IP)),2).EQ.0) K(N+IP,1)=1
ITRY(IP)=0
ISL(IP)=0
ISI(IP)=0
IF(KSH(IREF(N+IP-NS)).EQ.1) ISI(IP)=1
320 CONTINUE
ISLM=0
C...Maximum virtuality of daughters.
IF(IGM.LE.0) THEN
DO 330 I=1,NPA
IF(NPA.GE.3) P(N+I,4)=P(IPA(I),4)
P(N+I,5)=MIN(QMAX,PS(5))
IR=IREF(N+I-NS)
IF(IP2.LE.-8) P(N+I,5)=MAX(P(N+I,5),2D0*PMTH(3,IR))
IF(ISI(I).EQ.0) P(N+I,5)=P(IPA(I),5)
330 CONTINUE
ELSE
IF(MSTJ(43).LE.2) PEM=V(IM,2)
IF(MSTJ(43).GE.3) PEM=P(IM,4)
P(N+1,5)=MIN(P(IM,5),V(IM,1)*PEM)
P(N+2,5)=MIN(P(IM,5),(1D0-V(IM,1))*PEM)
IF(K(N+2,2).EQ.22) P(N+2,5)=PMTH(1,22)
ENDIF
DO 340 I=1,NEP
PMSD(I)=P(N+I,5)
IF(ISI(I).EQ.1) THEN
IR=IREF(N+I-NS)
IF(P(N+I,5).LE.PMTH(3,IR)) P(N+I,5)=PMTH(1,IR)
ENDIF
V(N+I,5)=P(N+I,5)**2
340 CONTINUE
C...Choose one of the daughters for evolution.
350 INUM=0
IF(NEP.EQ.1) INUM=1
DO 360 I=1,NEP
IF(INUM.EQ.0.AND.ISL(I).EQ.1) INUM=I
360 CONTINUE
DO 370 I=1,NEP
IF(INUM.EQ.0.AND.ITRY(I).EQ.0.AND.ISI(I).EQ.1) THEN
IR=IREF(N+I-NS)
IF(P(N+I,5).GE.PMTH(2,IR)) INUM=I
ENDIF
370 CONTINUE
IF(INUM.EQ.0) THEN
RMAX=0D0
DO 380 I=1,NEP
IF(ISI(I).EQ.1.AND.PMSD(I).GE.PMQT2E) THEN
RPM=P(N+I,5)/PMSD(I)
IR=IREF(N+I-NS)
IF(RPM.GT.RMAX.AND.P(N+I,5).GE.PMTH(2,IR)) THEN
RMAX=RPM
INUM=I
ENDIF
ENDIF
380 CONTINUE
ENDIF
C...Cancel choice of predetermined daughter already treated.
INUM=MAX(1,INUM)
INUMT=INUM
IF(MPSPD.EQ.1.AND.IGM.EQ.0.AND.ITRY(INUMT).GE.1) THEN
IF(K(IP1-1+INUM,4).GT.0) INUM=3-INUM
ELSEIF(MPSPD.EQ.1.AND.IM.EQ.NS+2.AND.ITRY(INUMT).GE.1) THEN
IF(KFLD(INUMT).NE.21.AND.K(IP1+2,4).GT.0) INUM=3-INUM
IF(KFLD(INUMT).EQ.21.AND.K(IP1+3,4).GT.0) INUM=3-INUM
ENDIF
C...Store information on choice of evolving daughter.
IEP(1)=N+INUM
DO 390 I=2,NEP
IEP(I)=IEP(I-1)+1
IF(IEP(I).GT.N+NEP) IEP(I)=N+1
390 CONTINUE
DO 400 I=1,NEP
KFL(I)=ABS(K(IEP(I),2))
400 CONTINUE
ITRY(INUM)=ITRY(INUM)+1
IF(ITRY(INUM).GT.200) THEN
CALL PYERRM(14,'(PYSHOW:) caught in infinite loop')
IF(MSTU(21).GE.1) RETURN
ENDIF
Z=0.5D0
IR=IREF(IEP(1)-NS)
IF(KSH(IR).EQ.0) GOTO 450
IF(P(IEP(1),5).LT.PMTH(2,IR)) GOTO 450
C...Check if evolution already predetermined for daughter.
IPSPD=0
IF(MPSPD.EQ.1.AND.IGM.EQ.0) THEN
IF(K(IP1-1+INUM,4).GT.0) IPSPD=IP1-1+INUM
ELSEIF(MPSPD.EQ.1.AND.IM.EQ.NS+2) THEN
IF(KFL(1).NE.21.AND.K(IP1+2,4).GT.0) IPSPD=IP1+2
IF(KFL(1).EQ.21.AND.K(IP1+3,4).GT.0) IPSPD=IP1+3
ENDIF
IF(INUM.EQ.1.OR.INUM.EQ.2) THEN
ISSET(INUM)=0
IF(IPSPD.NE.0) ISSET(INUM)=1
ENDIF
C...Select side for interference with initial state partons.
IF(MIIS.GE.1.AND.IEP(1).LE.NS+3) THEN
III=IEP(1)-NS-1
ISII(III)=0
IF(ABS(KCII(III)).EQ.1.AND.NIIS(III).EQ.1) THEN
ISII(III)=1
ELSEIF(KCII(III).EQ.2.AND.NIIS(III).EQ.1) THEN
IF(PYR(0).GT.0.5D0) ISII(III)=1
ELSEIF(KCII(III).EQ.2.AND.NIIS(III).EQ.2) THEN
ISII(III)=1
IF(PYR(0).GT.0.5D0) ISII(III)=2
ENDIF
ENDIF
C...Calculate allowed z range.
IF(NEP.EQ.1) THEN
PMED=PS(4)
ELSEIF(IGM.EQ.0.OR.MSTJ(43).LE.2) THEN
PMED=P(IM,5)
ELSE
IF(INUM.EQ.1) PMED=V(IM,1)*PEM
IF(INUM.EQ.2) PMED=(1D0-V(IM,1))*PEM
ENDIF
IF(MOD(MSTJ(43),2).EQ.1) THEN
ZC=PMTH(2,21)/PMED
ZCE=PMTH(2,22)/PMED
IF(ISCOL(IR).EQ.0) ZCE=0.5D0*PARJ(90)/PMED
ELSE
ZC=0.5D0*(1D0-SQRT(MAX(0D0,1D0-(2D0*PMTH(2,21)/PMED)**2)))
IF(ZC.LT.1D-6) ZC=(PMTH(2,21)/PMED)**2
PMTMPE=PMTH(2,22)
IF(ISCOL(IR).EQ.0) PMTMPE=0.5D0*PARJ(90)
ZCE=0.5D0*(1D0-SQRT(MAX(0D0,1D0-(2D0*PMTMPE/PMED)**2)))
IF(ZCE.LT.1D-6) ZCE=(PMTMPE/PMED)**2
ENDIF
ZC=MIN(ZC,0.491D0)
ZCE=MIN(ZCE,0.49991D0)
IF(((MSTJ(41).EQ.1.AND.ZC.GT.0.49D0).OR.(MSTJ(41).GE.2.AND.
&MIN(ZC,ZCE).GT.0.4999D0)).AND.IPSPD.EQ.0) THEN
P(IEP(1),5)=PMTH(1,IR)
V(IEP(1),5)=P(IEP(1),5)**2
GOTO 450
ENDIF
C...Integral of Altarelli-Parisi z kernel for QCD.
C...(Includes squark and gluino; with factor N_C/C_F extra for latter).
IF(MSTJ(49).EQ.0.AND.KFL(1).EQ.21) THEN
FBR=6D0*LOG((1D0-ZC)/ZC)+MSTJ(45)*0.5D0
C...QUARKONIA+++
C...Evolution of QQ~[3S18] state if MSTP(148)=1.
ELSEIF(MSTJ(49).EQ.0.AND.MSTP(149).GE.0.AND.
& (KFL(1).EQ.9900443.OR.KFL(1).EQ.9900553)) THEN
FBR=6D0*LOG((1D0-ZC)/ZC)
C...QUARKONIA---
ELSEIF(MSTJ(49).EQ.0) THEN
FBR=(8D0/3D0)*LOG((1D0-ZC)/ZC)
IF(IGLUI.EQ.1.AND.IR.GE.31) FBR=FBR*(9D0/4D0)
C...Integral of Altarelli-Parisi z kernel for scalar gluon.
ELSEIF(MSTJ(49).EQ.1.AND.KFL(1).EQ.21) THEN
FBR=(PARJ(87)+MSTJ(45)*PARJ(88))*(1D0-2D0*ZC)
ELSEIF(MSTJ(49).EQ.1) THEN
FBR=(1D0-2D0*ZC)/3D0
IF(IGM.EQ.0.AND.M3JC.GE.1) FBR=4D0*FBR
C...Integral of Altarelli-Parisi z kernel for Abelian vector gluon.
ELSEIF(KFL(1).EQ.21) THEN
FBR=6D0*MSTJ(45)*(0.5D0-ZC)
ELSE
FBR=2D0*LOG((1D0-ZC)/ZC)
ENDIF
C...Reset QCD probability for colourless.
IF(ISCOL(IR).EQ.0) FBR=0D0
C...Integral of Altarelli-Parisi kernel for photon emission.
FBRE=0D0
IF(MSTJ(41).GE.2.AND.ISCHG(IR).EQ.1) THEN
IF(KFL(1).LE.18) THEN
FBRE=(KCHG(KFL(1),1)/3D0)**2*2D0*LOG((1D0-ZCE)/ZCE)
ENDIF
IF(MSTJ(41).EQ.10) FBRE=PARJ(84)*FBRE
ENDIF
C...Inner veto algorithm starts. Find maximum mass for evolution.
410 PMS=V(IEP(1),5)
IF(IGM.GE.0) THEN
PM2=0D0
DO 420 I=2,NEP
PM=P(IEP(I),5)
IRI=IREF(IEP(I)-NS)
IF(KSH(IRI).EQ.1) PM=PMTH(2,IRI)
PM2=PM2+PM
420 CONTINUE
PMS=MIN(PMS,(P(IM,5)-PM2)**2)
ENDIF
C...Select mass for daughter in QCD evolution.
B0=27D0/6D0
DO 430 IFF=4,MSTJ(45)
IF(PMS.GT.4D0*PMTH(2,IFF)**2) B0=(33D0-2D0*IFF)/6D0
430 CONTINUE
C...Shift m^2 for evolution in Q^2 = m^2 - m(onshell)^2.
PMSC=MAX(0.5D0*PARJ(82),PMS-PMTH(1,IR)**2)
C...Already predetermined choice.
IF(IPSPD.NE.0) THEN
PMSQCD=P(IPSPD,5)**2
ELSEIF(FBR.LT.1D-3) THEN
PMSQCD=0D0
ELSEIF(MSTJ(44).LE.0) THEN
PMSQCD=PMSC*EXP(MAX(-50D0,LOG(PYR(0))*PARU(2)/(PARU(111)*FBR)))
ELSEIF(MSTJ(44).EQ.1) THEN
PMSQCD=4D0*ALAMS*(0.25D0*PMSC/ALAMS)**(PYR(0)**(B0/FBR))
ELSE
PMSQCD=PMSC*EXP(MAX(-50D0,ALFM*B0*LOG(PYR(0))/FBR))
ENDIF
C...Shift back m^2 from evolution in Q^2 = m^2 - m(onshell)^2.
IF(IPSPD.EQ.0) PMSQCD=PMSQCD+PMTH(1,IR)**2
IF(ZC.GT.0.49D0.OR.PMSQCD.LE.PMTH(4,IR)**2) PMSQCD=PMTH(2,IR)**2
V(IEP(1),5)=PMSQCD
MCE=1
C...Select mass for daughter in QED evolution.
IF(MSTJ(41).GE.2.AND.ISCHG(IR).EQ.1.AND.IPSPD.EQ.0) THEN
C...Shift m^2 for evolution in Q^2 = m^2 - m(onshell)^2.
PMSE=MAX(0.5D0*PARJ(83),PMS-PMTH(1,IR)**2)
IF(FBRE.LT.1D-3) THEN
PMSQED=0D0
ELSE
PMSQED=PMSE*EXP(MAX(-50D0,LOG(PYR(0))*PARU(2)/
& (PARU(101)*FBRE)))
ENDIF
C...Shift back m^2 from evolution in Q^2 = m^2 - m(onshell)^2.
PMSQED=PMSQED+PMTH(1,IR)**2
IF(ZCE.GT.0.4999D0.OR.PMSQED.LE.PMTH(5,IR)**2) PMSQED=
& PMTH(2,IR)**2
IF(PMSQED.GT.PMSQCD) THEN
V(IEP(1),5)=PMSQED
MCE=2
ENDIF
ENDIF
C...Check whether daughter mass below cutoff.
P(IEP(1),5)=SQRT(V(IEP(1),5))
IF(P(IEP(1),5).LE.PMTH(3,IR)) THEN
P(IEP(1),5)=PMTH(1,IR)
V(IEP(1),5)=P(IEP(1),5)**2
GOTO 450
ENDIF
C...Already predetermined choice of z, and flavour in g -> qqbar.
IF(IPSPD.NE.0) THEN
IPSGD1=K(IPSPD,4)
IPSGD2=K(IPSPD,5)
PMSGD1=P(IPSGD1,5)**2
PMSGD2=P(IPSGD2,5)**2
ALAMPS=SQRT(MAX(1D-10,(PMSQCD-PMSGD1-PMSGD2)**2-
& 4D0*PMSGD1*PMSGD2))
Z=0.5D0*(PMSQCD*(2D0*P(IPSGD1,4)/P(IPSPD,4)-1D0)+ALAMPS-
& PMSGD1+PMSGD2)/ALAMPS
Z=MAX(0.00001D0,MIN(0.99999D0,Z))
IF(KFL(1).NE.21) THEN
K(IEP(1),5)=21
ELSE
K(IEP(1),5)=ABS(K(IPSGD1,2))
ENDIF
C...Select z value of branching: q -> qgamma.
ELSEIF(MCE.EQ.2) THEN
Z=1D0-(1D0-ZCE)*(ZCE/(1D0-ZCE))**PYR(0)
IF(1D0+Z**2.LT.2D0*PYR(0)) GOTO 410
K(IEP(1),5)=22
C...QUARKONIA+++
C...Select z value of branching: QQ~[3S18] -> QQ~[3S18]g.
ELSEIF(MSTJ(49).EQ.0.AND.
& (KFL(1).EQ.9900443.OR.KFL(1).EQ.9900553)) THEN
Z=(1D0-ZC)*(ZC/(1D0-ZC))**PYR(0)
C...Select always the harder 'gluon' if the switch MSTP(149)<=0.
IF(MSTP(149).LE.0.OR.PYR(0).GT.0.5D0) Z=1D0-Z
IF((1D0-Z*(1D0-Z))**2.LT.PYR(0)) GOTO 410
K(IEP(1),5)=21
C...QUARKONIA---
C...Select z value of branching: q -> qg, g -> gg, g -> qqbar.
ELSEIF(MSTJ(49).NE.1.AND.KFL(1).NE.21) THEN
Z=1D0-(1D0-ZC)*(ZC/(1D0-ZC))**PYR(0)
C...Only do z weighting when no ME correction afterwards.
IF(M3JC.EQ.0.AND.1D0+Z**2.LT.2D0*PYR(0)) GOTO 410
K(IEP(1),5)=21
ELSEIF(MSTJ(49).EQ.0.AND.MSTJ(45)*0.5D0.LT.PYR(0)*FBR) THEN
Z=(1D0-ZC)*(ZC/(1D0-ZC))**PYR(0)
IF(PYR(0).GT.0.5D0) Z=1D0-Z
IF((1D0-Z*(1D0-Z))**2.LT.PYR(0)) GOTO 410
K(IEP(1),5)=21
ELSEIF(MSTJ(49).NE.1) THEN
Z=PYR(0)
IF(Z**2+(1D0-Z)**2.LT.PYR(0)) GOTO 410
KFLB=1+INT(MSTJ(45)*PYR(0))
PMQ=4D0*PMTH(2,KFLB)**2/V(IEP(1),5)
IF(PMQ.GE.1D0) GOTO 410
IF(MSTJ(44).LE.2.OR.MSTJ(44).EQ.4) THEN
IF(Z.LT.ZC.OR.Z.GT.1D0-ZC) GOTO 410
PMQ0=4D0*PMTH(2,21)**2/V(IEP(1),5)
IF(MOD(MSTJ(43),2).EQ.0.AND.(1D0+0.5D0*PMQ)*SQRT(1D0-PMQ)
& .LT.PYR(0)*(1D0+0.5D0*PMQ0)*SQRT(1D0-PMQ0)) GOTO 410
ELSE
IF((1D0+0.5D0*PMQ)*SQRT(1D0-PMQ).LT.PYR(0)) GOTO 410
ENDIF
K(IEP(1),5)=KFLB
C...Ditto for scalar gluon model.
ELSEIF(KFL(1).NE.21) THEN
Z=1D0-SQRT(ZC**2+PYR(0)*(1D0-2D0*ZC))
K(IEP(1),5)=21
ELSEIF(PYR(0)*(PARJ(87)+MSTJ(45)*PARJ(88)).LE.PARJ(87)) THEN
Z=ZC+(1D0-2D0*ZC)*PYR(0)
K(IEP(1),5)=21
ELSE
Z=ZC+(1D0-2D0*ZC)*PYR(0)
KFLB=1+INT(MSTJ(45)*PYR(0))
PMQ=4D0*PMTH(2,KFLB)**2/V(IEP(1),5)
IF(PMQ.GE.1D0) GOTO 410
K(IEP(1),5)=KFLB
ENDIF
C...Correct to alpha_s(pT^2) (optionally m^2/4 for g -> q qbar).
IF(MCE.EQ.1.AND.MSTJ(44).GE.2.AND.IPSPD.EQ.0) THEN
IF(KFL(1).EQ.21.AND.K(IEP(1),5).LT.10.AND.
& (MSTJ(44).EQ.3.OR.MSTJ(44).EQ.5)) THEN
IF(ALFM/LOG(V(IEP(1),5)*0.25D0/ALAMS).LT.PYR(0)) GOTO 410
ELSE
PT2APP=Z*(1D0-Z)*V(IEP(1),5)
IF(MSTJ(44).GE.4) PT2APP=PT2APP*
& (1D0-PMTH(1,IR)**2/V(IEP(1),5))**2
IF(PT2APP.LT.PT2MIN) GOTO 410
IF(ALFM/LOG(PT2APP/ALAMS).LT.PYR(0)) GOTO 410
ENDIF
ENDIF
C...Check if z consistent with chosen m.
IF(KFL(1).EQ.21) THEN
IRGD1=ABS(K(IEP(1),5))
IRGD2=IRGD1
ELSE
IRGD1=IR
IRGD2=ABS(K(IEP(1),5))
ENDIF
IF(NEP.EQ.1) THEN
PED=PS(4)
ELSEIF(NEP.GE.3) THEN
PED=P(IEP(1),4)
ELSEIF(IGM.EQ.0.OR.MSTJ(43).LE.2) THEN
PED=0.5D0*(V(IM,5)+V(IEP(1),5)-PM2**2)/P(IM,5)
ELSE
IF(IEP(1).EQ.N+1) PED=V(IM,1)*PEM
IF(IEP(1).EQ.N+2) PED=(1D0-V(IM,1))*PEM
ENDIF
IF(MOD(MSTJ(43),2).EQ.1) THEN
PMQTH3=0.5D0*PARJ(82)
IF(IRGD2.EQ.22) PMQTH3=0.5D0*PARJ(83)
IF(IRGD2.EQ.22.AND.ISCOL(IR).EQ.0) PMQTH3=0.5D0*PARJ(90)
PMQ1=(PMTH(1,IRGD1)**2+PMQTH3**2)/V(IEP(1),5)
PMQ2=(PMTH(1,IRGD2)**2+PMQTH3**2)/V(IEP(1),5)
ZD=SQRT(MAX(0D0,(1D0-V(IEP(1),5)/PED**2)*((1D0-PMQ1-PMQ2)**2-
& 4D0*PMQ1*PMQ2)))
ZH=1D0+PMQ1-PMQ2
ELSE
ZD=SQRT(MAX(0D0,1D0-V(IEP(1),5)/PED**2))
ZH=1D0
ENDIF
IF(KFL(1).EQ.21.AND.K(IEP(1),5).LT.10.AND.
&(MSTJ(44).EQ.3.OR.MSTJ(44).EQ.5)) THEN
ELSEIF(IPSPD.NE.0) THEN
ELSE
ZL=0.5D0*(ZH-ZD)
ZU=0.5D0*(ZH+ZD)
IF(Z.LT.ZL.OR.Z.GT.ZU) GOTO 410
ENDIF
IF(KFL(1).EQ.21) V(IEP(1),3)=LOG(ZU*(1D0-ZL)/MAX(1D-20,ZL*
&(1D0-ZU)))
IF(KFL(1).NE.21) V(IEP(1),3)=LOG((1D0-ZL)/MAX(1D-10,1D0-ZU))
C...Width suppression for q -> q + g.
IF(MSTJ(40).NE.0.AND.KFL(1).NE.21.AND.IPSPD.EQ.0) THEN
IF(IGM.EQ.0) THEN
EGLU=0.5D0*PS(5)*(1D0-Z)*(1D0+V(IEP(1),5)/V(NS+1,5))
ELSE
EGLU=PMED*(1D0-Z)
ENDIF
CHI=PARJ(89)**2/(PARJ(89)**2+EGLU**2)
IF(MSTJ(40).EQ.1) THEN
IF(CHI.LT.PYR(0)) GOTO 410
ELSEIF(MSTJ(40).EQ.2) THEN
IF(1D0-CHI.LT.PYR(0)) GOTO 410
ENDIF
ENDIF
C...Three-jet matrix element correction.
IF(M3JC.GE.1) THEN
WME=1D0
WSHOW=1D0
C...QED matrix elements: only for massless case so far.
IF(MCE.EQ.2.AND.IGM.EQ.0) THEN
X1=Z*(1D0+V(IEP(1),5)/V(NS+1,5))
X2=1D0-V(IEP(1),5)/V(NS+1,5)
X3=(1D0-X1)+(1D0-X2)
KI1=K(IPA(INUM),2)
KI2=K(IPA(3-INUM),2)
QF1=KCHG(PYCOMP(KI1),1)*SIGN(1,KI1)/3D0
QF2=KCHG(PYCOMP(KI2),1)*SIGN(1,KI2)/3D0
WSHOW=QF1**2*(1D0-X1)/X3*(1D0+(X1/(2D0-X2))**2)+
& QF2**2*(1D0-X2)/X3*(1D0+(X2/(2D0-X1))**2)
WME=(QF1*(1D0-X1)/X3-QF2*(1D0-X2)/X3)**2*(X1**2+X2**2)
ELSEIF(MCE.EQ.2) THEN
C...QCD matrix elements, including mass effects.
ELSEIF(MSTJ(49).NE.1.AND.K(IEP(1),2).NE.21) THEN
PS1ME=V(IEP(1),5)
PM1ME=PMTH(1,IR)
M3JCC=M3JC
IF(IR.GE.31.AND.IGM.EQ.0) THEN
C...QCD ME: original parton, first branching.
PM2ME=PMTH(1,63-IR)
ECMME=PS(5)
ELSEIF(IR.GE.31) THEN
C...QCD ME: original parton, subsequent branchings.
PM2ME=PMTH(1,63-IR)
PEDME=PEM*(V(IM,1)+(1D0-V(IM,1))*PS1ME/V(IM,5))
ECMME=PEDME+SQRT(MAX(0D0,PEDME**2-PS1ME+PM2ME**2))
ELSEIF(K(IM,2).EQ.21) THEN
C...QCD ME: secondary partons, first branching.
PM2ME=PM1ME
ZMME=V(IM,1)
IF(IEP(1).GT.IEP(2)) ZMME=1D0-ZMME
PMLME=SQRT(MAX(0D0,(V(IM,5)-PS1ME-PM2ME**2)**2-
& 4D0*PS1ME*PM2ME**2))
PEDME=PEM*(0.5D0*(V(IM,5)-PMLME+PS1ME-PM2ME**2)+PMLME*ZMME)/
& V(IM,5)
ECMME=PEDME+SQRT(MAX(0D0,PEDME**2-PS1ME+PM2ME**2))
M3JCC=66
ELSE
C...QCD ME: secondary partons, subsequent branchings.
PM2ME=PM1ME
PEDME=PEM*(V(IM,1)+(1D0-V(IM,1))*PS1ME/V(IM,5))
ECMME=PEDME+SQRT(MAX(0D0,PEDME**2-PS1ME+PM2ME**2))
M3JCC=66
ENDIF
C...Construct ME variables.
R1ME=PM1ME/ECMME
R2ME=PM2ME/ECMME
X1=(1D0+PS1ME/ECMME**2-R2ME**2)*(Z+(1D0-Z)*PM1ME**2/PS1ME)
X2=1D0+R2ME**2-PS1ME/ECMME**2
C...Call ME, with right order important for two inequivalent showerers.
IF(IR.EQ.IORD+30) THEN
WME=PYMAEL(M3JCC,X1,X2,R1ME,R2ME,ALPHA)
ELSE
WME=PYMAEL(M3JCC,X2,X1,R2ME,R1ME,ALPHA)
ENDIF
C...Split up total ME when two radiating partons.
ISPRAD=1
IF((M3JCC.GE.16.AND.M3JCC.LE.19).OR.
& (M3JCC.GE.26.AND.M3JCC.LE.29).OR.
& (M3JCC.GE.36.AND.M3JCC.LE.39).OR.
& (M3JCC.GE.46.AND.M3JCC.LE.49).OR.
& (M3JCC.GE.56.AND.M3JCC.LE.64)) ISPRAD=0
IF(ISPRAD.EQ.1) WME=WME*MAX(1D-10,1D0+R1ME**2-R2ME**2-X1)/
& MAX(1D-10,2D0-X1-X2)
C...Evaluate shower rate to be compared with.
WSHOW=2D0/(MAX(1D-10,2D0-X1-X2)*
& MAX(1D-10,1D0+R2ME**2-R1ME**2-X2))
IF(IGLUI.EQ.1.AND.IR.GE.31) WSHOW=(9D0/4D0)*WSHOW
ELSEIF(MSTJ(49).NE.1) THEN
C...Toy model scalar theory matrix elements; no mass effects.
ELSE
X1=Z*(1D0+V(IEP(1),5)/V(NS+1,5))
X2=1D0-V(IEP(1),5)/V(NS+1,5)
X3=(1D0-X1)+(1D0-X2)
WSHOW=4D0*X3*((1D0-X1)/(2D0-X2)**2+(1D0-X2)/(2D0-X1)**2)
WME=X3**2
IF(MSTJ(102).GE.2) WME=X3**2-2D0*(1D0+X3)*(1D0-X1)*(1D0-X2)*
& PARJ(171)
ENDIF
IF(WME.LT.PYR(0)*WSHOW) GOTO 410
ENDIF
C...Impose angular ordering by rejection of nonordered emission.
IF(MCE.EQ.1.AND.IGM.GT.0.AND.MSTJ(42).GE.2.AND.IPSPD.EQ.0) THEN
PEMAO=V(IM,1)*P(IM,4)
IF(IEP(1).EQ.N+2) PEMAO=(1D0-V(IM,1))*P(IM,4)
IF(IR.GE.31.AND.MSTJ(42).GE.5) THEN
MAOD=0
ELSEIF(KFL(1).EQ.21.AND.K(IEP(1),5).LE.10.AND.(MSTJ(42).EQ.4
& .OR.MSTJ(42).EQ.7)) THEN
MAOD=0
ELSEIF(KFL(1).EQ.21.AND.K(IEP(1),5).LE.10.AND.(MSTJ(42).EQ.3
& .OR.MSTJ(42).EQ.6)) THEN
MAOD=1
PMDAO=PMTH(2,K(IEP(1),5))
THE2ID=Z*(1D0-Z)*PEMAO**2/(V(IEP(1),5)-4D0*PMDAO**2)
ELSE
MAOD=1
THE2ID=Z*(1D0-Z)*PEMAO**2/V(IEP(1),5)
IF(MSTJ(42).GE.3.AND.MSTJ(42).NE.5) THE2ID=THE2ID*
& (1D0+PMTH(1,IR)**2*(1D0-Z)/(V(IEP(1),5)*Z))**2
ENDIF
MAOM=1
IAOM=IM
440 IF(K(IAOM,5).EQ.22) THEN
IAOM=K(IAOM,3)
IF(K(IAOM,3).LE.NS) MAOM=0
IF(MAOM.EQ.1) GOTO 440
ENDIF
IF(MAOM.EQ.1.AND.MAOD.EQ.1) THEN
THE2IM=V(IAOM,1)*(1D0-V(IAOM,1))*P(IAOM,4)**2/V(IAOM,5)
IF(THE2ID.LT.THE2IM) GOTO 410
ENDIF
ENDIF
C...Impose user-defined maximum angle at first branching.
IF(MSTJ(48).EQ.1.AND.IPSPD.EQ.0) THEN
IF(NEP.EQ.1.AND.IM.EQ.NS) THEN
THE2ID=Z*(1D0-Z)*PS(4)**2/V(IEP(1),5)
IF(PARJ(85)**2*THE2ID.LT.1D0) GOTO 410
ELSEIF(NEP.EQ.2.AND.IEP(1).EQ.NS+2) THEN
THE2ID=Z*(1D0-Z)*(0.5D0*P(IM,4))**2/V(IEP(1),5)
IF(PARJ(85)**2*THE2ID.LT.1D0) GOTO 410
ELSEIF(NEP.EQ.2.AND.IEP(1).EQ.NS+3) THEN
THE2ID=Z*(1D0-Z)*(0.5D0*P(IM,4))**2/V(IEP(1),5)
IF(PARJ(86)**2*THE2ID.LT.1D0) GOTO 410
ENDIF
ENDIF
C...Impose angular constraint in first branching from interference
C...with initial state partons.
IF(MIIS.GE.2.AND.IEP(1).LE.NS+3) THEN
THE2D=MAX((1D0-Z)/Z,Z/(1D0-Z))*V(IEP(1),5)/(0.5D0*P(IM,4))**2
IF(IEP(1).EQ.NS+2.AND.ISII(1).GE.1) THEN
IF(THE2D.GT.THEIIS(1,ISII(1))**2) GOTO 410
ELSEIF(IEP(1).EQ.NS+3.AND.ISII(2).GE.1) THEN
IF(THE2D.GT.THEIIS(2,ISII(2))**2) GOTO 410
ENDIF
ENDIF
C...End of inner veto algorithm. Check if only one leg evolved so far.
450 V(IEP(1),1)=Z
ISL(1)=0
ISL(2)=0
IF(NEP.EQ.1) GOTO 490
IF(NEP.EQ.2.AND.P(IEP(1),5)+P(IEP(2),5).GE.P(IM,5)) GOTO 350
DO 460 I=1,NEP
IR=IREF(N+I-NS)
IF(ITRY(I).EQ.0.AND.KSH(IR).EQ.1) THEN
IF(P(N+I,5).GE.PMTH(2,IR)) GOTO 350
ENDIF
460 CONTINUE
C...Check if chosen multiplet m1,m2,z1,z2 is physical.
IF(NEP.GE.3) THEN
PMSUM=0D0
DO 470 I=1,NEP
PMSUM=PMSUM+P(N+I,5)
470 CONTINUE
IF(PMSUM.GE.PS(5)) GOTO 350
ELSEIF(IGM.EQ.0.OR.MSTJ(43).LE.2.OR.MOD(MSTJ(43),2).EQ.0) THEN
DO 480 I1=N+1,N+2
IRDA=IREF(I1-NS)
IF(KSH(IRDA).EQ.0) GOTO 480
IF(P(I1,5).LT.PMTH(2,IRDA)) GOTO 480
IF(IRDA.EQ.21) THEN
IRGD1=ABS(K(I1,5))
IRGD2=IRGD1
ELSE
IRGD1=IRDA
IRGD2=ABS(K(I1,5))
ENDIF
I2=2*N+3-I1
IF(IGM.EQ.0.OR.MSTJ(43).LE.2) THEN
PED=0.5D0*(V(IM,5)+V(I1,5)-V(I2,5))/P(IM,5)
ELSE
IF(I1.EQ.N+1) ZM=V(IM,1)
IF(I1.EQ.N+2) ZM=1D0-V(IM,1)
PML=SQRT((V(IM,5)-V(N+1,5)-V(N+2,5))**2-
& 4D0*V(N+1,5)*V(N+2,5))
PED=PEM*(0.5D0*(V(IM,5)-PML+V(I1,5)-V(I2,5))+PML*ZM)/
& V(IM,5)
ENDIF
IF(MOD(MSTJ(43),2).EQ.1) THEN
PMQTH3=0.5D0*PARJ(82)
IF(IRGD2.EQ.22) PMQTH3=0.5D0*PARJ(83)
IF(IRGD2.EQ.22.AND.ISCOL(IRDA).EQ.0) PMQTH3=0.5D0*PARJ(90)
PMQ1=(PMTH(1,IRGD1)**2+PMQTH3**2)/V(I1,5)
PMQ2=(PMTH(1,IRGD2)**2+PMQTH3**2)/V(I1,5)
ZD=SQRT(MAX(0D0,(1D0-V(I1,5)/PED**2)*((1D0-PMQ1-PMQ2)**2-
& 4D0*PMQ1*PMQ2)))
ZH=1D0+PMQ1-PMQ2
ELSE
ZD=SQRT(MAX(0D0,1D0-V(I1,5)/PED**2))
ZH=1D0
ENDIF
IF(IRDA.EQ.21.AND.IRGD1.LT.10.AND.
& (MSTJ(44).EQ.3.OR.MSTJ(44).EQ.5)) THEN
ELSE
ZL=0.5D0*(ZH-ZD)
ZU=0.5D0*(ZH+ZD)
IF(I1.EQ.N+1.AND.(V(I1,1).LT.ZL.OR.V(I1,1).GT.ZU).AND.
& ISSET(1).EQ.0) THEN
ISL(1)=1
ELSEIF(I1.EQ.N+2.AND.(V(I1,1).LT.ZL.OR.V(I1,1).GT.ZU).AND.
& ISSET(2).EQ.0) THEN
ISL(2)=1
ENDIF
ENDIF
IF(IRDA.EQ.21) V(I1,4)=LOG(ZU*(1D0-ZL)/MAX(1D-20,
& ZL*(1D0-ZU)))
IF(IRDA.NE.21) V(I1,4)=LOG((1D0-ZL)/MAX(1D-10,1D0-ZU))
480 CONTINUE
IF(ISL(1).EQ.1.AND.ISL(2).EQ.1.AND.ISLM.NE.0) THEN
ISL(3-ISLM)=0
ISLM=3-ISLM
ELSEIF(ISL(1).EQ.1.AND.ISL(2).EQ.1) THEN
ZDR1=MAX(0D0,V(N+1,3)/MAX(1D-6,V(N+1,4))-1D0)
ZDR2=MAX(0D0,V(N+2,3)/MAX(1D-6,V(N+2,4))-1D0)
IF(ZDR2.GT.PYR(0)*(ZDR1+ZDR2)) ISL(1)=0
IF(ISL(1).EQ.1) ISL(2)=0
IF(ISL(1).EQ.0) ISLM=1
IF(ISL(2).EQ.0) ISLM=2
ENDIF
IF(ISL(1).EQ.1.OR.ISL(2).EQ.1) GOTO 350
ENDIF
IRD1=IREF(N+1-NS)
IRD2=IREF(N+2-NS)
IF(IGM.GT.0) THEN
IF(MOD(MSTJ(43),2).EQ.1.AND.(P(N+1,5).GE.
& PMTH(2,IRD1).OR.P(N+2,5).GE.PMTH(2,IRD2))) THEN
PMQ1=V(N+1,5)/V(IM,5)
PMQ2=V(N+2,5)/V(IM,5)
ZD=SQRT(MAX(0D0,(1D0-V(IM,5)/PEM**2)*((1D0-PMQ1-PMQ2)**2-
& 4D0*PMQ1*PMQ2)))
ZH=1D0+PMQ1-PMQ2
ZL=0.5D0*(ZH-ZD)
ZU=0.5D0*(ZH+ZD)
IF(V(IM,1).LT.ZL.OR.V(IM,1).GT.ZU) GOTO 350
ENDIF
ENDIF
C...Accepted branch. Construct four-momentum for initial partons.
490 MAZIP=0
MAZIC=0
IF(NEP.EQ.1) THEN
P(N+1,1)=0D0
P(N+1,2)=0D0
P(N+1,3)=SQRT(MAX(0D0,(P(IPA(1),4)+P(N+1,5))*(P(IPA(1),4)-
& P(N+1,5))))
P(N+1,4)=P(IPA(1),4)
V(N+1,2)=P(N+1,4)
ELSEIF(IGM.EQ.0.AND.NEP.EQ.2) THEN
PED1=0.5D0*(V(IM,5)+V(N+1,5)-V(N+2,5))/P(IM,5)
P(N+1,1)=0D0
P(N+1,2)=0D0
P(N+1,3)=SQRT(MAX(0D0,(PED1+P(N+1,5))*(PED1-P(N+1,5))))
P(N+1,4)=PED1
P(N+2,1)=0D0
P(N+2,2)=0D0
P(N+2,3)=-P(N+1,3)
P(N+2,4)=P(IM,5)-PED1
V(N+1,2)=P(N+1,4)
V(N+2,2)=P(N+2,4)
ELSEIF(NEP.GE.3) THEN
C...Rescale all momenta for energy conservation.
LOOP=0
PES=0D0
PQS=0D0
DO 510 I=1,NEP
DO 500 J=1,4
P(N+I,J)=P(IPA(I),J)
500 CONTINUE
PES=PES+P(N+I,4)
PQS=PQS+P(N+I,5)**2/P(N+I,4)
510 CONTINUE
520 LOOP=LOOP+1
FAC=(PS(5)-PQS)/(PES-PQS)
PES=0D0
PQS=0D0
DO 540 I=1,NEP
DO 530 J=1,3
P(N+I,J)=FAC*P(N+I,J)
530 CONTINUE
P(N+I,4)=SQRT(P(N+I,5)**2+P(N+I,1)**2+P(N+I,2)**2+P(N+I,3)**2)
V(N+I,2)=P(N+I,4)
PES=PES+P(N+I,4)
PQS=PQS+P(N+I,5)**2/P(N+I,4)
540 CONTINUE
IF(LOOP.LT.10.AND.ABS(PES-PS(5)).GT.1D-12*PS(5)) GOTO 520
C...Construct transverse momentum for ordinary branching in shower.
ELSE
ZM=V(IM,1)
LOOPPT=0
550 LOOPPT=LOOPPT+1
PZM=SQRT(MAX(0D0,(PEM+P(IM,5))*(PEM-P(IM,5))))
PMLS=(V(IM,5)-V(N+1,5)-V(N+2,5))**2-4D0*V(N+1,5)*V(N+2,5)
IF(PZM.LE.0D0) THEN
PTS=0D0
ELSEIF(K(IM,2).EQ.21.AND.ABS(K(N+1,2)).LE.10.AND.
& (MSTJ(44).EQ.3.OR.MSTJ(44).EQ.5)) THEN
PTS=PMLS*ZM*(1D0-ZM)/V(IM,5)
ELSEIF(MOD(MSTJ(43),2).EQ.1) THEN
PTS=(PEM**2*(ZM*(1D0-ZM)*V(IM,5)-(1D0-ZM)*V(N+1,5)-
& ZM*V(N+2,5))-0.25D0*PMLS)/PZM**2
ELSE
PTS=PMLS*(ZM*(1D0-ZM)*PEM**2/V(IM,5)-0.25D0)/PZM**2
ENDIF
IF(PTS.LT.0D0.AND.LOOPPT.LT.10) THEN
ZM=0.05D0+0.9D0*ZM
GOTO 550
ELSEIF(PTS.LT.0D0) THEN
GOTO 280
ENDIF
PT=SQRT(MAX(0D0,PTS))
C...Global statistics.
MINT(353)=MINT(353)+1
VINT(353)=VINT(353)+PT
IF (MINT(353).EQ.1) VINT(358)=PT
C...Find coefficient of azimuthal asymmetry due to gluon polarization.
HAZIP=0D0
IF(MSTJ(49).NE.1.AND.MOD(MSTJ(46),2).EQ.1.AND.K(IM,2).EQ.21
& .AND.IAU.NE.0) THEN
IF(K(IGM,3).NE.0) MAZIP=1
ZAU=V(IGM,1)
IF(IAU.EQ.IM+1) ZAU=1D0-V(IGM,1)
IF(MAZIP.EQ.0) ZAU=0D0
IF(K(IGM,2).NE.21) THEN
HAZIP=2D0*ZAU/(1D0+ZAU**2)
ELSE
HAZIP=(ZAU/(1D0-ZAU*(1D0-ZAU)))**2
ENDIF
IF(K(N+1,2).NE.21) THEN
HAZIP=HAZIP*(-2D0*ZM*(1D0-ZM))/(1D0-2D0*ZM*(1D0-ZM))
ELSE
HAZIP=HAZIP*(ZM*(1D0-ZM)/(1D0-ZM*(1D0-ZM)))**2
ENDIF
ENDIF
C...Find coefficient of azimuthal asymmetry due to soft gluon
C...interference.
HAZIC=0D0
IF(MSTJ(49).NE.2.AND.MSTJ(46).GE.2.AND.(K(N+1,2).EQ.21.OR.
& K(N+2,2).EQ.21).AND.IAU.NE.0) THEN
IF(K(IGM,3).NE.0) MAZIC=N+1
IF(K(IGM,3).NE.0.AND.K(N+1,2).NE.21) MAZIC=N+2
IF(K(IGM,3).NE.0.AND.K(N+1,2).EQ.21.AND.K(N+2,2).EQ.21.AND.
& ZM.GT.0.5D0) MAZIC=N+2
IF(K(IAU,2).EQ.22) MAZIC=0
ZS=ZM
IF(MAZIC.EQ.N+2) ZS=1D0-ZM
ZGM=V(IGM,1)
IF(IAU.EQ.IM-1) ZGM=1D0-V(IGM,1)
IF(MAZIC.EQ.0) ZGM=1D0
IF(MAZIC.NE.0) HAZIC=(P(IM,5)/P(IGM,5))*
& SQRT((1D0-ZS)*(1D0-ZGM)/(ZS*ZGM))
HAZIC=MIN(0.95D0,HAZIC)
ENDIF
ENDIF
C...Construct energies for ordinary branching in shower.
560 IF(NEP.EQ.2.AND.IGM.GT.0) THEN
IF(K(IM,2).EQ.21.AND.ABS(K(N+1,2)).LE.10.AND.
& (MSTJ(44).EQ.3.OR.MSTJ(44).EQ.5)) THEN
P(N+1,4)=0.5D0*(PEM*(V(IM,5)+V(N+1,5)-V(N+2,5))+
& PZM*SQRT(MAX(0D0,PMLS))*(2D0*ZM-1D0))/V(IM,5)
ELSEIF(MOD(MSTJ(43),2).EQ.1) THEN
P(N+1,4)=PEM*V(IM,1)
ELSE
P(N+1,4)=PEM*(0.5D0*(V(IM,5)-SQRT(PMLS)+V(N+1,5)-V(N+2,5))+
& SQRT(PMLS)*ZM)/V(IM,5)
ENDIF
C...Already predetermined choice of phi angle or not
PHI=PARU(2)*PYR(0)
IF(MPSPD.EQ.1.AND.IGM.EQ.NS+1) THEN
IPSPD=IP1+IM-NS-2
IF(K(IPSPD,4).GT.0) THEN
IPSGD1=K(IPSPD,4)
IF(IM.EQ.NS+2) THEN
PHI=PYANGL(P(IPSGD1,1),P(IPSGD1,2))
ELSE
PHI=PYANGL(-P(IPSGD1,1),P(IPSGD1,2))
ENDIF
ENDIF
ELSEIF(MPSPD.EQ.1.AND.IGM.EQ.NS+2) THEN
IPSPD=IP1+IM-NS-2
IF(K(IPSPD,4).GT.0) THEN
IPSGD1=K(IPSPD,4)
PHIPSM=PYANGL(P(IPSPD,1),P(IPSPD,2))
THEPSM=PYANGL(P(IPSPD,3),SQRT(P(IPSPD,1)**2+P(IPSPD,2)**2))
CALL PYROBO(IPSGD1,IPSGD1,0D0,-PHIPSM,0D0,0D0,0D0)
CALL PYROBO(IPSGD1,IPSGD1,-THEPSM,0D0,0D0,0D0,0D0)
PHI=PYANGL(P(IPSGD1,1),P(IPSGD1,2))
CALL PYROBO(IPSGD1,IPSGD1,THEPSM,PHIPSM,0D0,0D0,0D0)
ENDIF
ENDIF
C...Construct momenta for ordinary branching in shower.
P(N+1,1)=PT*COS(PHI)
P(N+1,2)=PT*SIN(PHI)
IF(K(IM,2).EQ.21.AND.ABS(K(N+1,2)).LE.10.AND.
& (MSTJ(44).EQ.3.OR.MSTJ(44).EQ.5)) THEN
P(N+1,3)=0.5D0*(PZM*(V(IM,5)+V(N+1,5)-V(N+2,5))+
& PEM*SQRT(MAX(0D0,PMLS))*(2D0*ZM-1D0))/V(IM,5)
ELSEIF(PZM.GT.0D0) THEN
P(N+1,3)=0.5D0*(V(N+2,5)-V(N+1,5)-V(IM,5)+
& 2D0*PEM*P(N+1,4))/PZM
ELSE
P(N+1,3)=0D0
ENDIF
P(N+2,1)=-P(N+1,1)
P(N+2,2)=-P(N+1,2)
P(N+2,3)=PZM-P(N+1,3)
P(N+2,4)=PEM-P(N+1,4)
IF(MSTJ(43).LE.2) THEN
V(N+1,2)=(PEM*P(N+1,4)-PZM*P(N+1,3))/P(IM,5)
V(N+2,2)=(PEM*P(N+2,4)-PZM*P(N+2,3))/P(IM,5)
ENDIF
ENDIF
C...Rotate and boost daughters.
IF(IGM.GT.0) THEN
IF(MSTJ(43).LE.2) THEN
BEX=P(IGM,1)/P(IGM,4)
BEY=P(IGM,2)/P(IGM,4)
BEZ=P(IGM,3)/P(IGM,4)
GA=P(IGM,4)/P(IGM,5)
GABEP=GA*(GA*(BEX*P(IM,1)+BEY*P(IM,2)+BEZ*P(IM,3))/(1D0+GA)-
& P(IM,4))
ELSE
BEX=0D0
BEY=0D0
BEZ=0D0
GA=1D0
GABEP=0D0
ENDIF
PTIMB=SQRT((P(IM,1)+GABEP*BEX)**2+(P(IM,2)+GABEP*BEY)**2)
THE=PYANGL(P(IM,3)+GABEP*BEZ,PTIMB)
IF(PTIMB.GT.1D-4) THEN
PHI=PYANGL(P(IM,1)+GABEP*BEX,P(IM,2)+GABEP*BEY)
ELSE
PHI=0D0
ENDIF
DO 570 I=N+1,N+2
DP(1)=COS(THE)*COS(PHI)*P(I,1)-SIN(PHI)*P(I,2)+
& SIN(THE)*COS(PHI)*P(I,3)
DP(2)=COS(THE)*SIN(PHI)*P(I,1)+COS(PHI)*P(I,2)+
& SIN(THE)*SIN(PHI)*P(I,3)
DP(3)=-SIN(THE)*P(I,1)+COS(THE)*P(I,3)
DP(4)=P(I,4)
DBP=BEX*DP(1)+BEY*DP(2)+BEZ*DP(3)
DGABP=GA*(GA*DBP/(1D0+GA)+DP(4))
P(I,1)=DP(1)+DGABP*BEX
P(I,2)=DP(2)+DGABP*BEY
P(I,3)=DP(3)+DGABP*BEZ
P(I,4)=GA*(DP(4)+DBP)
570 CONTINUE
ENDIF
C...Weight with azimuthal distribution, if required.
IF(MAZIP.NE.0.OR.MAZIC.NE.0) THEN
DO 580 J=1,3
DPT(1,J)=P(IM,J)
DPT(2,J)=P(IAU,J)
DPT(3,J)=P(N+1,J)
580 CONTINUE
DPMA=DPT(1,1)*DPT(2,1)+DPT(1,2)*DPT(2,2)+DPT(1,3)*DPT(2,3)
DPMD=DPT(1,1)*DPT(3,1)+DPT(1,2)*DPT(3,2)+DPT(1,3)*DPT(3,3)
DPMM=DPT(1,1)**2+DPT(1,2)**2+DPT(1,3)**2
DO 590 J=1,3
DPT(4,J)=DPT(2,J)-DPMA*DPT(1,J)/MAX(1D-10,DPMM)
DPT(5,J)=DPT(3,J)-DPMD*DPT(1,J)/MAX(1D-10,DPMM)
590 CONTINUE
DPT(4,4)=SQRT(DPT(4,1)**2+DPT(4,2)**2+DPT(4,3)**2)
DPT(5,4)=SQRT(DPT(5,1)**2+DPT(5,2)**2+DPT(5,3)**2)
IF(MIN(DPT(4,4),DPT(5,4)).GT.0.1D0*PARJ(82)) THEN
CAD=(DPT(4,1)*DPT(5,1)+DPT(4,2)*DPT(5,2)+
& DPT(4,3)*DPT(5,3))/(DPT(4,4)*DPT(5,4))
IF(MAZIP.NE.0) THEN
IF(1D0+HAZIP*(2D0*CAD**2-1D0).LT.PYR(0)*(1D0+ABS(HAZIP)))
& GOTO 560
ENDIF
IF(MAZIC.NE.0) THEN
IF(MAZIC.EQ.N+2) CAD=-CAD
IF((1D0-HAZIC)*(1D0-HAZIC*CAD)/(1D0+HAZIC**2-2D0*HAZIC*CAD)
& .LT.PYR(0)) GOTO 560
ENDIF
ENDIF
ENDIF
C...Azimuthal anisotropy due to interference with initial state partons.
IF(MOD(MIIS,2).EQ.1.AND.IGM.EQ.NS+1.AND.(K(N+1,2).EQ.21.OR.
&K(N+2,2).EQ.21)) THEN
III=IM-NS-1
IF(ISII(III).GE.1) THEN
IAZIID=N+1
IF(K(N+1,2).NE.21) IAZIID=N+2
IF(K(N+1,2).EQ.21.AND.K(N+2,2).EQ.21.AND.
& P(N+1,4).GT.P(N+2,4)) IAZIID=N+2
THEIID=PYANGL(P(IAZIID,3),SQRT(P(IAZIID,1)**2+P(IAZIID,2)**2))
IF(III.EQ.2) THEIID=PARU(1)-THEIID
PHIIID=PYANGL(P(IAZIID,1),P(IAZIID,2))
HAZII=MIN(0.95D0,THEIID/THEIIS(III,ISII(III)))
CAD=COS(PHIIID-PHIIIS(III,ISII(III)))
PHIREL=ABS(PHIIID-PHIIIS(III,ISII(III)))
IF(PHIREL.GT.PARU(1)) PHIREL=PARU(2)-PHIREL
IF((1D0-HAZII)*(1D0-HAZII*CAD)/(1D0+HAZII**2-2D0*HAZII*CAD)
& .LT.PYR(0)) GOTO 560
ENDIF
ENDIF
C...Continue loop over partons that may branch, until none left.
IF(IGM.GE.0) K(IM,1)=14
N=N+NEP
NEP=2
IF(N.GT.MSTU(4)-MSTU(32)-10) THEN
CALL PYERRM(11,'(PYSHOW:) no more memory left in PYJETS')
IF(MSTU(21).GE.1) N=NS
IF(MSTU(21).GE.1) RETURN
ENDIF
GOTO 290
C...Set information on imagined shower initiator.
600 IF(NPA.GE.2) THEN
K(NS+1,1)=11
K(NS+1,2)=94
K(NS+1,3)=IP1
IF(IP2.GT.0.AND.IP2.LT.IP1) K(NS+1,3)=IP2
K(NS+1,4)=NS+2
K(NS+1,5)=NS+1+NPA
IIM=1
ELSE
IIM=0
ENDIF
C...Reconstruct string drawing information.
DO 610 I=NS+1+IIM,N
KQ=KCHG(PYCOMP(K(I,2)),2)
IF(K(I,1).LE.10.AND.K(I,2).EQ.22) THEN
K(I,1)=1
ELSEIF(K(I,1).LE.10.AND.ABS(K(I,2)).GE.11.AND.
& ABS(K(I,2)).LE.18) THEN
K(I,1)=1
ELSEIF(K(I,1).LE.10) THEN
K(I,4)=MSTU(5)*(K(I,4)/MSTU(5))
K(I,5)=MSTU(5)*(K(I,5)/MSTU(5))
ELSEIF(K(MOD(K(I,4),MSTU(5))+1,2).NE.22) THEN
ID1=MOD(K(I,4),MSTU(5))
IF(KQ.EQ.1.AND.K(I,2).GT.0) ID1=MOD(K(I,4),MSTU(5))+1
IF(KQ.EQ.2.AND.(K(ID1,2).EQ.21.OR.K(ID1+1,2).EQ.21).AND.
& PYR(0).GT.0.5D0) ID1=MOD(K(I,4),MSTU(5))+1
ID2=2*MOD(K(I,4),MSTU(5))+1-ID1
K(I,4)=MSTU(5)*(K(I,4)/MSTU(5))+ID1
K(I,5)=MSTU(5)*(K(I,5)/MSTU(5))+ID2
K(ID1,4)=K(ID1,4)+MSTU(5)*I
K(ID1,5)=K(ID1,5)+MSTU(5)*ID2
K(ID2,4)=K(ID2,4)+MSTU(5)*ID1
K(ID2,5)=K(ID2,5)+MSTU(5)*I
ELSE
ID1=MOD(K(I,4),MSTU(5))
ID2=ID1+1
K(I,4)=MSTU(5)*(K(I,4)/MSTU(5))+ID1
K(I,5)=MSTU(5)*(K(I,5)/MSTU(5))+ID1
IF(KQ.EQ.1.OR.K(ID1,1).GE.11) THEN
K(ID1,4)=K(ID1,4)+MSTU(5)*I
K(ID1,5)=K(ID1,5)+MSTU(5)*I
ELSE
K(ID1,4)=0
K(ID1,5)=0
ENDIF
K(ID2,4)=0
K(ID2,5)=0
ENDIF
610 CONTINUE
C...Transformation from CM frame.
IF(NPA.EQ.1) THEN
THE=PYANGL(P(IPA(1),3),SQRT(P(IPA(1),1)**2+P(IPA(1),2)**2))
PHI=PYANGL(P(IPA(1),1),P(IPA(1),2))
MSTU(33)=1
CALL PYROBO(NS+1,N,THE,PHI,0D0,0D0,0D0)
ELSEIF(NPA.EQ.2) THEN
BEX=PS(1)/PS(4)
BEY=PS(2)/PS(4)
BEZ=PS(3)/PS(4)
GA=PS(4)/PS(5)
GABEP=GA*(GA*(BEX*P(IPA(1),1)+BEY*P(IPA(1),2)+BEZ*P(IPA(1),3))
& /(1D0+GA)-P(IPA(1),4))
THE=PYANGL(P(IPA(1),3)+GABEP*BEZ,SQRT((P(IPA(1),1)
& +GABEP*BEX)**2+(P(IPA(1),2)+GABEP*BEY)**2))
PHI=PYANGL(P(IPA(1),1)+GABEP*BEX,P(IPA(1),2)+GABEP*BEY)
MSTU(33)=1
CALL PYROBO(NS+1,N,THE,PHI,BEX,BEY,BEZ)
ELSE
CALL PYROBO(IPA(1),IPA(NPA),0D0,0D0,PS(1)/PS(4),PS(2)/PS(4),
& PS(3)/PS(4))
MSTU(33)=1
CALL PYROBO(NS+1,N,0D0,0D0,PS(1)/PS(4),PS(2)/PS(4),PS(3)/PS(4))
ENDIF
C...Decay vertex of shower.
DO 630 I=NS+1,N
DO 620 J=1,5
V(I,J)=V(IP1,J)
620 CONTINUE
630 CONTINUE
C...Delete trivial shower, else connect initiators.
IF(N.LE.NS+NPA+IIM) THEN
N=NS
ELSE
DO 640 IP=1,NPA
K(IPA(IP),1)=14
K(IPA(IP),4)=K(IPA(IP),4)+NS+IIM+IP
K(IPA(IP),5)=K(IPA(IP),5)+NS+IIM+IP
K(NS+IIM+IP,3)=IPA(IP)
IF(IIM.EQ.1.AND.MSTU(16).NE.2) K(NS+IIM+IP,3)=NS+1
IF(K(NS+IIM+IP,1).NE.1) THEN
K(NS+IIM+IP,4)=MSTU(5)*IPA(IP)+K(NS+IIM+IP,4)
K(NS+IIM+IP,5)=MSTU(5)*IPA(IP)+K(NS+IIM+IP,5)
ENDIF
640 CONTINUE
ENDIF
RETURN
END
|
#Jenny Smith
#3/28/18
color_bars <- function(list.labels,colorDends_Groups.res){
cb1 <- list.labels[[1]][names(colorDends_Groups.res$groups)] #subset & order
cb2 <- list.labels[[2]][names(colorDends_Groups.res$groups)]
cb.all <- data.frame(Cytogenetics=cb1,
FLT3.ITD=cb2,
Cytogenetics.Num=as.numeric(as.factor(cb1)),
FLT3.ITD.Num=as.numeric(as.factor(cb2)),
Hier.Cluster.Group=colorDends_Groups.res$groups)
c <- c("red","deepskyblue","darkorchid1","blue3","gold2")
c2 <- c("darkolivegreen2","darkslategray")
c3 <- c("firebrick1", "dodgerblue3")
colors <- data.frame(Fusions=c[cb.all[,"Cytogenetics.Num"]],
FLT3=c2[cb.all[,"FLT3.ITD.Num"]],
Group=c3[cb.all[,"Hier.Cluster.Group"]],
stringsAsFactors = TRUE)
return(colors)
}
#CDE
merged <- read.csv("~/reference_mapping-files/TARGET_AML_1031_0531_Merged_CDE_3.30.18.csv",
stringsAsFactors = FALSE)
#Color Bars
cols <- c("CBFA2T3.GLIS2","NUP98.KDM5A","RBM15.MKL1","DEK.NUP214")
cols2 <- c("CEBPA.mutation","NPM.mutation")
labels1 <- pheno_bars(CDE=merged, IDCol = "TARGET.USI.1", cols=cols)
labels2 <- pheno_bars(CDE=merged, IDCol = "TARGET.USI.1", cols="FLT3.ITD.positive.")
labels3 <- pheno_bars(CDE=merged, IDCol="TARGET.USI.1",cols=cols2)
color.df <- color_bars(list(labels1,labels2), CBF.GLIS.like)
#Genes of Interest (GOI)
topDEGs <- CBFvsOtherAML.1031 %>%
filter(logFC < -3.488684 | logFC > 7.541174 ) %>%
select(gene) %>%
unlist()
#Cluster on GOI
d.top <- dge_dendrograms(expnData=CBFvsAML$cts.hd.1031$InputExpnMatrix,
pheno=CBFvsAML$cts.hd.1031$phenovector,
genelist = topDEGs,
method = "ward.D2")
#Color dendrogram labels and branches
col.top <- ifelse(CBFvsAML$cts.hd.1031$phenovector == "GroupA", "Red", "dark blue")
cc.top <- c(GroupA="red",GroupB="dark blue")
CBF.GLIS.like <- colorDends_Groups(dge_dendrograms.res = d.top,
phenovector = CBFvsAML$cts.hd.1031$phenovector,
k=2,
branchCol = c("firebrick","navy"),
colorcodes = cc.top)
plot(CBF.GLIS.like$split_dends[[1]])
plot(CBF.GLIS.like$split_dends[[2]])
#Add Color bar below color dendrogram
par(mfrow=c(1,1), cex=0.125, mar=c(35, 7.5, 8.5, 2), pty="m")
plot(CBF.GLIS.like$dend, axes=TRUE,cex.axis=9, horiz=FALSE)
par(cex=0.8, cex.main = 1, cex.lab = 0.85)
colored_bars(colors = color.df, y_scale=80, rowLabels=c("", ""))
|
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon
Traversable instance for buffers.
-/
import data.buffer data.array.lemmas
import category.traversable.instances data.equiv.basic
tactic.ext
namespace buffer
open function
variables {α : Type*} {xs : list α}
@[extensionality]
lemma ext : ∀ {b₁ b₂ : buffer α}, to_list b₁ = to_list b₂ → b₁ = b₂
| ⟨n₁, a₁⟩ ⟨n₂, a₂⟩ h := begin
simp [to_list, to_array] at h,
have e : n₁ = n₂ :=
by rw [←array.to_list_length a₁, ←array.to_list_length a₂, h],
subst e,
have h : a₁ == a₂.to_list.to_array := h ▸ a₁.to_list_to_array.symm,
rw eq_of_heq (h.trans a₂.to_list_to_array)
end
instance (α) [decidable_eq α] : decidable_eq (buffer α) :=
by tactic.mk_dec_eq_instance
@[simp]
lemma to_list_append_list {b : buffer α} :
to_list (append_list b xs) = to_list b ++ xs :=
by induction xs generalizing b; simp! [*]; cases b; simp! [to_list,to_array]
@[simp]
lemma append_list_mk_buffer :
append_list mk_buffer xs = array.to_buffer (list.to_array xs) :=
by ext x : 1; simp [array.to_buffer,to_list,to_list_append_list];
induction xs; [refl,skip]; simp [to_array]; refl
def list_equiv_buffer (α : Type*) : list α ≃ buffer α :=
begin
refine { to_fun := list.to_buffer, inv_fun := buffer.to_list, .. };
simp [left_inverse,function.right_inverse],
{ intro x, induction x, refl,
simp [list.to_buffer,append_list],
rw ← x_ih, refl },
{ intro x, cases x,
simp [to_list,to_array,list.to_buffer],
congr, simp, refl, apply array.to_list_to_array }
end
instance : traversable buffer :=
equiv.traversable list_equiv_buffer
instance : is_lawful_traversable buffer :=
equiv.is_lawful_traversable list_equiv_buffer
end buffer
|
function [mu, B] = clg_Mstep_simple(w, Y, YY, YTY, X, XX, XY)
% CLG_MSTEP_SIMPLE Same as CLG_MSTEP, but doesn't estimate Sigma, so is slightly faster
% function [mu, B] = clg_Mstep_simple(w, Y, YY, YTY, X, XX, XY)
%
% See clg_Mstep for details.
% Unlike clg_Mstep, there are no optional arguments, which are slow to process
% if this function is inside a tight loop.
[Ysz Q] = size(Y);
if isempty(X) % no regression
%B = [];
B2 = zeros(Ysz, 1, Q);
for i=1:Q
B(:,:,i) = B2(:,1:0,i); % make an empty array of size Ysz x 0 x Q
end
[mu, Sigma] = mixgauss_Mstep(w, Y, YY, YTY);
return;
end
N = sum(w);
%YY = YY + cov_prior; % regularize the scatter matrix
% Set any zero weights to one before dividing
% This is valid because w(i)=0 => Y(:,i)=0, etc
w = w + (w==0);
Xsz = size(X,1);
% Append 1 to X to get Z
ZZ = zeros(Xsz+1, Xsz+1, Q);
ZY = zeros(Xsz+1, Ysz, Q);
for i=1:Q
ZZ(:,:,i) = [XX(:,:,i) X(:,i);
X(:,i)' w(i)];
ZY(:,:,i) = [XY(:,:,i);
Y(:,i)'];
end
mu = zeros(Ysz, Q);
B = zeros(Ysz, Xsz, Q);
for i=1:Q
% eqn 9
if rcond(ZZ(:,:,i)) < 1e-10
sprintf('clg_Mstep warning: ZZ(:,:,%d) is ill-conditioned', i);
%probably because there are too few cases for a high-dimensional input
ZZ(:,:,i) = ZZ(:,:,i) + 1e-5*eye(Xsz+1);
end
%A = ZY(:,:,i)' * inv(ZZ(:,:,i));
A = (ZZ(:,:,i) \ ZY(:,:,i))';
B(:,:,i) = A(:, 1:Xsz);
mu(:,i) = A(:, Xsz+1);
end
|
module Wavefronts
export intensity, field, Wavefront, AddWavefront, amplitude
export Piston, Tilt, Tip, Defocus, Astigmatism, Coma, AddAberration
export Trefoil, Spherical, VerticalAstigmatism, ObliqueAstigmatism
export Aberration, HorizontalComa, VerticalComa, ObliqueTrefoil, VerticalTrefoil
export phase, project, correct
export circ
export rawphase, unwrapphase, retrievephase
cartesian_to_polar(x,y) = √(x^2+y^2), atan(x,y)
polar_to_cartesian(r,θ) = r*cos(θ),r*sin(θ)
include("coordinates.jl")
include("aberrations.jl")
include("wavefronts.jl")
include("phasestep.jl")
end # module
|
function [beta, t, pvals, convals, con_t, con_pvals, sigma, Phi, df, stebeta, conste, F] = fit_gls(y, X, c, p, varargin)
% Fit a linear model using generalized least squares and an AR(p) model
%
% :Usage:
% ::
%
% [beta, t, pvals, convals, con_t, con_pvals, sigma, Phi, df, stebeta, conste, F] = fit_gls(y,X,c,p,[PX, equal to pinv(X), for speed], [Weights])
%
% This program uses the Cochrane-Orcutt algorithm to iteratively find the
% GLS solution and estimate the noise parameters.
%
% Step 1: Find the OLS solution.
%
% Step 2: Use residuals from the previous fit to estimate the parameters in
% the AR(p) noise model.
%
% Step 3: Find the GLS solution using the covariance matrix corresponding
% to an AR(p) model with parameters estimated in Step 2 inserted.
%
% Step 4: Repeat steps 2-3 until convergence.
%
% :Inputs:
%
% **y:**
% fMRI time course (T x 1 vector)
%
% **X:**
% Design matrix (T x param matrix)
%
% **c:**
% contrast vector(s) (param x # contrasts matrix)
%
% **p:**
% order of AR model.
%
% **PX:**
% pinv(X), for faster repeated calculations with different y vectors
%
% Note: if using weights, px = inv(X' * W * X) * X' * W;
% where W = diag(Weights);
%
% **Weights:**
% Optional, vector of weights for each observation
%
% Empty or missing: Unweighted analysis.
%
% Note that setting p=0 implies a white noise model.
%
% :Output:
%
% **t:**
% t-value for the contrast c'beta
%
% **df:**
% degrees of freedom using Satterthwaite approximation
%
% **beta:**
% beta vector
%
% **Phi:**
% vector of coefficients in AR(p) model
%
% **sigma:**
% standard deviation
%
% **stebeta:**
% standard error of betas
%
% ..
% by Martin Lindquist
%
% Last updated: 3/29/08, Tor Wager, added weighted least squares
% verified that beta and t-values are identical to
% glmfit.m in matlab7.5 with ar p = 0
% ***AR(p) with weighted least squares needs to be
% checked. behaving reasonably.
% 4/1/08, Tor : output stats for both betas and contrasts
% Reorganized order of outputs
% 5/15/08 Tor : Weird things happening with single inputs; particularly with aryule; force
% double
% ..
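% Illustrative call (a minimal sketch; the data, AR coefficient and contrast
% below are made up for demonstration and are not part of this function):
%
%   T = 200;
%   X = [ones(T,1) randn(T,1)];                       % intercept + 1 regressor
%   y = X*[1; 2] + filter(1, [1 -0.3], randn(T,1));   % AR(1) noise, phi = 0.3
%   c = [0 1]';                                       % contrast on the slope
%   [b, t, pvals] = fit_gls(y, X, c, 1);              % GLS fit with AR(1) model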
y = double(y);
X = double(X);
T = length(y); % Length of time course
k = size(X, 2); % predictors
if length(varargin) > 1
w = double(varargin{2}); % weights for weighted least squares
else
w = ones(T, 1);
end
W = diag(w);
sqrtW = sqrt(W); % for weighted residuals
if ~isempty(varargin) && ~isempty(varargin{1})
px = double(varargin{1});
else
invxvx = inv(X' * W * X); % we can re-use this later if p == 0
px = invxvx * X' * W;
end
% Step 1: Find the OLS solution
beta = px*y; % Beta values; weighted, if weights are used
resid = sqrtW * y - sqrtW * X * beta; % Residuals (Weighted, if weights are used)
sigma = sqrt((1 / (T - k)) * resid' * resid); %sum(resid.^2))); % Estimate of Sigma
% Weighted residuals: Three equivalent ways
% We pick one that is compatible with AR estimation
% 1)
% beta = px*y; % Beta values
% resid = y - X*beta; % Residuals
% sigma = sqrt((1 / (T - k)) * resid' * W * resid); %sum(resid.^2))); % Estimate of Sigma
% 2)
%r2 = sqrt(W) * resid; sigma2 = sqrt((1 / (T - k)) * r2' * r2)
% 3)
% r2 = sqrt(W) * y - sqrt(W) * X * beta;
% sigma2 = sqrt((1 / (T - k)) * r2' * r2)
% Stuff needed for future iterations
iV = W; % for ar p = 0 case
A = W; % for ar p = 0 case
Phi = 0;
betaold = 0;
% Steps 2-4: Find the GLS solution by iteration
% If p=0, skip this step. Appropriate solution already calculated above.
% Continue iteration until convergence or for at most 10 loops.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Note to Jack and Tor: Keith Worsley uses a similar algorithm when fitting
% a GLM with an AR(p) noise model. However, he skips the iterative step
% and only goes through the loop one time. He claims that this is enough. I
% am not entirely convinced, therefore it is probably better to go through
% a few times if needed.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
i=1; % Set counter
while i < 10 && p > 0 && (i == 1 || sum((beta - betaold).^2) > 0.001)
% Do up to 10 iterations, if arp > 0 and either first iteration or
% there's a difference from last iteration
% resid = y - X*beta; % Calculate residuals of current model fit
resid = sqrtW * y - sqrtW * X * beta; % Residuals (Weighted, if weights are used)
% Estimate AR parameters using residuals
[a,e] = aryule(resid, p);
Phi =zeros(length(a)-1,1);
Phi(1:p) = -a(2:(p+1));
%sigma = sqrt(e); % sqrtW*e?? % Moved later, because never used until
%after iteration loop
% Find the inverse of the covariance matrix
A = sqrtW; % ***should be sqrt(W)?
for j=1:p
%A = A + diag(-Phi(j)*ones((T-j),1),-j);
A = A + sqrtW * diag(-Phi(j)*ones((T-j),1),-j);
end;
%create_figure('A'); imagesc(A, [-.2 .2]); colorbar, drawnow, input(' ')
iV = A*A'; % New weights, with AR estimates, The inverse of the covariance matrix
betaold = beta; % Set old solution to be betaold
beta = inv(X'*iV*X)*X'*iV*y; % Calculate new solution
i = i+1; % Add one to counter
%create_figure('COV'); imagesc(iV, [-.02 .02]); colorbar, drawnow, input(' ')
end
if p > 0
% re-calc sigma
sigma = sqrt(e);
invxvx = inv(X'*iV*X);
% Should we use the kind of thing below? Seems like sqrt(e) is unweighted,
% though it seems reasonable...
% sqrtiv = sqrt(iV);
% resid = sqrtiv * y - sqrtiv * X * beta; % Residuals (Weighted, if weights are used)
% sigma = sqrt((1 / (T - k)) * resid' * resid)
end
R = (eye(T) - X * invxvx * X' * iV); % Residual inducing matrix
Wd = R * A * inv(iV) * A'; % inv(iV) = Covariance matrix
df = (trace(Wd).^2)./trace(Wd*Wd); % Satterthwaite approximation for degrees of freedom
% Should check on below: this creates difference in t-values from
% glmfit...but why wouldn't we need to re-calculate a (larger) sigma if we have reduced df?
% if df ~= (T - k) % Tor added 3/29, have to re-calculate sigma
% sigma = sqrt((1 / df) * resid' * iV * resid); %sum(resid.^2))); % Estimate of Sigma
% end
varbeta = sigma^2.* invxvx; % Var(beta)
stebeta = diag(varbeta).^.5;
t = beta ./ stebeta;
convals = [];
conste = [];
con_t = [];
con_pvals = [];
F = [];
if ~isempty(c)
% Contrast(s)
convals = (c' * beta);
conste = diag(c' * varbeta * c).^.5; % tor added as output
con_t = convals ./ conste; %sqrt(c'*varbeta*c); % t-value
end
% get rid of nuisance regressors that aren't in any contrast
% wh = stebeta > 0;
% c = c(wh,wh);
% beta = beta(wh);
% stebeta = stebeta(wh);
%
% if ~isempty(c)
% beta = (c' * beta);
% %beta = beta(wh);
% t = beta ./ stebeta; %sqrt(c'*varbeta*c); % t-value
% else
% t = beta(wh) ./ stebeta;
% end
%%%%%% Test H0: beta_1 = beta_2 = .... = beta_param = 0
if nargout > 6
SSE = y'*y - beta'*X'*y; % Error sum of squares
mSSE = SSE/df;
J = ones(T);
SST=y'*y - (1/T).*y'*J*y; % Total sum of squares
SSM=SST-SSE; % Model sum of squares
dfSSM=length(c) - 1; % degrees of freedom for model (param - 1)
mSSM=SSM/dfSSM;
F=mSSM/mSSE; % F-statistic - compare with F-distribution with (param-1, df) degrees of freedom
end
% % get contrast values if we need those
% if ~isempty(c)
% beta = (beta' * c)';
% end
if nargout > 2
pvals = 2 .* (1 - tcdf(abs(t), df)); % two-tailed
% make sure p-values for valid results are not zero...
pvals(pvals == 0 & beta ~= 0 & ~isnan(beta)) = 1000*eps;
if ~isempty(c) && nargout > 5
con_pvals = 2 .* (1 - tcdf(abs(con_t), df)); % two-tailed
con_pvals(con_pvals == 0 & convals ~= 0 & ~isnan(convals)) = 1000*eps;
end
end
return
|
function s = i4_to_s_zero ( intval, s_len )
%*****************************************************************************80
%
%% I4_TO_S_ZERO converts an I4 to a string, with zero padding.
%
% Discussion:
%
% An I4 is an integer.
%
% Example:
%
% Assume that S is 6 characters long:
%
% INTVAL S
%
% 1 000001
% -1 -00001
% 0 000000
% 1952 001952
% 123456 123456
% 1234567 ****** <-- Not enough room!
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 08 December 2008
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer INTVAL, an integer to be converted.
%
% Input, integer S_LEN, the length of the string to be used.
%
% Output, string S, the representation of the integer.
% The integer will be right justified, and zero padded.
% If there is not enough space, the string will be filled with stars.
%
s = [];
if ( s_len <= 0 )
return
end
ilo = 1;
%
% Make a copy of the integer.
%
ival = intval;
%
% Handle the negative sign.
%
if ( ival < 0 )
if ( s_len <= 1 )
s(1) = '*';
s = char ( s );
return
end
ival = - ival;
s(1) = '-';
ilo = 2;
end
%
% Working from right to left, strip off the digits of the integer
% and place them into S(ILO:S_LEN).
%
ipos = s_len;
while ( ival ~= 0 | ipos == s_len )
idig = mod ( ival, 10 );
ival = floor ( ival / 10 );
if ( ipos < ilo )
s(1:s_len) = '*';
s = char ( s );
return
end
c = digit_to_ch ( idig );
s(ipos) = c;
ipos = ipos - 1;
end
%
% Fill the empties with zeroes.
%
s(ilo:ipos) = '0';
s = char ( s );
return
end
|
Describe Users/ChuckSteak here.
20120711 14:11:05 Welcome to the Wiki! Your review on The Arbors was pretty entertaining, as well as informative. It's good of you to bring a flashlight to the storage closets. I'm sure the spiders appreciate being able to see. Users/TomGarberson
|
# Script to explore the numerical consequences of changing the Euler integration
# step size.
library(rstan)
library(doParallel)
library(plyr)
library(magrittr)
library(lubridate)
library(ggplot2)
library(gridExtra)
# Loading and processing mosquito and case data
tseries <- read.csv("Data/Vitoria.data.csv")
data <- ddply(tseries, .(tot.week), summarise,
obs = sum(Cases, na.rm = T),
q = sum(Mosquitoes, na.rm = T),
tau = sum(Trap, na.rm = T),
year = unique(Year),
week = unique(Week))
# Population size
pop <- 327801
# Wrangling weather data and computing EIP
weather <- read.csv("Data/Vitoria.weather.csv") %>%
mutate(date = ymd(BRST), year = year(date), week = week(date))
weather <- subset(weather, date < date[1] + weeks(243))
weather$tot.week <- rep(c(1:243), each = 7)
covars <- ddply(weather, .(tot.week), summarise,
temp = mean(Mean.TemperatureC, na.rm = T))
rov <- 7 * exp(0.2 * covars$temp - 8)
#===============================================================================
# Loading mcmc samples
fit <- readRDS("Results/gamma_eip_dv0.rds")
samples <- rstan::extract(fit)[1:17]
extract_sample <- function(x, k){
if(length(dim(x)) == 1) return(x[k])
else return(x[k, ])
}
# Selecting a sample to run the simulation with
init <- list(lapply(samples, extract_sample, 1))
# Loading Stan code
model <- stan_model(file = "Code/gammaeip.stan")
#===============================================================================
# Simulating dynamics with smaller and smaller Euler steps
# Number of Euler steps per week
steps <- c(7, 14, 28, 56, 112, 224)
# Loop over each of the step sizes and simulate dynamics
runs <- foreach(k = 1:6, .packages = c("rstan", "magrittr"), .combine = "rbind") %do% {
dat.stan <- list(T = 243,
steps = steps[k],
y =data$obs,
q = data$q,
tau = data$tau,
rov = rov,
control = matrix(1, nrow = 243, ncol = 3),
pop = pop)
sim <- sampling(model,
data = dat.stan,
init = init,
iter = 1,
chains = 1,
warmup = 0,
algorithm = "Fixed_param")
cases <- diff(rstan::extract(sim, "state", permute = T)[[1]][1, , 11])
return(cases)
}
#===============================================================================
# Compare number of cases per week for the different step sizes
plot(runs[6, ])
points(runs[5, ], pch = 20, col = "blue")
plot(runs[5, ])
points(runs[4, ], pch = 20, col = "blue")
plot(runs[4, ])
points(runs[3, ], pch = 20, col = "blue")
plot(runs[3, ])
points(runs[2, ], pch = 20, col = "blue")
plot(runs[2, ])
points(runs[1, ], pch = 20, col = "blue")
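# A possible numeric summary (not in the original script): how far each run is from the
# finest-step run, e.g. the maximum absolute difference in weekly cases.
apply(runs, 1, function(r) max(abs(r - runs[6, ])))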
|
/* bspline/gsl_bspline.h
*
* Copyright (C) 2006 Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __GSL_BSPLINE_H__
#define __GSL_BSPLINE_H__
#include <stdlib.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_vector.h>
#undef __BEGIN_DECLS
#undef __END_DECLS
#ifdef __cplusplus
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
#else
# define __BEGIN_DECLS /* empty */
# define __END_DECLS /* empty */
#endif
__BEGIN_DECLS
typedef struct
{
size_t k; /* spline order */
size_t km1; /* k - 1 (polynomial order) */
size_t l; /* number of polynomial pieces on interval */
size_t nbreak; /* number of breakpoints (l + 1) */
size_t n; /* number of bspline basis functions (l + k - 1) */
gsl_vector *knots; /* knots vector */
gsl_vector *deltal; /* left delta */
gsl_vector *deltar; /* right delta */
gsl_vector *B; /* temporary spline results */
} gsl_bspline_workspace;
gsl_bspline_workspace *
gsl_bspline_alloc(const size_t k, const size_t nbreak);
void gsl_bspline_free(gsl_bspline_workspace *w);
size_t gsl_bspline_ncoeffs (gsl_bspline_workspace * w);
size_t gsl_bspline_order (gsl_bspline_workspace * w);
size_t gsl_bspline_nbreak (gsl_bspline_workspace * w);
double gsl_bspline_breakpoint (size_t i, gsl_bspline_workspace * w);
int
gsl_bspline_knots(const gsl_vector *breakpts, gsl_bspline_workspace *w);
int gsl_bspline_knots_uniform(const double a, const double b,
gsl_bspline_workspace *w);
int
gsl_bspline_eval(const double x, gsl_vector *B,
gsl_bspline_workspace *w);
__END_DECLS
#endif /* __GSL_BSPLINE_H__ */
|
#redirect Residence Halls
|
Maddie Hinch and her England team-mates should be proud of their Hockey Women’s World Cup exploits, according to their head coach.
The West Chiltington-based goalkeeper’s dreams of adding another medal to her collection were extinguished last Thursday night as they were knocked out at the quarter-final stage.
The host nation suffered a 2-0 defeat to defending champions Netherlands at the Lee Valley Hockey Centre in London.
Lidewij Welten opened the scoring for the Dutch in the first quarter and Laurien Leurink then doubled the lead early in the second half as the world number one side dominated.
The Dutch went on to beat Australia in the semi-finals and then overcame shock finalists Ireland with a crushing 6-0 victory in Sunday’s final to retain their crown.
It wasn’t the tournament England were expecting after drawing their opening two Pool B matches. They then beat Ireland to finish second in the group, but had to beat South Korea in the crossover game to make the main knockout stages.
Head coach Danny Kerry, however, defended his team’s performance and pointed to the difficulties they had faced.
He said: “I told the players I was really proud. We have had a tough tournament with injury and for all sorts of reasons.
“One of our players played the entire tournament with a broken big toe but they all gritted it out and carried on.”
Hinch won Olympic gold in 2016 and Commonwealth Games bronze in Australia earlier this year. |
module Main
import System
import System.Concurrency
main : IO ()
main = do
barrier <- makeBarrier 3
threadIDs <- for [1,2,3] $ \n => fork $ do
putStrLn "Hello"
barrierWait barrier
putStrLn "Goodbye"
for threadIDs $ \threadID =>
threadWait threadID
sleep 1
|
\section{Preliminaries and exploratory data analysis}
We're going to revisit data we considered in the cluster analysis lab., so you need to load the \texttt{cluster} and the \texttt{flexclust} libraries to make the data available.
We'll start by considering the \texttt{milk} data; you should have done some interesting eda in the cluster lab. We follow this up by carrying out multidimensional scaling using \texttt{cmdscale} from the base \texttt{stats} package (look at the helpfile):
\begin{verbatim}
library(cluster)
library(flexclust)
library(MASS)
data(milk)
milk.dist <- dist(milk)
milk.pco <- cmdscale(milk.dist)
par(xpd = NA, bty = "n") ## let the labels run past the
## plotting region, and remove the box
plot(milk.pco, type = "n", main = "PCO representation")
text(milk.pco, row.names(milk.pco), cex = 0.5)
\end{verbatim}
Note that we've produced a blank plot and then drawn text at the positions of the data points, using the row names as labels. Can you make sense of the resulting plot?
If you're very interested, you could even plot the cluster solutions you find when doing cluster analysis against the 2-dimensional representation of the distance matrix:
\begin{verbatim}
milk.hclust <- hclust(milk.dist)
milk.cut <- cutree(milk.hclust, 3)
plot(milk.pco, type = "n", main = "PCO representation")
text(milk.pco, row.names(milk.pco), cex = 0.5, col = milk.cut, pch = milk.cut)
\end{verbatim}
More important this week is to consider some diagnostics.
Let's look at the $n-1$ dimensional solution, using the delightfully titled \texttt{zapsmall} function to round our eigenvalues to 8 digits (computer arithmetic gives us a lot of tiny negative eigenvalues around the 15th decimal place, which we get rid of).
\begin{verbatim}
milk.pco.24 <- cmdscale(milk.dist, eig = TRUE, k = 24)
evals <- zapsmall(milk.pco.24$eig, digits = 8)
evals
\end{verbatim}
Now, if we want to examine the fit of a $q$ dimensional approximation we use $2 \times n \times \sum_{j=q+1}^{n-1} \lambda_{j}$ (note we are summing all the discarded eigenvalues) to give us:
\begin{verbatim}
2 * dim(milk)[1] * sum(milk.pco.24$eig[3:24])
\end{verbatim}
You can adjust this last line to see the SS for a 3 dimensional approximation (use \texttt{4:24} in the square brackets) and so on.
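For example, the 3-dimensional version (a sketch following the same pattern, discarding eigenvalues 4 to 24) would be:
\begin{verbatim}
2 * dim(milk)[1] * sum(milk.pco.24$eig[4:24])
\end{verbatim}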
And now try to calculate the same thing directly from the projection and from the data. We already know $\boldsymbol{\Delta}$ (although we have to tell \textbf{R} we want a matrix and not a distance matrix). We can get an estimate of $d$ by taking the distance matrix of the points in the $q$ dimensional approximation that interests us. Finally, we just want the sum of the differences in the squared distances:
\begin{verbatim}
milk.pco.2 <- cmdscale(milk.dist, eig = TRUE, k = 2)
delta <- as.matrix(milk.dist)
d <- as.matrix(dist(milk.pco.2$points))
sum(as.vector(delta)^2 - as.vector(d)^2)
\end{verbatim}
The other obvious method is just to use the percentage of variance explained / discarded. Consider dividing the cumulative sum of the eigenvalues by the sum of the eigenvalues:
\begin{verbatim}
cumsum(evals) / sum(evals)
\end{verbatim}
Careful examination should convince you that a 2-dimensional approximation might well be adequate.
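If you prefer a picture, a quick scree-style plot of the non-negative eigenvalues (a minimal sketch, not part of the original material) makes the same point:
\begin{verbatim}
barplot(evals[evals > 0], main = "PCO eigenvalues")
\end{verbatim}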
\section{Sammon Mapping}
You could try ``Sammon'' mapping on the mvmclass data. Assuming you've used \texttt{daisy} to generate a distance matrix from the class data (we did this in an earlier lab.)
\begin{verbatim}
mvmclass <- read.csv("class06.csv", row.names = 1)
mvmclass.dist <- daisy(mvmclass)
mvmclass.sammon <- sammon(mvmclass.dist)
\end{verbatim}
If you want to plot the representation, the material you need is under \verb+mvmclass.sammon$points+.
\begin{itemize}
\item How do you determine whether your Sammon fit is a good one or not? (One idea is sketched below.)
\end{itemize}
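One possibility (a sketch, not part of the original lab. material) is to look at the stress value reported in \verb+mvmclass.sammon$stress+ and to plot the fitted distances against the observed dissimilarities:
\begin{verbatim}
mvmclass.sammon$stress
plot(as.vector(mvmclass.dist),
     as.vector(dist(mvmclass.sammon$points)),
     xlab = "observed dissimilarity", ylab = "fitted distance")
abline(0, 1)
\end{verbatim}
A fit where the points lie close to the line of equality (and the stress is small) is doing a reasonable job of preserving the original dissimilarities.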
\section{Further analyses}
In case you wish to follow this from Johnson and Wichern's perspective, two very interesting datasets have been placed on the portal: \texttt{utilities.csv} and \texttt{USUni.csv}. The former we saw last time when we were considering clustering. The latter considers a number of measures on 25 US Universities.
Another interesting analysis involves the Painters data (see \texttt{?painters} in \texttt{MASS}) - in particular you may like to see whether there is any evidence that the first dimension corresponds to a time axis.
\section{Summary}
\fbox{\parbox[c]{0.9\textwidth}{\color{blue}
In some ways, it might have been better for this topic to follow cluster analysis. It is really a different way of examining the relationships between individuals in a dataset. By the end of this week, we should be able to:
\begin{itemize}
\item Compute and plot a p.c.o. projection of a distance matrix
\item Understand and justify methods for determining whether our low dimension projection is adequate
\item Explain and interpret results, especially if those results are placed relative to a cluster analysis
\item Be able to relate p.c.o. to p.c.a.
\end{itemize}
}}
|
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Johannes Hölzl, Simon Hudon, Kenny Lau
-/
import data.multiset.bind
import control.traversable.lemmas
import control.traversable.instances
/-!
# Functoriality of `multiset`.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
universes u
namespace multiset
open list
instance : functor multiset :=
{ map := @map }
@[simp] lemma fmap_def {α' β'} {s : multiset α'} (f : α' → β') : f <$> s = s.map f := rfl
instance : is_lawful_functor multiset :=
by refine { .. }; intros; simp
open is_lawful_traversable is_comm_applicative
variables {F : Type u → Type u} [applicative F] [is_comm_applicative F]
variables {α' β' : Type u} (f : α' → F β')
def traverse : multiset α' → F (multiset β') :=
quotient.lift (functor.map coe ∘ traversable.traverse f)
begin
introv p, unfold function.comp,
induction p,
case perm.nil { refl },
case perm.cons
{ have : multiset.cons <$> f p_x <*> (coe <$> traverse f p_l₁) =
multiset.cons <$> f p_x <*> (coe <$> traverse f p_l₂),
{ rw [p_ih] },
simpa with functor_norm },
case perm.swap
{ have : (λa b (l:list β'), (↑(a :: b :: l) : multiset β')) <$> f p_y <*> f p_x =
(λa b l, ↑(a :: b :: l)) <$> f p_x <*> f p_y,
{ rw [is_comm_applicative.commutative_map],
congr, funext a b l, simpa [flip] using perm.swap b a l },
simp [(∘), this] with functor_norm },
case perm.trans { simp [*] }
end
instance : monad multiset :=
{ pure := λ α x, {x},
bind := @bind,
.. multiset.functor }
@[simp] lemma pure_def {α} : (pure : α → multiset α) = singleton := rfl
@[simp] lemma bind_def {α β} : (>>=) = @bind α β := rfl
instance : is_lawful_monad multiset :=
{ bind_pure_comp_eq_map := λ α β f s, multiset.induction_on s rfl $ λ a s ih, by simp,
pure_bind := λ α β x f, by simp [pure],
bind_assoc := @bind_assoc }
open functor
open traversable is_lawful_traversable
@[simp]
lemma lift_coe {α β : Type*} (x : list α) (f : list α → β)
(h : ∀ a b : list α, a ≈ b → f a = f b) :
quotient.lift f h (x : multiset α) = f x :=
quotient.lift_mk _ _ _
@[simp]
lemma map_comp_coe {α β} (h : α → β) :
functor.map h ∘ coe = (coe ∘ functor.map h : list α → multiset β) :=
by funext; simp [functor.map]
lemma id_traverse {α : Type*} (x : multiset α) :
traverse id.mk x = x :=
quotient.induction_on x begin intro, simp [traverse], refl end
lemma comp_traverse {G H : Type* → Type*}
[applicative G] [applicative H]
[is_comm_applicative G] [is_comm_applicative H]
{α β γ : Type*}
(g : α → G β) (h : β → H γ) (x : multiset α) :
traverse (comp.mk ∘ functor.map h ∘ g) x =
comp.mk (functor.map (traverse h) (traverse g x)) :=
quotient.induction_on x
(by intro;
simp [traverse,comp_traverse] with functor_norm;
simp [(<$>),(∘)] with functor_norm)
lemma map_traverse {G : Type* → Type*}
[applicative G] [is_comm_applicative G]
{α β γ : Type*}
(g : α → G β) (h : β → γ)
(x : multiset α) :
functor.map (functor.map h) (traverse g x) =
traverse (functor.map h ∘ g) x :=
quotient.induction_on x
(by intro; simp [traverse] with functor_norm;
rw [is_lawful_functor.comp_map, map_traverse])
lemma traverse_map {G : Type* → Type*}
[applicative G] [is_comm_applicative G]
{α β γ : Type*}
(g : α → β) (h : β → G γ)
(x : multiset α) :
traverse h (map g x) =
traverse (h ∘ g) x :=
quotient.induction_on x
(by intro; simp [traverse];
rw [← traversable.traverse_map h g];
[ refl, apply_instance ])
lemma naturality {G H : Type* → Type*}
[applicative G] [applicative H]
[is_comm_applicative G] [is_comm_applicative H]
(eta : applicative_transformation G H)
{α β : Type*} (f : α → G β) (x : multiset α) :
eta (traverse f x) = traverse (@eta _ ∘ f) x :=
quotient.induction_on x
(by intro; simp [traverse,is_lawful_traversable.naturality] with functor_norm)
end multiset
|
-- -------------------------------------------------------- [ Problem.idr<Sif> ]
-- Module : Problem.idr<Sif>
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
||| Parser problem specifications
module Sif.DSL.Parser.Problem
-- ----------------------------------------------------------------- [ Imports ]
import Lightyear
import Lightyear.Char
import Lightyear.Strings
import Sif.Types
import Sif.AbsSyntax
import Sif.Pattern.Model
import Sif.DSL.Parser.Utils
import Sif.DSL.Parser.Common
-- -------------------------------------------------------------- [ Directives ]
%default partial
%access private
-- ----------------------------------------------------------------- [ Parsers ]
data VariableDecl : Type where
MkVar : String -> String -> (Maybe String) -> VariableDecl
furpsTy : Parser RTy
furpsTy = (keyword "Functional" *> return FUNC)
<|> (keyword "Usability" *> return USAB)
<|> (keyword "Reliability" *> return RELI)
<|> (keyword "Performance" *> return PERF)
<|> (keyword "Supportability" *> return SUPP)
<?> "Requirement Type"
variable : Parser a -> Parser $ Pair a VariableDecl
variable getTy = do
d <- opt sifDoc
i <- ident
token "<-"
ty <- getTy
t <- title
sifComment
pure $ MkPair ty (MkVar i t d)
<?> "Variable"
requirement : Parser $ SifAST TyREQ
requirement = do
(ty, MkVar i t d) <- variable furpsTy
spaces
pure $ AST.Req i ty t d
<?> "Requirement"
context : Parser $ Pair String SifDomain
context = do
(_, MkVar i t d) <- variable (keyword "Context")
pure $ MkPair i $ MkDomain t d
<?> "Context"
problemDef : Parser $ VariableDecl
problemDef = do
(_, var) <- variable (keyword "Problem")
pure var
<?> "Problem"
export
problem : Parser $ SifAST TyPROBLEM
problem = do
sifComment
keyword "sif"
string "problem"
endOfLine
sifComment
(MkVar i t d) <- problemDef
c <- opt $ some context
sifComment
rs <- many requirement
sifComment
case c of
Nothing => pure $ (AST.Problem i t d [("std", defaultDomain)] rs)
Just Nil => pure $ (AST.Problem i t d [("std", defaultDomain)] rs)
Just cs => pure $ (AST.Problem i t d cs rs)
<?> "Problem Specification"
-- --------------------------------------------------------------------- [ EOF ]
|
import Control.Monad.State
data Tree a = Empty
| Node (Tree a) a (Tree a)
testTree : Tree String
testTree = Node (Node (Node Empty "Jim" Empty) "Fred"
(Node Empty "Sheila" Empty)) "Alice"
(Node Empty "Bob" (Node Empty "Eve" Empty))
flatten : Tree a -> List a
flatten Empty = []
flatten (Node left val right) = flatten left ++ val :: flatten right
treeLabelWith : Tree a -> State (Stream labelType) (Tree (labelType, a))
treeLabelWith Empty = pure Empty
treeLabelWith (Node left val right)
= do left_labelled <- treeLabelWith left
(this :: rest) <- get
put rest
right_labelled <- treeLabelWith right
pure (Node left_labelled (this, val) right_labelled)
treeLabel : Tree a -> Tree (Integer, a)
treeLabel tree = evalState [1..] (treeLabelWith tree)
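-- A possible check at the REPL (not part of the original file):
-- flatten (treeLabel testTree)
-- should list the six names paired with the labels 1..6 in left-to-right order.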
|
Set Ltac Profiling.
Lemma foobar : forall n : nat, S (S n) = S (S n) -> S n = S n -> n = n.
Proof.
auto.
Qed.
Lemma nat_eq_refl : forall n : nat, n = n.
Proof.
induction n; try case n; intros; apply foobar; reflexivity.
Qed.
(* Show Ltac Profile. *) |
#include <boost/mpl/aux_/filter_iter.hpp>
|
#' @export
#' @title by2
#' @description "slower but more flexible"
#' @family poorly documented
#' @author unknown, \email{<unknown>@@dfo-mpo.gc.ca}
by2 = function (x, indices, func, var, newvars, ...) {
x = convert2factor(x, indices)
y = as.data.frame.table( by( data=x[,var], INDICES=x[,indices], FUN=func, ... ) )
names(y) = c(indices, newvars)
y = factor2character (y, indices)
return (y)
}
|
State Before: α : Type u_1
inst✝³ : DecidableEq α
inst✝² : Zero α
inst✝¹ : Mul α
inst✝ : LT α
a b : WithTop α
ha : a < ⊤
hb : b < ⊤
⊢ a * b < ⊤ State After: α : Type u_1
inst✝³ : DecidableEq α
inst✝² : Zero α
inst✝¹ : Mul α
inst✝ : LT α
a b : WithTop α
ha : a ≠ ⊤
hb : b ≠ ⊤
⊢ a * b ≠ ⊤ Tactic: rw [WithTop.lt_top_iff_ne_top] at * State Before: α : Type u_1
inst✝³ : DecidableEq α
inst✝² : Zero α
inst✝¹ : Mul α
inst✝ : LT α
a b : WithTop α
ha : a ≠ ⊤
hb : b ≠ ⊤
⊢ a * b ≠ ⊤ State After: no goals Tactic: simp only [Ne.def, mul_eq_top_iff, *, and_false, false_and, false_or] |
[STATEMENT]
lemma word_upto_upt:
"word_upto a b = (if a \<le> b then map of_nat (upt (unat a) (Suc (unat b))) else word_upto a b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. word_upto a b = (if a \<le> b then map word_of_nat [unat a..<Suc (unat b)] else word_upto a b)
[PROOF STEP]
using word_upto_alt
[PROOF STATE]
proof (prove)
using this:
?a \<le> ?b \<Longrightarrow> word_upto ?a ?b = map word_of_nat [unat ?a..<Suc (unat ?b)]
goal (1 subgoal):
1. word_upto a b = (if a \<le> b then map word_of_nat [unat a..<Suc (unat b)] else word_upto a b)
[PROOF STEP]
by metis |
{-
Closure properties of FinSet under several type constructors.
-}
{-# OPTIONS --safe #-}
module Cubical.Data.FinSet.Constructors where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Equiv
open import Cubical.HITs.PropositionalTruncation renaming (rec to TruncRec)
open import Cubical.Data.Nat
open import Cubical.Data.Unit
open import Cubical.Data.Empty renaming (rec to EmptyRec)
open import Cubical.Data.Sum
open import Cubical.Data.Sigma
open import Cubical.Data.Fin
open import Cubical.Data.SumFin renaming (Fin to SumFin) hiding (discreteFin)
open import Cubical.Data.FinSet.Base
open import Cubical.Data.FinSet.Properties
open import Cubical.Data.FinSet.FiniteChoice
open import Cubical.Relation.Nullary
open import Cubical.Functions.Embedding
open import Cubical.Functions.Surjection
private
variable
ℓ ℓ' ℓ'' ℓ''' : Level
module _
(X : Type ℓ)(p : ≃Fin X) where
≃Fin∥∥ : ≃Fin ∥ X ∥
≃Fin∥∥ = ≃SumFin→Fin (_ , compEquiv (propTrunc≃ (≃Fin→SumFin p .snd)) (SumFin∥∥≃ _))
module _
(X : Type ℓ )(p : ≃Fin X)
(Y : Type ℓ')(q : ≃Fin Y) where
≃Fin⊎ : ≃Fin (X ⊎ Y)
≃Fin⊎ = ≃SumFin→Fin (_ , compEquiv (⊎-equiv (≃Fin→SumFin p .snd) (≃Fin→SumFin q .snd)) (SumFin⊎≃ _ _))
≃Fin× : ≃Fin (X × Y)
≃Fin× = ≃SumFin→Fin (_ , compEquiv (Σ-cong-equiv (≃Fin→SumFin p .snd) (λ _ → ≃Fin→SumFin q .snd)) (SumFin×≃ _ _))
module _
(X : Type ℓ )(p : ≃Fin X)
(Y : X → Type ℓ')(q : (x : X) → ≃Fin (Y x)) where
private
p' = ≃Fin→SumFin p
m = p' .fst
e = p' .snd
q' : (x : X) → ≃SumFin (Y x)
q' x = ≃Fin→SumFin (q x)
f : (x : X) → ℕ
f x = q' x .fst
≃SumFinΣ : ≃SumFin (Σ X Y)
≃SumFinΣ = _ ,
Σ-cong-equiv {B' = λ x → Y (invEq (p' .snd) x)} (p' .snd) (transpFamily p')
⋆ Σ-cong-equiv-snd (λ x → q' (invEq e x) .snd)
⋆ SumFinΣ≃ _ _
≃SumFinΠ : ≃SumFin ((x : X) → Y x)
≃SumFinΠ = _ ,
equivΠ {B' = λ x → Y (invEq (p' .snd) x)} (p' .snd) (transpFamily p')
⋆ equivΠCod (λ x → q' (invEq e x) .snd)
⋆ SumFinΠ≃ _ _
≃FinΣ : ≃Fin (Σ X Y)
≃FinΣ = ≃SumFin→Fin ≃SumFinΣ
≃FinΠ : ≃Fin ((x : X) → Y x)
≃FinΠ = ≃SumFin→Fin ≃SumFinΠ
module _
(X : FinSet ℓ)
(Y : X .fst → FinSet ℓ') where
isFinSetΣ : isFinSet (Σ (X .fst) (λ x → Y x .fst))
isFinSetΣ =
elim2 (λ _ _ → isPropIsFinSet {A = Σ (X .fst) (λ x → Y x .fst)})
(λ p q → ∣ ≃FinΣ (X .fst) p (λ x → Y x .fst) q ∣)
(X .snd) (choice X (λ x → ≃Fin (Y x .fst)) (λ x → Y x .snd))
isFinSetΠ : isFinSet ((x : X .fst) → Y x .fst)
isFinSetΠ =
elim2 (λ _ _ → isPropIsFinSet {A = ((x : X .fst) → Y x .fst)})
(λ p q → ∣ ≃FinΠ (X .fst) p (λ x → Y x .fst) q ∣)
(X .snd) (choice X (λ x → ≃Fin (Y x .fst)) (λ x → Y x .snd))
module _
(X : FinSet ℓ)
(Y : X .fst → FinSet ℓ')
(Z : (x : X .fst) → Y x .fst → FinSet ℓ'') where
isFinSetΠ2 : isFinSet ((x : X .fst) → (y : Y x .fst) → Z x y .fst)
isFinSetΠ2 = isFinSetΠ X (λ x → _ , isFinSetΠ (Y x) (Z x))
module _
(X : FinSet ℓ)
(Y : X .fst → FinSet ℓ')
(Z : (x : X .fst) → Y x .fst → FinSet ℓ'')
(W : (x : X .fst) → (y : Y x .fst) → Z x y .fst → FinSet ℓ''') where
isFinSetΠ3 : isFinSet ((x : X .fst) → (y : Y x .fst) → (z : Z x y .fst) → W x y z .fst)
isFinSetΠ3 = isFinSetΠ X (λ x → _ , isFinSetΠ2 (Y x) (Z x) (W x))
module _
(X : FinSet ℓ) where
isFinSet≡ : (a b : X .fst) → isFinSet (a ≡ b)
isFinSet≡ a b = isDecProp→isFinSet (isFinSet→isSet (X .snd) a b) (isFinSet→Discrete (X .snd) a b)
isFinSetIsContr : isFinSet (isContr (X .fst))
isFinSetIsContr = isFinSetΣ X (λ x → _ , (isFinSetΠ X (λ y → _ , isFinSet≡ x y)))
isFinSet∥∥ : isFinSet ∥ X .fst ∥
isFinSet∥∥ = TruncRec isPropIsFinSet (λ p → ∣ ≃Fin∥∥ (X .fst) p ∣) (X .snd)
module _
(X : FinSet ℓ )
(Y : FinSet ℓ')
(f : X .fst → Y .fst) where
isFinSetFiber : (y : Y .fst) → isFinSet (fiber f y)
isFinSetFiber y = isFinSetΣ X (λ x → _ , isFinSet≡ Y (f x) y)
isFinSetIsEquiv : isFinSet (isEquiv f)
isFinSetIsEquiv =
EquivPresIsFinSet
(invEquiv (isEquiv≃isEquiv' f))
(isFinSetΠ Y (λ y → _ , isFinSetIsContr (_ , isFinSetFiber y)))
module _
(X : FinSet ℓ )
(Y : FinSet ℓ') where
isFinSet⊎ : isFinSet (X .fst ⊎ Y .fst)
isFinSet⊎ = elim2 (λ _ _ → isPropIsFinSet) (λ p q → ∣ ≃Fin⊎ (X .fst) p (Y .fst) q ∣) (X .snd) (Y .snd)
isFinSet× : isFinSet (X .fst × Y .fst)
isFinSet× = elim2 (λ _ _ → isPropIsFinSet) (λ p q → ∣ ≃Fin× (X .fst) p (Y .fst) q ∣) (X .snd) (Y .snd)
isFinSet→ : isFinSet (X .fst → Y .fst)
isFinSet→ = isFinSetΠ X (λ _ → Y)
isFinSet≃ : isFinSet (X .fst ≃ Y .fst)
isFinSet≃ = isFinSetΣ (_ , isFinSet→) (λ f → _ , isFinSetIsEquiv X Y f)
module _
(X : FinSet ℓ) where
isFinSet¬ : isFinSet (¬ (X .fst))
isFinSet¬ = isFinSet→ X (⊥ , ∣ 0 , uninhabEquiv (λ x → x) ¬Fin0 ∣)
module _
(X : FinSet ℓ) where
isFinSetNonEmpty : isFinSet (NonEmpty (X .fst))
isFinSetNonEmpty = isFinSet¬ (_ , isFinSet¬ X)
module _
(X : FinSet ℓ )
(Y : FinSet ℓ')
(f : X .fst → Y .fst) where
isFinSetIsEmbedding : isFinSet (isEmbedding f)
isFinSetIsEmbedding =
isFinSetΠ2 X (λ _ → X)
(λ a b → _ , isFinSetIsEquiv (_ , isFinSet≡ X a b) (_ , isFinSet≡ Y (f a) (f b)) (cong f))
isFinSetIsSurjection : isFinSet (isSurjection f)
isFinSetIsSurjection =
isFinSetΠ Y (λ y → _ , isFinSet∥∥ (_ , isFinSetFiber X Y f y))
module _
(X : FinSet ℓ )
(Y : FinSet ℓ') where
isFinSet↪ : isFinSet (X .fst ↪ Y .fst)
isFinSet↪ = isFinSetΣ (_ , isFinSet→ X Y) (λ f → _ , isFinSetIsEmbedding X Y f)
isFinSet↠ : isFinSet (X .fst ↠ Y .fst)
isFinSet↠ = isFinSetΣ (_ , isFinSet→ X Y) (λ f → _ , isFinSetIsSurjection X Y f)
|
#include <boost/asio.hpp>
#include <common/net_common.h>
#include <istream>
#include <ostream>
namespace dariadb {
namespace net {
std::ostream &operator<<(std::ostream &stream, const CLIENT_STATE &state) {
switch (state) {
case dariadb::net::CLIENT_STATE::WORK:
stream << "CLIENT_STATE::WORK";
break;
case dariadb::net::CLIENT_STATE::DISCONNECTED:
stream << "CLIENT_STATE::DISCONNECTED";
break;
case dariadb::net::CLIENT_STATE::DISCONNETION_START:
stream << "CLIENT_STATE::DISCONNETION_START";
break;
case dariadb::net::CLIENT_STATE::CONNECT:
stream << "CLIENT_STATE::CONNECT";
break;
}
return stream;
}
std::ostream &operator<<(std::ostream &stream, const ERRORS &state) {
switch (state) {
case dariadb::net::ERRORS::WRONG_PROTOCOL_VERSION:
stream << "ERRORS::WRONG_PROTOCOL_VERSION";
break;
case dariadb::net::ERRORS::WRONG_QUERY_PARAM_FROM_GE_TO:
stream << "ERRORS::WRONG_QUERY_PARAM_FROM_GE_TO";
break;
case dariadb::net::ERRORS::APPEND_ERROR:
stream << "ERRORS::APPEND_ERROR";
break;
}
return stream;
}
std::string to_string(const CLIENT_STATE &st) {
std::stringstream ss;
ss << st;
return ss.str();
}
std::string to_string(const ERRORS &st) {
std::stringstream ss;
ss << st;
return ss.str();
}
}
}
|
import argparse
import datetime
import os
import tensorflow as tf
import numpy as np
import socket
import tensorflow.keras.backend as K
from tensorflow.keras.models import model_from_json
import TimeSeriesSR_Final.data_loader_helpers as dataloaders
from tensorflow.python.ops import math_ops
import stippy
import matplotlib.pyplot as plt
import pickle
from tensorflow.keras.optimizers import Adam
import subprocess
import sys
class PM:
def __init__(self,timesteps,folderI,trainLoss,includeModis,batch_size=1,album='laton-ca-20km',img_h=256,img_width=256,cloudC=0.2,startT='31-03-2018',endT='01-07-2018'):
self.img_h = img_h
self.img_w = img_width
self.timesteps = timesteps
self.trainLoss = trainLoss
self.batch_size = batch_size
self.host_addr=socket.gethostbyname(socket.gethostname()) + ':15606'
self.album=album
self.cloudC = float(cloudC)
self.pathToModel = "/s/" + socket.gethostname() + "/a/nobackup/galileo/paahuni/" + str(folderI)
self.folderI=folderI
# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
# tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
self.lstmOp = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
self.dataloader = dataloaders.DatasetHandling(self.img_w, self.img_h, no_of_timesteps=timesteps, startT=startT,
endT=endT,album=album,cloud_cov=self.cloudC)
self.includeModis=includeModis
# self.albums = albums
def ssim_loss_custom(self, y_true, y_pred):
ss = tf.image.ssim(y_true, y_pred, 2.0)
ss = tf.where(tf.math.is_nan(ss), -K.ones_like(ss), ss)
return -tf.reduce_mean(ss)
def custom_loss_mse_ssim(self, y_true, y_pred):
alpha = 0.84
mse = tf.keras.losses.mean_squared_error(y_true, y_pred)
ssimL = self.ssim_loss_custom(y_true, y_pred)
return ((1- alpha)*mse) + (alpha*ssimL)
def sample_image(self, imgs_unscaled_train,imgs_unscaled_target,fakeImg,imgs_unscaled_modis,psnr, mse,epoch):
r, c = 2, 3
fig, axarr = plt.subplots(r, c, figsize=(15, 12))
np.vectorize(lambda axarr: axarr.axis('off'))(axarr)
titles = ['T1', 'T2', 'T3', 'Original HR', 'Predicted ' + str(psnr) + "," + str(mse)]
for row in range(r - 1):
for col in range(self.timesteps):
axarr[row, col].imshow(imgs_unscaled_train[0][col])
axarr[row, col].set_title(titles[col], fontdict={'fontsize': 15})
axarr[r - 1, c - 2].imshow(imgs_unscaled_target[-1])
axarr[r - 1, c - 2].set_title(titles[-2], fontdict={'fontsize': 15})
# Print Predicted
axarr[r - 1, c - 1].imshow(fakeImg[-1])
axarr[r - 1, c - 1].set_title(titles[-1], fontdict={'fontsize': 15})
if True:
axarr[r - 1, 0].imshow(imgs_unscaled_modis[-1])
axarr[r - 1, 0].set_title('MODIS', fontdict={'fontsize': 15})
# plt.suptitle("Target Sentinel Tile Season: {}, WeekOfYear: {}"
# .format(int(targetSOY[-1] * 5), int(targetTimeStamp[-1] * 53)), fontsize=20)
fig.savefig(self.pathToModel + '/test/' + "%s.png" % (epoch))
plt.close()
def clusterbased(self,clusterS=16):
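        # Added note: this assumes each line of the (hypothetical) cluster file looks like
        # "<geohash> <clusterId>"; it returns a dict mapping geohash -> cluster id.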
dicG={}
basep="/s/chopin/a/grad/paahuni/PycharmProjects/ImageSuperResolution/TimeSeriesSR_Final/"
if clusterS==8:
pathN="geohash-clusters-8.txt"
elif clusterS==16:
pathN="geohash-clusters-16.txt"
elif clusterS==24:
pathN="geohash-clusters-24.txt"
else:
return "Wrong number of cluster"
with open(basep+pathN, 'r') as reader:
for line in reader.readlines():
line=line.strip()
dicG[line.split(" ")[0]] = int(line.split(" ")[1])
# print(dicG)
return dicG
def trainLossForGen(self):
if self.trainLoss == 1:
return 'huber'
elif self.trainLoss == 3:
return 'logcosh'
elif self.trainLoss == 4:
return self.custom_loss_mse_ssim
elif self.trainLoss == 5:
return 'mae'
else:
return 'mse'
def scale_images_11(self, imgs):
""" Returns normalized images between (-1 to 1) pixel value"""
return imgs / 127.5 - 1
def unscale_images_11(self, imgs):
return (imgs + 1.) * 127.5
def load_model(self):
json_file = open(self.pathToModel + "/ModelCheckp/GeneratorModel.json", 'r')
loaded_model_json = json_file.read()
json_file.close()
lstm_generator=model_from_json(loaded_model_json)
lstm_generator.load_weights(self.pathToModel + "/ModelCheckp/GeneratorModel.h5")
lstm_generator.compile(loss=self.trainLossForGen(), optimizer=self.lstmOp, experimental_run_tf_function=False,
metrics=["mae", "accuracy"])
return lstm_generator
def performaceAcc(self, targets, predicted):
a = tf.image.convert_image_dtype(targets, tf.float64)
b = tf.image.convert_image_dtype(predicted, tf.float64)
mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1])
psnr = tf.image.psnr(targets,predicted,255.)
return mse.numpy(),psnr
def mseOnly(self,targets,predicted):
a = tf.image.convert_image_dtype(targets, tf.float64)
b = tf.image.convert_image_dtype(predicted, tf.float64)
mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2,-1])
return mse,None
def saveMetrics(self, name, metric):
with open(name, 'wb') as filehandle:
pickle.dump(metric, filehandle)
def perform_testing_model(self):
# timeT=[]
self.lstm_generator = self.load_model()
imgs_iterTest = self.dataloader.get_non_random_image_iterator_testing(batch_size=self.batch_size,
no_of_timesteps=self.timesteps,
sendMetaInfo=True,
includeModis=self.includeModis)
psnrs,mses,count=[],[],0
redMSE,blueMS,greenMS =[],[],[]
while True:
# print("Epoch : ", count)
try:
count+=1
imgs, target, targetSOY, targetGeo, targetTimeStamp, modisT, _= next(imgs_iterTest)
# print("images = ", imgs)
imgs = tf.cast(imgs, tf.float32)
modisT = tf.cast(modisT, tf.float32)
# start = datetime.datetime.now()
fakeImg = self.unscale_images_11(
self.lstm_generator([imgs, tf.cast(targetGeo, tf.float32), tf.cast(targetTimeStamp, tf.float32),
tf.cast(targetSOY, tf.float32), modisT]))
# end = datetime.datetime.now()
# timeT.append(((end-start).microseconds)/self.batch_size)
fakeImg = tf.cast(fakeImg, tf.uint8)
# print("Fake image: ", fakeImg)
# fakeImg = (self.dataloader.unscale_images_11(self.lstm_generator.predict([imgs, targetGeo, targetTimeStamp, targetSOY, modisT]))).astype(np.uint8)
imgs_unscaled_target = self.dataloader.unscale_images_11(target).astype(np.uint8)
iC,ps = self.performaceAcc(imgs_unscaled_target, fakeImg)
psnrs = psnrs + list(K.eval(ps))
mses = mses + list(K.eval(iC))
# redM,_ = self.mseOnly(imgs_unscaled_targev[:,:,:,0], fakeImg[:,:,:,0])
# blueM,_ = self.mseOnly(imgs_unscaled_target[:,:,:,1], fakeImg[:,:,:,1])
# greenM, _ = self.mseOnly(imgs_unscaled_target[:,:,:,2], fakeImg[:,:,:,2])
# print("RedM: ", redMSE)
self.sample_image(tf.cast(self.unscale_images_11(imgs),tf.uint8),imgs_unscaled_target,fakeImg,self.unscale_images_11(modisT),K.eval(ps),K.eval(iC),count)
# redMSE.append(K.eval(redM))
# blueMS.append(K.eval(blueM))
# greenMS.append(K.eval(greenM))
# print("Predicting," ,count)
except StopIteration:
break
# print("Time: ", timeT)
# print("Time taken for inference : microseconds", np.average(np.array(timeT)))
if psnrs==[]:
return "None","None","None"
else:
# print(greenMS)
return np.mean(np.array(psnrs)), np.mean(np.array(mses)),np.std(np.array(mses)),len(psnrs)
# return np.array(redMSE),np.array(blueMS), np.array(greenMS)
def perform_testing_model_cluster(self):
self.lstm_generator = self.load_model()
imgs_iterTest = self.dataloader.get_non_random_image_iterator_testing(batch_size=1,
no_of_timesteps=self.timesteps,
sendMetaInfo=True,
includeModis=self.includeModis)
cou=0
psnrs = {0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0,
8:0,9:0,10:0,11:0,12:0,13:0,14:0,15:0}
mses= {0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0,
8:0,9:0,10:0,11:0,12:0,13:0,14:0,15:0}
localC=self.clusterbased(16)
finalClusCo={0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0,
8:0,9:0,10:0,11:0,12:0,13:0,14:0,15:0}
totalI = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0,
8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0, 15: 0}
localCn=set()
while True:
try:
imgs, target, targetSOY, targetGeo, targetTimeStamp, modisT,g = next(imgs_iterTest)
fakeImg = (self.dataloader.unscale_images_11(
self.lstm_generator.predict([imgs, targetGeo, targetTimeStamp, targetSOY, modisT]))).astype(
np.uint8)
imgs_unscaled_target = self.dataloader.unscale_images_11(target).astype(np.uint8)
iC, ps = self.performaceAcc(imgs_unscaled_target, fakeImg)
clustFound = localC[g]
psnrs[clustFound] += np.sum(K.eval(ps))
mses[clustFound] += np.sum(K.eval(iC))
totalI[clustFound] += 1
localCn.add(g)
except StopIteration:
break
for g in localCn:
clustFound = localC[g]
finalClusCo[clustFound]+=1
for c in finalClusCo:
print(c, finalClusCo[c],psnrs[c], mses[c], totalI[c])
def targetNAIP(self):
infoNAIP={}
startT, endT = self.dataloader.getTime('01-01-2018', '01-01-2020')
listImages = stippy.list_node_images(self.host_addr, album=self.album, geocode='9q', recurse=True,
platform='NAIP', source='filled',
start_timestamp=startT, end_timestamp=endT, min_pixel_coverage=1.0)
count = 0
for (node, image) in listImages:
for p in image.files:
if p.path.endswith('-0.tif'):
g = image.geocode
count += 1
if g not in infoNAIP:
infoNAIP[g] = np.array([[image.timestamp, p.path]], dtype=np.dtype(object))
else:
paths = infoNAIP.get(g)
paths = np.concatenate((paths, np.array([[image.timestamp, p.path]], dtype=np.dtype(object))))
infoNAIP[g]=paths
return infoNAIP
def runDistributedPredictionClust(self,worker=1):
start,end=176,219
countC={}
psrnC={}
mseC={}
imageC={}
for HOST in range(start,end):
if HOST==192:
continue
bashCmd = ["ssh", "lattice-%s" % str(HOST), "python3",
"PycharmProjects/ImageSuperResolution/TimeSeriesSR_Final/PredictionForModel.py","--outputDir " + str(self.folderI),"--timesteps "+ str(self.timesteps),"--worker "+ str(worker) ,"--cloudCov " + str(self.cloudC)]
process = subprocess.Popen(bashCmd, stdout=subprocess.PIPE)
result = process.stdout
print("Testing on Machine : ", HOST)
prs = result.readlines()
for results in prs:
pr = results.decode("utf-8")
if pr.split(" ")[0].strip()!='None':
cls = int(pr.split(" ")[0].strip())
count = int(pr.split(" ")[1].strip())
machPsnr=float(pr.split(" ")[2].strip())
machMSE=float(pr.split(" ")[3].strip())
machI=int(pr.split(" ")[4].strip())
if cls in countC:
ps = psrnC.get(cls)
ps.append(machPsnr)
psrnC[cls]=ps
ms = mseC.get(cls)
ms.append(machMSE)
mseC[cls] = ms
countC[cls] += count
imageC[cls]+=machI
else:
psrnC[cls] = [machPsnr]
mseC[cls] = [machMSE]
imageC[cls]=machI
countC[cls] = count
# print(psrnC)
# print(countC)
for g in countC:
if imageC.get(g)<1:
continue
print("Geohash Cluster: {} Geohash counts: {} Average PSNR: {} Average MSE: {} CoungG: {}".format(g,countC.get(g),(np.sum(psrnC.get(g))/imageC.get(g)),np.sum(mseC.get(g))/imageC.get(g),imageC.get(g)))
def runDistributedPrediction(self,worker=1):
psnrsSum,msesSum,std,counts=0,0,0,0
start,end=176,177
redB,blueB,greenB=[],[],[]
cou=0
for HOST in range(start,end):
if HOST==192:
continue
bashCmd = ["ssh", "lattice-%s" % str(HOST), "python3",
"PycharmProjects/ImageSuperResolution/TimeSeriesSR_Final/PredictionForModel.py","--outputDir " + str(self.folderI),"--timesteps "+ str(self.timesteps),"--worker "+ str(worker) ,"--cloudCov " + str(self.cloudC)]
process = subprocess.Popen(bashCmd, stdout=subprocess.PIPE)
result = process.stdout
print("Testing on Machine : ", HOST)
pr = result.readlines()[0].decode("utf-8")
if pr.split(" ")[0].strip()!='None':
# psnrsSum+=float(pr.split(" ")[0].strip())
# msesSum+=float(pr.split(" ")[1].strip())
# std+=float(pr.split(" ")[2].strip())
# counts += int(pr.split(" ")[3].strip())
print("Got: ", pr.strip())
redB.append(list(pr))
# blueB = np.array(pr.split(" ")[1])
# greenB = np.array(pr.split(" ")[2])
cou +=1
# print("Final Results ---> DIR: {} CloudCov: {} PSNR: {} MSE: {} stdDev: {} TotalCount: {}".format(str(self.folderI), self.cloudC, str(psnrsSum/float(cou)), str(msesSum/float(cou)),str(std/float(cou)),counts))
print("Final r: ", redB)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Change the output directory
parser.add_argument('--outputDir', type=int, default=26)
parser.add_argument('--timesteps', type=int, default=3)
parser.add_argument('--worker', type=int,default=1)
parser.add_argument('--cloudCov', type=float, default=0.6)
parser.add_argument('--album', type=str, default='laton-ca-20km')
args = parser.parse_args()
#
predictingModel = PM(timesteps=args.timesteps,folderI=args.outputDir,trainLoss=1,batch_size=1,cloudC=args.cloudCov,
album='laton-ca-20km',includeModis=0,img_h=256,img_width=256,startT='01-04-2019',endT='01-10-2020')
# predictingModel.perform_testing_model_cluster()
if args.worker:
predictingModel.perform_testing_model()
# print(psnrs, mses, std, count)
# print("Finished")
else:
predictingModel.runDistributedPrediction(1)
# if args.worker:
# predictingModel.perform_testing_model_cluster()
# else:
# predictingModel.runDistributedPredictionClust(1)
# count=0
# imgs_iterTest = predictingModel.dataloader.get_non_random_image_iterator_new(
# batch_size=4,
# no_of_timesteps=predictingModel.timesteps,
# sendMetaInfo=True,
# includeModis=predictingModel.includeModis)
#
# while True:
# try:
# imgs, target, targetSOY, targetGeo, targetTimeStamp, modisT = next(imgs_iterTest)
# count +=1
# print("vcount =" , count)
# except StopIteration:
# break
|
module Structure.Category.Monoidal where
import Lvl
open import Data.Tuple as Tuple using (_,_ ; _⨯_)
open import Data.Tuple.Category
open import Data.Tuple.Equivalence
import Functional as Fn
open import Logic.Predicate
open import Structure.Setoid
open import Structure.Category
open import Structure.Category.Functor
import Structure.Category.Functor.Functors as Functors
open import Structure.Category.NaturalTransformation
open import Syntax.Function
open import Type
private variable ℓ ℓₒ ℓₘ ℓₑ ℓₒ₁ ℓₘ₁ ℓₑ₁ ℓₒ₂ ℓₘ₂ ℓₑ₂ : Lvl.Level
private variable Obj : Type{ℓ}
private variable Morphism : Obj → Obj → Type{ℓ}
open Functors.Wrapped
module _
{C : CategoryObject{ℓₒ}{ℓₘ}{ℓₑ}}
(product@([∃]-intro _) : (C ⨯ᶜᵃᵗ C) →ᶠᵘⁿᶜᵗᵒʳ C)
(𝟏 : CategoryObject.Object(C))
where
open CategoryObject(C)
open Category.ArrowNotation(category)
open Category(category)
open Functor
record MonoidalCategory : Type{Lvl.of(Type.of C)} where
constructor intro
field
associator : (((product ∘ᶠᵘⁿᶜᵗᵒʳ (Tupleᶜᵃᵗ.mapLeft product))) ∘ᶠᵘⁿᶜᵗᵒʳ Tupleᶜᵃᵗ.associateLeft) ↔ᴺᵀ (product ∘ᶠᵘⁿᶜᵗᵒʳ (Tupleᶜᵃᵗ.mapRight product))
unitorₗ : (product ∘ᶠᵘⁿᶜᵗᵒʳ Tupleᶜᵃᵗ.constₗ 𝟏) ↔ᴺᵀ idᶠᵘⁿᶜᵗᵒʳ
unitorᵣ : (product ∘ᶠᵘⁿᶜᵗᵒʳ Tupleᶜᵃᵗ.constᵣ 𝟏) ↔ᴺᵀ idᶠᵘⁿᶜᵗᵒʳ
_⊗_ : Object → Object → Object
_⊗_ = Tuple.curry([∃]-witness product)
_<⊗>_ : ∀{x₁ x₂ y₁ y₂} → (x₁ ⟶ x₂) → (y₁ ⟶ y₂) → ((x₁ ⊗ y₁) ⟶ (x₂ ⊗ y₂))
_<⊗>_ = Tuple.curry(map([∃]-proof product))
α : ∀(x)(y)(z) → (((x ⊗ y) ⊗ z) ⟶ (x ⊗ (y ⊗ z)))
α x y z = [∃]-witness associator (x , (y , z))
υₗ : ∀(x) → ((𝟏 ⊗ x) ⟶ x)
υₗ = [∃]-witness unitorₗ
υᵣ : ∀(x) → ((x ⊗ 𝟏) ⟶ x)
υᵣ = [∃]-witness unitorᵣ
α⁻¹ : ∀(x)(y)(z) → ((x ⊗ (y ⊗ z)) ⟶ ((x ⊗ y) ⊗ z))
α⁻¹ x y z = [∃]-witness (invᴺᵀ associator) (x , (y , z))
υₗ⁻¹ : ∀(x) → (x ⟶ (𝟏 ⊗ x))
υₗ⁻¹ = [∃]-witness (invᴺᵀ unitorₗ)
υᵣ⁻¹ : ∀(x) → (x ⟶ (x ⊗ 𝟏))
υᵣ⁻¹ = [∃]-witness (invᴺᵀ unitorᵣ)
α-natural : ∀{(x₁ , (x₂ , x₃)) (y₁ , (y₂ , y₃)) : Object ⨯ (Object ⨯ Object)}
{(f₁ , f₂ , f₃) : ((x₁ ⟶ y₁) ⨯ ((x₂ ⟶ y₂) ⨯ (x₃ ⟶ y₃)))} →
((α y₁ y₂ y₃) ∘ ((f₁ <⊗> f₂) <⊗> f₃) ≡ (f₁ <⊗> (f₂ <⊗> f₃)) ∘ (α x₁ x₂ x₃))
α-natural = NaturalTransformation.natural(NaturalIsomorphism.naturalTransformation([∃]-proof associator))
υₗ-natural : ∀{x y}{f : x ⟶ y} → (υₗ(y) ∘ (id <⊗> f) ≡ f ∘ υₗ(x))
υₗ-natural = NaturalTransformation.natural(NaturalIsomorphism.naturalTransformation([∃]-proof unitorₗ))
υᵣ-natural : ∀{x y}{f : x ⟶ y} → (υᵣ(y) ∘ (f <⊗> id) ≡ f ∘ υᵣ(x))
υᵣ-natural = NaturalTransformation.natural(NaturalIsomorphism.naturalTransformation([∃]-proof unitorᵣ))
α⁻¹-natural : ∀{((x₁ , x₂) , x₃) ((y₁ , y₂) , y₃) : (Object ⨯ Object) ⨯ Object}
{(f₁ , f₂ , f₃) : ((x₁ ⟶ y₁) ⨯ ((x₂ ⟶ y₂) ⨯ (x₃ ⟶ y₃)))} →
((α⁻¹ y₁ y₂ y₃) ∘ (f₁ <⊗> (f₂ <⊗> f₃)) ≡ ((f₁ <⊗> f₂) <⊗> f₃) ∘ (α⁻¹ x₁ x₂ x₃))
α⁻¹-natural = NaturalTransformation.natural(NaturalIsomorphism.naturalTransformation([∃]-proof (invᴺᵀ associator)))
υₗ⁻¹-natural : ∀{x y}{f : x ⟶ y} → (υₗ⁻¹(y) ∘ f ≡ (id <⊗> f) ∘ υₗ⁻¹(x))
υₗ⁻¹-natural = NaturalTransformation.natural(NaturalIsomorphism.naturalTransformation([∃]-proof (invᴺᵀ unitorₗ)))
υᵣ⁻¹-natural : ∀{x y}{f : x ⟶ y} → (υᵣ⁻¹(y) ∘ f ≡ (f <⊗> id) ∘ υᵣ⁻¹(x))
υᵣ⁻¹-natural = NaturalTransformation.natural(NaturalIsomorphism.naturalTransformation([∃]-proof (invᴺᵀ unitorᵣ)))
-- TODO: And the coherence conditions?
record Monoidalᶜᵃᵗ{ℓₒ}{ℓₘ}{ℓₑ} (C : CategoryObject{ℓₒ}{ℓₘ}{ℓₑ}) : Type{Lvl.𝐒(ℓₒ Lvl.⊔ ℓₘ Lvl.⊔ ℓₑ)} where
constructor intro
field
productFunctor : (C ⨯ᶜᵃᵗ C) →ᶠᵘⁿᶜᵗᵒʳ C
unitObject : CategoryObject.Object(C)
⦃ monoidalCategory ⦄ : MonoidalCategory(productFunctor)(unitObject)
module _
{C₁ : CategoryObject{ℓₒ}{ℓₘ}{ℓₑ}} ⦃ (intro product₁ 𝟏₁) : Monoidalᶜᵃᵗ(C₁) ⦄
{C₂ : CategoryObject{ℓₒ}{ℓₘ}{ℓₑ}} ⦃ (intro product₂ 𝟏₂) : Monoidalᶜᵃᵗ(C₂) ⦄
(functor@([∃]-intro F) : (C₁ →ᶠᵘⁿᶜᵗᵒʳ C₂))
where
instance _ = C₁
instance _ = C₂
open CategoryObject ⦃ … ⦄
open Category ⦃ … ⦄
open Category.ArrowNotation ⦃ … ⦄
open Functor ⦃ … ⦄
open MonoidalCategory ⦃ … ⦄
-- Also called: Lax monoidal functor, applicative functor, idiom.
record MonoidalFunctor : Type{Lvl.of(Type.of C₁)} where
constructor intro
field
ε : 𝟏₂ ⟶ F(𝟏₁)
Μ : (product₂ ∘ᶠᵘⁿᶜᵗᵒʳ Tupleᶜᵃᵗ.map functor functor) →ᴺᵀ (functor ∘ᶠᵘⁿᶜᵗᵒʳ product₁)
μ : ∀{x y} → ((F(x) ⊗ F(y)) ⟶ F(x ⊗ y))
μ{x}{y} = [∃]-witness Μ (x , y)
μ-natural : ∀{(x₁ , x₂) (y₁ , y₂) : Object ⦃ C₁ ⦄ ⨯ Object ⦃ C₁ ⦄}
{(f₁ , f₂) : ((x₁ ⟶ y₁) ⨯ (x₂ ⟶ y₂))} →
(μ ∘ (map(f₁) <⊗> map(f₂)) ≡ map(f₁ <⊗> f₂) ∘ μ)
μ-natural = NaturalTransformation.natural([∃]-proof Μ)
-- TODO: Coherence conditions
module _
{C : CategoryObject{ℓₒ}{ℓₘ}{ℓₑ}} ⦃ (intro product 𝟏) : Monoidalᶜᵃᵗ(C) ⦄
(functor@([∃]-intro F) : (⟲ᶠᵘⁿᶜᵗᵒʳ C))
where
instance _ = C
open CategoryObject ⦃ … ⦄
open Category ⦃ … ⦄
open Category.ArrowNotation ⦃ … ⦄
open Functor ⦃ … ⦄
open MonoidalCategory ⦃ … ⦄
record TensorialStrength : Type{Lvl.of(Type.of C)} where
constructor intro
field
Β : (product ∘ᶠᵘⁿᶜᵗᵒʳ Tupleᶜᵃᵗ.mapRight functor) →ᴺᵀ (functor ∘ᶠᵘⁿᶜᵗᵒʳ product)
β : ∀{x y} → ((x ⊗ F(y)) ⟶ F(x ⊗ y))
β{x}{y} = [∃]-witness Β (x , y)
β-natural : ∀{(x₁ , x₂) (y₁ , y₂) : Object ⦃ C ⦄ ⨯ Object ⦃ C ⦄}
{(f₁ , f₂) : ((x₁ ⟶ y₁) ⨯ (x₂ ⟶ y₂))} →
(β ∘ (f₁ <⊗> map(f₂)) ≡ map(f₁ <⊗> f₂) ∘ β)
β-natural = NaturalTransformation.natural([∃]-proof Β)
module TensorialStrengthenedMonoidalEndofunctor ⦃ monoidal : MonoidalFunctor(functor) ⦄ ⦃ strength : TensorialStrength ⦄ where
open MonoidalFunctor(monoidal)
open TensorialStrength(strength)
Ι : idᶠᵘⁿᶜᵗᵒʳ →ᴺᵀ functor
∃.witness Ι x = {!!} ∘ μ{x}{𝟏} ∘ {!!}
∃.proof Ι = {!!}
ι : ∀{x} → (x ⟶ F(x))
ι{x} = [∃]-witness Ι x
|
# ......................................................................................
# ...............................Exercise 1 - Combinatorics.............................
# ..................Adéla Vrtková, Michal Béreš, Martina Litschmannová..................
# ......................................................................................
# If the text does not display correctly, set File \ Reopen with Encoding... to UTF-8
# To show the outline of the script use CTRL+SHIFT+O
# To run the commands on individual lines use CTRL+ENTER
# * Variations ####
#
# V(n,k) - variations without repetition; the first argument is the total number of
# entities, the second argument the size of the selection
# a function is created with the function command; it is an object whose name is given
# by the variable to which the object is assigned
variace = function(n,k) # here we give the number of parameters and their names
{ # the whole body of the function is enclosed in braces {...}
citatel = factorial(n) # a factorial function already exists in base R, so we use it
jmenovatel = factorial(n-k)
return(citatel/jmenovatel) # whatever the function returns goes into return(...)
}
# V*(n,k) - variations with repetition
variace_opak = function(n,k)
{
return(n^k)
}
# * Permutations ####
#
# P(n)=V(n,n) - permutations
permutace = function(n)
{
return(variace(n,n))
}
# P*(n1,n2,n3,....,nk) - permutations with repetition; the input is a vector with the
# counts of the individual unique entities
permutace_opak = function(vec_n) # vec_n is a vector of counts, e.g.: vec_n = c(2,2,2,4,3)
{
n = sum(vec_n) # count how many values we have in total
res_temp=factorial(n) # their factorial = the value in the numerator
# a simple loop starts with the for command; the parentheses then contain the name of
# the iterator and the list it is taken from
for(pocet in vec_n) # pocet is the iterator and takes the values of the vector vec_n one by one
{
# we successively divide by the factorial of the count of each unique entity
res_temp=res_temp/factorial(pocet)
}
return(res_temp)
}
# * Combinations ####
#
# C(n,k) - combinations
kombinace = function(n,k)
{
return(choose(n,k)) # a function for combinations already exists in R and is called choose
}
# C*(n,k) - combinations with repetition
kombinace_opak = function(n,k)
{
return(choose(n+k-1,k)) # we use the well-known formula
}
# Exercises ####
#
# * Example 1. ####
#
# A shop has three types of locks available. To open the first lock you must press
# four of the ten buttons labelled with the digits 0 to 9. (The order does not
# matter - the buttons stay pressed.)
# The second lock opens when we press six of the ten buttons.
# To open the third lock the correct combination has to be set
# on four dials. Which of these locks protects best against
# thieves?
z1=kombinace(10,4)
z2=kombinace(10,6)
z3=variace_opak(10,4)
paste("number of combinations: ",z1,",",z2,",",z3)
paste("probability of random opening: ",1/z1,",",1/z2,",",1/z3)
# * Example 2. ####
#
# A shop offers two kinds of briefcase locks. The first briefcase is locked with a code
# consisting
# of six digits. The second briefcase is locked with two locks that open simultaneously.
# The code
# of each of them consists of three digits. For each briefcase, determine the probability
# that a thief
# opens it at the first attempt. Which type of lock is safer?
z1=variace_opak(10,6);
z2=variace_opak(10,3)*variace_opak(10,3);
z2_v2=variace_opak(10,3)+variace_opak(10,3);
paste("number of combinations: ",z1,",",z2,", second variant - ",z2_v2)
# * Example 3. ####
#
# An urn contains 40 balls - 2 red and 38 white. We randomly draw 2 balls from the urn.
# What is the probability that both are red?
poc_moz=kombinace(40,2);
poc_priz=kombinace(2,2);
prob=poc_priz/poc_moz;
paste("the probability is: ",prob)
# * Example 4. ####
#
# A student was supposed to prepare answers to 40 exam questions. He could not answer the
# two questions the examiner gave him, so he said "What bad luck! Those are the only two
# questions I cannot answer." What is the probability that he is telling the truth?
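# A possible sketch (not part of the original hand-out): the student is telling the truth
# only if the examiner happened to draw exactly the two questions he cannot answer.
kombinace(2,2)/kombinace(40,2) # = 1/780, roughly 0.0013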
# * Example 5. ####
#
# A pupil passes a chemistry test if, in a list of 40 chemical compounds, he underlines
# the only two aldehydes in the list. What is the probability that a pupil who picks the
# compounds
# at random passes the test?
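# A possible sketch (not part of the original hand-out): the same structure as Example 4,
# the pupil must hit exactly the two aldehydes.
kombinace(2,2)/kombinace(40,2) # again 1/780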
# * Example 6. ####
#
# A group of 40 tourists was returning from abroad and among them were 2 smugglers. At
# the border a customs officer
# asked 2 tourists to undergo a personal search and it turned out that both were
# smugglers. The remaining tourists reacted:
# "The customs officer was really lucky!", "Someone informed on the smugglers!", ... How
# should we judge these statements?
# Is the suspicion that someone informed on the smugglers justified?
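# A possible sketch (not part of the original hand-out): the chance of picking both
# smugglers purely at random.
kombinace(2,2)/kombinace(40,2) # about 0.0013, so the suspicion is understandable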
# * Example 7. ####
#
# From an urn with three balls, two red and one white, two balls will be drawn at the
# same time.
# A student and a teacher make a bet. If both balls have the same colour, the student
# wins. If
# the balls have different colours, the teacher wins. Is the game fair? What are the
# probabilities that
# the teacher and the student win?
# the combn function produces combinations of a prescribed size - the first parameter is a vector of values, the second the size of the selection
combn(c('cerna','cerna','cervena'),2)
# * Example 8. ####
#
#
# The game described in Example 7 was not fair. Which ball (red or white) do we have to
# add to the urn so that the game becomes fair?
combn(c('cerna','cerna','cerna','cervena'),2)
combn(c('cerna','cerna','cervena','cervena'),2)
# * Example 9. ####
#
# You want to play Člověče, nezlob se (Ludo), but the die has been lost. With what, and
# how, can the die be replaced,
# if you have playing cards (a deck of 32 cards) and 4 differently coloured marbles available?
# * Example 10. ####
#
# You want to play Člověče, nezlob se (Ludo), but the die has been lost. How can the die
# be replaced, if you have
# 3 differently coloured marbles available?
# * Example 11. ####
#
# A Škoda car dealership runs a sales promotion in February. With the standard equipment
# they offer
# 3 items of the optional extras for free. The optional extras comprise 7 items:
# - cruise control, heated seats, rear airbags, xenon headlights, a sunroof, a security
# lock
# on the gearbox, a special durable metallic paint.
#
# How many ways does a customer have to choose 3 items of the optional extras?
kombinace(7,3)
# * Example 12. ####
#
# At an exam, 12 students sat down in the 5th row. The examiner wants to decide himself
# how to seat these
# students in the row.
# - How many ways are there to seat the students?
# - Student Brahý asks to sit at the end of the row so that he can leave early to catch a
# train. How many ways are there to seat the students if the examiner wants to grant
# student Brahý's request?
# - How many ways are there to seat the students if Pažout and Horáček must not sit next
# to each other?
#a
permutace(12)
prazdnych_sedadel=8
permutace_opak(c(1,1,1,1,1,1,1,1,1,1,1,1,prazdnych_sedadel))
#b
1*permutace(11)+permutace(11)*1
#c
vedle_sebe=permutace(11)+permutace(11)
permutace(12)-vedle_sebe
# * Example 13. ####
#
# How many anagrams can be formed from the word STATISTIKA?
statistika=c(2,3,2,2,1)
permutace_opak(statistika)
# * Example 14. ####
#
# Tesco received new goods - 6 kinds of boys' T-shirts. They have at least 7 pieces of
# each kind.
# A mother wants to buy her son 4 T-shirts. How many ways are there to choose them
# - if they should all be different?
# - if she allows them all to be the same?
#a
kombinace(6,4)
#b
kombinace_opak(6,4)
# * Example 15. ####
# How many passwords of length 5 can we create from the letters of the alphabet
# - if upper and lower case letters are not distinguished?
# - if upper and lower case letters are distinguished?
#a
variace_opak(26,5)
#b
variace_opak(52,5)
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.limits.shapes.finite_products
import category_theory.limits.shapes.binary_products
import category_theory.preadditive
/-!
# Biproducts and binary biproducts
We introduce the notion of (finite) biproducts and binary biproducts.
These are slightly unusual relative to the other shapes in the library,
as they are simultaneously limits and colimits.
(Zero objects are similar; they are "biterminal".)
We treat first the case of a general category with zero morphisms,
and subsequently the case of a preadditive category.
In a category with zero morphisms, we model the (binary) biproduct of `P Q : C`
using a `binary_bicone`, which has a cone point `X`,
and morphisms `fst : X ⟶ P`, `snd : X ⟶ Q`, `inl : P ⟶ X` and `inr : X ⟶ Q`,
such that `inl ≫ fst = 𝟙 P`, `inl ≫ snd = 0`, `inr ≫ fst = 0`, and `inr ≫ snd = 𝟙 Q`.
Such a `binary_bicone` is a biproduct if the cone is a limit cone, and the cocone is a colimit
cocone.
In a preadditive category,
* any `binary_biproduct` satisfies `total : fst ≫ inl + snd ≫ inr = 𝟙 X`
* any `binary_product` is a `binary_biproduct`
* any `binary_coproduct` is a `binary_biproduct`
For biproducts indexed by a `fintype J`, a `bicone` again consists of a cone point `X`
and morphisms `π j : X ⟶ F j` and `ι j : F j ⟶ X` for each `j`,
such that `ι j ≫ π j'` is the identity when `j = j'` and zero otherwise.
In a preadditive category,
* any `biproduct` satisfies `total : ∑ j : J, biproduct.π f j ≫ biproduct.ι f j = 𝟙 (⨁ f)`
* any `product` is a `biproduct`
* any `coproduct` is a `biproduct`
## Notation
As `⊕` is already taken for the sum of types, we introduce the notation `X ⊞ Y` for
a binary biproduct. We introduce `⨁ f` for the indexed biproduct.
-/
noncomputable theory
universes v u
open category_theory
open category_theory.functor
namespace category_theory.limits
variables {J : Type v} [decidable_eq J]
variables {C : Type u} [category.{v} C] [has_zero_morphisms C]
/--
A `c : bicone F` is:
* an object `c.X` and
* morphisms `π j : X ⟶ F j` and `ι j : F j ⟶ X` for each `j`,
* such that `ι j ≫ π j'` is the identity when `j = j'` and zero otherwise.
-/
@[nolint has_inhabited_instance]
structure bicone (F : J → C) :=
(X : C)
(π : Π j, X ⟶ F j)
(ι : Π j, F j ⟶ X)
(ι_π : ∀ j j', ι j ≫ π j' = if h : j = j' then eq_to_hom (congr_arg F h) else 0)
@[simp] lemma bicone_ι_π_self {F : J → C} (B : bicone F) (j : J) : B.ι j ≫ B.π j = 𝟙 (F j) :=
by simpa using B.ι_π j j
@[simp] lemma bicone_ι_π_ne {F : J → C} (B : bicone F) {j j' : J} (h : j ≠ j') :
B.ι j ≫ B.π j' = 0 :=
by simpa [h] using B.ι_π j j'
variables {F : J → C}
namespace bicone
/-- Extract the cone from a bicone. -/
@[simps]
def to_cone (B : bicone F) : cone (discrete.functor F) :=
{ X := B.X,
π := { app := λ j, B.π j }, }
/-- Extract the cocone from a bicone. -/
@[simps]
def to_cocone (B : bicone F) : cocone (discrete.functor F) :=
{ X := B.X,
ι := { app := λ j, B.ι j }, }
end bicone
/--
A bicone over `F : J → C`, which is both a limit cone and a colimit cocone.
-/
@[nolint has_inhabited_instance]
structure limit_bicone (F : J → C) :=
(bicone : bicone F)
(is_limit : is_limit bicone.to_cone)
(is_colimit : is_colimit bicone.to_cocone)
/--
`has_biproduct F` expresses the mere existence of a bicone which is
simultaneously a limit and a colimit of the diagram `F`.
-/
class has_biproduct (F : J → C) : Prop :=
mk' :: (exists_biproduct : nonempty (limit_bicone F))
lemma has_biproduct.mk {F : J → C} (d : limit_bicone F) : has_biproduct F :=
⟨nonempty.intro d⟩
/-- Use the axiom of choice to extract explicit `biproduct_data F` from `has_biproduct F`. -/
def get_biproduct_data (F : J → C) [has_biproduct F] : limit_bicone F :=
classical.choice has_biproduct.exists_biproduct
/-- A bicone for `F` which is both a limit cone and a colimit cocone. -/
def biproduct.bicone (F : J → C) [has_biproduct F] : bicone F :=
(get_biproduct_data F).bicone
/-- `biproduct.bicone F` is a limit cone. -/
def biproduct.is_limit (F : J → C) [has_biproduct F] : is_limit (biproduct.bicone F).to_cone :=
(get_biproduct_data F).is_limit
/-- `biproduct.bicone F` is a colimit cocone. -/
def biproduct.is_colimit (F : J → C) [has_biproduct F] :
is_colimit (biproduct.bicone F).to_cocone :=
(get_biproduct_data F).is_colimit
@[priority 100]
instance has_product_of_has_biproduct [has_biproduct F] : has_limit (discrete.functor F) :=
has_limit.mk { cone := (biproduct.bicone F).to_cone,
is_limit := biproduct.is_limit F, }
@[priority 100]
instance has_coproduct_of_has_biproduct [has_biproduct F] : has_colimit (discrete.functor F) :=
has_colimit.mk { cocone := (biproduct.bicone F).to_cocone,
is_colimit := biproduct.is_colimit F, }
variables (J C)
/--
`C` has biproducts of shape `J` if we have
a limit and a colimit, with the same cone points,
of every function `F : J → C`.
-/
class has_biproducts_of_shape : Prop :=
(has_biproduct : Π F : J → C, has_biproduct F)
attribute [instance, priority 100] has_biproducts_of_shape.has_biproduct
/-- `has_finite_biproducts C` represents a choice of biproduct for every family of objects in `C`
indexed by a finite type with decidable equality. -/
class has_finite_biproducts : Prop :=
(has_biproducts_of_shape : Π (J : Type v) [decidable_eq J] [fintype J],
has_biproducts_of_shape J C)
attribute [instance, priority 100] has_finite_biproducts.has_biproducts_of_shape
@[priority 100]
instance has_finite_products_of_has_finite_biproducts [has_finite_biproducts C] :
has_finite_products C :=
{ out := λ J _ _, ⟨λ F, by exactI has_limit_of_iso discrete.nat_iso_functor.symm⟩ }
@[priority 100]
instance has_finite_coproducts_of_has_finite_biproducts [has_finite_biproducts C] :
has_finite_coproducts C :=
{ out := λ J _ _, ⟨λ F, by exactI has_colimit_of_iso discrete.nat_iso_functor⟩ }
variables {J C}
/--
The isomorphism between the specified limit and the specified colimit for
a functor with a bilimit.
-/
def biproduct_iso (F : J → C) [has_biproduct F] :
limits.pi_obj F ≅ limits.sigma_obj F :=
(is_limit.cone_point_unique_up_to_iso (limit.is_limit _) (biproduct.is_limit F)).trans $
is_colimit.cocone_point_unique_up_to_iso (biproduct.is_colimit F) (colimit.is_colimit _)
end category_theory.limits
namespace category_theory.limits
variables {J : Type v} [decidable_eq J]
variables {C : Type u} [category.{v} C] [has_zero_morphisms C]
/-- `biproduct f` computes the biproduct of a family of elements `f`. (It is defined as an
abbreviation for `limit (discrete.functor f)`, so for most facts about `biproduct f`, you will
just use general facts about limits and colimits.) -/
abbreviation biproduct (f : J → C) [has_biproduct f] : C :=
(biproduct.bicone f).X
notation `⨁ ` f:20 := biproduct f
/-- The projection onto a summand of a biproduct. -/
abbreviation biproduct.π (f : J → C) [has_biproduct f] (b : J) : ⨁ f ⟶ f b :=
(biproduct.bicone f).π b
@[simp]
lemma biproduct.bicone_π (f : J → C) [has_biproduct f] (b : J) :
(biproduct.bicone f).π b = biproduct.π f b := rfl
/-- The inclusion into a summand of a biproduct. -/
abbreviation biproduct.ι (f : J → C) [has_biproduct f] (b : J) : f b ⟶ ⨁ f :=
(biproduct.bicone f).ι b
@[simp]
lemma biproduct.bicone_ι (f : J → C) [has_biproduct f] (b : J) :
(biproduct.bicone f).ι b = biproduct.ι f b := rfl
@[reassoc]
lemma biproduct.ι_π (f : J → C) [has_biproduct f] (j j' : J) :
biproduct.ι f j ≫ biproduct.π f j' = if h : j = j' then eq_to_hom (congr_arg f h) else 0 :=
(biproduct.bicone f).ι_π j j'
@[simp,reassoc]
lemma biproduct.ι_π_self (f : J → C) [has_biproduct f] (j : J) :
biproduct.ι f j ≫ biproduct.π f j = 𝟙 _ :=
by simp [biproduct.ι_π]
@[simp,reassoc]
lemma biproduct.ι_π_ne (f : J → C) [has_biproduct f] {j j' : J} (h : j ≠ j') :
biproduct.ι f j ≫ biproduct.π f j' = 0 :=
by simp [biproduct.ι_π, h]
/-- Given a collection of maps into the summands, we obtain a map into the biproduct. -/
abbreviation biproduct.lift
{f : J → C} [has_biproduct f] {P : C} (p : Π b, P ⟶ f b) : P ⟶ ⨁ f :=
(biproduct.is_limit f).lift (fan.mk P p)
/-- Given a collection of maps out of the summands, we obtain a map out of the biproduct. -/
abbreviation biproduct.desc
{f : J → C} [has_biproduct f] {P : C} (p : Π b, f b ⟶ P) : ⨁ f ⟶ P :=
(biproduct.is_colimit f).desc (cofan.mk P p)
@[simp, reassoc]
lemma biproduct.lift_π {f : J → C} [has_biproduct f] {P : C} (p : Π b, P ⟶ f b) (j : J) :
biproduct.lift p ≫ biproduct.π f j = p j :=
(biproduct.is_limit f).fac _ _
@[simp, reassoc]
lemma biproduct.ι_desc {f : J → C} [has_biproduct f] {P : C} (p : Π b, f b ⟶ P) (j : J) :
biproduct.ι f j ≫ biproduct.desc p = p j :=
(biproduct.is_colimit f).fac _ _
/-- Given a collection of maps between corresponding summands of a pair of biproducts
indexed by the same type, we obtain a map between the biproducts. -/
abbreviation biproduct.map [fintype J] {f g : J → C} [has_finite_biproducts C]
(p : Π b, f b ⟶ g b) : ⨁ f ⟶ ⨁ g :=
is_limit.map (biproduct.bicone f).to_cone (biproduct.is_limit g) (discrete.nat_trans p)
/-- An alternative to `biproduct.map` constructed via colimits.
This construction only exists in order to show it is equal to `biproduct.map`. -/
abbreviation biproduct.map' [fintype J] {f g : J → C} [has_finite_biproducts C]
(p : Π b, f b ⟶ g b) : ⨁ f ⟶ ⨁ g :=
is_colimit.map (biproduct.is_colimit f) (biproduct.bicone g).to_cocone (discrete.nat_trans p)
@[ext] lemma biproduct.hom_ext {f : J → C} [has_biproduct f]
{Z : C} (g h : Z ⟶ ⨁ f)
(w : ∀ j, g ≫ biproduct.π f j = h ≫ biproduct.π f j) : g = h :=
(biproduct.is_limit f).hom_ext w
@[ext] lemma biproduct.hom_ext' {f : J → C} [has_biproduct f]
{Z : C} (g h : ⨁ f ⟶ Z)
(w : ∀ j, biproduct.ι f j ≫ g = biproduct.ι f j ≫ h) : g = h :=
(biproduct.is_colimit f).hom_ext w
lemma biproduct.map_eq_map' [fintype J] {f g : J → C} [has_finite_biproducts C]
(p : Π b, f b ⟶ g b) : biproduct.map p = biproduct.map' p :=
begin
ext j j',
simp only [discrete.nat_trans_app, limits.is_colimit.ι_map, limits.is_limit.map_π, category.assoc,
←bicone.to_cone_π_app, ←biproduct.bicone_π, ←bicone.to_cocone_ι_app, ←biproduct.bicone_ι],
simp only [biproduct.bicone_ι, biproduct.bicone_π, bicone.to_cocone_ι_app, bicone.to_cone_π_app],
rw [biproduct.ι_π_assoc, biproduct.ι_π],
split_ifs,
{ subst h, rw [eq_to_hom_refl, category.id_comp], erw category.comp_id, },
{ simp, },
end
@[simp, reassoc]
lemma biproduct.map_π [fintype J] {f g : J → C} [has_finite_biproducts C]
(p : Π j, f j ⟶ g j) (j : J) :
biproduct.map p ≫ biproduct.π g j = biproduct.π f j ≫ p j :=
limits.is_limit.map_π _ _ _ _
@[simp, reassoc]
lemma biproduct.ι_map [fintype J] {f g : J → C} [has_finite_biproducts C]
(p : Π j, f j ⟶ g j) (j : J) :
biproduct.ι f j ≫ biproduct.map p = p j ≫ biproduct.ι g j :=
begin
rw biproduct.map_eq_map',
convert limits.is_colimit.ι_map _ _ _ _; refl
end
@[simp, reassoc]
lemma biproduct.map_desc [fintype J] {f g : J → C} [has_finite_biproducts C]
(p : Π j, f j ⟶ g j) {P : C} (k : Π j, g j ⟶ P) :
biproduct.map p ≫ biproduct.desc k = biproduct.desc (λ j, p j ≫ k j) :=
by { ext, simp, }
@[simp, reassoc]
lemma biproduct.lift_map [fintype J] {f g : J → C} [has_finite_biproducts C]
{P : C} (k : Π j, P ⟶ f j) (p : Π j, f j ⟶ g j) :
biproduct.lift k ≫ biproduct.map p = biproduct.lift (λ j, k j ≫ p j) :=
by { ext, simp, }
/-- Given a collection of isomorphisms between corresponding summands of a pair of biproducts
indexed by the same type, we obtain an isomorphism between the biproducts. -/
@[simps]
def biproduct.map_iso [fintype J] {f g : J → C} [has_finite_biproducts C]
(p : Π b, f b ≅ g b) : ⨁ f ≅ ⨁ g :=
{ hom := biproduct.map (λ b, (p b).hom),
inv := biproduct.map (λ b, (p b).inv), }
section
variables [fintype J] {K : Type v} [fintype K] [decidable_eq K] {f : J → C} {g : K → C}
[has_finite_biproducts C]
/--
Convert a (dependently typed) matrix to a morphism of biproducts.
-/
def biproduct.matrix (m : Π j k, f j ⟶ g k) : ⨁ f ⟶ ⨁ g :=
biproduct.desc (λ j, biproduct.lift (λ k, m j k))
@[simp, reassoc]
lemma biproduct.matrix_π (m : Π j k, f j ⟶ g k) (k : K) :
biproduct.matrix m ≫ biproduct.π g k = biproduct.desc (λ j, m j k) :=
by { ext, simp [biproduct.matrix], }
@[simp, reassoc]
lemma biproduct.ι_matrix (m : Π j k, f j ⟶ g k) (j : J) :
biproduct.ι f j ≫ biproduct.matrix m = biproduct.lift (λ k, m j k) :=
by { ext, simp [biproduct.matrix], }
/--
Extract the matrix components from a morphism of biproducts.
-/
def biproduct.components (m : ⨁ f ⟶ ⨁ g) (j : J) (k : K) : f j ⟶ g k :=
biproduct.ι f j ≫ m ≫ biproduct.π g k
@[simp] lemma biproduct.matrix_components (m : Π j k, f j ⟶ g k) (j : J) (k : K) :
biproduct.components (biproduct.matrix m) j k = m j k :=
by simp [biproduct.components]
@[simp] lemma biproduct.components_matrix (m : ⨁ f ⟶ ⨁ g) :
biproduct.matrix (λ j k, biproduct.components m j k) = m :=
by { ext, simp [biproduct.components], }
/-- Morphisms between direct sums are matrices. -/
@[simps]
def biproduct.matrix_equiv : (⨁ f ⟶ ⨁ g) ≃ (Π j k, f j ⟶ g k) :=
{ to_fun := biproduct.components,
inv_fun := biproduct.matrix,
left_inv := biproduct.components_matrix,
right_inv := λ m, by { ext, apply biproduct.matrix_components } }
end
instance biproduct.ι_mono (f : J → C) [has_biproduct f]
(b : J) : split_mono (biproduct.ι f b) :=
{ retraction := biproduct.desc $
λ b', if h : b' = b then eq_to_hom (congr_arg f h) else biproduct.ι f b' ≫ biproduct.π f b }
instance biproduct.π_epi (f : J → C) [has_biproduct f]
(b : J) : split_epi (biproduct.π f b) :=
{ section_ := biproduct.lift $
λ b', if h : b = b' then eq_to_hom (congr_arg f h) else biproduct.ι f b ≫ biproduct.π f b' }
variables {C}
/--
A binary bicone for a pair of objects `P Q : C` consists of the cone point `X`,
maps from `X` to both `P` and `Q`, and maps from both `P` and `Q` to `X`,
so that `inl ≫ fst = 𝟙 P`, `inl ≫ snd = 0`, `inr ≫ fst = 0`, and `inr ≫ snd = 𝟙 Q`
-/
@[nolint has_inhabited_instance]
structure binary_bicone (P Q : C) :=
(X : C)
(fst : X ⟶ P)
(snd : X ⟶ Q)
(inl : P ⟶ X)
(inr : Q ⟶ X)
(inl_fst' : inl ≫ fst = 𝟙 P . obviously)
(inl_snd' : inl ≫ snd = 0 . obviously)
(inr_fst' : inr ≫ fst = 0 . obviously)
(inr_snd' : inr ≫ snd = 𝟙 Q . obviously)
restate_axiom binary_bicone.inl_fst'
restate_axiom binary_bicone.inl_snd'
restate_axiom binary_bicone.inr_fst'
restate_axiom binary_bicone.inr_snd'
attribute [simp, reassoc] binary_bicone.inl_fst binary_bicone.inl_snd
binary_bicone.inr_fst binary_bicone.inr_snd
namespace binary_bicone
variables {P Q : C}
/-- Extract the cone from a binary bicone. -/
def to_cone (c : binary_bicone P Q) : cone (pair P Q) :=
binary_fan.mk c.fst c.snd
@[simp]
lemma to_cone_X (c : binary_bicone P Q) :
c.to_cone.X = c.X := rfl
@[simp]
lemma to_cone_π_app_left (c : binary_bicone P Q) :
c.to_cone.π.app (walking_pair.left) = c.fst := rfl
@[simp]
lemma to_cone_π_app_right (c : binary_bicone P Q) :
c.to_cone.π.app (walking_pair.right) = c.snd := rfl
/-- Extract the cocone from a binary bicone. -/
def to_cocone (c : binary_bicone P Q) : cocone (pair P Q) :=
binary_cofan.mk c.inl c.inr
@[simp]
lemma to_cocone_X (c : binary_bicone P Q) :
c.to_cocone.X = c.X := rfl
@[simp]
lemma to_cocone_ι_app_left (c : binary_bicone P Q) :
c.to_cocone.ι.app (walking_pair.left) = c.inl := rfl
@[simp]
lemma to_cocone_ι_app_right (c : binary_bicone P Q) :
c.to_cocone.ι.app (walking_pair.right) = c.inr := rfl
end binary_bicone
namespace bicone
/-- Convert a `bicone` over a function on `walking_pair` to a binary_bicone. -/
@[simps]
def to_binary_bicone {X Y : C} (b : bicone (pair X Y).obj) : binary_bicone X Y :=
{ X := b.X,
fst := b.π walking_pair.left,
snd := b.π walking_pair.right,
inl := b.ι walking_pair.left,
inr := b.ι walking_pair.right,
inl_fst' := by { simp [bicone.ι_π], refl, },
inr_fst' := by simp [bicone.ι_π],
inl_snd' := by simp [bicone.ι_π],
inr_snd' := by { simp [bicone.ι_π], refl, }, }
/--
If the cone obtained from a bicone over `pair X Y` is a limit cone,
so is the cone obtained by converting that bicone to a binary_bicone, then to a cone.
-/
def to_binary_bicone_is_limit {X Y : C} {b : bicone (pair X Y).obj}
(c : is_limit (b.to_cone)) :
is_limit (b.to_binary_bicone.to_cone) :=
{ lift := λ s, c.lift s,
fac' := λ s j, by { cases j; erw c.fac, },
uniq' := λ s m w,
begin
apply c.uniq s,
rintro (⟨⟩|⟨⟩),
exact w walking_pair.left,
exact w walking_pair.right,
end, }
/--
If the cocone obtained from a bicone over `pair X Y` is a colimit cocone,
so is the cocone obtained by converting that bicone to a binary_bicone, then to a cocone.
-/
def to_binary_bicone_is_colimit {X Y : C} {b : bicone (pair X Y).obj}
(c : is_colimit (b.to_cocone)) :
is_colimit (b.to_binary_bicone.to_cocone) :=
{ desc := λ s, c.desc s,
fac' := λ s j, by { cases j; erw c.fac, },
uniq' := λ s m w,
begin
apply c.uniq s,
rintro (⟨⟩|⟨⟩),
exact w walking_pair.left,
exact w walking_pair.right,
end, }
end bicone
/--
A bicone over `P Q : C`, which is both a limit cone and a colimit cocone.
-/
@[nolint has_inhabited_instance]
structure binary_biproduct_data (P Q : C) :=
(bicone : binary_bicone P Q)
(is_limit : is_limit bicone.to_cone)
(is_colimit : is_colimit bicone.to_cocone)
/--
`has_binary_biproduct P Q` expresses the mere existence of a bicone which is
simultaneously a limit and a colimit of the diagram `pair P Q`.
-/
class has_binary_biproduct (P Q : C) : Prop :=
mk' :: (exists_binary_biproduct : nonempty (binary_biproduct_data P Q))
lemma has_binary_biproduct.mk {P Q : C} (d : binary_biproduct_data P Q) :
has_binary_biproduct P Q :=
⟨nonempty.intro d⟩
/--
Use the axiom of choice to extract explicit `binary_biproduct_data F` from `has_binary_biproduct F`.
-/
def get_binary_biproduct_data (P Q : C) [has_binary_biproduct P Q] : binary_biproduct_data P Q :=
classical.choice has_binary_biproduct.exists_binary_biproduct
/-- A bicone for `P Q` which is both a limit cone and a colimit cocone. -/
def binary_biproduct.bicone (P Q : C) [has_binary_biproduct P Q] : binary_bicone P Q :=
(get_binary_biproduct_data P Q).bicone
/-- `binary_biproduct.bicone P Q` is a limit cone. -/
def binary_biproduct.is_limit (P Q : C) [has_binary_biproduct P Q] :
is_limit (binary_biproduct.bicone P Q).to_cone :=
(get_binary_biproduct_data P Q).is_limit
/-- `binary_biproduct.bicone P Q` is a colimit cocone. -/
def binary_biproduct.is_colimit (P Q : C) [has_binary_biproduct P Q] :
is_colimit (binary_biproduct.bicone P Q).to_cocone :=
(get_binary_biproduct_data P Q).is_colimit
section
variable (C)
/--
`has_binary_biproducts C` represents the existence of a bicone which is
simultaneously a limit and a colimit of the diagram `pair P Q`, for every `P Q : C`.
-/
class has_binary_biproducts : Prop :=
(has_binary_biproduct : Π (P Q : C), has_binary_biproduct P Q)
attribute [instance, priority 100] has_binary_biproducts.has_binary_biproduct
/--
A category with finite biproducts has binary biproducts.
This is not an instance as typically in concrete categories there will be
an alternative construction with nicer definitional properties.
-/
lemma has_binary_biproducts_of_finite_biproducts [has_finite_biproducts C] :
has_binary_biproducts C :=
{ has_binary_biproduct := λ P Q, has_binary_biproduct.mk
{ bicone := (biproduct.bicone (pair P Q).obj).to_binary_bicone,
is_limit := bicone.to_binary_bicone_is_limit (biproduct.is_limit _),
is_colimit := bicone.to_binary_bicone_is_colimit (biproduct.is_colimit _) } }
end
variables {P Q : C}
instance has_binary_biproduct.has_limit_pair [has_binary_biproduct P Q] :
has_limit (pair P Q) :=
has_limit.mk ⟨_, binary_biproduct.is_limit P Q⟩
instance has_binary_biproduct.has_colimit_pair [has_binary_biproduct P Q] :
has_colimit (pair P Q) :=
has_colimit.mk ⟨_, binary_biproduct.is_colimit P Q⟩
@[priority 100]
instance has_binary_products_of_has_binary_biproducts [has_binary_biproducts C] :
has_binary_products C :=
{ has_limit := λ F, has_limit_of_iso (diagram_iso_pair F).symm }
@[priority 100]
instance has_binary_coproducts_of_has_binary_biproducts [has_binary_biproducts C] :
has_binary_coproducts C :=
{ has_colimit := λ F, has_colimit_of_iso (diagram_iso_pair F) }
/--
The isomorphism between the specified binary product and the specified binary coproduct for
a pair for a binary biproduct.
-/
def biprod_iso (X Y : C) [has_binary_biproduct X Y] :
limits.prod X Y ≅ limits.coprod X Y :=
(is_limit.cone_point_unique_up_to_iso (limit.is_limit _) (binary_biproduct.is_limit X Y)).trans $
is_colimit.cocone_point_unique_up_to_iso (binary_biproduct.is_colimit X Y) (colimit.is_colimit _)
/-- An arbitrary choice of biproduct of a pair of objects. -/
abbreviation biprod (X Y : C) [has_binary_biproduct X Y] := (binary_biproduct.bicone X Y).X
notation X ` ⊞ `:20 Y:20 := biprod X Y
/-- The projection onto the first summand of a binary biproduct. -/
abbreviation biprod.fst {X Y : C} [has_binary_biproduct X Y] : X ⊞ Y ⟶ X :=
(binary_biproduct.bicone X Y).fst
/-- The projection onto the second summand of a binary biproduct. -/
abbreviation biprod.snd {X Y : C} [has_binary_biproduct X Y] : X ⊞ Y ⟶ Y :=
(binary_biproduct.bicone X Y).snd
/-- The inclusion into the first summand of a binary biproduct. -/
abbreviation biprod.inl {X Y : C} [has_binary_biproduct X Y] : X ⟶ X ⊞ Y :=
(binary_biproduct.bicone X Y).inl
/-- The inclusion into the second summand of a binary biproduct. -/
abbreviation biprod.inr {X Y : C} [has_binary_biproduct X Y] : Y ⟶ X ⊞ Y :=
(binary_biproduct.bicone X Y).inr
section
variables {X Y : C} [has_binary_biproduct X Y]
@[simp] lemma binary_biproduct.bicone_fst : (binary_biproduct.bicone X Y).fst = biprod.fst := rfl
@[simp] lemma binary_biproduct.bicone_snd : (binary_biproduct.bicone X Y).snd = biprod.snd := rfl
@[simp] lemma binary_biproduct.bicone_inl : (binary_biproduct.bicone X Y).inl = biprod.inl := rfl
@[simp] lemma binary_biproduct.bicone_inr : (binary_biproduct.bicone X Y).inr = biprod.inr := rfl
end
@[simp,reassoc]
lemma biprod.inl_fst {X Y : C} [has_binary_biproduct X Y] :
(biprod.inl : X ⟶ X ⊞ Y) ≫ (biprod.fst : X ⊞ Y ⟶ X) = 𝟙 X :=
(binary_biproduct.bicone X Y).inl_fst
@[simp,reassoc]
lemma biprod.inl_snd {X Y : C} [has_binary_biproduct X Y] :
(biprod.inl : X ⟶ X ⊞ Y) ≫ (biprod.snd : X ⊞ Y ⟶ Y) = 0 :=
(binary_biproduct.bicone X Y).inl_snd
@[simp,reassoc]
lemma biprod.inr_fst {X Y : C} [has_binary_biproduct X Y] :
(biprod.inr : Y ⟶ X ⊞ Y) ≫ (biprod.fst : X ⊞ Y ⟶ X) = 0 :=
(binary_biproduct.bicone X Y).inr_fst
@[simp,reassoc]
lemma biprod.inr_snd {X Y : C} [has_binary_biproduct X Y] :
(biprod.inr : Y ⟶ X ⊞ Y) ≫ (biprod.snd : X ⊞ Y ⟶ Y) = 𝟙 Y :=
(binary_biproduct.bicone X Y).inr_snd
/-- Given a pair of maps into the summands of a binary biproduct,
we obtain a map into the binary biproduct. -/
abbreviation biprod.lift {W X Y : C} [has_binary_biproduct X Y] (f : W ⟶ X) (g : W ⟶ Y) :
W ⟶ X ⊞ Y :=
(binary_biproduct.is_limit X Y).lift (binary_fan.mk f g)
/-- Given a pair of maps out of the summands of a binary biproduct,
we obtain a map out of the binary biproduct. -/
abbreviation biprod.desc {W X Y : C} [has_binary_biproduct X Y] (f : X ⟶ W) (g : Y ⟶ W) :
X ⊞ Y ⟶ W :=
(binary_biproduct.is_colimit X Y).desc (binary_cofan.mk f g)
@[simp, reassoc]
lemma biprod.lift_fst {W X Y : C} [has_binary_biproduct X Y] (f : W ⟶ X) (g : W ⟶ Y) :
biprod.lift f g ≫ biprod.fst = f :=
(binary_biproduct.is_limit X Y).fac _ walking_pair.left
@[simp, reassoc]
lemma biprod.lift_snd {W X Y : C} [has_binary_biproduct X Y] (f : W ⟶ X) (g : W ⟶ Y) :
biprod.lift f g ≫ biprod.snd = g :=
(binary_biproduct.is_limit X Y).fac _ walking_pair.right
@[simp, reassoc]
lemma biprod.inl_desc {W X Y : C} [has_binary_biproduct X Y] (f : X ⟶ W) (g : Y ⟶ W) :
biprod.inl ≫ biprod.desc f g = f :=
(binary_biproduct.is_colimit X Y).fac _ walking_pair.left
@[simp, reassoc]
lemma biprod.inr_desc {W X Y : C} [has_binary_biproduct X Y] (f : X ⟶ W) (g : Y ⟶ W) :
biprod.inr ≫ biprod.desc f g = g :=
(binary_biproduct.is_colimit X Y).fac _ walking_pair.right
instance biprod.mono_lift_of_mono_left {W X Y : C} [has_binary_biproduct X Y] (f : W ⟶ X)
(g : W ⟶ Y) [mono f] : mono (biprod.lift f g) :=
mono_of_mono_fac $ biprod.lift_fst _ _
instance biprod.mono_lift_of_mono_right {W X Y : C} [has_binary_biproduct X Y] (f : W ⟶ X)
(g : W ⟶ Y) [mono g] : mono (biprod.lift f g) :=
mono_of_mono_fac $ biprod.lift_snd _ _
instance biprod.epi_desc_of_epi_left {W X Y : C} [has_binary_biproduct X Y] (f : X ⟶ W) (g : Y ⟶ W)
[epi f] : epi (biprod.desc f g) :=
epi_of_epi_fac $ biprod.inl_desc _ _
instance biprod.epi_desc_of_epi_right {W X Y : C} [has_binary_biproduct X Y] (f : X ⟶ W) (g : Y ⟶ W)
[epi g] : epi (biprod.desc f g) :=
epi_of_epi_fac $ biprod.inr_desc _ _
/-- Given a pair of maps between the summands of a pair of binary biproducts,
we obtain a map between the binary biproducts. -/
abbreviation biprod.map {W X Y Z : C} [has_binary_biproduct W X] [has_binary_biproduct Y Z]
(f : W ⟶ Y) (g : X ⟶ Z) : W ⊞ X ⟶ Y ⊞ Z :=
is_limit.map (binary_biproduct.bicone W X).to_cone (binary_biproduct.is_limit Y Z)
(@map_pair _ _ (pair W X) (pair Y Z) f g)
/-- An alternative to `biprod.map` constructed via colimits.
This construction only exists in order to show it is equal to `biprod.map`. -/
abbreviation biprod.map' {W X Y Z : C} [has_binary_biproduct W X] [has_binary_biproduct Y Z]
(f : W ⟶ Y) (g : X ⟶ Z) : W ⊞ X ⟶ Y ⊞ Z :=
is_colimit.map (binary_biproduct.is_colimit W X) (binary_biproduct.bicone Y Z).to_cocone
(@map_pair _ _ (pair W X) (pair Y Z) f g)
@[ext] lemma biprod.hom_ext {X Y Z : C} [has_binary_biproduct X Y] (f g : Z ⟶ X ⊞ Y)
(h₀ : f ≫ biprod.fst = g ≫ biprod.fst) (h₁ : f ≫ biprod.snd = g ≫ biprod.snd) : f = g :=
binary_fan.is_limit.hom_ext (binary_biproduct.is_limit X Y) h₀ h₁
@[ext] lemma biprod.hom_ext' {X Y Z : C} [has_binary_biproduct X Y] (f g : X ⊞ Y ⟶ Z)
(h₀ : biprod.inl ≫ f = biprod.inl ≫ g) (h₁ : biprod.inr ≫ f = biprod.inr ≫ g) : f = g :=
binary_cofan.is_colimit.hom_ext (binary_biproduct.is_colimit X Y) h₀ h₁
lemma biprod.map_eq_map' {W X Y Z : C} [has_binary_biproduct W X] [has_binary_biproduct Y Z]
(f : W ⟶ Y) (g : X ⟶ Z) : biprod.map f g = biprod.map' f g :=
begin
ext,
{ simp only [map_pair_left, is_colimit.ι_map, is_limit.map_π, biprod.inl_fst_assoc,
category.assoc, ←binary_bicone.to_cone_π_app_left, ←binary_biproduct.bicone_fst,
←binary_bicone.to_cocone_ι_app_left, ←binary_biproduct.bicone_inl],
simp },
{ simp only [map_pair_left, is_colimit.ι_map, is_limit.map_π, zero_comp,
biprod.inl_snd_assoc, category.assoc,
←binary_bicone.to_cone_π_app_right, ←binary_biproduct.bicone_snd,
←binary_bicone.to_cocone_ι_app_left, ←binary_biproduct.bicone_inl],
simp },
{ simp only [map_pair_right, biprod.inr_fst_assoc, is_colimit.ι_map, is_limit.map_π,
zero_comp, category.assoc,
←binary_bicone.to_cone_π_app_left, ←binary_biproduct.bicone_fst,
←binary_bicone.to_cocone_ι_app_right, ←binary_biproduct.bicone_inr],
simp },
{ simp only [map_pair_right, is_colimit.ι_map, is_limit.map_π, biprod.inr_snd_assoc,
category.assoc, ←binary_bicone.to_cone_π_app_right, ←binary_biproduct.bicone_snd,
←binary_bicone.to_cocone_ι_app_right, ←binary_biproduct.bicone_inr],
simp }
end
instance biprod.inl_mono {X Y : C} [has_binary_biproduct X Y] :
split_mono (biprod.inl : X ⟶ X ⊞ Y) :=
{ retraction := biprod.desc (𝟙 X) (biprod.inr ≫ biprod.fst) }
instance biprod.inr_mono {X Y : C} [has_binary_biproduct X Y] :
split_mono (biprod.inr : Y ⟶ X ⊞ Y) :=
{ retraction := biprod.desc (biprod.inl ≫ biprod.snd) (𝟙 Y)}
instance biprod.fst_epi {X Y : C} [has_binary_biproduct X Y] :
split_epi (biprod.fst : X ⊞ Y ⟶ X) :=
{ section_ := biprod.lift (𝟙 X) (biprod.inl ≫ biprod.snd) }
instance biprod.snd_epi {X Y : C} [has_binary_biproduct X Y] :
split_epi (biprod.snd : X ⊞ Y ⟶ Y) :=
{ section_ := biprod.lift (biprod.inr ≫ biprod.fst) (𝟙 Y) }
@[simp,reassoc]
lemma biprod.map_fst {W X Y Z : C} [has_binary_biproduct W X] [has_binary_biproduct Y Z]
(f : W ⟶ Y) (g : X ⟶ Z) :
biprod.map f g ≫ biprod.fst = biprod.fst ≫ f :=
is_limit.map_π _ _ _ walking_pair.left
@[simp,reassoc]
lemma biprod.map_snd {W X Y Z : C} [has_binary_biproduct W X] [has_binary_biproduct Y Z]
(f : W ⟶ Y) (g : X ⟶ Z) :
biprod.map f g ≫ biprod.snd = biprod.snd ≫ g :=
is_limit.map_π _ _ _ walking_pair.right
-- Because `biprod.map` is defined in terms of `lim` rather than `colim`,
-- we need to provide additional `simp` lemmas.
@[simp,reassoc]
lemma biprod.inl_map {W X Y Z : C} [has_binary_biproduct W X] [has_binary_biproduct Y Z]
(f : W ⟶ Y) (g : X ⟶ Z) :
biprod.inl ≫ biprod.map f g = f ≫ biprod.inl :=
begin
rw biprod.map_eq_map',
exact is_colimit.ι_map (binary_biproduct.is_colimit W X) _ _ walking_pair.left
end
@[simp,reassoc]
lemma biprod.inr_map {W X Y Z : C} [has_binary_biproduct W X] [has_binary_biproduct Y Z]
(f : W ⟶ Y) (g : X ⟶ Z) :
biprod.inr ≫ biprod.map f g = g ≫ biprod.inr :=
begin
rw biprod.map_eq_map',
exact is_colimit.ι_map (binary_biproduct.is_colimit W X) _ _ walking_pair.right
end
/-- Given a pair of isomorphisms between the summands of a pair of binary biproducts,
we obtain an isomorphism between the binary biproducts. -/
@[simps]
def biprod.map_iso {W X Y Z : C} [has_binary_biproduct W X] [has_binary_biproduct Y Z]
(f : W ≅ Y) (g : X ≅ Z) : W ⊞ X ≅ Y ⊞ Z :=
{ hom := biprod.map f.hom g.hom,
inv := biprod.map f.inv g.inv }
section
variables [has_binary_biproducts C]
/-- The braiding isomorphism which swaps a binary biproduct. -/
@[simps] def biprod.braiding (P Q : C) : P ⊞ Q ≅ Q ⊞ P :=
{ hom := biprod.lift biprod.snd biprod.fst,
inv := biprod.lift biprod.snd biprod.fst }
/--
An alternative formula for the braiding isomorphism which swaps a binary biproduct,
using the fact that the biproduct is a coproduct.
-/
@[simps]
def biprod.braiding' (P Q : C) : P ⊞ Q ≅ Q ⊞ P :=
{ hom := biprod.desc biprod.inr biprod.inl,
inv := biprod.desc biprod.inr biprod.inl }
lemma biprod.braiding'_eq_braiding {P Q : C} :
biprod.braiding' P Q = biprod.braiding P Q :=
by tidy
/-- The braiding isomorphism can be passed through a map by swapping the order. -/
@[reassoc] lemma biprod.braid_natural {W X Y Z : C} (f : X ⟶ Y) (g : Z ⟶ W) :
biprod.map f g ≫ (biprod.braiding _ _).hom = (biprod.braiding _ _).hom ≫ biprod.map g f :=
by tidy
@[reassoc] lemma biprod.braiding_map_braiding {W X Y Z : C} (f : W ⟶ Y) (g : X ⟶ Z) :
(biprod.braiding X W).hom ≫ biprod.map f g ≫ (biprod.braiding Y Z).hom = biprod.map g f :=
by tidy
@[simp, reassoc] lemma biprod.symmetry' (P Q : C) :
biprod.lift biprod.snd biprod.fst ≫ biprod.lift biprod.snd biprod.fst = 𝟙 (P ⊞ Q) :=
by tidy
/-- The braiding isomorphism is symmetric. -/
@[reassoc] lemma biprod.symmetry (P Q : C) :
(biprod.braiding P Q).hom ≫ (biprod.braiding Q P).hom = 𝟙 _ :=
by simp
end
-- TODO:
-- If someone is interested, they could provide the constructions:
-- has_binary_biproducts ↔ has_finite_biproducts
end category_theory.limits
namespace category_theory.limits
section preadditive
variables {C : Type u} [category.{v} C] [preadditive C]
variables {J : Type v} [decidable_eq J] [fintype J]
open category_theory.preadditive
open_locale big_operators
/--
In a preadditive category, we can construct a biproduct for `f : J → C` from
any bicone `b` for `f` satisfying `total : ∑ j : J, b.π j ≫ b.ι j = 𝟙 b.X`.
(That is, such a bicone is a limit cone and a colimit cocone.)
-/
lemma has_biproduct_of_total {f : J → C} (b : bicone f) (total : ∑ j : J, b.π j ≫ b.ι j = 𝟙 b.X) :
has_biproduct f :=
has_biproduct.mk
{ bicone := b,
is_limit :=
{ lift := λ s, ∑ j, s.π.app j ≫ b.ι j,
uniq' := λ s m h,
begin
erw [←category.comp_id m, ←total, comp_sum],
apply finset.sum_congr rfl,
intros j m,
erw [reassoc_of (h j)],
end,
fac' := λ s j,
begin
simp only [sum_comp, category.assoc, bicone.to_cone_π_app, b.ι_π, comp_dite],
-- See note [dsimp, simp].
dsimp, simp,
end },
is_colimit :=
{ desc := λ s, ∑ j, b.π j ≫ s.ι.app j,
uniq' := λ s m h,
begin
erw [←category.id_comp m, ←total, sum_comp],
apply finset.sum_congr rfl,
intros j m,
erw [category.assoc, h],
end,
fac' := λ s j,
begin
simp only [comp_sum, ←category.assoc, bicone.to_cocone_ι_app, b.ι_π, dite_comp],
dsimp, simp,
end } }
/-- In a preadditive category, if the product over `f : J → C` exists,
then the biproduct over `f` exists. -/
lemma has_biproduct.of_has_product (f : J → C) [has_product f] :
has_biproduct f :=
has_biproduct_of_total
{ X := pi_obj f,
π := limits.pi.π f,
ι := λ j, pi.lift (λ j', if h : j = j' then eq_to_hom (congr_arg f h) else 0),
ι_π := λ j j', by simp, }
(by { ext, simp [sum_comp, comp_dite] })
/-- In a preadditive category, if the coproduct over `f : J → C` exists,
then the biproduct over `f` exists. -/
lemma has_biproduct.of_has_coproduct (f : J → C) [has_coproduct f] :
has_biproduct f :=
has_biproduct_of_total
{ X := sigma_obj f,
π := λ j, sigma.desc (λ j', if h : j' = j then eq_to_hom (congr_arg f h) else 0),
ι := limits.sigma.ι f,
ι_π := λ j j', by simp, }
begin
ext,
simp only [comp_sum, limits.colimit.ι_desc_assoc, eq_self_iff_true,
limits.colimit.ι_desc, category.comp_id],
dsimp,
simp only [dite_comp, finset.sum_dite_eq, finset.mem_univ, if_true, category.id_comp,
eq_to_hom_refl, zero_comp],
end
/-- A preadditive category with finite products has finite biproducts. -/
lemma has_finite_biproducts.of_has_finite_products [has_finite_products C] :
has_finite_biproducts C :=
⟨λ J _ _, { has_biproduct := λ F, by exactI has_biproduct.of_has_product _ }⟩
/-- A preadditive category with finite coproducts has finite biproducts. -/
lemma has_finite_biproducts.of_has_finite_coproducts [has_finite_coproducts C] :
has_finite_biproducts C :=
⟨λ J _ _, { has_biproduct := λ F, by exactI has_biproduct.of_has_coproduct _ }⟩
section
variables {f : J → C} [has_biproduct f]
/--
In any preadditive category, any biproduct satisfies
`∑ j : J, biproduct.π f j ≫ biproduct.ι f j = 𝟙 (⨁ f)`
-/
@[simp] lemma biproduct.total : ∑ j : J, biproduct.π f j ≫ biproduct.ι f j = 𝟙 (⨁ f) :=
begin
ext j j',
simp [comp_sum, sum_comp, biproduct.ι_π, comp_dite, dite_comp],
end
lemma biproduct.lift_eq {T : C} {g : Π j, T ⟶ f j} :
biproduct.lift g = ∑ j, g j ≫ biproduct.ι f j :=
begin
ext j,
simp [sum_comp, biproduct.ι_π, comp_dite],
end
lemma biproduct.desc_eq {T : C} {g : Π j, f j ⟶ T} :
biproduct.desc g = ∑ j, biproduct.π f j ≫ g j :=
begin
ext j,
simp [comp_sum, biproduct.ι_π_assoc, dite_comp],
end
@[simp, reassoc] lemma biproduct.lift_desc {T U : C} {g : Π j, T ⟶ f j} {h : Π j, f j ⟶ U} :
biproduct.lift g ≫ biproduct.desc h = ∑ j : J, g j ≫ h j :=
by simp [biproduct.lift_eq, biproduct.desc_eq, comp_sum, sum_comp, biproduct.ι_π_assoc,
comp_dite, dite_comp]
lemma biproduct.map_eq [has_finite_biproducts C] {f g : J → C} {h : Π j, f j ⟶ g j} :
biproduct.map h = ∑ j : J, biproduct.π f j ≫ h j ≫ biproduct.ι g j :=
begin
ext,
simp [biproduct.ι_π, biproduct.ι_π_assoc, comp_sum, sum_comp, comp_dite, dite_comp],
end
@[simp, reassoc]
lemma biproduct.matrix_desc
{K : Type v} [fintype K] [decidable_eq K] [has_finite_biproducts C]
{f : J → C} {g : K → C} (m : Π j k, f j ⟶ g k) {P} (x : Π k, g k ⟶ P) :
biproduct.matrix m ≫ biproduct.desc x = biproduct.desc (λ j, ∑ k, m j k ≫ x k) :=
by { ext, simp, }
@[simp, reassoc]
lemma biproduct.lift_matrix
{K : Type v} [fintype K] [decidable_eq K] [has_finite_biproducts C]
{f : J → C} {g : K → C} {P} (x : Π j, P ⟶ f j) (m : Π j k, f j ⟶ g k) :
biproduct.lift x ≫ biproduct.matrix m = biproduct.lift (λ k, ∑ j, x j ≫ m j k) :=
by { ext, simp, }
@[reassoc]
lemma biproduct.matrix_map
{K : Type v} [fintype K] [decidable_eq K] [has_finite_biproducts C]
{f : J → C} {g : K → C} {h : K → C} (m : Π j k, f j ⟶ g k) (n : Π k, g k ⟶ h k) :
biproduct.matrix m ≫ biproduct.map n = biproduct.matrix (λ j k, m j k ≫ n k) :=
by { ext, simp, }
@[reassoc]
lemma biproduct.map_matrix
{K : Type v} [fintype K] [decidable_eq K] [has_finite_biproducts C]
{f : J → C} {g : J → C} {h : K → C} (m : Π k, f k ⟶ g k) (n : Π j k, g j ⟶ h k) :
biproduct.map m ≫ biproduct.matrix n = biproduct.matrix (λ j k, m j ≫ n j k) :=
by { ext, simp, }
end
/--
In a preadditive category, we can construct a binary biproduct for `X Y : C` from
any binary bicone `b` satisfying `total : b.fst ≫ b.inl + b.snd ≫ b.inr = 𝟙 b.X`.
(That is, such a bicone is a limit cone and a colimit cocone.)
-/
lemma has_binary_biproduct_of_total {X Y : C} (b : binary_bicone X Y)
(total : b.fst ≫ b.inl + b.snd ≫ b.inr = 𝟙 b.X) :
has_binary_biproduct X Y :=
has_binary_biproduct.mk
{ bicone := b,
is_limit :=
{ lift := λ s, binary_fan.fst s ≫ b.inl +
binary_fan.snd s ≫ b.inr,
uniq' := λ s m h, by erw [←category.comp_id m, ←total,
comp_add, reassoc_of (h walking_pair.left), reassoc_of (h walking_pair.right)],
fac' := λ s j, by cases j; simp, },
is_colimit :=
{ desc := λ s, b.fst ≫ binary_cofan.inl s +
b.snd ≫ binary_cofan.inr s,
uniq' := λ s m h, by erw [←category.id_comp m, ←total,
add_comp, category.assoc, category.assoc, h walking_pair.left, h walking_pair.right],
fac' := λ s j, by cases j; simp, } }
/-- In a preadditive category, if the product of `X` and `Y` exists, then the
binary biproduct of `X` and `Y` exists. -/
lemma has_binary_biproduct.of_has_binary_product (X Y : C) [has_binary_product X Y] :
has_binary_biproduct X Y :=
has_binary_biproduct_of_total
{ X := X ⨯ Y,
fst := category_theory.limits.prod.fst,
snd := category_theory.limits.prod.snd,
inl := prod.lift (𝟙 X) 0,
inr := prod.lift 0 (𝟙 Y) }
begin
ext; simp [add_comp],
end
/-- In a preadditive category, if all binary products exist, then all binary biproducts exist. -/
lemma has_binary_biproducts.of_has_binary_products [has_binary_products C] :
has_binary_biproducts C :=
{ has_binary_biproduct := λ X Y, has_binary_biproduct.of_has_binary_product X Y, }
/-- In a preadditive category, if the coproduct of `X` and `Y` exists, then the
binary biproduct of `X` and `Y` exists. -/
lemma has_binary_biproduct.of_has_binary_coproduct (X Y : C) [has_binary_coproduct X Y] :
has_binary_biproduct X Y :=
has_binary_biproduct_of_total
{ X := X ⨿ Y,
fst := coprod.desc (𝟙 X) 0,
snd := coprod.desc 0 (𝟙 Y),
inl := category_theory.limits.coprod.inl,
inr := category_theory.limits.coprod.inr }
begin
ext; simp [add_comp],
end
/-- In a preadditive category, if all binary coproducts exist, then all binary biproducts exist. -/
lemma has_binary_biproducts.of_has_binary_coproducts [has_binary_coproducts C] :
has_binary_biproducts C :=
{ has_binary_biproduct := λ X Y, has_binary_biproduct.of_has_binary_coproduct X Y, }
section
variables {X Y : C} [has_binary_biproduct X Y]
/--
In any preadditive category, any binary biproduct satisfies
`biprod.fst ≫ biprod.inl + biprod.snd ≫ biprod.inr = 𝟙 (X ⊞ Y)`.
-/
@[simp] lemma biprod.total : biprod.fst ≫ biprod.inl + biprod.snd ≫ biprod.inr = 𝟙 (X ⊞ Y) :=
begin
ext; simp [add_comp],
end
lemma biprod.lift_eq {T : C} {f : T ⟶ X} {g : T ⟶ Y} :
biprod.lift f g = f ≫ biprod.inl + g ≫ biprod.inr :=
begin
ext; simp [add_comp],
end
lemma biprod.desc_eq {T : C} {f : X ⟶ T} {g : Y ⟶ T} :
biprod.desc f g = biprod.fst ≫ f + biprod.snd ≫ g :=
begin
ext; simp [add_comp],
end
@[simp, reassoc] lemma biprod.lift_desc {T U : C} {f : T ⟶ X} {g : T ⟶ Y} {h : X ⟶ U} {i : Y ⟶ U} :
biprod.lift f g ≫ biprod.desc h i = f ≫ h + g ≫ i :=
by simp [biprod.lift_eq, biprod.desc_eq]
lemma biprod.map_eq [has_binary_biproducts C] {W X Y Z : C} {f : W ⟶ Y} {g : X ⟶ Z} :
biprod.map f g = biprod.fst ≫ f ≫ biprod.inl + biprod.snd ≫ g ≫ biprod.inr :=
by apply biprod.hom_ext; apply biprod.hom_ext'; simp
end
end preadditive
end category_theory.limits
|
bacillus <- read.table('bacillus2.txt',header=T)
# construct indicator variables; == is R's logical equality operator
# reference group coding, with Placebo as the reference
bacillus$a1 <- bacillus$trt == 'Ab1'
bacillus$a2 <- bacillus$trt == 'Ab2'
bacillus$a3 <- bacillus$trt == 'Pl'
# a1, a2, and a3 are logicals (F or T), but they are interpreted
# (at least most of the time) as 0 or 1
a.lm <- lm(post~a1+a2,data=bacillus)
a0.lm <- lm(post~ +1, data=bacillus) # intercept only model
anova(a0.lm, a.lm) # overall ANOVA
coef(a.lm) # coefficients
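# interpretation: with Placebo as the reference group, the intercept is the
# estimated Placebo mean and the a1/a2 coefficients are the estimated
# Ab1 - Pl and Ab2 - Pl mean differences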
# effects coding
bacillus$b1 <- as.integer(bacillus$trt == 'Ab1')
bacillus$b2 <- as.integer(bacillus$trt == 'Ab2')
bacillus$b1[bacillus$trt=='Pl'] <- -1
bacillus$b2[bacillus$trt=='Pl'] <- -1
b.lm <- lm(post~b1+b2,data=bacillus)
anova(a0.lm, b.lm)
coef(b.lm)
# cell means coding.
# the following should work
c.lm <- lm(post~-1 + a1 + a2 + a3, data=bacillus)
# the -1 suppresses the intercept
# BUT, R interprets the logicals as factors when
# there isn't an intercept. Weird!
# I think I have an explanation but would appreciate
# any insight from an R guru
# To fit the cell means model, we need to convert the
# indicator variables to integers (0 or 1)
bacillus$n1 <- as.integer(bacillus$a1)
# as.numeric() would also work
bacillus$n2 <- as.integer(bacillus$a2)
bacillus$n3 <- as.integer(bacillus$a3)
c.lm <- lm(post~ -1 + n1 + n2 + n3, data=bacillus)
anova(a0.lm,c.lm)
coef(c.lm)
# R handles non-full rank X matrices gracefully
d.lm <- lm(post~a1 + a2 + a3, data=bacillus)
# or d.lm <- lm(post~n1 + n2 + n3, data=bacillus)
anova(d.lm)
coef(d.lm)
# if you use summary(), you get the appropriate
# warning about singularities
summary(d.lm)
# You can see the X matrix automatically generated
# by R for factor variables by looking at the output
# from model.matrix()
bacillus$trt.f <- as.factor(bacillus$trt)
model.matrix(post~trt.f,data=bacillus)
# and can change the contrast coding using contrasts=
model.matrix(post~trt.f,data=bacillus,
contrasts=list(trt.f=contr.SAS) )
# the named element is the factor and the value is
# a specified set of contrasts
# R provides functions for:
# contr.SAS: "SAS" contrasts, reference = last
# contr.sum: effect contrasts, sum to 0
# contr.helmert: each level vs. the average of the preceding levels
# contr.poly: orthogonal polynomials
# contr.treatment: reference contrasts to specified group
# the default is contr.treatment using 1st group as ref.
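# illustrative extra example (not in the original notes; e2.lm is a new name):
# refit using the factor with Ab2 (level 2) as the reference group
e2.lm <- lm(post ~ trt.f, data=bacillus,
            contrasts=list(trt.f=contr.treatment(3, base=2)))
coef(e2.lm)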
|
[STATEMENT]
lemma space_P: "finite J \<Longrightarrow> J \<subseteq> I \<Longrightarrow> space (P J) = space (PiM J M)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>finite J; J \<subseteq> I\<rbrakk> \<Longrightarrow> space (P J) = space (Pi\<^sub>M J M)
[PROOF STEP]
using sets_P
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>finite ?J; ?J \<subseteq> I\<rbrakk> \<Longrightarrow> sets (P ?J) = sets (Pi\<^sub>M ?J M)
goal (1 subgoal):
1. \<lbrakk>finite J; J \<subseteq> I\<rbrakk> \<Longrightarrow> space (P J) = space (Pi\<^sub>M J M)
[PROOF STEP]
by (rule sets_eq_imp_space_eq) |
%compute central speed in the central-head direction
function [data,units]=compute_velcentralch(trx,n)
larvae=trx.exp2flies{n};
numlarvae=numel(larvae);
velcentralch=cell(1,numlarvae);
for i=1:numlarvae
larva=larvae(i);
% this is just slightly faster
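% equivalently: the dot product of the central-point velocity
% (dxcentral_mm, dycentral_mm) with the unit vector along centralheadang,
% i.e. the signed speed of the central point toward the head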
velcentralch{i} = trx(larva).dxcentral_mm.*cos(trx(larva).centralheadang(1:end-1)) + ...
trx(larva).dycentral_mm.*sin(trx(larva).centralheadang(1:end-1));
%velcentralch1{1,i}=trx(larva).velmagcentral.*(cos(trx(larva).velangcentral).*cos(trx(larva).centralheadang(1,1:end-1))+sin(trx(larva).velangcentral).*sin(trx(larva).centralheadang(1,1:end-1)));
end
units=parseunits('mm/s');
data=velcentralch; |
\section{ocamlrun}
\label{sec:ocamlrun}
\href{http://caml.inria.fr/pub/docs/manual-ocaml/manual024.html#toc88}{ocamlrun} \\
The ocamlrun command comprises three main parts: the bytecode
interpreter, the memory allocator and garbage collector, and a set of
C functions that implement primitive operations such as input/output.
The main command-line options are:
\begin{description}
\item[-b] (back trace) When the program aborts due to an uncaught
exception, print a detailed ``back trace'' of the execution, showing
where the exception was raised and which function calls were
outstanding at this point. The back trace is printed only if the
bytecode executable contains debugging information, i.e.\ was compiled
and linked with the -g option to ocamlc. This is equivalent to setting
the b flag in the \verb|OCAMLRUNPARAM| environment variable.
\item[-I dir] Add dir to the search path for dynamically loaded libraries.
\item[-p] Print the names of the primitives known to this version of
ocamlrun.
\item[-v] Direct the \verb|memory manager| to print some progress
messages; equivalent to setting \verb|v=63| in the
\verb|OCAMLRUNPARAM| environment variable.
\end{description}
The following environment variables are also consulted:
\verb|CAML_LD_LIBRARY_PATH|, \verb|OCAMLLIB|, and \verb|OCAMLRUNPARAM|.
The \verb|OCAMLRUNPARAM| variable must be a sequence of parameter
specifications. A parameter specification is an option letter followed
by an = sign, a decimal number (or a hexadecimal number prefixed by
0x), and an optional multiplier. There are nine options, six of which
correspond to the fields of the control record documented in Module
\verb|Gc|:
\begin{description}
\item[b] (backtrace) Trigger the printing of a stack backtrace when an
uncaught exception aborts the program. This option takes no argument.
\item[p] (parser trace) Turn on debugging support for ocamlyacc-generated
parsers. When this option is on, the pushdown automaton that executes
the parsers prints a trace of its actions. This option takes no
argument.
\item[s] (minor\_heap\_size) Size of the minor heap (in words).
\item[i] (major\_heap\_increment) Default size increment for the major
heap (in words).
\item[o] (space\_overhead) The major GC speed setting.
\item[O] (max\_overhead) The heap compaction trigger setting.
\item[v] (verbose) What GC messages to print to stderr. This is a sum of
values selected from the following:
  \begin{description}
  \item[1 (= 0x001)] Start of major GC cycle.
  \item[2 (= 0x002)] Minor collection and major GC slice.
  \item[4 (= 0x004)] Growing and shrinking of the heap.
  \item[8 (= 0x008)] Resizing of stacks and memory manager tables.
  \item[16 (= 0x010)] Heap compaction.
  \item[32 (= 0x020)] Change of GC parameters.
  \item[64 (= 0x040)] Computation of major GC slice size.
  \item[128 (= 0x080)] Calling of finalisation functions.
  \item[256 (= 0x100)] Startup messages (loading the bytecode executable
  file, resolving shared libraries).
  \end{description}
\item[l] (stack\_limit) The limit (in words) of the stack size.
\item[h] The initial size of the major heap (in words).
\end{description}
The multiplier is k, M, or G, for multiplication by $2^{10}$, $2^{20}$,
and $2^{30}$ respectively. For example, on a 32-bit machine, under bash the
command \verb|export OCAMLRUNPARAM='b,s=256k,v=0x015'| tells a subsequent
ocamlrun to print backtraces for uncaught exceptions, set its initial
minor heap size to 1 megabyte and print a message at the start of each
major GC cycle, when the heap size changes, and when compaction is
triggered.
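For example (an illustrative sketch, not taken from the manual; the program
name is made up), the same settings can be supplied for a single run when
launching a bytecode executable explicitly through ocamlrun:
\begin{verbatim}
OCAMLRUNPARAM='b,l=1M' ocamlrun myprog.byte arg1 arg2
\end{verbatim}
Here b turns on backtraces and l=1M raises the stack limit to $2^{20}$ words.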
|
module Minecraft.Server.Main
import public Minecraft.Base.PreClassic
%default total
main : IO ()
main = do
pure ()
|
= Data.Ix
> ||| A partial port of Haskell's Data.Ix to Idris.
> module Data.Ix
>
> import Debug.Error
>
> %language ElabReflection
== Errors
> %access export
>
> indexError : Show a => (a,a) -> a -> String -> b
> indexError rng i tp =
> error $ "Ix{" ++ show tp ++ "}.index: Index " ++
> "(" ++ show i ++ ") out of range " ++
> "(" ++ show rng ++ ")"
>
> hopelessIndexError : Int -- Try to use 'indexError' instead!
> hopelessIndexError = error "Error in array index"
== Interface
> %access public export
>
> interface (Ord a) => Ix a where
> range : (a,a) -> List a
> index : (a,a) -> a -> Int
> unsafeIndex : (a,a) -> a -> Int
> inRange : (a,a) -> a -> Bool
> rangeSize : (a,a) -> Int
> unsafeRangeSize : (a,a) -> Int
>
> index b i = if inRange b i
> then unsafeIndex b i
> else hopelessIndexError
>
> unsafeIndex b i = index b i
>
> rangeSize b@(_,h) = if inRange b h
> then unsafeIndex b h + 1
> else 0 -- This case is only here to
> -- check for an empty range
>
> unsafeRangeSize b@(_,h) = unsafeIndex b h + 1
== Implementations
> implementation Ix Nat where
> range (m,n) = [m .. n]
> unsafeIndex (m,_) i = cast $ i `minus` m
> index b i = if inRange b i
> then unsafeIndex b i
> else indexError b i "Nat"
> inRange (m,n) i = m <= i && i <= n
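As a quick, unchecked usage sketch of the `Nat` implementation above:
`range (2,5)` evaluates to `[2, 3, 4, 5]`, `inRange (2,5) 4` to `True`,
and `index (2,5) 4` to `2`.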
|
{-
Basic properties about Σ-types
- Characterization of equality in Σ-types using transport ([pathSigma≡sigmaPath])
-}
{-# OPTIONS --cubical --safe #-}
module Cubical.Data.Sigma.Properties where
open import Cubical.Data.Sigma.Base
open import Cubical.Core.Everything
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Equiv.HalfAdjoint
open import Cubical.Foundations.GroupoidLaws
open import Cubical.Foundations.Path
open import Cubical.Foundations.Transport
open import Cubical.Foundations.Univalence
open import Cubical.Relation.Nullary
open import Cubical.Relation.Nullary.DecidableEq
open import Cubical.Data.Unit.Base
private
variable
ℓ : Level
A A' : Type ℓ
B B' : (a : A) → Type ℓ
C : (a : A) (b : B a) → Type ℓ
mapʳ : (∀ {a} → B a → B' a) → Σ A B → Σ A B'
mapʳ f (a , b) = (a , f b)
mapˡ : {B : Type ℓ} → (f : A → A') → Σ A (λ _ → B) → Σ A' (λ _ → B)
mapˡ f (a , b) = (f a , b)
ΣPathP : ∀ {x y}
→ Σ (fst x ≡ fst y) (λ a≡ → PathP (λ i → B (a≡ i)) (snd x) (snd y))
→ x ≡ y
ΣPathP eq i = fst eq i , snd eq i
Σ-split-iso : {x y : Σ A B}
→ Iso (Σ[ q ∈ fst x ≡ fst y ] (PathP (λ i → B (q i)) (snd x) (snd y)))
(x ≡ y)
Iso.fun (Σ-split-iso) = ΣPathP
Iso.inv (Σ-split-iso) eq = (λ i → fst (eq i)) , (λ i → snd (eq i))
Iso.rightInv (Σ-split-iso) x = refl {x = x}
Iso.leftInv (Σ-split-iso) x = refl {x = x}
Σ≃ : {x y : Σ A B} →
Σ (fst x ≡ fst y) (λ p → PathP (λ i → B (p i)) (snd x) (snd y)) ≃
(x ≡ y)
Σ≃ {A = A} {B = B} {x} {y} = isoToEquiv (Σ-split-iso)
Σ≡ : {a a' : A} {b : B a} {b' : B a'} → (Σ (a ≡ a') (λ q → PathP (λ i → B (q i)) b b')) ≡ ((a , b) ≡ (a' , b'))
Σ≡ = isoToPath Σ-split-iso -- ua Σ≃
ΣProp≡ : ((x : A) → isProp (B x)) → {u v : Σ A B}
→ (p : u .fst ≡ v .fst) → u ≡ v
ΣProp≡ pB {u} {v} p i = (p i) , isProp→PathP (λ i → pB (p i)) (u .snd) (v .snd) i
-- Alternative version for path in Σ-types, as in the HoTT book
sigmaPathTransport : (a b : Σ A B) → Type _
sigmaPathTransport {B = B} a b =
Σ (fst a ≡ fst b) (λ p → transport (λ i → B (p i)) (snd a) ≡ snd b)
_Σ≡T_ : (a b : Σ A B) → Type _
a Σ≡T b = sigmaPathTransport a b
-- now we prove that the alternative path space a Σ≡T b is equal to the usual path space a ≡ b
-- forward direction
private
pathSigma-π1 : {a b : Σ A B} → a ≡ b → fst a ≡ fst b
pathSigma-π1 p i = fst (p i)
filler-π2 : {a b : Σ A B} → (p : a ≡ b) → I → (i : I) → B (fst (p i))
filler-π2 {B = B} {a = a} p i =
fill (λ i → B (fst (p i)))
(λ t → λ { (i = i0) → transport-filler (λ j → B (fst (p j))) (snd a) t
; (i = i1) → snd (p t) })
(inS (snd a))
pathSigma-π2 : {a b : Σ A B} → (p : a ≡ b) →
subst B (pathSigma-π1 p) (snd a) ≡ snd b
pathSigma-π2 p i = filler-π2 p i i1
pathSigma→sigmaPath : (a b : Σ A B) → a ≡ b → a Σ≡T b
pathSigma→sigmaPath _ _ p = (pathSigma-π1 p , pathSigma-π2 p)
-- backward direction
private
filler-comp : (a b : Σ A B) → a Σ≡T b → I → I → Σ A B
filler-comp {B = B} a b (p , q) i =
hfill (λ t → λ { (i = i0) → a
; (i = i1) → (p i1 , q t) })
(inS (p i , transport-filler (λ j → B (p j)) (snd a) i))
sigmaPath→pathSigma : (a b : Σ A B) → a Σ≡T b → (a ≡ b)
sigmaPath→pathSigma a b x i = filler-comp a b x i i1
-- first homotopy
private
homotopy-π1 : (a b : Σ A B) →
∀ (x : a Σ≡T b) → pathSigma-π1 (sigmaPath→pathSigma a b x) ≡ fst x
homotopy-π1 a b x i j = fst (filler-comp a b x j (~ i))
homotopy-π2 : (a b : Σ A B) → (p : a Σ≡T b) → (i : I) →
(transport (λ j → B (fst (filler-comp a b p j i))) (snd a) ≡ snd b)
homotopy-π2 {B = B} a b p i j =
comp (λ t → B (fst (filler-comp a b p t (i ∨ j))))
(λ t → λ { (j = i0) → transport-filler (λ t → B (fst (filler-comp a b p t i)))
(snd a) t
; (j = i1) → snd (sigmaPath→pathSigma a b p t)
; (i = i0) → snd (filler-comp a b p t j)
; (i = i1) → filler-π2 (sigmaPath→pathSigma a b p) j t })
(snd a)
pathSigma→sigmaPath→pathSigma : {a b : Σ A B} →
∀ (x : a Σ≡T b) → pathSigma→sigmaPath _ _ (sigmaPath→pathSigma a b x) ≡ x
pathSigma→sigmaPath→pathSigma {a = a} p i =
(homotopy-π1 a _ p i , homotopy-π2 a _ p (~ i))
-- second homotopy
sigmaPath→pathSigma→sigmaPath : {a b : Σ A B} →
∀ (x : a ≡ b) → sigmaPath→pathSigma a b (pathSigma→sigmaPath _ _ x) ≡ x
sigmaPath→pathSigma→sigmaPath {B = B} {a = a} {b = b} p i j =
hcomp (λ t → λ { (i = i1) → (fst (p j) , filler-π2 p t j)
; (i = i0) → filler-comp a b (pathSigma→sigmaPath _ _ p) j t
; (j = i0) → (fst a , snd a)
; (j = i1) → (fst b , filler-π2 p t i1) })
(fst (p j) , transport-filler (λ k → B (fst (p k))) (snd a) j)
pathSigma≡sigmaPath : (a b : Σ A B) → (a ≡ b) ≡ (a Σ≡T b)
pathSigma≡sigmaPath a b =
isoToPath (iso (pathSigma→sigmaPath a b)
(sigmaPath→pathSigma a b)
(pathSigma→sigmaPath→pathSigma {a = a})
sigmaPath→pathSigma→sigmaPath)
discreteΣ : Discrete A → ((a : A) → Discrete (B a)) → Discrete (Σ A B)
discreteΣ {B = B} Adis Bdis (a0 , b0) (a1 , b1) = discreteΣ' (Adis a0 a1)
where
discreteΣ' : Dec (a0 ≡ a1) → Dec ((a0 , b0) ≡ (a1 , b1))
discreteΣ' (yes p) = J (λ a1 p → ∀ b1 → Dec ((a0 , b0) ≡ (a1 , b1))) (discreteΣ'') p b1
where
discreteΣ'' : (b1 : B a0) → Dec ((a0 , b0) ≡ (a0 , b1))
discreteΣ'' b1 with Bdis a0 b0 b1
... | (yes q) = yes (transport (ua Σ≃) (refl , q))
... | (no ¬q) = no (λ r → ¬q (subst (λ X → PathP (λ i → B (X i)) b0 b1) (Discrete→isSet Adis a0 a0 (cong fst r) refl) (cong snd r)))
discreteΣ' (no ¬p) = no (λ r → ¬p (cong fst r))
Σ-contractFst : ∀ {ℓ ℓ'} {A : Type ℓ} {B : A → Type ℓ'} (c : isContr A)
→ Σ A B ≃ B (c .fst)
Σ-contractFst {B = B} c =
isoToEquiv
(iso
(λ {(a , b) → subst B (sym (c .snd a)) b})
(c .fst ,_)
(λ b →
cong (λ p → subst B p b) (isProp→isSet (isContr→isProp c) _ _ _ _)
∙ transportRefl _)
(λ {(a , b) →
sigmaPath→pathSigma _ _ (c .snd a , transportTransport⁻ (cong B (c .snd a)) _)}))
-- a special case of the above
ΣUnit : ∀ {ℓ} (A : Unit → Type ℓ) → Σ Unit A ≃ A tt
ΣUnit A = isoToEquiv (iso snd (λ { x → (tt , x) }) (λ _ → refl) (λ _ → refl))
assocΣ : (Σ[ (a , b) ∈ Σ A B ] C a b) ≃ (Σ[ a ∈ A ] Σ[ b ∈ B a ] C a b)
assocΣ = isoToEquiv (iso (λ { ((x , y) , z) → (x , (y , z)) })
(λ { (x , (y , z)) → ((x , y) , z) })
(λ _ → refl) (λ _ → refl))
congΣEquiv : (∀ a → B a ≃ B' a) → Σ A B ≃ Σ A B'
congΣEquiv h =
isoToEquiv (iso (λ { (x , y) → (x , equivFun (h x) y) })
(λ { (x , y) → (x , invEq (h x) y) })
(λ { (x , y) i → (x , retEq (h x) y i) })
(λ { (x , y) i → (x , secEq (h x) y i) }))
PiΣ : ((a : A) → Σ[ b ∈ B a ] C a b) ≃ (Σ[ f ∈ ((a : A) → B a) ] ∀ a → C a (f a))
PiΣ = isoToEquiv (iso (λ f → fst ∘ f , snd ∘ f)
(λ (f , g) → (λ x → f x , g x))
(λ _ → refl) (λ _ → refl))
swapΣEquiv : ∀ {ℓ'} (A : Type ℓ) (B : Type ℓ') → A × B ≃ B × A
swapΣEquiv A B = isoToEquiv (iso (λ x → x .snd , x .fst) (λ z → z .snd , z .fst) (\ _ → refl) (\ _ → refl))
Σ-ap-iso₁ : ∀ {ℓ} {ℓ'} {A A' : Type ℓ} {B : A' → Type ℓ'}
→ (isom : Iso A A')
→ Iso (Σ A (B ∘ (Iso.fun isom)))
(Σ A' B)
Iso.fun (Σ-ap-iso₁ isom) x = (Iso.fun isom) (x .fst) , x .snd
Iso.inv (Σ-ap-iso₁ {B = B} isom) x = (Iso.inv isom) (x .fst) , subst B (sym (ε' (x .fst))) (x .snd)
where
ε' = fst (vogt isom)
Iso.rightInv (Σ-ap-iso₁ {B = B} isom) (x , y) = ΣPathP (ε' x ,
transport
(sym (PathP≡Path (λ j → cong B (ε' x) j) (subst B (sym (ε' x)) y) y))
(subst B (ε' x) (subst B (sym (ε' x)) y)
≡⟨ sym (substComposite B (sym (ε' x)) (ε' x) y) ⟩
subst B ((sym (ε' x)) ∙ (ε' x)) y
≡⟨ (cong (λ a → subst B a y) (lCancel (ε' x))) ⟩
subst B refl y
≡⟨ substRefl {B = B} y ⟩
y ∎))
where
ε' = fst (vogt isom)
Iso.leftInv (Σ-ap-iso₁ {A = A} {B = B} isom@(iso f g ε η)) (x , y) = ΣPathP (η x ,
transport
(sym (PathP≡Path (λ j → cong B (cong f (η x)) j) (subst B (sym (ε' (f x))) y) y))
(subst B (cong f (η x)) (subst B (sym (ε' (f x))) y)
≡⟨ sym (substComposite B (sym (ε' (f x))) (cong f (η x)) y) ⟩
subst B (sym (ε' (f x)) ∙ (cong f (η x))) y
≡⟨ cong (λ a → subst B a y) (lem x) ⟩
subst B (refl) y
≡⟨ substRefl {B = B} y ⟩
y ∎))
where
ε' = fst (vogt isom)
γ = snd (vogt isom)
lem : (x : A) → sym (ε' (f x)) ∙ cong f (η x) ≡ refl
lem x = cong (λ a → sym (ε' (f x)) ∙ a) (γ x) ∙ lCancel (ε' (f x))
Σ-ap₁ : (isom : A ≡ A') → Σ A (B ∘ transport isom) ≡ Σ A' B
Σ-ap₁ isom = isoToPath (Σ-ap-iso₁ (pathToIso isom))
Σ-ap-iso₂ : ((x : A) → Iso (B x) (B' x)) → Iso (Σ A B) (Σ A B')
Iso.fun (Σ-ap-iso₂ isom) (x , y) = x , Iso.fun (isom x) y
Iso.inv (Σ-ap-iso₂ isom) (x , y') = x , Iso.inv (isom x) y'
Iso.rightInv (Σ-ap-iso₂ isom) (x , y) = ΣPathP (refl , Iso.rightInv (isom x) y)
Iso.leftInv (Σ-ap-iso₂ isom) (x , y') = ΣPathP (refl , Iso.leftInv (isom x) y')
Σ-ap₂ : ((x : A) → B x ≡ B' x) → Σ A B ≡ Σ A B'
Σ-ap₂ isom = isoToPath (Σ-ap-iso₂ (pathToIso ∘ isom))
Σ-ap-iso :
∀ {ℓ ℓ'} {A A' : Type ℓ}
→ {B : A → Type ℓ'} {B' : A' → Type ℓ'}
→ (isom : Iso A A')
→ ((x : A) → Iso (B x) (B' (Iso.fun isom x)))
------------------------
→ Iso (Σ A B) (Σ A' B')
Σ-ap-iso isom isom' = compIso (Σ-ap-iso₂ isom') (Σ-ap-iso₁ isom)
Σ-ap :
∀ {ℓ ℓ'} {X X' : Type ℓ} {Y : X → Type ℓ'} {Y' : X' → Type ℓ'}
→ (isom : X ≡ X')
→ ((x : X) → Y x ≡ Y' (transport isom x))
----------
→ (Σ X Y)
≡ (Σ X' Y')
Σ-ap isom isom' = isoToPath (Σ-ap-iso (pathToIso isom) (pathToIso ∘ isom'))
Σ-ap' :
∀ {ℓ ℓ'} {X X' : Type ℓ} {Y : X → Type ℓ'} {Y' : X' → Type ℓ'}
→ (isom : X ≡ X')
→ (PathP (λ i → isom i → Type ℓ') Y Y')
----------
→ (Σ X Y)
≡ (Σ X' Y')
Σ-ap' {ℓ} {ℓ'} isom isom' = cong₂ (λ (a : Type ℓ) (b : a → Type ℓ') → Σ a λ x → b x) isom isom'
|
[STATEMENT]
lemma fresh_e_opp_all:
shows "(z \<sharp> v1 \<and> z \<sharp> v2) = z \<sharp> AE_op opp v1 v2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (z \<sharp> v1 \<and> z \<sharp> v2) = z \<sharp> [ opp v1 v2 ]\<^sup>e
[PROOF STEP]
using e.fresh opp.exhaust opp.fresh fresh_opp_all
[PROOF STATE]
proof (prove)
using this:
?a \<sharp> [ ?v ]\<^sup>e = ?a \<sharp> ?v
?a \<sharp> [ ?list ?v ]\<^sup>e = (?a \<sharp> ?list \<and> ?a \<sharp> ?v)
?a \<sharp> [?list [ ?b ] ?v ]\<^sup>e = (?a \<sharp> ?list \<and> ?a \<sharp> ?b \<and> ?a \<sharp> ?v)
?a \<sharp> [ ?opp ?v1.0 ?v2.0 ]\<^sup>e = (?a \<sharp> ?opp \<and> ?a \<sharp> ?v1.0 \<and> ?a \<sharp> ?v2.0)
?a \<sharp> [ ?v1.0 @@ ?v2.0 ]\<^sup>e = (?a \<sharp> ?v1.0 \<and> ?a \<sharp> ?v2.0)
?a \<sharp> [#1?v ]\<^sup>e = ?a \<sharp> ?v
?a \<sharp> [#2?v ]\<^sup>e = ?a \<sharp> ?v
?a \<sharp> [ ?u ]\<^sup>e = ?a \<sharp> ?u
?a \<sharp> [| ?v |]\<^sup>e = ?a \<sharp> ?v
?a \<sharp> [ ?v1.0 ?v2.0 ]\<^sup>e = (?a \<sharp> ?v1.0 \<and> ?a \<sharp> ?v2.0)
\<lbrakk>?y = plus \<Longrightarrow> ?P; ?y = leq \<Longrightarrow> ?P; ?y = eq \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
?a \<sharp> plus
?a \<sharp> leq
?a \<sharp> eq
?z \<sharp> ?opp
goal (1 subgoal):
1. (z \<sharp> v1 \<and> z \<sharp> v2) = z \<sharp> [ opp v1 v2 ]\<^sup>e
[PROOF STEP]
by simp |
#!/usr/bin/env stack
-- stack --resolver lts-14.16 script
{-# LANGUAGE BangPatterns #-}
import Debug.Trace
-- import Control.Monad.State.Lazy
import System.Environment (getArgs)
import System.IO (readFile)
import Data.Map.Strict (Map, (!), insert, elems, fromList, toList, findWithDefault, size, empty, member, findMin, findMax, singleton)
import qualified Data.Map.Strict as M
--import qualified Data.Array as A
import Data.List (permutations)
import Data.List.Split (splitOn)
import Data.Complex (Complex((:+)), realPart, imagPart)
type Instructions = Map Integer Integer
run :: (Integer, Integer) -> Instructions -> [Integer] -> [Integer]
-- (instructionPointer, base), instructions, input, output
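-- Opcode summary for the interpreter below: 1 add, 2 multiply, 3 read input,
-- 4 produce output, 5 jump-if-true, 6 jump-if-false, 7 less-than, 8 equals,
-- 9 adjust the relative base, 99 halt.  Each argument's mode (0 position,
-- 1 immediate, 2 relative) is decoded from the higher digits of instr.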
run (i, base) instructions inputs =
-- trace ((show $ toList instructions) ++ ": (ip, base, instr, inputs) " ++ show (i, base, instr, inputs) ++ "\n") $
case instr `mod` 100 of
1 -> run (i+4, base) (insert (addr 3) (arg 1 + arg 2) instructions) inputs
2 -> run (i+4, base) (insert (addr 3) (arg 1 * arg 2) instructions) inputs
3 -> run (i+2, base) (insert (addr 1) (head inputs) instructions) $ tail inputs
4 -> arg 1: run (i+2, base) instructions inputs
5 -> run (if arg 1 == 0 then i+3 else arg 2, base) instructions inputs
6 -> run (if arg 1 == 0 then arg 2 else i+3, base) instructions inputs
7 -> run (i+4, base) (insert (addr 3) (if arg 1 < arg 2 then 1 else 0) instructions) inputs
8 -> run (i+4, base) (insert (addr 3) (if arg 1 == arg 2 then 1 else 0) instructions) inputs
9 -> run (i+2, base+arg 1) instructions inputs
99 -> []
_ -> error "unknown opcode"
where instr = instructions!i
ii x = findWithDefault 0 x instructions
arg :: Integer -> Integer
arg n = case (instr `mod` (100*10^n)) `div` (10*10^n) of
0 -> ii $ ii $ i+n
1 -> ii $ i+n
2 -> ii ((ii $ i+n) + base)
_ -> error "bad argument mode"
addr n = case (instr `mod` (100*10^n)) `div` (10*10^n) of
0 -> ii $ i+n
1 -> error "address in mode 1"
2 -> --trace "address in mode 2" $
ii (i+n) + base
_ -> error $ "address in unknown mode"
instance Ord a => Ord (Complex a) where
compare a b | ar < br || ar == br && ai < bi = LT
| ar > br || ar == br && ai > bi = GT
| otherwise = EQ
where (ar, ai) = toParts a
(br, bi) = toParts b
toParts :: Complex a -> (a, a)
toParts c = (realPart c, imagPart c)
toIntParts :: RealFrac a => Complex a -> (Int, Int)
toIntParts c = (round $ realPart c, round $ imagPart c)
fromParts :: (a, a) -> Complex a
fromParts (a,b) = a :+ b
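-- process consumes the robot's (paint colour, turn) output pairs, tracking position p, heading d and the
-- map of painted panels, and lazily returns the colour under each new position as the next camera input.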
process :: (Complex Float, Complex Float, Map (Complex Float) Integer) -> [Integer] -> ([Integer], Complex Float, Complex Float, Map(Complex Float) Integer)
process (p, d, colors) [] =
-- traceShow (size colors) $
-- traceShow (showColors p d colors) $
([], p, d, colors)
process (p, d, colors) (c: t: futureOutputs) =
-- traceShow ("xy", toIntParts p,"out", out,robot d, "paint", c,"turn", t) $
-- trace (showColors p d colors) $
merge out $ process (p', d', insert p c colors) futureOutputs
where p' = p + d' -- next position
d' = d * (0 :+ (1.0 - fromIntegral t * 2)) -- next direction
out = findWithDefault 0 p' colors
merge :: Integer -> ([Integer], Complex Float, Complex Float, Map(Complex Float) Integer) -> ([Integer], Complex Float, Complex Float, Map(Complex Float) Integer)
merge x ~(xs, p, d, m) = (x:xs, p, d, m) -- need the irrefutable match operator ~, see https://stackoverflow.com/questions/59297557/when-can-i-rely-on-haskell-to-read-a-list-lazily/59298311#59298311
-- showProcess :: [Integer] -> (Complex Float, Complex Float, Map (Complex Float) Integer) -> (Complex Float, Complex Float, Map (Complex Float) Integer)
-- showProcess [] s = s
-- showProcess [c] s = trace ("leftover " ++ show c) $ s
-- showProcess (c: t: r) (p, d, m) = showProcess r (p', d', insert p c m)
-- where d' = d * (0 :+ (1.0 - fromIntegral t * 2 ))
-- p' = p + d'
showColors :: Complex Float -> Complex Float -> Map (Complex Float) Integer -> [Char]
showColors p d cs
-- | null cs = []
-- | otherwise
= --traceShow ("showColors", p, robot d, "min", (x0,y0), "max", (x1,y1), "colors size", size cs) $
unlines [line y | y <- reverse $ [y0 .. y1]]
where line :: Int -> [Char]
line y = [ (v (fromIntegral x :+ fromIntegral y) $ findWithDefault 3 (fromIntegral x :+ fromIntegral y) cs) | x <- [x0 .. x1]]
pts = map fst $ toList cs
xs = map (round . realPart) $ p: pts
ys = map (round . imagPart) $ p: pts
x0, y0, x1, y1 :: Int
[x0, y0, x1, y1] = [minimum xs, minimum ys, maximum xs, maximum ys]
v :: Complex Float -> Integer -> Char
v p' c | p == p' = robot d
| p' == (0.0 :+ 0.0) && c== 0 = 'o'
| p' == (0.0 :+ 0.0) && c== 1 = 'O'
| c == 1 = '#'
| c == 0 = ' ' -- '.' -- change to space for clarity
| otherwise = ' '
robot :: Complex Float -> Char
robot c | x == 0 && y == 1 = '^'
| x == 1 && y == 0 = '>'
| x == 0 && y == -1 = 'V'
| x == -1 && y == 0 = '<'
where (x,y) = toParts c
origin = 0 :+ 0
up = 0 :+ 1
startColors = singleton origin 0
startColors2 = singleton origin 1
main = do
[instructionFile] <- getArgs
instructionStrings <- readFile instructionFile
let instructions = fromList . zip [0 ..] $ map read $ splitOn "," instructionStrings
putStrLn "Part 1"
let out = run (0,0) instructions $ (startColors!origin) : processOutput
(processOutput, finalp, finald, colors) = process (origin,up, startColors) $ out
putStrLn
$ unlines
$ ["done Part 1"
, show $ ("tiles painted: ", size colors)
, show $ ("finalp", finalp, "finald", robot finald)
]
putStr $ showColors finalp finald colors
putStrLn "Part 2"
let out = run (0,0) instructions $ (startColors2!origin) : processOutput
(processOutput, finalp, finald, colors) = process (origin,up, startColors2) $ out
putStrLn
$ unlines
$ ["done Part 2"
, show $ ("tiles painted: ", size colors)
, show $ ("finalp", finalp, "finald", robot finald)
]
putStr $ showColors finalp finald colors
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj8synthconj3 : forall (lv0 : natural), (@eq natural (Succ lv0) (plus lv0 (Succ Zero))).
Admitted.
QuickChick conj8synthconj3.
|
If you haven’t heard, Microsoft recently added Office 365 Planner to their application suite. Now rolled out worldwide, it is available to anyone on an Enterprise E1-E5, Business Essentials, Premium or Education subscription plan. Designed as an easy-to-use tool for teams to collaborate and organize teamwork, the Trello-like application is useful for teams of all sizes in businesses and schools. Given that many of you are new to Planner, we thought we’d throw together a few of our favorite tips so you can begin using the tool pronto!
To launch the team planning tool, log in to your Office 365 account, navigate to the tile button at the top left-hand corner of the page and select “Planner”.
Assuming this is the first time you have launched Planner, you will likely land on a “plan-less” page. If you have already been added to a few plans in your organization, your Planner Hub will look more like what you see below. Take a few moments to click around, then read on to learn about the 4 best practices for Office 365 Planner.
Once you have created your first Plan (top left pane: + New Plan), begin by creating sub-categories that will bucket the various tasks in your team project. For our Blogs and Webinars Plan, we found it made the most sense to create buckets around overarching content topics which you can note below. Speak with your team members to create a set of buckets that make most sense to your project and the workflow of your teammates.
As you can see above we’re a bit behind the eight ball as there are a few tasks that are missing owners. It is important to always assign a task owner so that activities don’t fly under the radar. In addition, when setting a start date, end date and status to the task, you can be assured that the task owner will be aware when a task is overdue.
For example, in the first task under the “Planner” bucket above, you will notice that “Office 365 Planner Best Practices” has the due date highlighted in red indicating that the task is overdue!
As your Plan becomes more and more populated, you will find it helpful to start using labels to quickly identify certain tasks. Just like the logic you created for your buckets, you’ll also want to agree on rules for how your team uses labels. Common practice is to color-code labels by task owner, task priority or task type. We use a combination of task priorities and task types, which you can see below.
The final best practice we recommend is making use of the attachment feature within each task. You can attach files uploaded from your desktop or OneDrive, as well as URL links. We recommend this for a few reasons. For one, it keeps your documents organized and ensures you’ll be able to access them from the cloud anytime, anywhere and from any device. It also helps your team members quickly locate the content they need for the project.
For example, if you look at the final item in the checklist below, it reads: send blog to Kristina for edits. Rather than attaching the document to an email and letting her know it is ready to be edited, I can simply re-assign the task to Kristina. This will automatically send her an email with a link to the task, where she will find the document.
For a live walk-through of Planner and additional best practices, watch a live demo presented by Office 365 Planner Expert, Kristina Mamina. |
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
```
[Unique Binary Search Trees II](https://leetcode.com/problems/unique-binary-search-trees-ii/). Given the sequence of positive integers $[1,2,...,n]$ of length $n$, generate all structurally distinct BSTs that can be built from it.
Approach: recursion. Because the input is a sorted sequence of positive integers, choosing any number as the root means the numbers to its left form the left subtree and the numbers to its right form the right subtree. Define a recursive function ```rec(i,j)``` that returns all BSTs that can be built from the numbers in $[i,j]$.
```python
def generateTrees(n: int):
def rec(i, j):
if i > j:
return [None]
if i == j:
return [TreeNode(i)]
res = list()
for root_val in range(i, j+1):
left_branches = rec(i, root_val-1)
right_branches = rec(root_val+1, j)
for left_branch in left_branches:
for right_branch in right_branches:
root = TreeNode(root_val)
root.left = left_branch
root.right = right_branch
res.append(root)
return res
return rec(1, n) if n >= 1 else list()
```
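A quick sanity check (the call is hypothetical, chosen to match the notebook output below): for $n=3$ there are $C_3=5$ structurally distinct BSTs.
```python
trees = generateTrees(3)   # illustrative input
print(len(trees))          # 5, the Catalan number C_3
```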
[<__main__.TreeNode at 0x1ca698076d8>,
<__main__.TreeNode at 0x1ca69807978>,
<__main__.TreeNode at 0x1ca698075c0>,
<__main__.TreeNode at 0x1ca69807ba8>,
<__main__.TreeNode at 0x1ca69807be0>]
[Lowest Common Ancestor of a Binary Tree](https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/). **A 2019 Baidu interview whiteboard-coding question.** Given a binary tree and two of its nodes, return the lowest common ancestor of the two nodes.
Approach: write a recursive function with these base cases: if the root is empty, return None; if either node equals the root, return the root. Otherwise recursively look for the LCA in the left and right subtrees. A non-empty result from the left call means the left subtree contains at least one of the two nodes, and likewise for the right. If both calls return non-empty results, the two nodes lie in different subtrees, so the current root is the LCA.
```python
def lowestCommonAncestor(root: TreeNode, p: TreeNode, q: TreeNode) -> TreeNode:
if root is None:
return None
if root == p or root == q:
return root
left_LCA = lowestCommonAncestor(root.left, p, q)
right_LCA = lowestCommonAncestor(root.right, p, q)
if left_LCA and right_LCA:
return root
elif left_LCA:
return left_LCA
else:
return right_LCA
```
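A minimal usage sketch (illustrative tree, not from the original notebook):
```python
root = TreeNode(3)
root.left, root.right = TreeNode(5), TreeNode(1)
print(lowestCommonAncestor(root, root.left, root.right).val)  # 3
```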
[Convert Sorted Array to Binary Search Tree](https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/). Convert a sorted array into a height-balanced binary search tree.
Approach: recursion. To keep the resulting BST balanced, take the middle element of the array as the root at every step.
```python
def sortedArrayToBST(nums) -> TreeNode:
if not nums:
return None
n = len(nums)
if n == 1:
return TreeNode(nums[0])
mid = (n-1) >> 1
root = TreeNode(nums[mid])
root.left = sortedArrayToBST(nums[:mid])
root.right = sortedArrayToBST(nums[mid+1:])
return root
```
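For example (illustrative input):
```python
t = sortedArrayToBST([-10, -3, 0, 5, 9])
print(t.val, t.left.val, t.right.val)  # 0 -10 5
```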
[Convert Sorted List to Binary Search Tree](https://leetcode.com/problems/convert-sorted-list-to-binary-search-tree/). Convert a sorted singly linked list into a height-balanced binary search tree.
Approach: convert the linked list into an array, then recurse as in the previous problem.
```python
def sortedListToBST(head: ListNode) -> TreeNode:
def ll2l(head): # linked list to list
nums = list()
while head:
nums.append(head.val)
head = head.next
return nums
def l2bst(nums):
if not nums:
return None
n = len(nums)
if n == 1:
return TreeNode(nums[0])
mid = (n-1) >> 1
root = TreeNode(nums[mid])
root.left = l2bst(nums[:mid])
root.right = l2bst(nums[mid+1:])
return root
nums = ll2l(head)
return l2bst(nums)
```
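A small check (hand-built list, illustrative only):
```python
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
print(sortedListToBST(head).val)  # 2, the middle element becomes the root
```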
[Construct Binary Tree from Inorder and Postorder Traversal](https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal/). Rebuild a binary tree from its inorder and postorder traversals.
Approach: recursion. The last node of the postorder traversal is the root; splitting the inorder traversal at the root yields the left and right subtrees.
```python
def buildTree(inorder, postorder) -> TreeNode:
if not postorder:
return None
if len(postorder) == 1:
return TreeNode(postorder[0])
root_node = TreeNode(postorder[-1])
root_in_idx = inorder.index(root_node.val)
left_branch = buildTree(inorder[:root_in_idx],
postorder[:root_in_idx])
right_branch = buildTree(inorder[root_in_idx+1:],
postorder[root_in_idx:-1])
root_node.left = left_branch
root_node.right = right_branch
return root_node
```
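For example (illustrative traversals of a small tree):
```python
t = buildTree([9, 3, 15, 20, 7], [9, 15, 7, 20, 3])
print(t.val, t.left.val, t.right.val)  # 3 9 20
```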
[Construct Binary Tree from Preorder and Inorder Traversal](https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/). Given the preorder and inorder traversals of a binary tree, rebuild the tree and return its root.
Approach: recursion, analogous to the previous problem: the first node of the preorder traversal is the root.
```python
def buildTree(preorder, inorder) -> TreeNode:
if not preorder:
return None
if len(preorder) == 1:
return TreeNode(preorder[0])
root_val = preorder[0]
root_in_idx = inorder.index(root_val)
root = TreeNode(root_val)
left_branch = buildTree(preorder[1:root_in_idx+1], inorder[:root_in_idx])
right_branch = buildTree(preorder[root_in_idx+1:], inorder[root_in_idx+1:])
root.left = left_branch
root.right = right_branch
return root
```
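The same example tree, rebuilt from its preorder and inorder traversals (illustrative input):
```python
t = buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
print(t.val, t.left.val, t.right.val)  # 3 9 20
```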
[Maximum Binary Tree](https://leetcode.com/problems/maximum-binary-tree/). Given an array, turn it into a binary tree: the maximum value becomes the root, the numbers to its left form the left subtree, and the numbers to its right form the right subtree.
Approach: at each recursive step find the maximum value and its index, split the array at that index, and recurse on the left and right halves.
```python
def constructMaximumBinaryTree(nums) -> TreeNode:
if not nums:
return None
max_val = max(nums)
max_idx = nums.index(max_val)
root = TreeNode(max_val)
    root.left = constructMaximumBinaryTree(nums[:max_idx])    # recurse on the left half
    root.right = constructMaximumBinaryTree(nums[max_idx+1:])  # recurse on the right half
return root
```
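For example (illustrative input):
```python
t = constructMaximumBinaryTree([3, 2, 1, 6, 0, 5])
print(t.val, t.left.val, t.right.val)  # 6 3 5
```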
[Longest Substring with At Least K Repeating Characters](https://leetcode.com/problems/longest-substring-with-at-least-k-repeating-characters/). Find the length of the longest substring in which every character appears at least $k$ times.
Approach: first count every character of the string; if every count is at least $k$, the whole string qualifies and its length is returned. Any character whose count is below $k$ can never appear in a valid substring, so split the string on such characters, solve each piece recursively, and return the maximum.
```python
def longestSubstring(s: str, k: int) -> int:
n = len(s)
cnt_lookup = dict()
for ch in s:
cnt_lookup.setdefault(ch, 0)
cnt_lookup[ch] += 1
for ch in cnt_lookup:
if cnt_lookup[ch] < k:
return max([longestSubstring(sub_s, k) for sub_s in s.split(ch)])
return n
```
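The stray `3` below is presumably the result of a call like the classic test case (hypothetical input):
```python
print(longestSubstring("aaabb", 3))  # 3, the substring "aaa"
```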
3
[Different Ways to Add Parentheses](https://leetcode.com/problems/different-ways-to-add-parentheses/). Given an arithmetic expression as a string, try every valid way of inserting parentheses and return the results of all of them.
Approach: write a function that returns every possible value of an expression under all groupings. The base cases are the empty string and a single number: the former returns None, the latter a one-element list. Since every parenthesization ultimately splits the expression at some operator, scan all operators and recurse on the sub-expressions to the left and right of each one, then combine the partial results.
```python
from typing import List

def diffWaysToCompute(exp: str) -> List[int]:
if not exp:
return None
if exp.isdigit():
return [int(exp)]
res = list()
n = len(exp)
for i, ch in enumerate(exp):
if ch in ['+', '-', '*']:
left_res = diffWaysToCompute(exp[:i])
right_res = diffWaysToCompute(exp[i+1:])
for l_res in left_res:
for r_res in right_res:
if ch == '+':
res.append(l_res+r_res)
elif ch == '-':
res.append(l_res-r_res)
else:
res.append(l_res*r_res)
return res
```
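For example (illustrative input):
```python
print(diffWaysToCompute("2-1-1"))  # [2, 0]
```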
[Same Tree](https://leetcode.com/problems/same-tree/). Given two binary trees, determine whether they are identical.
Approach: recursion. Base cases: two empty trees are identical; if exactly one is empty or the values differ, return False. When the values match, the left subtrees must be identical and the right subtrees must be identical.
```python
def isSameTree(p: TreeNode, q: TreeNode) -> bool:
if not p and not q:
return True
elif not p or not q or p.val != q.val:
return False
else:
return isSameTree(p.left, q.left) and isSameTree(p.right, q.right)
```
[Subtree of Another Tree](https://leetcode.com/problems/subtree-of-another-tree/). Given two non-empty trees $s$ and $t$, determine whether $t$ is a subtree of $s$.
Approach: if $t$ is a subtree of $s$, there must be some node $root$ in $s$ such that the tree rooted at $root$ is identical to $t$, so the same-tree check from the previous problem can be reused. The remaining question is how to find $root$: first test whether $s$ itself is identical to $t$, then recursively test $s.left$ and $s.right$.
```python
def isSubtree(s: TreeNode, t: TreeNode) -> bool:
def isSameTree(root_1, root_2):
if not root_1 and not root_2:
return True
elif not root_1 or not root_2 or root_1.val != root_2.val:
return False
else:
return isSameTree(root_1.left, root_2.left) and isSameTree(root_1.right, root_2.right)
if not s:
return False
elif isSameTree(s, t):
return True
else:
return isSubtree(s.left, t) or isSubtree(s.right, t)
```
[Pow(x, n)](https://leetcode.com/problems/powx-n/). Implement exponentiation as efficiently as possible.
Approach: recursion. Base cases: an exponent of $0$ returns 1, and an exponent of $1$ returns $x$ itself. The recurrence is:
$$
x^{n}=
\begin{cases}
x^{n/2}\times x^{n/2}, & n \text{ even} \\
x\times x^{(n-1)/2}\times x^{(n-1)/2}, & n \text{ odd}
\end{cases}
$$
```python
def myPow(x: float, n: int) -> float:
if n < 0:
return myPow(1/x, -n)
if n == 0:
return 1
if n == 1:
return x
    half = myPow(x, n >> 1)  # compute the half power once so the recursion stays O(log n)
    if n & 1 == 0:
        return half*half
    else:
        return x*half*half
```
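The stray `1024` below presumably comes from a call along these lines (hypothetical input):
```python
print(myPow(2, 10))  # 1024
```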
1024
[Balanced Binary Tree](https://leetcode.com/problems/balanced-binary-tree/). Determine whether a binary tree is height-balanced.
Approach: write a recursive function that returns the maximum depth of the current node, but only conditionally. A balanced tree requires every subtree to be balanced, so the function returns a genuine depth only when the subtree is balanced and the sentinel -1 otherwise.
```python
def isBalanced(root: TreeNode) -> bool:
def rec(root: TreeNode) -> int:
if not root:
return 0
if not root.left and not root.right:
return 1
left = rec(root.left)
right = rec(root.right)
if left == -1 or right == -1:
return -1
return max(left, right)+1 if abs(left-right) < 2 else -1
if not root:
return True
return rec(root) != -1
```
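A quick check (illustrative skewed tree):
```python
r = TreeNode(1)
r.left = TreeNode(2)
r.left.left = TreeNode(3)
print(isBalanced(r))  # False
```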
[Validity of a postorder sequence](https://www.nowcoder.com/practice/a861533d45854474ac791d90e447bafd?tpId=13&&tqId=11176&rp=1&ru=/activity/oj&qru=/ta/coding-interviews/question-ranking). Given an array, determine whether it is a valid postorder traversal sequence of a BST. (The empty test case of this problem is puzzling in that it expects False; it is ignored here.)
Approach: the last element of a postorder sequence is the root, so the elements before it split into two parts: a left part smaller than the root and a right part greater than the root.
```python
def VerifySquenceOfBST(sequence):
n = len(sequence)
if n == 0:
return True
root = sequence[-1]
for i in range(n):
if sequence[i] >= sequence[-1]:
break
    for j in range(i, n):  # everything in the right part must be greater than the root
if sequence[j] < root:
return False
    return VerifySquenceOfBST(sequence[:i]) and VerifySquenceOfBST(sequence[i:-1])  # the right part excludes the root itself
```
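For example (illustrative inputs):
```python
print(VerifySquenceOfBST([1, 3, 2]))     # True
print(VerifySquenceOfBST([7, 4, 6, 5]))  # False: 4 is smaller than the root 5 but sits in the right part
```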
[Substructure of a tree](https://www.nowcoder.com/practice/6e196c44c7004d15b1610b9afca8bd88?tpId=13&tqId=11170&rp=1&ru=%2Factivity%2Foj&qru=%2Fta%2Fcoding-interviews%2Fquestion-ranking&tPage=1). Given two trees ```pRoot1``` and ```pRoot2```, determine whether ```pRoot2``` is a substructure of ```pRoot1```. By convention, an empty tree is not a substructure of any tree.
Approach: first locate a node of ```pRoot1``` whose value matches the root of ```pRoot2```, then compare the two trees recursively from there.
```python
def HasSubtree(pRoot1, pRoot2):
def rec(root1, root2):
if not root2:
return True
if not root1 or root1.val != root2.val:
return False
return rec(root1.left, root2.left) and rec(root1.right, root2.right)
    if not pRoot1 or not pRoot2:  # if either tree is empty, return False
return False
return rec(pRoot1, pRoot2) or \
HasSubtree(pRoot1.left, pRoot2) or \
HasSubtree(pRoot1.right, pRoot2)
```
[Generate Parentheses](https://leetcode.com/problems/generate-parentheses/). Given the number of bracket pairs $n$, generate all valid bracket sequences.
Approach: let $left$ and $right$ be the numbers of opening and closing brackets placed so far; throughout the generation we must maintain $left\ge{right}$. An opening bracket may be added while $left<n$, and a closing bracket may be added while $right<left$. Note that the check for adding an opening bracket is performed before the check for a closing bracket.
```python
def generateParenthesis(n: int):
res = list()
def rec(cur_res, left, right):
if len(cur_res) == 2*n:
res.append(cur_res)
return
        if left < n:  # can we still add an opening bracket?
rec(cur_res+'(', left+1, right)
        if left > right:  # can we add a closing bracket?
rec(cur_res+')', left, right+1)
rec(str(), 0, 0)
return res
```
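The list shown below appears to be the notebook output for $n=3$, i.e. a call like:
```python
print(generateParenthesis(3))
```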
['((()))', '(()())', '(())()', '()(())', '()()()']
[All possible pop sequences](https://www.nowcoder.com/practice/7d41a99eb92d4350a621123b73d708e5?tpId=104&tqId=33261&tPage=1&rp=&ru=%2Fta%2F2020sangfor-new&qru=%2Fta%2F2020sangfor-new%2Fquestion-ranking). Given a push sequence, with pops allowed at any moment, enumerate every possible pop sequence.
Approach: three cases. 1. Elements remain outside the stack: we may push. 2. Elements remain outside the stack and the stack is non-empty: we may also pop. 3. No elements remain outside the stack: we can only pop, and must pop everything that is left.
```python
import sys
s = sys.stdin.readline().strip()
l = len(s)
res = list()
def rec(idx, stk, seq):
'''
    :param idx: index of the next element to push
    :param stk: the stack
    :param seq: the pop sequence built so far
:return:
'''
    # elements remain outside the stack: we may push
if idx != l:
        # push s[idx]
stk.append(s[idx])
rec(idx + 1, stk, seq)
stk.pop()
        # pop the current top
if stk:
tmp = stk.pop()
seq.append(tmp)
rec(idx, stk, seq)
seq.pop()
stk.append(tmp)
    # nothing left to push: only pops remain
else:
tmp = len(stk)
if stk:
seq.extend(stk[::-1])
print(''.join(seq))
for i in range(tmp):
seq.pop()
rec(0, list(), list())
```
|
||| An Idris port of the prettyprinter library [1] and
||| the ANSI terminal backend [2].
|||
||| [1] https://hackage.haskell.org/package/prettyprinter
||| [2] https://hackage.haskell.org/package/prettyprinter-ansi-terminal
module Libraries.Text.PrettyPrint.Prettyprinter
import public Libraries.Text.PrettyPrint.Prettyprinter.Doc
import public Libraries.Text.PrettyPrint.Prettyprinter.Symbols
%default total
|
Formal statement is: lemma infdist_nonneg: "0 \<le> infdist x A" Informal statement is: The infimum distance from a point $x$ to a set $A$ is nonnegative. |
function [weights,location] = GaussQuad1d(ngp)
% Gauss weights and abscissas.
% Taken from http://en.wikipedia.org/wiki/Gaussian_quadrature
% Data Table
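% Example: the 2-point rule integrates x.^2 exactly on [-1,1]:
%   [w,x] = GaussQuad1d(2); sum(w.*x.^2)   % = 2/3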
switch ngp
case{1} % ngp =1
location = 0;
weights = 2;
case{2} % ngp =2
location = [-sqrt(3)/3,sqrt(3)/3]';
weights = [1,1]';
case{3} % ngp =3
location = [-sqrt(3/5),0,sqrt(3/5)]';
weights = [5/9,8/9,5/9]';
case{4} % ngp =4
x1 = sqrt((3-2*sqrt(6/5))/7);
x2 = sqrt((3+2*sqrt(6/5))/7);
location = [-x1,-x2,x2,x1]';
w1 = (18+sqrt(30))/36;
w2 = (18-sqrt(30))/36;
weights = [w1,w2,w2,w1]';
case{5} % ngp =5
x1 = (1/3)*sqrt(5-2*sqrt(10/7));
x2 = (1/3)*sqrt(5+2*sqrt(10/7));
x3 = 0;
location = [-x1,-x2,x3,x2,x1]';
w1 = (322 + 13*sqrt(70))/900;
w2 = (322 - 13*sqrt(70))/900;
w3 = 128/225;
weights = [w1,w2,w3,w2,w1]';
otherwise
error('ngp max value = 5')
end
|
module cedille where
open import lib
open import cedille-types public
----------------------------------------------------------------------------------
-- Run-rewriting rules
----------------------------------------------------------------------------------
data gratr2-nt : Set where
_ws-plus-77 : gratr2-nt
_ws : gratr2-nt
_vars : gratr2-nt
_var-star-12 : gratr2-nt
_var-bar-11 : gratr2-nt
_var : gratr2-nt
_type : gratr2-nt
_tk : gratr2-nt
_theta : gratr2-nt
_term : gratr2-nt
_start : gratr2-nt
_rho : gratr2-nt
_qvar : gratr2-nt
_qkvar : gratr2-nt
_pterm : gratr2-nt
_posinfo : gratr2-nt
_params : gratr2-nt
_ows-star-78 : gratr2-nt
_ows : gratr2-nt
_otherpunct-bar-67 : gratr2-nt
_otherpunct-bar-66 : gratr2-nt
_otherpunct-bar-65 : gratr2-nt
_otherpunct-bar-64 : gratr2-nt
_otherpunct-bar-63 : gratr2-nt
_otherpunct-bar-62 : gratr2-nt
_otherpunct-bar-61 : gratr2-nt
_otherpunct-bar-60 : gratr2-nt
_otherpunct-bar-59 : gratr2-nt
_otherpunct-bar-58 : gratr2-nt
_otherpunct-bar-57 : gratr2-nt
_otherpunct-bar-56 : gratr2-nt
_otherpunct-bar-55 : gratr2-nt
_otherpunct-bar-54 : gratr2-nt
_otherpunct-bar-53 : gratr2-nt
_otherpunct-bar-52 : gratr2-nt
_otherpunct-bar-51 : gratr2-nt
_otherpunct-bar-50 : gratr2-nt
_otherpunct-bar-49 : gratr2-nt
_otherpunct-bar-48 : gratr2-nt
_otherpunct-bar-47 : gratr2-nt
_otherpunct-bar-46 : gratr2-nt
_otherpunct-bar-45 : gratr2-nt
_otherpunct-bar-44 : gratr2-nt
_otherpunct-bar-43 : gratr2-nt
_otherpunct-bar-42 : gratr2-nt
_otherpunct-bar-41 : gratr2-nt
_otherpunct-bar-40 : gratr2-nt
_otherpunct-bar-39 : gratr2-nt
_otherpunct-bar-38 : gratr2-nt
_otherpunct-bar-37 : gratr2-nt
_otherpunct-bar-36 : gratr2-nt
_otherpunct-bar-35 : gratr2-nt
_otherpunct-bar-34 : gratr2-nt
_otherpunct-bar-33 : gratr2-nt
_otherpunct-bar-32 : gratr2-nt
_otherpunct-bar-31 : gratr2-nt
_otherpunct-bar-30 : gratr2-nt
_otherpunct-bar-29 : gratr2-nt
_otherpunct-bar-28 : gratr2-nt
_otherpunct-bar-27 : gratr2-nt
_otherpunct-bar-26 : gratr2-nt
_otherpunct-bar-25 : gratr2-nt
_otherpunct-bar-24 : gratr2-nt
_otherpunct-bar-23 : gratr2-nt
_otherpunct-bar-22 : gratr2-nt
_otherpunct-bar-21 : gratr2-nt
_otherpunct : gratr2-nt
_optType : gratr2-nt
_optTerm : gratr2-nt
_optClass : gratr2-nt
_optAs : gratr2-nt
_numpunct-bar-9 : gratr2-nt
_numpunct-bar-8 : gratr2-nt
_numpunct-bar-7 : gratr2-nt
_numpunct-bar-6 : gratr2-nt
_numpunct-bar-10 : gratr2-nt
_numpunct : gratr2-nt
_numone-range-4 : gratr2-nt
_numone : gratr2-nt
_num-plus-5 : gratr2-nt
_num : gratr2-nt
_maybeMinus : gratr2-nt
_maybeErased : gratr2-nt
_maybeCheckType : gratr2-nt
_maybeAtype : gratr2-nt
_ltype : gratr2-nt
_lterms : gratr2-nt
_lterm : gratr2-nt
_lliftingType : gratr2-nt
_liftingType : gratr2-nt
_leftRight : gratr2-nt
_lam : gratr2-nt
_kvar-star-20 : gratr2-nt
_kvar-bar-19 : gratr2-nt
_kvar : gratr2-nt
_kind : gratr2-nt
_imprt : gratr2-nt
_imports : gratr2-nt
_fpth-star-18 : gratr2-nt
_fpth-plus-14 : gratr2-nt
_fpth-bar-17 : gratr2-nt
_fpth-bar-16 : gratr2-nt
_fpth-bar-15 : gratr2-nt
_fpth : gratr2-nt
_defTermOrType : gratr2-nt
_decl : gratr2-nt
_comment-star-73 : gratr2-nt
_comment : gratr2-nt
_cmds : gratr2-nt
_cmd : gratr2-nt
_bvar-bar-13 : gratr2-nt
_bvar : gratr2-nt
_binder : gratr2-nt
_aws-bar-76 : gratr2-nt
_aws-bar-75 : gratr2-nt
_aws-bar-74 : gratr2-nt
_aws : gratr2-nt
_atype : gratr2-nt
_aterm : gratr2-nt
_arrowtype : gratr2-nt
_args : gratr2-nt
_arg : gratr2-nt
_anychar-bar-72 : gratr2-nt
_anychar-bar-71 : gratr2-nt
_anychar-bar-70 : gratr2-nt
_anychar-bar-69 : gratr2-nt
_anychar-bar-68 : gratr2-nt
_anychar : gratr2-nt
_alpha-range-2 : gratr2-nt
_alpha-range-1 : gratr2-nt
_alpha-bar-3 : gratr2-nt
_alpha : gratr2-nt
gratr2-nt-eq : gratr2-nt → gratr2-nt → 𝔹
gratr2-nt-eq _ws-plus-77 _ws-plus-77 = tt
gratr2-nt-eq _ws _ws = tt
gratr2-nt-eq _vars _vars = tt
gratr2-nt-eq _var-star-12 _var-star-12 = tt
gratr2-nt-eq _var-bar-11 _var-bar-11 = tt
gratr2-nt-eq _var _var = tt
gratr2-nt-eq _type _type = tt
gratr2-nt-eq _tk _tk = tt
gratr2-nt-eq _theta _theta = tt
gratr2-nt-eq _term _term = tt
gratr2-nt-eq _start _start = tt
gratr2-nt-eq _rho _rho = tt
gratr2-nt-eq _qvar _qvar = tt
gratr2-nt-eq _qkvar _qkvar = tt
gratr2-nt-eq _pterm _pterm = tt
gratr2-nt-eq _posinfo _posinfo = tt
gratr2-nt-eq _params _params = tt
gratr2-nt-eq _ows-star-78 _ows-star-78 = tt
gratr2-nt-eq _ows _ows = tt
gratr2-nt-eq _otherpunct-bar-67 _otherpunct-bar-67 = tt
gratr2-nt-eq _otherpunct-bar-66 _otherpunct-bar-66 = tt
gratr2-nt-eq _otherpunct-bar-65 _otherpunct-bar-65 = tt
gratr2-nt-eq _otherpunct-bar-64 _otherpunct-bar-64 = tt
gratr2-nt-eq _otherpunct-bar-63 _otherpunct-bar-63 = tt
gratr2-nt-eq _otherpunct-bar-62 _otherpunct-bar-62 = tt
gratr2-nt-eq _otherpunct-bar-61 _otherpunct-bar-61 = tt
gratr2-nt-eq _otherpunct-bar-60 _otherpunct-bar-60 = tt
gratr2-nt-eq _otherpunct-bar-59 _otherpunct-bar-59 = tt
gratr2-nt-eq _otherpunct-bar-58 _otherpunct-bar-58 = tt
gratr2-nt-eq _otherpunct-bar-57 _otherpunct-bar-57 = tt
gratr2-nt-eq _otherpunct-bar-56 _otherpunct-bar-56 = tt
gratr2-nt-eq _otherpunct-bar-55 _otherpunct-bar-55 = tt
gratr2-nt-eq _otherpunct-bar-54 _otherpunct-bar-54 = tt
gratr2-nt-eq _otherpunct-bar-53 _otherpunct-bar-53 = tt
gratr2-nt-eq _otherpunct-bar-52 _otherpunct-bar-52 = tt
gratr2-nt-eq _otherpunct-bar-51 _otherpunct-bar-51 = tt
gratr2-nt-eq _otherpunct-bar-50 _otherpunct-bar-50 = tt
gratr2-nt-eq _otherpunct-bar-49 _otherpunct-bar-49 = tt
gratr2-nt-eq _otherpunct-bar-48 _otherpunct-bar-48 = tt
gratr2-nt-eq _otherpunct-bar-47 _otherpunct-bar-47 = tt
gratr2-nt-eq _otherpunct-bar-46 _otherpunct-bar-46 = tt
gratr2-nt-eq _otherpunct-bar-45 _otherpunct-bar-45 = tt
gratr2-nt-eq _otherpunct-bar-44 _otherpunct-bar-44 = tt
gratr2-nt-eq _otherpunct-bar-43 _otherpunct-bar-43 = tt
gratr2-nt-eq _otherpunct-bar-42 _otherpunct-bar-42 = tt
gratr2-nt-eq _otherpunct-bar-41 _otherpunct-bar-41 = tt
gratr2-nt-eq _otherpunct-bar-40 _otherpunct-bar-40 = tt
gratr2-nt-eq _otherpunct-bar-39 _otherpunct-bar-39 = tt
gratr2-nt-eq _otherpunct-bar-38 _otherpunct-bar-38 = tt
gratr2-nt-eq _otherpunct-bar-37 _otherpunct-bar-37 = tt
gratr2-nt-eq _otherpunct-bar-36 _otherpunct-bar-36 = tt
gratr2-nt-eq _otherpunct-bar-35 _otherpunct-bar-35 = tt
gratr2-nt-eq _otherpunct-bar-34 _otherpunct-bar-34 = tt
gratr2-nt-eq _otherpunct-bar-33 _otherpunct-bar-33 = tt
gratr2-nt-eq _otherpunct-bar-32 _otherpunct-bar-32 = tt
gratr2-nt-eq _otherpunct-bar-31 _otherpunct-bar-31 = tt
gratr2-nt-eq _otherpunct-bar-30 _otherpunct-bar-30 = tt
gratr2-nt-eq _otherpunct-bar-29 _otherpunct-bar-29 = tt
gratr2-nt-eq _otherpunct-bar-28 _otherpunct-bar-28 = tt
gratr2-nt-eq _otherpunct-bar-27 _otherpunct-bar-27 = tt
gratr2-nt-eq _otherpunct-bar-26 _otherpunct-bar-26 = tt
gratr2-nt-eq _otherpunct-bar-25 _otherpunct-bar-25 = tt
gratr2-nt-eq _otherpunct-bar-24 _otherpunct-bar-24 = tt
gratr2-nt-eq _otherpunct-bar-23 _otherpunct-bar-23 = tt
gratr2-nt-eq _otherpunct-bar-22 _otherpunct-bar-22 = tt
gratr2-nt-eq _otherpunct-bar-21 _otherpunct-bar-21 = tt
gratr2-nt-eq _otherpunct _otherpunct = tt
gratr2-nt-eq _optType _optType = tt
gratr2-nt-eq _optTerm _optTerm = tt
gratr2-nt-eq _optClass _optClass = tt
gratr2-nt-eq _optAs _optAs = tt
gratr2-nt-eq _numpunct-bar-9 _numpunct-bar-9 = tt
gratr2-nt-eq _numpunct-bar-8 _numpunct-bar-8 = tt
gratr2-nt-eq _numpunct-bar-7 _numpunct-bar-7 = tt
gratr2-nt-eq _numpunct-bar-6 _numpunct-bar-6 = tt
gratr2-nt-eq _numpunct-bar-10 _numpunct-bar-10 = tt
gratr2-nt-eq _numpunct _numpunct = tt
gratr2-nt-eq _numone-range-4 _numone-range-4 = tt
gratr2-nt-eq _numone _numone = tt
gratr2-nt-eq _num-plus-5 _num-plus-5 = tt
gratr2-nt-eq _num _num = tt
gratr2-nt-eq _maybeMinus _maybeMinus = tt
gratr2-nt-eq _maybeErased _maybeErased = tt
gratr2-nt-eq _maybeCheckType _maybeCheckType = tt
gratr2-nt-eq _maybeAtype _maybeAtype = tt
gratr2-nt-eq _ltype _ltype = tt
gratr2-nt-eq _lterms _lterms = tt
gratr2-nt-eq _lterm _lterm = tt
gratr2-nt-eq _lliftingType _lliftingType = tt
gratr2-nt-eq _liftingType _liftingType = tt
gratr2-nt-eq _leftRight _leftRight = tt
gratr2-nt-eq _lam _lam = tt
gratr2-nt-eq _kvar-star-20 _kvar-star-20 = tt
gratr2-nt-eq _kvar-bar-19 _kvar-bar-19 = tt
gratr2-nt-eq _kvar _kvar = tt
gratr2-nt-eq _kind _kind = tt
gratr2-nt-eq _imprt _imprt = tt
gratr2-nt-eq _imports _imports = tt
gratr2-nt-eq _fpth-star-18 _fpth-star-18 = tt
gratr2-nt-eq _fpth-plus-14 _fpth-plus-14 = tt
gratr2-nt-eq _fpth-bar-17 _fpth-bar-17 = tt
gratr2-nt-eq _fpth-bar-16 _fpth-bar-16 = tt
gratr2-nt-eq _fpth-bar-15 _fpth-bar-15 = tt
gratr2-nt-eq _fpth _fpth = tt
gratr2-nt-eq _defTermOrType _defTermOrType = tt
gratr2-nt-eq _decl _decl = tt
gratr2-nt-eq _comment-star-73 _comment-star-73 = tt
gratr2-nt-eq _comment _comment = tt
gratr2-nt-eq _cmds _cmds = tt
gratr2-nt-eq _cmd _cmd = tt
gratr2-nt-eq _bvar-bar-13 _bvar-bar-13 = tt
gratr2-nt-eq _bvar _bvar = tt
gratr2-nt-eq _binder _binder = tt
gratr2-nt-eq _aws-bar-76 _aws-bar-76 = tt
gratr2-nt-eq _aws-bar-75 _aws-bar-75 = tt
gratr2-nt-eq _aws-bar-74 _aws-bar-74 = tt
gratr2-nt-eq _aws _aws = tt
gratr2-nt-eq _atype _atype = tt
gratr2-nt-eq _aterm _aterm = tt
gratr2-nt-eq _arrowtype _arrowtype = tt
gratr2-nt-eq _args _args = tt
gratr2-nt-eq _arg _arg = tt
gratr2-nt-eq _anychar-bar-72 _anychar-bar-72 = tt
gratr2-nt-eq _anychar-bar-71 _anychar-bar-71 = tt
gratr2-nt-eq _anychar-bar-70 _anychar-bar-70 = tt
gratr2-nt-eq _anychar-bar-69 _anychar-bar-69 = tt
gratr2-nt-eq _anychar-bar-68 _anychar-bar-68 = tt
gratr2-nt-eq _anychar _anychar = tt
gratr2-nt-eq _alpha-range-2 _alpha-range-2 = tt
gratr2-nt-eq _alpha-range-1 _alpha-range-1 = tt
gratr2-nt-eq _alpha-bar-3 _alpha-bar-3 = tt
gratr2-nt-eq _alpha _alpha = tt
gratr2-nt-eq _ _ = ff
open import rtn gratr2-nt
cedille-start : gratr2-nt → 𝕃 gratr2-rule
cedille-start _ws-plus-77 = (just "P225" , nothing , just _ws-plus-77 , inj₁ _aws :: inj₁ _ws-plus-77 :: []) :: (just "P224" , nothing , just _ws-plus-77 , inj₁ _aws :: []) :: []
cedille-start _ws = (just "P226" , nothing , just _ws , inj₁ _ws-plus-77 :: []) :: []
cedille-start _vars = (just "VarsStart" , nothing , just _vars , inj₁ _var :: []) :: (just "VarsNext" , nothing , just _vars , inj₁ _var :: inj₁ _ws :: inj₁ _vars :: []) :: []
cedille-start _var-star-12 = (just "P85" , nothing , just _var-star-12 , inj₁ _var-bar-11 :: inj₁ _var-star-12 :: []) :: (just "P84" , nothing , just _var-star-12 , []) :: []
cedille-start _var-bar-11 = (just "P83" , nothing , just _var-bar-11 , inj₁ _numpunct :: []) :: (just "P82" , nothing , just _var-bar-11 , inj₁ _alpha :: []) :: []
cedille-start _var = (just "P86" , nothing , just _var , inj₁ _alpha :: inj₁ _var-star-12 :: []) :: []
cedille-start _type = (just "embed" , just "embed_end" , just _type , inj₁ _ltype :: []) :: (just "TpLambda" , nothing , just _type , inj₁ _posinfo :: inj₂ 'λ' :: inj₁ _ows :: inj₁ _posinfo :: inj₁ _bvar :: inj₁ _ows :: inj₂ ':' :: inj₁ _ows :: inj₁ _tk :: inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _type :: []) :: (just "TpEq" , nothing , just _type , inj₁ _term :: inj₁ _ows :: inj₂ '≃' :: inj₁ _ows :: inj₁ _term :: []) :: (just "TpArrow" , nothing , just _type , inj₁ _ltype :: inj₁ _ows :: inj₁ _arrowtype :: inj₁ _ows :: inj₁ _type :: []) :: (just "NoSpans" , nothing , just _type , inj₂ '{' :: inj₂ '^' :: inj₁ _type :: inj₁ _posinfo :: inj₂ '^' :: inj₂ '}' :: []) :: (just "Iota" , nothing , just _type , inj₁ _posinfo :: inj₂ 'ι' :: inj₁ _ows :: inj₁ _posinfo :: inj₁ _bvar :: inj₁ _optType :: inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _type :: []) :: (just "Abs" , nothing , just _type , inj₁ _posinfo :: inj₁ _binder :: inj₁ _ows :: inj₁ _posinfo :: inj₁ _bvar :: inj₁ _ows :: inj₂ ':' :: inj₁ _ows :: inj₁ _tk :: inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _type :: []) :: []
cedille-start _tk = (just "Tkt" , nothing , just _tk , inj₁ _type :: []) :: (just "Tkk" , just "Tkk_end" , just _tk , inj₁ _kind :: []) :: []
cedille-start _theta = (just "AbstractVars" , nothing , just _theta , inj₂ 'θ' :: inj₂ '<' :: inj₁ _ows :: inj₁ _vars :: inj₁ _ows :: inj₂ '>' :: []) :: (just "AbstractEq" , nothing , just _theta , inj₂ 'θ' :: inj₂ '+' :: []) :: (just "Abstract" , nothing , just _theta , inj₂ 'θ' :: []) :: []
cedille-start _term = (just "embed" , just "embed_end" , just _term , inj₁ _aterm :: []) :: (just "Theta" , nothing , just _term , inj₁ _posinfo :: inj₁ _theta :: inj₁ _ws :: inj₁ _lterm :: inj₁ _ows :: inj₁ _lterms :: []) :: (just "Let" , nothing , just _term , inj₁ _posinfo :: inj₂ 'l' :: inj₂ 'e' :: inj₂ 't' :: inj₁ _ws :: inj₁ _defTermOrType :: inj₁ _ws :: inj₂ 'i' :: inj₂ 'n' :: inj₁ _ws :: inj₁ _term :: []) :: (just "Lam" , nothing , just _term , inj₁ _posinfo :: inj₁ _lam :: inj₁ _ows :: inj₁ _posinfo :: inj₁ _bvar :: inj₁ _optClass :: inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _term :: []) :: []
cedille-start _start = (just "File" , nothing , just _start , inj₁ _posinfo :: inj₁ _ows :: inj₁ _imports :: inj₂ 'm' :: inj₂ 'o' :: inj₂ 'd' :: inj₂ 'u' :: inj₂ 'l' :: inj₂ 'e' :: inj₁ _ws :: inj₁ _qvar :: inj₁ _ows :: inj₁ _params :: inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _cmds :: inj₁ _ows :: inj₁ _posinfo :: []) :: []
cedille-start _rho = (just "RhoPlus" , nothing , just _rho , inj₂ 'ρ' :: inj₂ '+' :: []) :: (just "RhoPlain" , nothing , just _rho , inj₂ 'ρ' :: []) :: []
cedille-start _qvar = (just "P81" , nothing , just _qvar , inj₁ _var :: inj₂ '.' :: inj₁ _qvar :: []) :: (just "P80" , nothing , just _qvar , inj₁ _var :: []) :: []
cedille-start _qkvar = (just "P102" , nothing , just _qkvar , inj₁ _var :: inj₂ '.' :: inj₁ _qkvar :: []) :: (just "P101" , nothing , just _qkvar , inj₁ _kvar :: []) :: []
cedille-start _pterm = (just "Var" , nothing , just _pterm , inj₁ _posinfo :: inj₁ _qvar :: []) :: (just "Parens" , nothing , just _pterm , inj₁ _posinfo :: inj₂ '(' :: inj₁ _ows :: inj₁ _term :: inj₁ _ows :: inj₂ ')' :: inj₁ _posinfo :: []) :: (just "IotaPair" , nothing , just _pterm , inj₁ _posinfo :: inj₂ '[' :: inj₁ _ows :: inj₁ _term :: inj₁ _ows :: inj₂ ',' :: inj₁ _ows :: inj₁ _term :: inj₁ _ows :: inj₂ ']' :: inj₁ _posinfo :: []) :: (just "Hole" , nothing , just _pterm , inj₁ _posinfo :: inj₂ '●' :: []) :: []
cedille-start _posinfo = (just "Posinfo" , nothing , just _posinfo , []) :: []
cedille-start _params = (just "ParamsNil" , nothing , just _params , []) :: (just "ParamsCons" , nothing , just _params , inj₁ _ows :: inj₁ _decl :: inj₁ _params :: []) :: []
cedille-start _ows-star-78 = (just "P228" , nothing , just _ows-star-78 , inj₁ _aws :: inj₁ _ows-star-78 :: []) :: (just "P227" , nothing , just _ows-star-78 , []) :: []
cedille-start _ows = (just "P229" , nothing , just _ows , inj₁ _ows-star-78 :: []) :: []
cedille-start _otherpunct-bar-67 = (just "P201" , nothing , just _otherpunct-bar-67 , inj₁ _otherpunct-bar-66 :: []) :: (just "P200" , nothing , just _otherpunct-bar-67 , inj₂ '|' :: []) :: []
cedille-start _otherpunct-bar-66 = (just "P199" , nothing , just _otherpunct-bar-66 , inj₁ _otherpunct-bar-65 :: []) :: (just "P198" , nothing , just _otherpunct-bar-66 , inj₂ '□' :: []) :: []
cedille-start _otherpunct-bar-65 = (just "P197" , nothing , just _otherpunct-bar-65 , inj₁ _otherpunct-bar-64 :: []) :: (just "P196" , nothing , just _otherpunct-bar-65 , inj₂ 'Π' :: []) :: []
cedille-start _otherpunct-bar-64 = (just "P195" , nothing , just _otherpunct-bar-64 , inj₁ _otherpunct-bar-63 :: []) :: (just "P194" , nothing , just _otherpunct-bar-64 , inj₂ 'ι' :: []) :: []
cedille-start _otherpunct-bar-63 = (just "P193" , nothing , just _otherpunct-bar-63 , inj₁ _otherpunct-bar-62 :: []) :: (just "P192" , nothing , just _otherpunct-bar-63 , inj₂ 'λ' :: []) :: []
cedille-start _otherpunct-bar-62 = (just "P191" , nothing , just _otherpunct-bar-62 , inj₁ _otherpunct-bar-61 :: []) :: (just "P190" , nothing , just _otherpunct-bar-62 , inj₂ '∀' :: []) :: []
cedille-start _otherpunct-bar-61 = (just "P189" , nothing , just _otherpunct-bar-61 , inj₁ _otherpunct-bar-60 :: []) :: (just "P188" , nothing , just _otherpunct-bar-61 , inj₂ 'π' :: []) :: []
cedille-start _otherpunct-bar-60 = (just "P187" , nothing , just _otherpunct-bar-60 , inj₁ _otherpunct-bar-59 :: []) :: (just "P186" , nothing , just _otherpunct-bar-60 , inj₂ '★' :: []) :: []
cedille-start _otherpunct-bar-59 = (just "P185" , nothing , just _otherpunct-bar-59 , inj₁ _otherpunct-bar-58 :: []) :: (just "P184" , nothing , just _otherpunct-bar-59 , inj₂ '☆' :: []) :: []
cedille-start _otherpunct-bar-58 = (just "P183" , nothing , just _otherpunct-bar-58 , inj₁ _otherpunct-bar-57 :: []) :: (just "P182" , nothing , just _otherpunct-bar-58 , inj₂ '·' :: []) :: []
cedille-start _otherpunct-bar-57 = (just "P181" , nothing , just _otherpunct-bar-57 , inj₁ _otherpunct-bar-56 :: []) :: (just "P180" , nothing , just _otherpunct-bar-57 , inj₂ '⇐' :: []) :: []
cedille-start _otherpunct-bar-56 = (just "P179" , nothing , just _otherpunct-bar-56 , inj₁ _otherpunct-bar-55 :: []) :: (just "P178" , nothing , just _otherpunct-bar-56 , inj₂ '➔' :: []) :: []
cedille-start _otherpunct-bar-55 = (just "P177" , nothing , just _otherpunct-bar-55 , inj₁ _otherpunct-bar-54 :: []) :: (just "P176" , nothing , just _otherpunct-bar-55 , inj₂ '➾' :: []) :: []
cedille-start _otherpunct-bar-54 = (just "P175" , nothing , just _otherpunct-bar-54 , inj₁ _otherpunct-bar-53 :: []) :: (just "P174" , nothing , just _otherpunct-bar-54 , inj₂ '↑' :: []) :: []
cedille-start _otherpunct-bar-53 = (just "P173" , nothing , just _otherpunct-bar-53 , inj₁ _otherpunct-bar-52 :: []) :: (just "P172" , nothing , just _otherpunct-bar-53 , inj₂ '●' :: []) :: []
cedille-start _otherpunct-bar-52 = (just "P171" , nothing , just _otherpunct-bar-52 , inj₁ _otherpunct-bar-51 :: []) :: (just "P170" , nothing , just _otherpunct-bar-52 , inj₂ '(' :: []) :: []
cedille-start _otherpunct-bar-51 = (just "P169" , nothing , just _otherpunct-bar-51 , inj₁ _otherpunct-bar-50 :: []) :: (just "P168" , nothing , just _otherpunct-bar-51 , inj₂ ')' :: []) :: []
cedille-start _otherpunct-bar-50 = (just "P167" , nothing , just _otherpunct-bar-50 , inj₁ _otherpunct-bar-49 :: []) :: (just "P166" , nothing , just _otherpunct-bar-50 , inj₂ ':' :: []) :: []
cedille-start _otherpunct-bar-49 = (just "P165" , nothing , just _otherpunct-bar-49 , inj₁ _otherpunct-bar-48 :: []) :: (just "P164" , nothing , just _otherpunct-bar-49 , inj₂ '.' :: []) :: []
cedille-start _otherpunct-bar-48 = (just "P163" , nothing , just _otherpunct-bar-48 , inj₁ _otherpunct-bar-47 :: []) :: (just "P162" , nothing , just _otherpunct-bar-48 , inj₂ '[' :: []) :: []
cedille-start _otherpunct-bar-47 = (just "P161" , nothing , just _otherpunct-bar-47 , inj₁ _otherpunct-bar-46 :: []) :: (just "P160" , nothing , just _otherpunct-bar-47 , inj₂ ']' :: []) :: []
cedille-start _otherpunct-bar-46 = (just "P159" , nothing , just _otherpunct-bar-46 , inj₁ _otherpunct-bar-45 :: []) :: (just "P158" , nothing , just _otherpunct-bar-46 , inj₂ ',' :: []) :: []
cedille-start _otherpunct-bar-45 = (just "P157" , nothing , just _otherpunct-bar-45 , inj₁ _otherpunct-bar-44 :: []) :: (just "P156" , nothing , just _otherpunct-bar-45 , inj₂ '!' :: []) :: []
cedille-start _otherpunct-bar-44 = (just "P155" , nothing , just _otherpunct-bar-44 , inj₁ _otherpunct-bar-43 :: []) :: (just "P154" , nothing , just _otherpunct-bar-44 , inj₂ '{' :: []) :: []
cedille-start _otherpunct-bar-43 = (just "P153" , nothing , just _otherpunct-bar-43 , inj₁ _otherpunct-bar-42 :: []) :: (just "P152" , nothing , just _otherpunct-bar-43 , inj₂ '}' :: []) :: []
cedille-start _otherpunct-bar-42 = (just "P151" , nothing , just _otherpunct-bar-42 , inj₁ _otherpunct-bar-41 :: []) :: (just "P150" , nothing , just _otherpunct-bar-42 , inj₂ '⇒' :: []) :: []
cedille-start _otherpunct-bar-41 = (just "P149" , nothing , just _otherpunct-bar-41 , inj₁ _otherpunct-bar-40 :: []) :: (just "P148" , nothing , just _otherpunct-bar-41 , inj₂ '?' :: []) :: []
cedille-start _otherpunct-bar-40 = (just "P147" , nothing , just _otherpunct-bar-40 , inj₁ _otherpunct-bar-39 :: []) :: (just "P146" , nothing , just _otherpunct-bar-40 , inj₂ 'Λ' :: []) :: []
cedille-start _otherpunct-bar-39 = (just "P145" , nothing , just _otherpunct-bar-39 , inj₁ _otherpunct-bar-38 :: []) :: (just "P144" , nothing , just _otherpunct-bar-39 , inj₂ 'ρ' :: []) :: []
cedille-start _otherpunct-bar-38 = (just "P143" , nothing , just _otherpunct-bar-38 , inj₁ _otherpunct-bar-37 :: []) :: (just "P142" , nothing , just _otherpunct-bar-38 , inj₂ 'ε' :: []) :: []
cedille-start _otherpunct-bar-37 = (just "P141" , nothing , just _otherpunct-bar-37 , inj₁ _otherpunct-bar-36 :: []) :: (just "P140" , nothing , just _otherpunct-bar-37 , inj₂ 'β' :: []) :: []
cedille-start _otherpunct-bar-36 = (just "P139" , nothing , just _otherpunct-bar-36 , inj₁ _otherpunct-bar-35 :: []) :: (just "P138" , nothing , just _otherpunct-bar-36 , inj₂ '-' :: []) :: []
cedille-start _otherpunct-bar-35 = (just "P137" , nothing , just _otherpunct-bar-35 , inj₁ _otherpunct-bar-34 :: []) :: (just "P136" , nothing , just _otherpunct-bar-35 , inj₂ '𝒌' :: []) :: []
cedille-start _otherpunct-bar-34 = (just "P135" , nothing , just _otherpunct-bar-34 , inj₁ _otherpunct-bar-33 :: []) :: (just "P134" , nothing , just _otherpunct-bar-34 , inj₂ '=' :: []) :: []
cedille-start _otherpunct-bar-33 = (just "P133" , nothing , just _otherpunct-bar-33 , inj₁ _otherpunct-bar-32 :: []) :: (just "P132" , nothing , just _otherpunct-bar-33 , inj₂ 'ς' :: []) :: []
cedille-start _otherpunct-bar-32 = (just "P131" , nothing , just _otherpunct-bar-32 , inj₁ _otherpunct-bar-31 :: []) :: (just "P130" , nothing , just _otherpunct-bar-32 , inj₂ 'θ' :: []) :: []
cedille-start _otherpunct-bar-31 = (just "P129" , nothing , just _otherpunct-bar-31 , inj₁ _otherpunct-bar-30 :: []) :: (just "P128" , nothing , just _otherpunct-bar-31 , inj₂ '+' :: []) :: []
cedille-start _otherpunct-bar-30 = (just "P127" , nothing , just _otherpunct-bar-30 , inj₁ _otherpunct-bar-29 :: []) :: (just "P126" , nothing , just _otherpunct-bar-30 , inj₂ '<' :: []) :: []
cedille-start _otherpunct-bar-29 = (just "P125" , nothing , just _otherpunct-bar-29 , inj₁ _otherpunct-bar-28 :: []) :: (just "P124" , nothing , just _otherpunct-bar-29 , inj₂ '>' :: []) :: []
cedille-start _otherpunct-bar-28 = (just "P123" , nothing , just _otherpunct-bar-28 , inj₁ _otherpunct-bar-27 :: []) :: (just "P122" , nothing , just _otherpunct-bar-28 , inj₂ '≃' :: []) :: []
cedille-start _otherpunct-bar-27 = (just "P121" , nothing , just _otherpunct-bar-27 , inj₁ _otherpunct-bar-26 :: []) :: (just "P120" , nothing , just _otherpunct-bar-27 , inj₂ '\"' :: []) :: []
cedille-start _otherpunct-bar-26 = (just "P119" , nothing , just _otherpunct-bar-26 , inj₁ _otherpunct-bar-25 :: []) :: (just "P118" , nothing , just _otherpunct-bar-26 , inj₂ 'δ' :: []) :: []
cedille-start _otherpunct-bar-25 = (just "P117" , nothing , just _otherpunct-bar-25 , inj₁ _otherpunct-bar-24 :: []) :: (just "P116" , nothing , just _otherpunct-bar-25 , inj₂ 'χ' :: []) :: []
cedille-start _otherpunct-bar-24 = (just "P115" , nothing , just _otherpunct-bar-24 , inj₁ _otherpunct-bar-23 :: []) :: (just "P114" , nothing , just _otherpunct-bar-24 , inj₂ 'μ' :: []) :: []
cedille-start _otherpunct-bar-23 = (just "P113" , nothing , just _otherpunct-bar-23 , inj₁ _otherpunct-bar-22 :: []) :: (just "P112" , nothing , just _otherpunct-bar-23 , inj₂ 'υ' :: []) :: []
cedille-start _otherpunct-bar-22 = (just "P111" , nothing , just _otherpunct-bar-22 , inj₁ _otherpunct-bar-21 :: []) :: (just "P110" , nothing , just _otherpunct-bar-22 , inj₂ 'φ' :: []) :: []
cedille-start _otherpunct-bar-21 = (just "P109" , nothing , just _otherpunct-bar-21 , inj₂ 'ω' :: []) :: (just "P108" , nothing , just _otherpunct-bar-21 , inj₂ '◂' :: []) :: []
cedille-start _otherpunct = (just "P202" , nothing , just _otherpunct , inj₁ _otherpunct-bar-67 :: []) :: []
cedille-start _optType = (just "SomeType" , nothing , just _optType , inj₁ _ows :: inj₂ ':' :: inj₁ _ows :: inj₁ _type :: []) :: (just "NoType" , nothing , just _optType , []) :: []
cedille-start _optTerm = (just "SomeTerm" , nothing , just _optTerm , inj₁ _ows :: inj₂ '{' :: inj₁ _ows :: inj₁ _term :: inj₁ _ows :: inj₂ '}' :: inj₁ _posinfo :: []) :: (just "NoTerm" , nothing , just _optTerm , []) :: []
cedille-start _optClass = (just "SomeClass" , nothing , just _optClass , inj₁ _ows :: inj₂ ':' :: inj₁ _ows :: inj₁ _tk :: []) :: (just "NoClass" , nothing , just _optClass , []) :: []
cedille-start _optAs = (just "SomeOptAs" , nothing , just _optAs , inj₁ _ows :: inj₂ 'a' :: inj₂ 's' :: inj₁ _ws :: inj₁ _var :: []) :: (just "NoOptAs" , nothing , just _optAs , []) :: []
cedille-start _numpunct-bar-9 = (just "P76" , nothing , just _numpunct-bar-9 , inj₁ _numpunct-bar-8 :: []) :: (just "P75" , nothing , just _numpunct-bar-9 , inj₂ '\'' :: []) :: []
cedille-start _numpunct-bar-8 = (just "P74" , nothing , just _numpunct-bar-8 , inj₁ _numpunct-bar-7 :: []) :: (just "P73" , nothing , just _numpunct-bar-8 , inj₂ '-' :: []) :: []
cedille-start _numpunct-bar-7 = (just "P72" , nothing , just _numpunct-bar-7 , inj₁ _numpunct-bar-6 :: []) :: (just "P71" , nothing , just _numpunct-bar-7 , inj₂ '~' :: []) :: []
cedille-start _numpunct-bar-6 = (just "P70" , nothing , just _numpunct-bar-6 , inj₂ '_' :: []) :: (just "P69" , nothing , just _numpunct-bar-6 , inj₂ '#' :: []) :: []
cedille-start _numpunct-bar-10 = (just "P78" , nothing , just _numpunct-bar-10 , inj₁ _numpunct-bar-9 :: []) :: (just "P77" , nothing , just _numpunct-bar-10 , inj₁ _numone :: []) :: []
cedille-start _numpunct = (just "P79" , nothing , just _numpunct , inj₁ _numpunct-bar-10 :: []) :: []
cedille-start _numone-range-4 = (just "P64" , nothing , just _numone-range-4 , inj₂ '9' :: []) :: (just "P63" , nothing , just _numone-range-4 , inj₂ '8' :: []) :: (just "P62" , nothing , just _numone-range-4 , inj₂ '7' :: []) :: (just "P61" , nothing , just _numone-range-4 , inj₂ '6' :: []) :: (just "P60" , nothing , just _numone-range-4 , inj₂ '5' :: []) :: (just "P59" , nothing , just _numone-range-4 , inj₂ '4' :: []) :: (just "P58" , nothing , just _numone-range-4 , inj₂ '3' :: []) :: (just "P57" , nothing , just _numone-range-4 , inj₂ '2' :: []) :: (just "P56" , nothing , just _numone-range-4 , inj₂ '1' :: []) :: (just "P55" , nothing , just _numone-range-4 , inj₂ '0' :: []) :: []
cedille-start _numone = (just "P65" , nothing , just _numone , inj₁ _numone-range-4 :: []) :: []
cedille-start _num-plus-5 = (just "P67" , nothing , just _num-plus-5 , inj₁ _numone :: inj₁ _num-plus-5 :: []) :: (just "P66" , nothing , just _num-plus-5 , inj₁ _numone :: []) :: []
cedille-start _num = (just "P68" , nothing , just _num , inj₁ _num-plus-5 :: []) :: []
cedille-start _maybeMinus = (just "EpsHnf" , nothing , just _maybeMinus , []) :: (just "EpsHanf" , nothing , just _maybeMinus , inj₂ '-' :: []) :: []
cedille-start _maybeErased = (just "NotErased" , nothing , just _maybeErased , []) :: (just "Erased" , nothing , just _maybeErased , inj₂ '-' :: inj₁ _ows :: []) :: []
cedille-start _maybeCheckType = (just "Type" , nothing , just _maybeCheckType , inj₁ _ows :: inj₂ '◂' :: inj₁ _ows :: inj₁ _type :: []) :: (just "NoCheckType" , nothing , just _maybeCheckType , []) :: []
cedille-start _maybeAtype = (just "NoAtype" , nothing , just _maybeAtype , []) :: (just "Atype" , nothing , just _maybeAtype , inj₁ _ows :: inj₁ _atype :: []) :: []
cedille-start _ltype = (just "embed" , nothing , just _ltype , inj₁ _atype :: []) :: (just "Lft" , nothing , just _ltype , inj₁ _posinfo :: inj₂ '↑' :: inj₁ _ows :: inj₁ _posinfo :: inj₁ _var :: inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _term :: inj₁ _ows :: inj₂ ':' :: inj₁ _ows :: inj₁ _lliftingType :: []) :: []
cedille-start _lterms = (just "LtermsNil" , nothing , just _lterms , inj₁ _posinfo :: []) :: (just "LtermsCons" , nothing , just _lterms , inj₁ _ws :: inj₁ _maybeErased :: inj₁ _lterm :: inj₁ _lterms :: []) :: []
cedille-start _lterm = (just "embed" , just "embed_end" , just _lterm , inj₁ _pterm :: []) :: (just "Sigma" , nothing , just _lterm , inj₁ _posinfo :: inj₂ 'ς' :: inj₁ _ows :: inj₁ _lterm :: []) :: (just "Rho" , nothing , just _lterm , inj₁ _posinfo :: inj₁ _rho :: inj₁ _ows :: inj₁ _lterm :: inj₁ _ows :: inj₂ '-' :: inj₁ _ows :: inj₁ _lterm :: []) :: (just "Phi" , nothing , just _lterm , inj₁ _posinfo :: inj₂ 'φ' :: inj₁ _ows :: inj₁ _lterm :: inj₁ _ows :: inj₂ '-' :: inj₁ _ows :: inj₁ _lterm :: inj₁ _ows :: inj₂ '{' :: inj₁ _ows :: inj₁ _term :: inj₁ _ows :: inj₂ '}' :: inj₁ _posinfo :: []) :: (just "Epsilon" , nothing , just _lterm , inj₁ _posinfo :: inj₂ 'ε' :: inj₁ _leftRight :: inj₁ _maybeMinus :: inj₁ _ows :: inj₁ _lterm :: []) :: (just "Chi" , nothing , just _lterm , inj₁ _posinfo :: inj₂ 'χ' :: inj₁ _maybeAtype :: inj₁ _ows :: inj₂ '-' :: inj₁ _ows :: inj₁ _lterm :: []) :: (just "Beta" , nothing , just _lterm , inj₁ _posinfo :: inj₂ 'β' :: inj₁ _optTerm :: []) :: []
cedille-start _lliftingType = (just "LiftStar" , nothing , just _lliftingType , inj₁ _posinfo :: inj₂ '☆' :: []) :: (just "LiftParens" , nothing , just _lliftingType , inj₁ _posinfo :: inj₂ '(' :: inj₁ _ows :: inj₁ _liftingType :: inj₁ _ows :: inj₂ ')' :: inj₁ _posinfo :: []) :: []
cedille-start _liftingType = (just "embed" , nothing , just _liftingType , inj₁ _lliftingType :: []) :: (just "LiftTpArrow" , nothing , just _liftingType , inj₁ _type :: inj₁ _ows :: inj₂ '➔' :: inj₁ _ows :: inj₁ _liftingType :: []) :: (just "LiftPi" , nothing , just _liftingType , inj₁ _posinfo :: inj₂ 'Π' :: inj₁ _ows :: inj₁ _bvar :: inj₁ _ows :: inj₂ ':' :: inj₁ _ows :: inj₁ _type :: inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _liftingType :: []) :: []
cedille-start _leftRight = (just "Right" , nothing , just _leftRight , inj₂ 'r' :: []) :: (just "Left" , nothing , just _leftRight , inj₂ 'l' :: []) :: (just "Both" , nothing , just _leftRight , []) :: []
cedille-start _lam = (just "KeptLambda" , nothing , just _lam , inj₂ 'λ' :: []) :: (just "ErasedLambda" , nothing , just _lam , inj₂ 'Λ' :: []) :: []
cedille-start _kvar-star-20 = (just "P106" , nothing , just _kvar-star-20 , inj₁ _kvar-bar-19 :: inj₁ _kvar-star-20 :: []) :: (just "P105" , nothing , just _kvar-star-20 , []) :: []
cedille-start _kvar-bar-19 = (just "P104" , nothing , just _kvar-bar-19 , inj₁ _numpunct :: []) :: (just "P103" , nothing , just _kvar-bar-19 , inj₁ _alpha :: []) :: []
cedille-start _kvar = (just "P107" , nothing , just _kvar , inj₂ '𝒌' :: inj₁ _kvar-star-20 :: []) :: []
cedille-start _kind = (just "Star" , nothing , just _kind , inj₁ _posinfo :: inj₂ '★' :: []) :: (just "KndVar" , nothing , just _kind , inj₁ _posinfo :: inj₁ _qkvar :: inj₁ _args :: []) :: (just "KndTpArrow" , nothing , just _kind , inj₁ _ltype :: inj₁ _ows :: inj₂ '➔' :: inj₁ _ows :: inj₁ _kind :: []) :: (just "KndPi" , nothing , just _kind , inj₁ _posinfo :: inj₂ 'Π' :: inj₁ _ows :: inj₁ _posinfo :: inj₁ _bvar :: inj₁ _ows :: inj₂ ':' :: inj₁ _ows :: inj₁ _tk :: inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _kind :: []) :: (just "KndParens" , nothing , just _kind , inj₁ _posinfo :: inj₂ '(' :: inj₁ _ows :: inj₁ _kind :: inj₁ _ows :: inj₂ ')' :: inj₁ _posinfo :: []) :: []
cedille-start _imprt = (just "Import" , nothing , just _imprt , inj₁ _posinfo :: inj₂ 'i' :: inj₂ 'm' :: inj₂ 'p' :: inj₂ 'o' :: inj₂ 'r' :: inj₂ 't' :: inj₁ _ws :: inj₁ _fpth :: inj₁ _optAs :: inj₁ _args :: inj₁ _ows :: inj₂ '.' :: inj₁ _posinfo :: []) :: []
cedille-start _imports = (just "ImportsStart" , nothing , just _imports , []) :: (just "ImportsNext" , nothing , just _imports , inj₁ _imprt :: inj₁ _ows :: inj₁ _imports :: []) :: []
cedille-start _fpth-star-18 = (just "P99" , nothing , just _fpth-star-18 , inj₁ _fpth-bar-17 :: inj₁ _fpth-star-18 :: []) :: (just "P98" , nothing , just _fpth-star-18 , []) :: []
cedille-start _fpth-plus-14 = (just "P91" , nothing , just _fpth-plus-14 , inj₂ '.' :: inj₂ '.' :: inj₂ '/' :: inj₁ _fpth-plus-14 :: []) :: (just "P90" , nothing , just _fpth-plus-14 , inj₂ '.' :: inj₂ '.' :: inj₂ '/' :: []) :: []
cedille-start _fpth-bar-17 = (just "P97" , nothing , just _fpth-bar-17 , inj₁ _fpth-bar-16 :: []) :: (just "P96" , nothing , just _fpth-bar-17 , inj₁ _alpha :: []) :: []
cedille-start _fpth-bar-16 = (just "P95" , nothing , just _fpth-bar-16 , inj₂ '/' :: []) :: (just "P94" , nothing , just _fpth-bar-16 , inj₁ _numpunct :: []) :: []
cedille-start _fpth-bar-15 = (just "P93" , nothing , just _fpth-bar-15 , inj₁ _fpth-plus-14 :: []) :: (just "P92" , nothing , just _fpth-bar-15 , inj₁ _alpha :: []) :: []
cedille-start _fpth = (just "P100" , nothing , just _fpth , inj₁ _fpth-bar-15 :: inj₁ _fpth-star-18 :: []) :: []
cedille-start _defTermOrType = (just "DefType" , nothing , just _defTermOrType , inj₁ _posinfo :: inj₁ _var :: inj₁ _ows :: inj₂ '◂' :: inj₁ _ows :: inj₁ _kind :: inj₁ _ows :: inj₂ '=' :: inj₁ _ows :: inj₁ _type :: []) :: (just "DefTerm" , nothing , just _defTermOrType , inj₁ _posinfo :: inj₁ _var :: inj₁ _maybeCheckType :: inj₁ _ows :: inj₂ '=' :: inj₁ _ows :: inj₁ _term :: []) :: []
cedille-start _decl = (just "Decl" , nothing , just _decl , inj₁ _posinfo :: inj₂ '(' :: inj₁ _ows :: inj₁ _posinfo :: inj₁ _bvar :: inj₁ _ows :: inj₂ ':' :: inj₁ _ows :: inj₁ _tk :: inj₁ _ows :: inj₂ ')' :: inj₁ _posinfo :: []) :: []
cedille-start _comment-star-73 = (just "P215" , nothing , just _comment-star-73 , inj₁ _anychar :: inj₁ _comment-star-73 :: []) :: (just "P214" , nothing , just _comment-star-73 , []) :: []
cedille-start _comment = (just "P216" , nothing , just _comment , inj₂ '%' :: inj₁ _comment-star-73 :: inj₂ '\n' :: []) :: []
cedille-start _cmds = (just "CmdsStart" , nothing , just _cmds , []) :: (just "CmdsNext" , nothing , just _cmds , inj₁ _cmd :: inj₁ _ws :: inj₁ _cmds :: []) :: []
cedille-start _cmd = (just "ImportCmd" , nothing , just _cmd , inj₁ _imprt :: []) :: (just "DefTermOrType" , nothing , just _cmd , inj₁ _defTermOrType :: inj₁ _ows :: inj₂ '.' :: inj₁ _posinfo :: []) :: (just "DefKind" , nothing , just _cmd , inj₁ _posinfo :: inj₁ _kvar :: inj₁ _params :: inj₁ _ows :: inj₂ '=' :: inj₁ _ows :: inj₁ _kind :: inj₁ _ows :: inj₂ '.' :: inj₁ _posinfo :: []) :: []
cedille-start _bvar-bar-13 = (just "P88" , nothing , just _bvar-bar-13 , inj₁ _var :: []) :: (just "P87" , nothing , just _bvar-bar-13 , inj₂ '_' :: []) :: []
cedille-start _bvar = (just "P89" , nothing , just _bvar , inj₁ _bvar-bar-13 :: []) :: []
cedille-start _binder = (just "Pi" , nothing , just _binder , inj₂ 'Π' :: []) :: (just "All" , nothing , just _binder , inj₂ '∀' :: []) :: []
cedille-start _aws-bar-76 = (just "P222" , nothing , just _aws-bar-76 , inj₁ _aws-bar-75 :: []) :: (just "P221" , nothing , just _aws-bar-76 , inj₂ '\n' :: []) :: []
cedille-start _aws-bar-75 = (just "P220" , nothing , just _aws-bar-75 , inj₁ _aws-bar-74 :: []) :: (just "P219" , nothing , just _aws-bar-75 , inj₂ '\t' :: []) :: []
cedille-start _aws-bar-74 = (just "P218" , nothing , just _aws-bar-74 , inj₁ _comment :: []) :: (just "P217" , nothing , just _aws-bar-74 , inj₂ ' ' :: []) :: []
cedille-start _aws = (just "P223" , nothing , just _aws , inj₁ _aws-bar-76 :: []) :: []
cedille-start _atype = (just "TpVar" , nothing , just _atype , inj₁ _posinfo :: inj₁ _qvar :: []) :: (just "TpParens" , nothing , just _atype , inj₁ _posinfo :: inj₂ '(' :: inj₁ _ows :: inj₁ _type :: inj₁ _ows :: inj₂ ')' :: inj₁ _posinfo :: []) :: (just "TpHole" , nothing , just _atype , inj₁ _posinfo :: inj₂ '●' :: []) :: []
cedille-start _aterm = (just "embed" , nothing , just _aterm , inj₁ _lterm :: []) :: []
cedille-start _arrowtype = (just "UnerasedArrow" , nothing , just _arrowtype , inj₂ '➔' :: []) :: (just "ErasedArrow" , nothing , just _arrowtype , inj₂ '➾' :: []) :: []
cedille-start _args = (just "ArgsNil" , nothing , just _args , inj₁ _posinfo :: []) :: (just "ArgsCons" , nothing , just _args , inj₁ _arg :: inj₁ _args :: []) :: []
cedille-start _arg = (just "TypeArg" , nothing , just _arg , inj₁ _ows :: inj₂ '·' :: inj₁ _ws :: inj₁ _atype :: []) :: (just "TermArg" , nothing , just _arg , inj₁ _ws :: inj₁ _lterm :: []) :: []
cedille-start _anychar-bar-72 = (just "P212" , nothing , just _anychar-bar-72 , inj₁ _anychar-bar-71 :: []) :: (just "P211" , nothing , just _anychar-bar-72 , inj₁ _alpha :: []) :: []
cedille-start _anychar-bar-71 = (just "P210" , nothing , just _anychar-bar-71 , inj₁ _anychar-bar-70 :: []) :: (just "P209" , nothing , just _anychar-bar-71 , inj₁ _numpunct :: []) :: []
cedille-start _anychar-bar-70 = (just "P208" , nothing , just _anychar-bar-70 , inj₁ _anychar-bar-69 :: []) :: (just "P207" , nothing , just _anychar-bar-70 , inj₂ '\t' :: []) :: []
cedille-start _anychar-bar-69 = (just "P206" , nothing , just _anychar-bar-69 , inj₁ _anychar-bar-68 :: []) :: (just "P205" , nothing , just _anychar-bar-69 , inj₂ ' ' :: []) :: []
cedille-start _anychar-bar-68 = (just "P204" , nothing , just _anychar-bar-68 , inj₁ _otherpunct :: []) :: (just "P203" , nothing , just _anychar-bar-68 , inj₂ '%' :: []) :: []
cedille-start _anychar = (just "P213" , nothing , just _anychar , inj₁ _anychar-bar-72 :: []) :: []
cedille-start _alpha-range-2 = (just "P51" , nothing , just _alpha-range-2 , inj₂ 'Z' :: []) :: (just "P50" , nothing , just _alpha-range-2 , inj₂ 'Y' :: []) :: (just "P49" , nothing , just _alpha-range-2 , inj₂ 'X' :: []) :: (just "P48" , nothing , just _alpha-range-2 , inj₂ 'W' :: []) :: (just "P47" , nothing , just _alpha-range-2 , inj₂ 'V' :: []) :: (just "P46" , nothing , just _alpha-range-2 , inj₂ 'U' :: []) :: (just "P45" , nothing , just _alpha-range-2 , inj₂ 'T' :: []) :: (just "P44" , nothing , just _alpha-range-2 , inj₂ 'S' :: []) :: (just "P43" , nothing , just _alpha-range-2 , inj₂ 'R' :: []) :: (just "P42" , nothing , just _alpha-range-2 , inj₂ 'Q' :: []) :: (just "P41" , nothing , just _alpha-range-2 , inj₂ 'P' :: []) :: (just "P40" , nothing , just _alpha-range-2 , inj₂ 'O' :: []) :: (just "P39" , nothing , just _alpha-range-2 , inj₂ 'N' :: []) :: (just "P38" , nothing , just _alpha-range-2 , inj₂ 'M' :: []) :: (just "P37" , nothing , just _alpha-range-2 , inj₂ 'L' :: []) :: (just "P36" , nothing , just _alpha-range-2 , inj₂ 'K' :: []) :: (just "P35" , nothing , just _alpha-range-2 , inj₂ 'J' :: []) :: (just "P34" , nothing , just _alpha-range-2 , inj₂ 'I' :: []) :: (just "P33" , nothing , just _alpha-range-2 , inj₂ 'H' :: []) :: (just "P32" , nothing , just _alpha-range-2 , inj₂ 'G' :: []) :: (just "P31" , nothing , just _alpha-range-2 , inj₂ 'F' :: []) :: (just "P30" , nothing , just _alpha-range-2 , inj₂ 'E' :: []) :: (just "P29" , nothing , just _alpha-range-2 , inj₂ 'D' :: []) :: (just "P28" , nothing , just _alpha-range-2 , inj₂ 'C' :: []) :: (just "P27" , nothing , just _alpha-range-2 , inj₂ 'B' :: []) :: (just "P26" , nothing , just _alpha-range-2 , inj₂ 'A' :: []) :: []
cedille-start _alpha-range-1 = (just "P9" , nothing , just _alpha-range-1 , inj₂ 'j' :: []) :: (just "P8" , nothing , just _alpha-range-1 , inj₂ 'i' :: []) :: (just "P7" , nothing , just _alpha-range-1 , inj₂ 'h' :: []) :: (just "P6" , nothing , just _alpha-range-1 , inj₂ 'g' :: []) :: (just "P5" , nothing , just _alpha-range-1 , inj₂ 'f' :: []) :: (just "P4" , nothing , just _alpha-range-1 , inj₂ 'e' :: []) :: (just "P3" , nothing , just _alpha-range-1 , inj₂ 'd' :: []) :: (just "P25" , nothing , just _alpha-range-1 , inj₂ 'z' :: []) :: (just "P24" , nothing , just _alpha-range-1 , inj₂ 'y' :: []) :: (just "P23" , nothing , just _alpha-range-1 , inj₂ 'x' :: []) :: (just "P22" , nothing , just _alpha-range-1 , inj₂ 'w' :: []) :: (just "P21" , nothing , just _alpha-range-1 , inj₂ 'v' :: []) :: (just "P20" , nothing , just _alpha-range-1 , inj₂ 'u' :: []) :: (just "P2" , nothing , just _alpha-range-1 , inj₂ 'c' :: []) :: (just "P19" , nothing , just _alpha-range-1 , inj₂ 't' :: []) :: (just "P18" , nothing , just _alpha-range-1 , inj₂ 's' :: []) :: (just "P17" , nothing , just _alpha-range-1 , inj₂ 'r' :: []) :: (just "P16" , nothing , just _alpha-range-1 , inj₂ 'q' :: []) :: (just "P15" , nothing , just _alpha-range-1 , inj₂ 'p' :: []) :: (just "P14" , nothing , just _alpha-range-1 , inj₂ 'o' :: []) :: (just "P13" , nothing , just _alpha-range-1 , inj₂ 'n' :: []) :: (just "P12" , nothing , just _alpha-range-1 , inj₂ 'm' :: []) :: (just "P11" , nothing , just _alpha-range-1 , inj₂ 'l' :: []) :: (just "P10" , nothing , just _alpha-range-1 , inj₂ 'k' :: []) :: (just "P1" , nothing , just _alpha-range-1 , inj₂ 'b' :: []) :: (just "P0" , nothing , just _alpha-range-1 , inj₂ 'a' :: []) :: []
cedille-start _alpha-bar-3 = (just "P53" , nothing , just _alpha-bar-3 , inj₁ _alpha-range-2 :: []) :: (just "P52" , nothing , just _alpha-bar-3 , inj₁ _alpha-range-1 :: []) :: []
cedille-start _alpha = (just "P54" , nothing , just _alpha , inj₁ _alpha-bar-3 :: []) :: []
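-- cedille-return appears to give, for each nonterminal, the rule suffixes that may
-- continue a parse after that nonterminal has been recognized (for example the
-- left-recursive continuations of _aterm, _kind, _ltype and _liftingType);
-- nonterminals with no such continuations map to the empty list.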
cedille-return : maybe gratr2-nt → 𝕃 gratr2-rule
cedille-return (just _pterm) = (nothing , nothing , just _pterm , inj₁ _ows :: inj₂ '.' :: inj₁ _ows :: inj₁ _num :: inj₁ _posinfo :: []) :: []
cedille-return (just _ltype) = (nothing , nothing , just _ltype , inj₁ _ws :: inj₁ _lterm :: []) :: (nothing , nothing , just _ltype , inj₁ _ws :: inj₂ '·' :: inj₁ _ws :: inj₁ _atype :: []) :: []
cedille-return (just _liftingType) = (nothing , nothing , just _liftingType , inj₁ _ows :: inj₂ '➔' :: inj₁ _ows :: inj₁ _liftingType :: []) :: []
cedille-return (just _kind) = (nothing , nothing , just _kind , inj₁ _ows :: inj₂ '➔' :: inj₁ _ows :: inj₁ _kind :: []) :: []
cedille-return (just _aterm) = (nothing , nothing , just _aterm , inj₁ _ws :: inj₂ '·' :: inj₁ _ws :: inj₁ _atype :: []) :: (nothing , nothing , just _aterm , inj₁ _ws :: inj₁ _maybeErased :: inj₁ _aterm :: []) :: []
cedille-return _ = []
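-- cedille-rtn bundles the start nonterminal, nonterminal equality, and the start and
-- return tables above into the gratr2-rtn record that the parser runtime (the run
-- module opened below) consumes.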
cedille-rtn : gratr2-rtn
cedille-rtn = record { start = _start ; _eq_ = gratr2-nt-eq ; gratr2-start = cedille-start ; gratr2-return = cedille-return }
open import run ptr
open noderiv
------------------------------------------
-- Length-decreasing rules
------------------------------------------
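-- len-dec-rewrite tries to rewrite a prefix of the parse run: each clause matches a
-- marker (Id "..."), the already-parsed subtrees, and the literal input characters of
-- one production, and replaces that prefix with a single ParseTree node built with the
-- corresponding norm-* constructor.  On success it returns the rewritten run paired
-- with the number of elements that were matched; since a matched prefix of n ≥ 1
-- elements is replaced by one tree, the run never grows (hence "length-decreasing").
-- The returned count is presumably used by the rewriting loop in the run module.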
len-dec-rewrite : Run → maybe (Run × ℕ)
len-dec-rewrite {- Abs-} ((Id "Abs") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree (parsed-binder x1)) :: (ParseTree parsed-ows) :: (ParseTree (parsed-posinfo x2)) :: (ParseTree (parsed-bvar x3)) :: (ParseTree parsed-ows) :: (InputChar ':') :: (ParseTree parsed-ows) :: (ParseTree (parsed-tk x4)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-type x5)) rest) = just (ParseTree (parsed-type (norm-type (Abs x0 x1 x2 x3 x4 x5))) ::' rest , 14)
len-dec-rewrite {- Abstract-} ((Id "Abstract") :: _::_(InputChar 'θ') rest) = just (ParseTree (parsed-theta (norm-theta Abstract)) ::' rest , 2)
len-dec-rewrite {- AbstractEq-} ((Id "AbstractEq") :: (InputChar 'θ') :: _::_(InputChar '+') rest) = just (ParseTree (parsed-theta (norm-theta AbstractEq)) ::' rest , 3)
len-dec-rewrite {- AbstractVars-} ((Id "AbstractVars") :: (InputChar 'θ') :: (InputChar '<') :: (ParseTree parsed-ows) :: (ParseTree (parsed-vars x0)) :: (ParseTree parsed-ows) :: _::_(InputChar '>') rest) = just (ParseTree (parsed-theta (norm-theta (AbstractVars x0))) ::' rest , 7)
len-dec-rewrite {- All-} ((Id "All") :: _::_(InputChar '∀') rest) = just (ParseTree (parsed-binder (norm-binder All)) ::' rest , 2)
len-dec-rewrite {- App-} ((ParseTree (parsed-aterm x0)) :: (ParseTree parsed-ws) :: (ParseTree (parsed-maybeErased x1)) :: _::_(ParseTree (parsed-aterm x2)) rest) = just (ParseTree (parsed-aterm (norm-term (App x0 x1 x2))) ::' rest , 4)
len-dec-rewrite {- AppTp-} ((ParseTree (parsed-aterm x0)) :: (ParseTree parsed-ws) :: (InputChar '·') :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-atype x1)) rest) = just (ParseTree (parsed-aterm (norm-term (AppTp x0 x1))) ::' rest , 5)
len-dec-rewrite {- ArgsCons-} ((Id "ArgsCons") :: (ParseTree (parsed-arg x0)) :: _::_(ParseTree (parsed-args x1)) rest) = just (ParseTree (parsed-args (norm-args (ArgsCons x0 x1))) ::' rest , 3)
len-dec-rewrite {- ArgsNil-} ((Id "ArgsNil") :: _::_(ParseTree (parsed-posinfo x0)) rest) = just (ParseTree (parsed-args (norm-args (ArgsNil x0))) ::' rest , 2)
len-dec-rewrite {- Atype-} ((Id "Atype") :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-atype x0)) rest) = just (ParseTree (parsed-maybeAtype (norm-maybeAtype (Atype x0))) ::' rest , 3)
len-dec-rewrite {- Beta-} ((Id "Beta") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'β') :: _::_(ParseTree (parsed-optTerm x1)) rest) = just (ParseTree (parsed-lterm (norm-term (Beta x0 x1))) ::' rest , 4)
len-dec-rewrite {- Chi-} ((Id "Chi") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'χ') :: (ParseTree (parsed-maybeAtype x1)) :: (ParseTree parsed-ows) :: (InputChar '-') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-lterm x2)) rest) = just (ParseTree (parsed-lterm (norm-term (Chi x0 x1 x2))) ::' rest , 8)
len-dec-rewrite {- CmdsNext-} ((Id "CmdsNext") :: (ParseTree (parsed-cmd x0)) :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-cmds x1)) rest) = just (ParseTree (parsed-cmds (norm-cmds (CmdsNext x0 x1))) ::' rest , 4)
len-dec-rewrite {- Decl-} ((Id "Decl") :: (ParseTree (parsed-posinfo x0)) :: (InputChar '(') :: (ParseTree parsed-ows) :: (ParseTree (parsed-posinfo x1)) :: (ParseTree (parsed-bvar x2)) :: (ParseTree parsed-ows) :: (InputChar ':') :: (ParseTree parsed-ows) :: (ParseTree (parsed-tk x3)) :: (ParseTree parsed-ows) :: (InputChar ')') :: _::_(ParseTree (parsed-posinfo x4)) rest) = just (ParseTree (parsed-decl (norm-decl (Decl x0 x1 x2 x3 x4))) ::' rest , 13)
len-dec-rewrite {- DefKind-} ((Id "DefKind") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree (parsed-kvar x1)) :: (ParseTree (parsed-params x2)) :: (ParseTree parsed-ows) :: (InputChar '=') :: (ParseTree parsed-ows) :: (ParseTree (parsed-kind x3)) :: (ParseTree parsed-ows) :: (InputChar '.') :: _::_(ParseTree (parsed-posinfo x4)) rest) = just (ParseTree (parsed-cmd (norm-cmd (DefKind x0 x1 x2 x3 x4))) ::' rest , 11)
len-dec-rewrite {- DefTerm-} ((Id "DefTerm") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree (parsed-var x1)) :: (ParseTree (parsed-maybeCheckType x2)) :: (ParseTree parsed-ows) :: (InputChar '=') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-term x3)) rest) = just (ParseTree (parsed-defTermOrType (norm-defTermOrType (DefTerm x0 x1 x2 x3))) ::' rest , 8)
len-dec-rewrite {- DefTermOrType-} ((Id "DefTermOrType") :: (ParseTree (parsed-defTermOrType x0)) :: (ParseTree parsed-ows) :: (InputChar '.') :: _::_(ParseTree (parsed-posinfo x1)) rest) = just (ParseTree (parsed-cmd (norm-cmd (DefTermOrType x0 x1))) ::' rest , 5)
len-dec-rewrite {- DefType-} ((Id "DefType") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree (parsed-var x1)) :: (ParseTree parsed-ows) :: (InputChar '◂') :: (ParseTree parsed-ows) :: (ParseTree (parsed-kind x2)) :: (ParseTree parsed-ows) :: (InputChar '=') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-type x3)) rest) = just (ParseTree (parsed-defTermOrType (norm-defTermOrType (DefType x0 x1 x2 x3))) ::' rest , 11)
len-dec-rewrite {- EpsHanf-} ((Id "EpsHanf") :: _::_(InputChar '-') rest) = just (ParseTree (parsed-maybeMinus (norm-maybeMinus EpsHanf)) ::' rest , 2)
len-dec-rewrite {- Epsilon-} ((Id "Epsilon") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'ε') :: (ParseTree (parsed-leftRight x1)) :: (ParseTree (parsed-maybeMinus x2)) :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-lterm x3)) rest) = just (ParseTree (parsed-lterm (norm-term (Epsilon x0 x1 x2 x3))) ::' rest , 7)
len-dec-rewrite {- Erased-} ((Id "Erased") :: (InputChar '-') :: _::_(ParseTree parsed-ows) rest) = just (ParseTree (parsed-maybeErased (norm-maybeErased Erased)) ::' rest , 3)
len-dec-rewrite {- ErasedArrow-} ((Id "ErasedArrow") :: _::_(InputChar '➾') rest) = just (ParseTree (parsed-arrowtype (norm-arrowtype ErasedArrow)) ::' rest , 2)
len-dec-rewrite {- ErasedLambda-} ((Id "ErasedLambda") :: _::_(InputChar 'Λ') rest) = just (ParseTree (parsed-lam (norm-lam ErasedLambda)) ::' rest , 2)
len-dec-rewrite {- File-} ((Id "File") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree parsed-ows) :: (ParseTree (parsed-imports x1)) :: (InputChar 'm') :: (InputChar 'o') :: (InputChar 'd') :: (InputChar 'u') :: (InputChar 'l') :: (InputChar 'e') :: (ParseTree parsed-ws) :: (ParseTree (parsed-qvar x2)) :: (ParseTree parsed-ows) :: (ParseTree (parsed-params x3)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: (ParseTree (parsed-cmds x4)) :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-posinfo x5)) rest) = just (ParseTree (parsed-start (norm-start (File x0 x1 x2 x3 x4 x5))) ::' rest , 20)
len-dec-rewrite {- Hole-} ((Id "Hole") :: (ParseTree (parsed-posinfo x0)) :: _::_(InputChar '●') rest) = just (ParseTree (parsed-pterm (norm-term (Hole x0))) ::' rest , 3)
len-dec-rewrite {- Import-} ((Id "Import") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'i') :: (InputChar 'm') :: (InputChar 'p') :: (InputChar 'o') :: (InputChar 'r') :: (InputChar 't') :: (ParseTree parsed-ws) :: (ParseTree (parsed-fpth x1)) :: (ParseTree (parsed-optAs x2)) :: (ParseTree (parsed-args x3)) :: (ParseTree parsed-ows) :: (InputChar '.') :: _::_(ParseTree (parsed-posinfo x4)) rest) = just (ParseTree (parsed-imprt (norm-imprt (Import x0 x1 x2 x3 x4))) ::' rest , 15)
len-dec-rewrite {- ImportCmd-} ((Id "ImportCmd") :: _::_(ParseTree (parsed-imprt x0)) rest) = just (ParseTree (parsed-cmd (norm-cmd (ImportCmd x0))) ::' rest , 2)
len-dec-rewrite {- ImportsNext-} ((Id "ImportsNext") :: (ParseTree (parsed-imprt x0)) :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-imports x1)) rest) = just (ParseTree (parsed-imports (norm-imports (ImportsNext x0 x1))) ::' rest , 4)
len-dec-rewrite {- Iota-} ((Id "Iota") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'ι') :: (ParseTree parsed-ows) :: (ParseTree (parsed-posinfo x1)) :: (ParseTree (parsed-bvar x2)) :: (ParseTree (parsed-optType x3)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-type x4)) rest) = just (ParseTree (parsed-type (norm-type (Iota x0 x1 x2 x3 x4))) ::' rest , 11)
len-dec-rewrite {- IotaPair-} ((Id "IotaPair") :: (ParseTree (parsed-posinfo x0)) :: (InputChar '[') :: (ParseTree parsed-ows) :: (ParseTree (parsed-term x1)) :: (ParseTree parsed-ows) :: (InputChar ',') :: (ParseTree parsed-ows) :: (ParseTree (parsed-term x2)) :: (ParseTree parsed-ows) :: (InputChar ']') :: _::_(ParseTree (parsed-posinfo x3)) rest) = just (ParseTree (parsed-pterm (norm-term (IotaPair x0 x1 x2 x3))) ::' rest , 12)
len-dec-rewrite {- IotaProj-} ((ParseTree (parsed-pterm x0)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: (ParseTree (parsed-num x1)) :: _::_(ParseTree (parsed-posinfo x2)) rest) = just (ParseTree (parsed-pterm (norm-term (IotaProj x0 x1 x2))) ::' rest , 6)
len-dec-rewrite {- KeptLambda-} ((Id "KeptLambda") :: _::_(InputChar 'λ') rest) = just (ParseTree (parsed-lam (norm-lam KeptLambda)) ::' rest , 2)
len-dec-rewrite {- KndArrow-} ((ParseTree (parsed-kind x0)) :: (ParseTree parsed-ows) :: (InputChar '➔') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-kind x1)) rest) = just (ParseTree (parsed-kind (norm-kind (KndArrow x0 x1))) ::' rest , 5)
len-dec-rewrite {- KndParens-} ((Id "KndParens") :: (ParseTree (parsed-posinfo x0)) :: (InputChar '(') :: (ParseTree parsed-ows) :: (ParseTree (parsed-kind x1)) :: (ParseTree parsed-ows) :: (InputChar ')') :: _::_(ParseTree (parsed-posinfo x2)) rest) = just (ParseTree (parsed-kind (norm-kind (KndParens x0 x1 x2))) ::' rest , 8)
len-dec-rewrite {- KndPi-} ((Id "KndPi") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'Π') :: (ParseTree parsed-ows) :: (ParseTree (parsed-posinfo x1)) :: (ParseTree (parsed-bvar x2)) :: (ParseTree parsed-ows) :: (InputChar ':') :: (ParseTree parsed-ows) :: (ParseTree (parsed-tk x3)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-kind x4)) rest) = just (ParseTree (parsed-kind (norm-kind (KndPi x0 x1 x2 x3 x4))) ::' rest , 14)
len-dec-rewrite {- KndTpArrow-} ((Id "KndTpArrow") :: (ParseTree (parsed-ltype x0)) :: (ParseTree parsed-ows) :: (InputChar '➔') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-kind x1)) rest) = just (ParseTree (parsed-kind (norm-kind (KndTpArrow x0 x1))) ::' rest , 6)
len-dec-rewrite {- KndVar-} ((Id "KndVar") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree (parsed-qkvar x1)) :: _::_(ParseTree (parsed-args x2)) rest) = just (ParseTree (parsed-kind (norm-kind (KndVar x0 x1 x2))) ::' rest , 4)
len-dec-rewrite {- Lam-} ((Id "Lam") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree (parsed-lam x1)) :: (ParseTree parsed-ows) :: (ParseTree (parsed-posinfo x2)) :: (ParseTree (parsed-bvar x3)) :: (ParseTree (parsed-optClass x4)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-term x5)) rest) = just (ParseTree (parsed-term (norm-term (Lam x0 x1 x2 x3 x4 x5))) ::' rest , 11)
len-dec-rewrite {- Left-} ((Id "Left") :: _::_(InputChar 'l') rest) = just (ParseTree (parsed-leftRight (norm-leftRight Left)) ::' rest , 2)
len-dec-rewrite {- Let-} ((Id "Let") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'l') :: (InputChar 'e') :: (InputChar 't') :: (ParseTree parsed-ws) :: (ParseTree (parsed-defTermOrType x1)) :: (ParseTree parsed-ws) :: (InputChar 'i') :: (InputChar 'n') :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-term x2)) rest) = just (ParseTree (parsed-term (norm-term (Let x0 x1 x2))) ::' rest , 12)
len-dec-rewrite {- Lft-} ((Id "Lft") :: (ParseTree (parsed-posinfo x0)) :: (InputChar '↑') :: (ParseTree parsed-ows) :: (ParseTree (parsed-posinfo x1)) :: (ParseTree (parsed-var x2)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: (ParseTree (parsed-term x3)) :: (ParseTree parsed-ows) :: (InputChar ':') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-lliftingType x4)) rest) = just (ParseTree (parsed-ltype (norm-type (Lft x0 x1 x2 x3 x4))) ::' rest , 14)
len-dec-rewrite {- LiftArrow-} ((ParseTree (parsed-liftingType x0)) :: (ParseTree parsed-ows) :: (InputChar '➔') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-liftingType x1)) rest) = just (ParseTree (parsed-liftingType (norm-liftingType (LiftArrow x0 x1))) ::' rest , 5)
len-dec-rewrite {- LiftParens-} ((Id "LiftParens") :: (ParseTree (parsed-posinfo x0)) :: (InputChar '(') :: (ParseTree parsed-ows) :: (ParseTree (parsed-liftingType x1)) :: (ParseTree parsed-ows) :: (InputChar ')') :: _::_(ParseTree (parsed-posinfo x2)) rest) = just (ParseTree (parsed-lliftingType (norm-liftingType (LiftParens x0 x1 x2))) ::' rest , 8)
len-dec-rewrite {- LiftPi-} ((Id "LiftPi") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'Π') :: (ParseTree parsed-ows) :: (ParseTree (parsed-bvar x1)) :: (ParseTree parsed-ows) :: (InputChar ':') :: (ParseTree parsed-ows) :: (ParseTree (parsed-type x2)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-liftingType x3)) rest) = just (ParseTree (parsed-liftingType (norm-liftingType (LiftPi x0 x1 x2 x3))) ::' rest , 13)
len-dec-rewrite {- LiftStar-} ((Id "LiftStar") :: (ParseTree (parsed-posinfo x0)) :: _::_(InputChar '☆') rest) = just (ParseTree (parsed-lliftingType (norm-liftingType (LiftStar x0))) ::' rest , 3)
len-dec-rewrite {- LiftTpArrow-} ((Id "LiftTpArrow") :: (ParseTree (parsed-type x0)) :: (ParseTree parsed-ows) :: (InputChar '➔') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-liftingType x1)) rest) = just (ParseTree (parsed-liftingType (norm-liftingType (LiftTpArrow x0 x1))) ::' rest , 6)
len-dec-rewrite {- LtermsCons-} ((Id "LtermsCons") :: (ParseTree parsed-ws) :: (ParseTree (parsed-maybeErased x0)) :: (ParseTree (parsed-lterm x1)) :: _::_(ParseTree (parsed-lterms x2)) rest) = just (ParseTree (parsed-lterms (norm-lterms (LtermsCons x0 x1 x2))) ::' rest , 5)
len-dec-rewrite {- LtermsNil-} ((Id "LtermsNil") :: _::_(ParseTree (parsed-posinfo x0)) rest) = just (ParseTree (parsed-lterms (norm-lterms (LtermsNil x0))) ::' rest , 2)
len-dec-rewrite {- NoSpans-} ((Id "NoSpans") :: (InputChar '{') :: (InputChar '^') :: (ParseTree (parsed-type x0)) :: (ParseTree (parsed-posinfo x1)) :: (InputChar '^') :: _::_(InputChar '}') rest) = just (ParseTree (parsed-type (norm-type (NoSpans x0 x1))) ::' rest , 7)
len-dec-rewrite {- P0-} ((Id "P0") :: _::_(InputChar 'a') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'a'))) ::' rest , 2)
len-dec-rewrite {- P1-} ((Id "P1") :: _::_(InputChar 'b') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'b'))) ::' rest , 2)
len-dec-rewrite {- P10-} ((Id "P10") :: _::_(InputChar 'k') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'k'))) ::' rest , 2)
len-dec-rewrite {- P100-} ((Id "P100") :: (ParseTree (parsed-fpth-bar-15 x0)) :: _::_(ParseTree (parsed-fpth-star-18 x1)) rest) = just (ParseTree (parsed-fpth (string-append 1 x0 x1)) ::' rest , 3)
len-dec-rewrite {- P101-} ((Id "P101") :: _::_(ParseTree (parsed-kvar x0)) rest) = just (ParseTree (parsed-qkvar (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P102-} ((Id "P102") :: (ParseTree (parsed-var x0)) :: (InputChar '.') :: _::_(ParseTree (parsed-qkvar x1)) rest) = just (ParseTree (parsed-qkvar (string-append 2 x0 (char-to-string '.') x1)) ::' rest , 4)
len-dec-rewrite {- P103-} ((Id "P103") :: _::_(ParseTree (parsed-alpha x0)) rest) = just (ParseTree (parsed-kvar-bar-19 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P104-} ((Id "P104") :: _::_(ParseTree (parsed-numpunct x0)) rest) = just (ParseTree (parsed-kvar-bar-19 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P106-} ((Id "P106") :: (ParseTree (parsed-kvar-bar-19 x0)) :: _::_(ParseTree (parsed-kvar-star-20 x1)) rest) = just (ParseTree (parsed-kvar-star-20 (string-append 1 x0 x1)) ::' rest , 3)
len-dec-rewrite {- P107-} ((Id "P107") :: (InputChar '𝒌') :: _::_(ParseTree (parsed-kvar-star-20 x0)) rest) = just (ParseTree (parsed-kvar (string-append 1 (char-to-string '𝒌') x0)) ::' rest , 3)
len-dec-rewrite {- P108-} ((Id "P108") :: _::_(InputChar '◂') rest) = just (ParseTree parsed-otherpunct-bar-21 ::' rest , 2)
len-dec-rewrite {- P109-} ((Id "P109") :: _::_(InputChar 'ω') rest) = just (ParseTree parsed-otherpunct-bar-21 ::' rest , 2)
len-dec-rewrite {- P11-} ((Id "P11") :: _::_(InputChar 'l') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'l'))) ::' rest , 2)
len-dec-rewrite {- P110-} ((Id "P110") :: _::_(InputChar 'φ') rest) = just (ParseTree parsed-otherpunct-bar-22 ::' rest , 2)
len-dec-rewrite {- P111-} ((Id "P111") :: _::_(ParseTree parsed-otherpunct-bar-21) rest) = just (ParseTree parsed-otherpunct-bar-22 ::' rest , 2)
len-dec-rewrite {- P112-} ((Id "P112") :: _::_(InputChar 'υ') rest) = just (ParseTree parsed-otherpunct-bar-23 ::' rest , 2)
len-dec-rewrite {- P113-} ((Id "P113") :: _::_(ParseTree parsed-otherpunct-bar-22) rest) = just (ParseTree parsed-otherpunct-bar-23 ::' rest , 2)
len-dec-rewrite {- P114-} ((Id "P114") :: _::_(InputChar 'μ') rest) = just (ParseTree parsed-otherpunct-bar-24 ::' rest , 2)
len-dec-rewrite {- P115-} ((Id "P115") :: _::_(ParseTree parsed-otherpunct-bar-23) rest) = just (ParseTree parsed-otherpunct-bar-24 ::' rest , 2)
len-dec-rewrite {- P116-} ((Id "P116") :: _::_(InputChar 'χ') rest) = just (ParseTree parsed-otherpunct-bar-25 ::' rest , 2)
len-dec-rewrite {- P117-} ((Id "P117") :: _::_(ParseTree parsed-otherpunct-bar-24) rest) = just (ParseTree parsed-otherpunct-bar-25 ::' rest , 2)
len-dec-rewrite {- P118-} ((Id "P118") :: _::_(InputChar 'δ') rest) = just (ParseTree parsed-otherpunct-bar-26 ::' rest , 2)
len-dec-rewrite {- P119-} ((Id "P119") :: _::_(ParseTree parsed-otherpunct-bar-25) rest) = just (ParseTree parsed-otherpunct-bar-26 ::' rest , 2)
len-dec-rewrite {- P12-} ((Id "P12") :: _::_(InputChar 'm') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'm'))) ::' rest , 2)
len-dec-rewrite {- P120-} ((Id "P120") :: _::_(InputChar '\"') rest) = just (ParseTree parsed-otherpunct-bar-27 ::' rest , 2)
len-dec-rewrite {- P121-} ((Id "P121") :: _::_(ParseTree parsed-otherpunct-bar-26) rest) = just (ParseTree parsed-otherpunct-bar-27 ::' rest , 2)
len-dec-rewrite {- P122-} ((Id "P122") :: _::_(InputChar '≃') rest) = just (ParseTree parsed-otherpunct-bar-28 ::' rest , 2)
len-dec-rewrite {- P123-} ((Id "P123") :: _::_(ParseTree parsed-otherpunct-bar-27) rest) = just (ParseTree parsed-otherpunct-bar-28 ::' rest , 2)
len-dec-rewrite {- P124-} ((Id "P124") :: _::_(InputChar '>') rest) = just (ParseTree parsed-otherpunct-bar-29 ::' rest , 2)
len-dec-rewrite {- P125-} ((Id "P125") :: _::_(ParseTree parsed-otherpunct-bar-28) rest) = just (ParseTree parsed-otherpunct-bar-29 ::' rest , 2)
len-dec-rewrite {- P126-} ((Id "P126") :: _::_(InputChar '<') rest) = just (ParseTree parsed-otherpunct-bar-30 ::' rest , 2)
len-dec-rewrite {- P127-} ((Id "P127") :: _::_(ParseTree parsed-otherpunct-bar-29) rest) = just (ParseTree parsed-otherpunct-bar-30 ::' rest , 2)
len-dec-rewrite {- P128-} ((Id "P128") :: _::_(InputChar '+') rest) = just (ParseTree parsed-otherpunct-bar-31 ::' rest , 2)
len-dec-rewrite {- P129-} ((Id "P129") :: _::_(ParseTree parsed-otherpunct-bar-30) rest) = just (ParseTree parsed-otherpunct-bar-31 ::' rest , 2)
len-dec-rewrite {- P13-} ((Id "P13") :: _::_(InputChar 'n') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'n'))) ::' rest , 2)
len-dec-rewrite {- P130-} ((Id "P130") :: _::_(InputChar 'θ') rest) = just (ParseTree parsed-otherpunct-bar-32 ::' rest , 2)
len-dec-rewrite {- P131-} ((Id "P131") :: _::_(ParseTree parsed-otherpunct-bar-31) rest) = just (ParseTree parsed-otherpunct-bar-32 ::' rest , 2)
len-dec-rewrite {- P132-} ((Id "P132") :: _::_(InputChar 'ς') rest) = just (ParseTree parsed-otherpunct-bar-33 ::' rest , 2)
len-dec-rewrite {- P133-} ((Id "P133") :: _::_(ParseTree parsed-otherpunct-bar-32) rest) = just (ParseTree parsed-otherpunct-bar-33 ::' rest , 2)
len-dec-rewrite {- P134-} ((Id "P134") :: _::_(InputChar '=') rest) = just (ParseTree parsed-otherpunct-bar-34 ::' rest , 2)
len-dec-rewrite {- P135-} ((Id "P135") :: _::_(ParseTree parsed-otherpunct-bar-33) rest) = just (ParseTree parsed-otherpunct-bar-34 ::' rest , 2)
len-dec-rewrite {- P136-} ((Id "P136") :: _::_(InputChar '𝒌') rest) = just (ParseTree parsed-otherpunct-bar-35 ::' rest , 2)
len-dec-rewrite {- P137-} ((Id "P137") :: _::_(ParseTree parsed-otherpunct-bar-34) rest) = just (ParseTree parsed-otherpunct-bar-35 ::' rest , 2)
len-dec-rewrite {- P138-} ((Id "P138") :: _::_(InputChar '-') rest) = just (ParseTree parsed-otherpunct-bar-36 ::' rest , 2)
len-dec-rewrite {- P139-} ((Id "P139") :: _::_(ParseTree parsed-otherpunct-bar-35) rest) = just (ParseTree parsed-otherpunct-bar-36 ::' rest , 2)
len-dec-rewrite {- P14-} ((Id "P14") :: _::_(InputChar 'o') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'o'))) ::' rest , 2)
len-dec-rewrite {- P140-} ((Id "P140") :: _::_(InputChar 'β') rest) = just (ParseTree parsed-otherpunct-bar-37 ::' rest , 2)
len-dec-rewrite {- P141-} ((Id "P141") :: _::_(ParseTree parsed-otherpunct-bar-36) rest) = just (ParseTree parsed-otherpunct-bar-37 ::' rest , 2)
len-dec-rewrite {- P142-} ((Id "P142") :: _::_(InputChar 'ε') rest) = just (ParseTree parsed-otherpunct-bar-38 ::' rest , 2)
len-dec-rewrite {- P143-} ((Id "P143") :: _::_(ParseTree parsed-otherpunct-bar-37) rest) = just (ParseTree parsed-otherpunct-bar-38 ::' rest , 2)
len-dec-rewrite {- P144-} ((Id "P144") :: _::_(InputChar 'ρ') rest) = just (ParseTree parsed-otherpunct-bar-39 ::' rest , 2)
len-dec-rewrite {- P145-} ((Id "P145") :: _::_(ParseTree parsed-otherpunct-bar-38) rest) = just (ParseTree parsed-otherpunct-bar-39 ::' rest , 2)
len-dec-rewrite {- P146-} ((Id "P146") :: _::_(InputChar 'Λ') rest) = just (ParseTree parsed-otherpunct-bar-40 ::' rest , 2)
len-dec-rewrite {- P147-} ((Id "P147") :: _::_(ParseTree parsed-otherpunct-bar-39) rest) = just (ParseTree parsed-otherpunct-bar-40 ::' rest , 2)
len-dec-rewrite {- P148-} ((Id "P148") :: _::_(InputChar '?') rest) = just (ParseTree parsed-otherpunct-bar-41 ::' rest , 2)
len-dec-rewrite {- P149-} ((Id "P149") :: _::_(ParseTree parsed-otherpunct-bar-40) rest) = just (ParseTree parsed-otherpunct-bar-41 ::' rest , 2)
len-dec-rewrite {- P15-} ((Id "P15") :: _::_(InputChar 'p') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'p'))) ::' rest , 2)
len-dec-rewrite {- P150-} ((Id "P150") :: _::_(InputChar '⇒') rest) = just (ParseTree parsed-otherpunct-bar-42 ::' rest , 2)
len-dec-rewrite {- P151-} ((Id "P151") :: _::_(ParseTree parsed-otherpunct-bar-41) rest) = just (ParseTree parsed-otherpunct-bar-42 ::' rest , 2)
len-dec-rewrite {- P152-} ((Id "P152") :: _::_(InputChar '}') rest) = just (ParseTree parsed-otherpunct-bar-43 ::' rest , 2)
len-dec-rewrite {- P153-} ((Id "P153") :: _::_(ParseTree parsed-otherpunct-bar-42) rest) = just (ParseTree parsed-otherpunct-bar-43 ::' rest , 2)
len-dec-rewrite {- P154-} ((Id "P154") :: _::_(InputChar '{') rest) = just (ParseTree parsed-otherpunct-bar-44 ::' rest , 2)
len-dec-rewrite {- P155-} ((Id "P155") :: _::_(ParseTree parsed-otherpunct-bar-43) rest) = just (ParseTree parsed-otherpunct-bar-44 ::' rest , 2)
len-dec-rewrite {- P156-} ((Id "P156") :: _::_(InputChar '!') rest) = just (ParseTree parsed-otherpunct-bar-45 ::' rest , 2)
len-dec-rewrite {- P157-} ((Id "P157") :: _::_(ParseTree parsed-otherpunct-bar-44) rest) = just (ParseTree parsed-otherpunct-bar-45 ::' rest , 2)
len-dec-rewrite {- P158-} ((Id "P158") :: _::_(InputChar ',') rest) = just (ParseTree parsed-otherpunct-bar-46 ::' rest , 2)
len-dec-rewrite {- P159-} ((Id "P159") :: _::_(ParseTree parsed-otherpunct-bar-45) rest) = just (ParseTree parsed-otherpunct-bar-46 ::' rest , 2)
len-dec-rewrite {- P16-} ((Id "P16") :: _::_(InputChar 'q') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'q'))) ::' rest , 2)
len-dec-rewrite {- P160-} ((Id "P160") :: _::_(InputChar ']') rest) = just (ParseTree parsed-otherpunct-bar-47 ::' rest , 2)
len-dec-rewrite {- P161-} ((Id "P161") :: _::_(ParseTree parsed-otherpunct-bar-46) rest) = just (ParseTree parsed-otherpunct-bar-47 ::' rest , 2)
len-dec-rewrite {- P162-} ((Id "P162") :: _::_(InputChar '[') rest) = just (ParseTree parsed-otherpunct-bar-48 ::' rest , 2)
len-dec-rewrite {- P163-} ((Id "P163") :: _::_(ParseTree parsed-otherpunct-bar-47) rest) = just (ParseTree parsed-otherpunct-bar-48 ::' rest , 2)
len-dec-rewrite {- P164-} ((Id "P164") :: _::_(InputChar '.') rest) = just (ParseTree parsed-otherpunct-bar-49 ::' rest , 2)
len-dec-rewrite {- P165-} ((Id "P165") :: _::_(ParseTree parsed-otherpunct-bar-48) rest) = just (ParseTree parsed-otherpunct-bar-49 ::' rest , 2)
len-dec-rewrite {- P166-} ((Id "P166") :: _::_(InputChar ':') rest) = just (ParseTree parsed-otherpunct-bar-50 ::' rest , 2)
len-dec-rewrite {- P167-} ((Id "P167") :: _::_(ParseTree parsed-otherpunct-bar-49) rest) = just (ParseTree parsed-otherpunct-bar-50 ::' rest , 2)
len-dec-rewrite {- P168-} ((Id "P168") :: _::_(InputChar ')') rest) = just (ParseTree parsed-otherpunct-bar-51 ::' rest , 2)
len-dec-rewrite {- P169-} ((Id "P169") :: _::_(ParseTree parsed-otherpunct-bar-50) rest) = just (ParseTree parsed-otherpunct-bar-51 ::' rest , 2)
len-dec-rewrite {- P17-} ((Id "P17") :: _::_(InputChar 'r') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'r'))) ::' rest , 2)
len-dec-rewrite {- P170-} ((Id "P170") :: _::_(InputChar '(') rest) = just (ParseTree parsed-otherpunct-bar-52 ::' rest , 2)
len-dec-rewrite {- P171-} ((Id "P171") :: _::_(ParseTree parsed-otherpunct-bar-51) rest) = just (ParseTree parsed-otherpunct-bar-52 ::' rest , 2)
len-dec-rewrite {- P172-} ((Id "P172") :: _::_(InputChar '●') rest) = just (ParseTree parsed-otherpunct-bar-53 ::' rest , 2)
len-dec-rewrite {- P173-} ((Id "P173") :: _::_(ParseTree parsed-otherpunct-bar-52) rest) = just (ParseTree parsed-otherpunct-bar-53 ::' rest , 2)
len-dec-rewrite {- P174-} ((Id "P174") :: _::_(InputChar '↑') rest) = just (ParseTree parsed-otherpunct-bar-54 ::' rest , 2)
len-dec-rewrite {- P175-} ((Id "P175") :: _::_(ParseTree parsed-otherpunct-bar-53) rest) = just (ParseTree parsed-otherpunct-bar-54 ::' rest , 2)
len-dec-rewrite {- P176-} ((Id "P176") :: _::_(InputChar '➾') rest) = just (ParseTree parsed-otherpunct-bar-55 ::' rest , 2)
len-dec-rewrite {- P177-} ((Id "P177") :: _::_(ParseTree parsed-otherpunct-bar-54) rest) = just (ParseTree parsed-otherpunct-bar-55 ::' rest , 2)
len-dec-rewrite {- P178-} ((Id "P178") :: _::_(InputChar '➔') rest) = just (ParseTree parsed-otherpunct-bar-56 ::' rest , 2)
len-dec-rewrite {- P179-} ((Id "P179") :: _::_(ParseTree parsed-otherpunct-bar-55) rest) = just (ParseTree parsed-otherpunct-bar-56 ::' rest , 2)
len-dec-rewrite {- P18-} ((Id "P18") :: _::_(InputChar 's') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 's'))) ::' rest , 2)
len-dec-rewrite {- P180-} ((Id "P180") :: _::_(InputChar '⇐') rest) = just (ParseTree parsed-otherpunct-bar-57 ::' rest , 2)
len-dec-rewrite {- P181-} ((Id "P181") :: _::_(ParseTree parsed-otherpunct-bar-56) rest) = just (ParseTree parsed-otherpunct-bar-57 ::' rest , 2)
len-dec-rewrite {- P182-} ((Id "P182") :: _::_(InputChar '·') rest) = just (ParseTree parsed-otherpunct-bar-58 ::' rest , 2)
len-dec-rewrite {- P183-} ((Id "P183") :: _::_(ParseTree parsed-otherpunct-bar-57) rest) = just (ParseTree parsed-otherpunct-bar-58 ::' rest , 2)
len-dec-rewrite {- P184-} ((Id "P184") :: _::_(InputChar '☆') rest) = just (ParseTree parsed-otherpunct-bar-59 ::' rest , 2)
len-dec-rewrite {- P185-} ((Id "P185") :: _::_(ParseTree parsed-otherpunct-bar-58) rest) = just (ParseTree parsed-otherpunct-bar-59 ::' rest , 2)
len-dec-rewrite {- P186-} ((Id "P186") :: _::_(InputChar '★') rest) = just (ParseTree parsed-otherpunct-bar-60 ::' rest , 2)
len-dec-rewrite {- P187-} ((Id "P187") :: _::_(ParseTree parsed-otherpunct-bar-59) rest) = just (ParseTree parsed-otherpunct-bar-60 ::' rest , 2)
len-dec-rewrite {- P188-} ((Id "P188") :: _::_(InputChar 'π') rest) = just (ParseTree parsed-otherpunct-bar-61 ::' rest , 2)
len-dec-rewrite {- P189-} ((Id "P189") :: _::_(ParseTree parsed-otherpunct-bar-60) rest) = just (ParseTree parsed-otherpunct-bar-61 ::' rest , 2)
len-dec-rewrite {- P19-} ((Id "P19") :: _::_(InputChar 't') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 't'))) ::' rest , 2)
len-dec-rewrite {- P190-} ((Id "P190") :: _::_(InputChar '∀') rest) = just (ParseTree parsed-otherpunct-bar-62 ::' rest , 2)
len-dec-rewrite {- P191-} ((Id "P191") :: _::_(ParseTree parsed-otherpunct-bar-61) rest) = just (ParseTree parsed-otherpunct-bar-62 ::' rest , 2)
len-dec-rewrite {- P192-} ((Id "P192") :: _::_(InputChar 'λ') rest) = just (ParseTree parsed-otherpunct-bar-63 ::' rest , 2)
len-dec-rewrite {- P193-} ((Id "P193") :: _::_(ParseTree parsed-otherpunct-bar-62) rest) = just (ParseTree parsed-otherpunct-bar-63 ::' rest , 2)
len-dec-rewrite {- P194-} ((Id "P194") :: _::_(InputChar 'ι') rest) = just (ParseTree parsed-otherpunct-bar-64 ::' rest , 2)
len-dec-rewrite {- P195-} ((Id "P195") :: _::_(ParseTree parsed-otherpunct-bar-63) rest) = just (ParseTree parsed-otherpunct-bar-64 ::' rest , 2)
len-dec-rewrite {- P196-} ((Id "P196") :: _::_(InputChar 'Π') rest) = just (ParseTree parsed-otherpunct-bar-65 ::' rest , 2)
len-dec-rewrite {- P197-} ((Id "P197") :: _::_(ParseTree parsed-otherpunct-bar-64) rest) = just (ParseTree parsed-otherpunct-bar-65 ::' rest , 2)
len-dec-rewrite {- P198-} ((Id "P198") :: _::_(InputChar '□') rest) = just (ParseTree parsed-otherpunct-bar-66 ::' rest , 2)
len-dec-rewrite {- P199-} ((Id "P199") :: _::_(ParseTree parsed-otherpunct-bar-65) rest) = just (ParseTree parsed-otherpunct-bar-66 ::' rest , 2)
len-dec-rewrite {- P2-} ((Id "P2") :: _::_(InputChar 'c') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'c'))) ::' rest , 2)
len-dec-rewrite {- P20-} ((Id "P20") :: _::_(InputChar 'u') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'u'))) ::' rest , 2)
len-dec-rewrite {- P200-} ((Id "P200") :: _::_(InputChar '|') rest) = just (ParseTree parsed-otherpunct-bar-67 ::' rest , 2)
len-dec-rewrite {- P201-} ((Id "P201") :: _::_(ParseTree parsed-otherpunct-bar-66) rest) = just (ParseTree parsed-otherpunct-bar-67 ::' rest , 2)
len-dec-rewrite {- P202-} ((Id "P202") :: _::_(ParseTree parsed-otherpunct-bar-67) rest) = just (ParseTree parsed-otherpunct ::' rest , 2)
len-dec-rewrite {- P203-} ((Id "P203") :: _::_(InputChar '%') rest) = just (ParseTree parsed-anychar-bar-68 ::' rest , 2)
len-dec-rewrite {- P204-} ((Id "P204") :: _::_(ParseTree parsed-otherpunct) rest) = just (ParseTree parsed-anychar-bar-68 ::' rest , 2)
len-dec-rewrite {- P205-} ((Id "P205") :: _::_(InputChar ' ') rest) = just (ParseTree parsed-anychar-bar-69 ::' rest , 2)
len-dec-rewrite {- P206-} ((Id "P206") :: _::_(ParseTree parsed-anychar-bar-68) rest) = just (ParseTree parsed-anychar-bar-69 ::' rest , 2)
len-dec-rewrite {- P207-} ((Id "P207") :: _::_(InputChar '\t') rest) = just (ParseTree parsed-anychar-bar-70 ::' rest , 2)
len-dec-rewrite {- P208-} ((Id "P208") :: _::_(ParseTree parsed-anychar-bar-69) rest) = just (ParseTree parsed-anychar-bar-70 ::' rest , 2)
len-dec-rewrite {- P209-} ((Id "P209") :: _::_(ParseTree (parsed-numpunct x0)) rest) = just (ParseTree parsed-anychar-bar-71 ::' rest , 2)
len-dec-rewrite {- P21-} ((Id "P21") :: _::_(InputChar 'v') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'v'))) ::' rest , 2)
len-dec-rewrite {- P210-} ((Id "P210") :: _::_(ParseTree parsed-anychar-bar-70) rest) = just (ParseTree parsed-anychar-bar-71 ::' rest , 2)
len-dec-rewrite {- P211-} ((Id "P211") :: _::_(ParseTree (parsed-alpha x0)) rest) = just (ParseTree parsed-anychar-bar-72 ::' rest , 2)
len-dec-rewrite {- P212-} ((Id "P212") :: _::_(ParseTree parsed-anychar-bar-71) rest) = just (ParseTree parsed-anychar-bar-72 ::' rest , 2)
len-dec-rewrite {- P213-} ((Id "P213") :: _::_(ParseTree parsed-anychar-bar-72) rest) = just (ParseTree parsed-anychar ::' rest , 2)
len-dec-rewrite {- P215-} ((Id "P215") :: (ParseTree parsed-anychar) :: _::_(ParseTree parsed-comment-star-73) rest) = just (ParseTree parsed-comment-star-73 ::' rest , 3)
len-dec-rewrite {- P216-} ((Id "P216") :: (InputChar '%') :: (ParseTree parsed-comment-star-73) :: _::_(InputChar '\n') rest) = just (ParseTree parsed-comment ::' rest , 4)
len-dec-rewrite {- P217-} ((Id "P217") :: _::_(InputChar ' ') rest) = just (ParseTree parsed-aws-bar-74 ::' rest , 2)
len-dec-rewrite {- P218-} ((Id "P218") :: _::_(ParseTree parsed-comment) rest) = just (ParseTree parsed-aws-bar-74 ::' rest , 2)
len-dec-rewrite {- P219-} ((Id "P219") :: _::_(InputChar '\t') rest) = just (ParseTree parsed-aws-bar-75 ::' rest , 2)
len-dec-rewrite {- P22-} ((Id "P22") :: _::_(InputChar 'w') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'w'))) ::' rest , 2)
len-dec-rewrite {- P220-} ((Id "P220") :: _::_(ParseTree parsed-aws-bar-74) rest) = just (ParseTree parsed-aws-bar-75 ::' rest , 2)
len-dec-rewrite {- P221-} ((Id "P221") :: _::_(InputChar '\n') rest) = just (ParseTree parsed-aws-bar-76 ::' rest , 2)
len-dec-rewrite {- P222-} ((Id "P222") :: _::_(ParseTree parsed-aws-bar-75) rest) = just (ParseTree parsed-aws-bar-76 ::' rest , 2)
len-dec-rewrite {- P223-} ((Id "P223") :: _::_(ParseTree parsed-aws-bar-76) rest) = just (ParseTree parsed-aws ::' rest , 2)
len-dec-rewrite {- P224-} ((Id "P224") :: _::_(ParseTree parsed-aws) rest) = just (ParseTree parsed-ws-plus-77 ::' rest , 2)
len-dec-rewrite {- P225-} ((Id "P225") :: (ParseTree parsed-aws) :: _::_(ParseTree parsed-ws-plus-77) rest) = just (ParseTree parsed-ws-plus-77 ::' rest , 3)
len-dec-rewrite {- P226-} ((Id "P226") :: _::_(ParseTree parsed-ws-plus-77) rest) = just (ParseTree parsed-ws ::' rest , 2)
len-dec-rewrite {- P228-} ((Id "P228") :: (ParseTree parsed-aws) :: _::_(ParseTree parsed-ows-star-78) rest) = just (ParseTree parsed-ows-star-78 ::' rest , 3)
len-dec-rewrite {- P229-} ((Id "P229") :: _::_(ParseTree parsed-ows-star-78) rest) = just (ParseTree parsed-ows ::' rest , 2)
len-dec-rewrite {- P23-} ((Id "P23") :: _::_(InputChar 'x') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'x'))) ::' rest , 2)
len-dec-rewrite {- P24-} ((Id "P24") :: _::_(InputChar 'y') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'y'))) ::' rest , 2)
len-dec-rewrite {- P25-} ((Id "P25") :: _::_(InputChar 'z') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'z'))) ::' rest , 2)
len-dec-rewrite {- P26-} ((Id "P26") :: _::_(InputChar 'A') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'A'))) ::' rest , 2)
len-dec-rewrite {- P27-} ((Id "P27") :: _::_(InputChar 'B') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'B'))) ::' rest , 2)
len-dec-rewrite {- P28-} ((Id "P28") :: _::_(InputChar 'C') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'C'))) ::' rest , 2)
len-dec-rewrite {- P29-} ((Id "P29") :: _::_(InputChar 'D') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'D'))) ::' rest , 2)
len-dec-rewrite {- P3-} ((Id "P3") :: _::_(InputChar 'd') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'd'))) ::' rest , 2)
len-dec-rewrite {- P30-} ((Id "P30") :: _::_(InputChar 'E') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'E'))) ::' rest , 2)
len-dec-rewrite {- P31-} ((Id "P31") :: _::_(InputChar 'F') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'F'))) ::' rest , 2)
len-dec-rewrite {- P32-} ((Id "P32") :: _::_(InputChar 'G') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'G'))) ::' rest , 2)
len-dec-rewrite {- P33-} ((Id "P33") :: _::_(InputChar 'H') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'H'))) ::' rest , 2)
len-dec-rewrite {- P34-} ((Id "P34") :: _::_(InputChar 'I') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'I'))) ::' rest , 2)
len-dec-rewrite {- P35-} ((Id "P35") :: _::_(InputChar 'J') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'J'))) ::' rest , 2)
len-dec-rewrite {- P36-} ((Id "P36") :: _::_(InputChar 'K') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'K'))) ::' rest , 2)
len-dec-rewrite {- P37-} ((Id "P37") :: _::_(InputChar 'L') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'L'))) ::' rest , 2)
len-dec-rewrite {- P38-} ((Id "P38") :: _::_(InputChar 'M') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'M'))) ::' rest , 2)
len-dec-rewrite {- P39-} ((Id "P39") :: _::_(InputChar 'N') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'N'))) ::' rest , 2)
len-dec-rewrite {- P4-} ((Id "P4") :: _::_(InputChar 'e') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'e'))) ::' rest , 2)
len-dec-rewrite {- P40-} ((Id "P40") :: _::_(InputChar 'O') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'O'))) ::' rest , 2)
len-dec-rewrite {- P41-} ((Id "P41") :: _::_(InputChar 'P') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'P'))) ::' rest , 2)
len-dec-rewrite {- P42-} ((Id "P42") :: _::_(InputChar 'Q') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'Q'))) ::' rest , 2)
len-dec-rewrite {- P43-} ((Id "P43") :: _::_(InputChar 'R') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'R'))) ::' rest , 2)
len-dec-rewrite {- P44-} ((Id "P44") :: _::_(InputChar 'S') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'S'))) ::' rest , 2)
len-dec-rewrite {- P45-} ((Id "P45") :: _::_(InputChar 'T') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'T'))) ::' rest , 2)
len-dec-rewrite {- P46-} ((Id "P46") :: _::_(InputChar 'U') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'U'))) ::' rest , 2)
len-dec-rewrite {- P47-} ((Id "P47") :: _::_(InputChar 'V') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'V'))) ::' rest , 2)
len-dec-rewrite {- P48-} ((Id "P48") :: _::_(InputChar 'W') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'W'))) ::' rest , 2)
len-dec-rewrite {- P49-} ((Id "P49") :: _::_(InputChar 'X') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'X'))) ::' rest , 2)
len-dec-rewrite {- P5-} ((Id "P5") :: _::_(InputChar 'f') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'f'))) ::' rest , 2)
len-dec-rewrite {- P50-} ((Id "P50") :: _::_(InputChar 'Y') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'Y'))) ::' rest , 2)
len-dec-rewrite {- P51-} ((Id "P51") :: _::_(InputChar 'Z') rest) = just (ParseTree (parsed-alpha-range-2 (string-append 0 (char-to-string 'Z'))) ::' rest , 2)
len-dec-rewrite {- P52-} ((Id "P52") :: _::_(ParseTree (parsed-alpha-range-1 x0)) rest) = just (ParseTree (parsed-alpha-bar-3 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P53-} ((Id "P53") :: _::_(ParseTree (parsed-alpha-range-2 x0)) rest) = just (ParseTree (parsed-alpha-bar-3 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P54-} ((Id "P54") :: _::_(ParseTree (parsed-alpha-bar-3 x0)) rest) = just (ParseTree (parsed-alpha (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P55-} ((Id "P55") :: _::_(InputChar '0') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '0'))) ::' rest , 2)
len-dec-rewrite {- P56-} ((Id "P56") :: _::_(InputChar '1') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '1'))) ::' rest , 2)
len-dec-rewrite {- P57-} ((Id "P57") :: _::_(InputChar '2') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '2'))) ::' rest , 2)
len-dec-rewrite {- P58-} ((Id "P58") :: _::_(InputChar '3') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '3'))) ::' rest , 2)
len-dec-rewrite {- P59-} ((Id "P59") :: _::_(InputChar '4') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '4'))) ::' rest , 2)
len-dec-rewrite {- P6-} ((Id "P6") :: _::_(InputChar 'g') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'g'))) ::' rest , 2)
len-dec-rewrite {- P60-} ((Id "P60") :: _::_(InputChar '5') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '5'))) ::' rest , 2)
len-dec-rewrite {- P61-} ((Id "P61") :: _::_(InputChar '6') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '6'))) ::' rest , 2)
len-dec-rewrite {- P62-} ((Id "P62") :: _::_(InputChar '7') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '7'))) ::' rest , 2)
len-dec-rewrite {- P63-} ((Id "P63") :: _::_(InputChar '8') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '8'))) ::' rest , 2)
len-dec-rewrite {- P64-} ((Id "P64") :: _::_(InputChar '9') rest) = just (ParseTree (parsed-numone-range-4 (string-append 0 (char-to-string '9'))) ::' rest , 2)
len-dec-rewrite {- P65-} ((Id "P65") :: _::_(ParseTree (parsed-numone-range-4 x0)) rest) = just (ParseTree (parsed-numone (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P66-} ((Id "P66") :: _::_(ParseTree (parsed-numone x0)) rest) = just (ParseTree (parsed-num-plus-5 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P67-} ((Id "P67") :: (ParseTree (parsed-numone x0)) :: _::_(ParseTree (parsed-num-plus-5 x1)) rest) = just (ParseTree (parsed-num-plus-5 (string-append 1 x0 x1)) ::' rest , 3)
len-dec-rewrite {- P68-} ((Id "P68") :: _::_(ParseTree (parsed-num-plus-5 x0)) rest) = just (ParseTree (parsed-num (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P69-} ((Id "P69") :: _::_(InputChar '#') rest) = just (ParseTree (parsed-numpunct-bar-6 (string-append 0 (char-to-string '#'))) ::' rest , 2)
len-dec-rewrite {- P7-} ((Id "P7") :: _::_(InputChar 'h') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'h'))) ::' rest , 2)
len-dec-rewrite {- P70-} ((Id "P70") :: _::_(InputChar '_') rest) = just (ParseTree (parsed-numpunct-bar-6 (string-append 0 (char-to-string '_'))) ::' rest , 2)
len-dec-rewrite {- P71-} ((Id "P71") :: _::_(InputChar '~') rest) = just (ParseTree (parsed-numpunct-bar-7 (string-append 0 (char-to-string '~'))) ::' rest , 2)
len-dec-rewrite {- P72-} ((Id "P72") :: _::_(ParseTree (parsed-numpunct-bar-6 x0)) rest) = just (ParseTree (parsed-numpunct-bar-7 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P73-} ((Id "P73") :: _::_(InputChar '-') rest) = just (ParseTree (parsed-numpunct-bar-8 (string-append 0 (char-to-string '-'))) ::' rest , 2)
len-dec-rewrite {- P74-} ((Id "P74") :: _::_(ParseTree (parsed-numpunct-bar-7 x0)) rest) = just (ParseTree (parsed-numpunct-bar-8 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P75-} ((Id "P75") :: _::_(InputChar '\'') rest) = just (ParseTree (parsed-numpunct-bar-9 (string-append 0 (char-to-string '\''))) ::' rest , 2)
len-dec-rewrite {- P76-} ((Id "P76") :: _::_(ParseTree (parsed-numpunct-bar-8 x0)) rest) = just (ParseTree (parsed-numpunct-bar-9 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P77-} ((Id "P77") :: _::_(ParseTree (parsed-numone x0)) rest) = just (ParseTree (parsed-numpunct-bar-10 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P78-} ((Id "P78") :: _::_(ParseTree (parsed-numpunct-bar-9 x0)) rest) = just (ParseTree (parsed-numpunct-bar-10 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P79-} ((Id "P79") :: _::_(ParseTree (parsed-numpunct-bar-10 x0)) rest) = just (ParseTree (parsed-numpunct (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P8-} ((Id "P8") :: _::_(InputChar 'i') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'i'))) ::' rest , 2)
len-dec-rewrite {- P80-} ((Id "P80") :: _::_(ParseTree (parsed-var x0)) rest) = just (ParseTree (parsed-qvar (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P81-} ((Id "P81") :: (ParseTree (parsed-var x0)) :: (InputChar '.') :: _::_(ParseTree (parsed-qvar x1)) rest) = just (ParseTree (parsed-qvar (string-append 2 x0 (char-to-string '.') x1)) ::' rest , 4)
len-dec-rewrite {- P82-} ((Id "P82") :: _::_(ParseTree (parsed-alpha x0)) rest) = just (ParseTree (parsed-var-bar-11 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P83-} ((Id "P83") :: _::_(ParseTree (parsed-numpunct x0)) rest) = just (ParseTree (parsed-var-bar-11 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P85-} ((Id "P85") :: (ParseTree (parsed-var-bar-11 x0)) :: _::_(ParseTree (parsed-var-star-12 x1)) rest) = just (ParseTree (parsed-var-star-12 (string-append 1 x0 x1)) ::' rest , 3)
len-dec-rewrite {- P86-} ((Id "P86") :: (ParseTree (parsed-alpha x0)) :: _::_(ParseTree (parsed-var-star-12 x1)) rest) = just (ParseTree (parsed-var (string-append 1 x0 x1)) ::' rest , 3)
len-dec-rewrite {- P87-} ((Id "P87") :: _::_(InputChar '_') rest) = just (ParseTree (parsed-bvar-bar-13 (string-append 0 (char-to-string '_'))) ::' rest , 2)
len-dec-rewrite {- P88-} ((Id "P88") :: _::_(ParseTree (parsed-var x0)) rest) = just (ParseTree (parsed-bvar-bar-13 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P89-} ((Id "P89") :: _::_(ParseTree (parsed-bvar-bar-13 x0)) rest) = just (ParseTree (parsed-bvar (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P9-} ((Id "P9") :: _::_(InputChar 'j') rest) = just (ParseTree (parsed-alpha-range-1 (string-append 0 (char-to-string 'j'))) ::' rest , 2)
len-dec-rewrite {- P90-} ((Id "P90") :: (InputChar '.') :: (InputChar '.') :: _::_(InputChar '/') rest) = just (ParseTree (parsed-fpth-plus-14 (string-append 2 (char-to-string '.') (char-to-string '.') (char-to-string '/'))) ::' rest , 4)
len-dec-rewrite {- P91-} ((Id "P91") :: (InputChar '.') :: (InputChar '.') :: (InputChar '/') :: _::_(ParseTree (parsed-fpth-plus-14 x0)) rest) = just (ParseTree (parsed-fpth-plus-14 (string-append 3 (char-to-string '.') (char-to-string '.') (char-to-string '/') x0)) ::' rest , 5)
len-dec-rewrite {- P92-} ((Id "P92") :: _::_(ParseTree (parsed-alpha x0)) rest) = just (ParseTree (parsed-fpth-bar-15 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P93-} ((Id "P93") :: _::_(ParseTree (parsed-fpth-plus-14 x0)) rest) = just (ParseTree (parsed-fpth-bar-15 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P94-} ((Id "P94") :: _::_(ParseTree (parsed-numpunct x0)) rest) = just (ParseTree (parsed-fpth-bar-16 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P95-} ((Id "P95") :: _::_(InputChar '/') rest) = just (ParseTree (parsed-fpth-bar-16 (string-append 0 (char-to-string '/'))) ::' rest , 2)
len-dec-rewrite {- P96-} ((Id "P96") :: _::_(ParseTree (parsed-alpha x0)) rest) = just (ParseTree (parsed-fpth-bar-17 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P97-} ((Id "P97") :: _::_(ParseTree (parsed-fpth-bar-16 x0)) rest) = just (ParseTree (parsed-fpth-bar-17 (string-append 0 x0)) ::' rest , 2)
len-dec-rewrite {- P99-} ((Id "P99") :: (ParseTree (parsed-fpth-bar-17 x0)) :: _::_(ParseTree (parsed-fpth-star-18 x1)) rest) = just (ParseTree (parsed-fpth-star-18 (string-append 1 x0 x1)) ::' rest , 3)
len-dec-rewrite {- ParamsCons-} ((Id "ParamsCons") :: (ParseTree parsed-ows) :: (ParseTree (parsed-decl x0)) :: _::_(ParseTree (parsed-params x1)) rest) = just (ParseTree (parsed-params (norm-params (ParamsCons x0 x1))) ::' rest , 4)
len-dec-rewrite {- Parens-} ((Id "Parens") :: (ParseTree (parsed-posinfo x0)) :: (InputChar '(') :: (ParseTree parsed-ows) :: (ParseTree (parsed-term x1)) :: (ParseTree parsed-ows) :: (InputChar ')') :: _::_(ParseTree (parsed-posinfo x2)) rest) = just (ParseTree (parsed-pterm (norm-term (Parens x0 x1 x2))) ::' rest , 8)
len-dec-rewrite {- Phi-} ((Id "Phi") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'φ') :: (ParseTree parsed-ows) :: (ParseTree (parsed-lterm x1)) :: (ParseTree parsed-ows) :: (InputChar '-') :: (ParseTree parsed-ows) :: (ParseTree (parsed-lterm x2)) :: (ParseTree parsed-ows) :: (InputChar '{') :: (ParseTree parsed-ows) :: (ParseTree (parsed-term x3)) :: (ParseTree parsed-ows) :: (InputChar '}') :: _::_(ParseTree (parsed-posinfo x4)) rest) = just (ParseTree (parsed-lterm (norm-term (Phi x0 x1 x2 x3 x4))) ::' rest , 16)
len-dec-rewrite {- Pi-} ((Id "Pi") :: _::_(InputChar 'Π') rest) = just (ParseTree (parsed-binder (norm-binder Pi)) ::' rest , 2)
len-dec-rewrite {- Rho-} ((Id "Rho") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree (parsed-rho x1)) :: (ParseTree parsed-ows) :: (ParseTree (parsed-lterm x2)) :: (ParseTree parsed-ows) :: (InputChar '-') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-lterm x3)) rest) = just (ParseTree (parsed-lterm (norm-term (Rho x0 x1 x2 x3))) ::' rest , 9)
len-dec-rewrite {- RhoPlain-} ((Id "RhoPlain") :: _::_(InputChar 'ρ') rest) = just (ParseTree (parsed-rho (norm-rho RhoPlain)) ::' rest , 2)
len-dec-rewrite {- RhoPlus-} ((Id "RhoPlus") :: (InputChar 'ρ') :: _::_(InputChar '+') rest) = just (ParseTree (parsed-rho (norm-rho RhoPlus)) ::' rest , 3)
len-dec-rewrite {- Right-} ((Id "Right") :: _::_(InputChar 'r') rest) = just (ParseTree (parsed-leftRight (norm-leftRight Right)) ::' rest , 2)
len-dec-rewrite {- Sigma-} ((Id "Sigma") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'ς') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-lterm x1)) rest) = just (ParseTree (parsed-lterm (norm-term (Sigma x0 x1))) ::' rest , 5)
len-dec-rewrite {- SomeClass-} ((Id "SomeClass") :: (ParseTree parsed-ows) :: (InputChar ':') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-tk x0)) rest) = just (ParseTree (parsed-optClass (norm-optClass (SomeClass x0))) ::' rest , 5)
len-dec-rewrite {- SomeOptAs-} ((Id "SomeOptAs") :: (ParseTree parsed-ows) :: (InputChar 'a') :: (InputChar 's') :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-var x0)) rest) = just (ParseTree (parsed-optAs (norm-optAs (SomeOptAs x0))) ::' rest , 6)
len-dec-rewrite {- SomeTerm-} ((Id "SomeTerm") :: (ParseTree parsed-ows) :: (InputChar '{') :: (ParseTree parsed-ows) :: (ParseTree (parsed-term x0)) :: (ParseTree parsed-ows) :: (InputChar '}') :: _::_(ParseTree (parsed-posinfo x1)) rest) = just (ParseTree (parsed-optTerm (norm-optTerm (SomeTerm x0 x1))) ::' rest , 8)
len-dec-rewrite {- SomeType-} ((Id "SomeType") :: (ParseTree parsed-ows) :: (InputChar ':') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-type x0)) rest) = just (ParseTree (parsed-optType (norm-optType (SomeType x0))) ::' rest , 5)
len-dec-rewrite {- Star-} ((Id "Star") :: (ParseTree (parsed-posinfo x0)) :: _::_(InputChar '★') rest) = just (ParseTree (parsed-kind (norm-kind (Star x0))) ::' rest , 3)
len-dec-rewrite {- TermArg-} ((Id "TermArg") :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-lterm x0)) rest) = just (ParseTree (parsed-arg (norm-arg (TermArg x0))) ::' rest , 3)
len-dec-rewrite {- Theta-} ((Id "Theta") :: (ParseTree (parsed-posinfo x0)) :: (ParseTree (parsed-theta x1)) :: (ParseTree parsed-ws) :: (ParseTree (parsed-lterm x2)) :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-lterms x3)) rest) = just (ParseTree (parsed-term (norm-term (Theta x0 x1 x2 x3))) ::' rest , 7)
len-dec-rewrite {- Tkk-} ((Id "Tkk") :: (ParseTree (parsed-kind x0)) :: _::_(Id "Tkk_end") rest) = just (ParseTree (parsed-tk (norm-tk (Tkk x0))) ::' rest , 3)
len-dec-rewrite {- Tkt-} ((Id "Tkt") :: _::_(ParseTree (parsed-type x0)) rest) = just (ParseTree (parsed-tk (norm-tk (Tkt x0))) ::' rest , 2)
len-dec-rewrite {- TpApp-} ((ParseTree (parsed-ltype x0)) :: (ParseTree parsed-ws) :: (InputChar '·') :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-atype x1)) rest) = just (ParseTree (parsed-ltype (norm-type (TpApp x0 x1))) ::' rest , 5)
len-dec-rewrite {- TpAppt-} ((ParseTree (parsed-ltype x0)) :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-lterm x1)) rest) = just (ParseTree (parsed-ltype (norm-type (TpAppt x0 x1))) ::' rest , 3)
len-dec-rewrite {- TpArrow-} ((Id "TpArrow") :: (ParseTree (parsed-ltype x0)) :: (ParseTree parsed-ows) :: (ParseTree (parsed-arrowtype x1)) :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-type x2)) rest) = just (ParseTree (parsed-type (norm-type (TpArrow x0 x1 x2))) ::' rest , 6)
len-dec-rewrite {- TpEq-} ((Id "TpEq") :: (ParseTree (parsed-term x0)) :: (ParseTree parsed-ows) :: (InputChar '≃') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-term x1)) rest) = just (ParseTree (parsed-type (norm-type (TpEq x0 x1))) ::' rest , 6)
len-dec-rewrite {- TpHole-} ((Id "TpHole") :: (ParseTree (parsed-posinfo x0)) :: _::_(InputChar '●') rest) = just (ParseTree (parsed-atype (norm-type (TpHole x0))) ::' rest , 3)
len-dec-rewrite {- TpLambda-} ((Id "TpLambda") :: (ParseTree (parsed-posinfo x0)) :: (InputChar 'λ') :: (ParseTree parsed-ows) :: (ParseTree (parsed-posinfo x1)) :: (ParseTree (parsed-bvar x2)) :: (ParseTree parsed-ows) :: (InputChar ':') :: (ParseTree parsed-ows) :: (ParseTree (parsed-tk x3)) :: (ParseTree parsed-ows) :: (InputChar '.') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-type x4)) rest) = just (ParseTree (parsed-type (norm-type (TpLambda x0 x1 x2 x3 x4))) ::' rest , 14)
len-dec-rewrite {- TpParens-} ((Id "TpParens") :: (ParseTree (parsed-posinfo x0)) :: (InputChar '(') :: (ParseTree parsed-ows) :: (ParseTree (parsed-type x1)) :: (ParseTree parsed-ows) :: (InputChar ')') :: _::_(ParseTree (parsed-posinfo x2)) rest) = just (ParseTree (parsed-atype (norm-type (TpParens x0 x1 x2))) ::' rest , 8)
len-dec-rewrite {- TpVar-} ((Id "TpVar") :: (ParseTree (parsed-posinfo x0)) :: _::_(ParseTree (parsed-qvar x1)) rest) = just (ParseTree (parsed-atype (norm-type (TpVar x0 x1))) ::' rest , 3)
len-dec-rewrite {- Type-} ((Id "Type") :: (ParseTree parsed-ows) :: (InputChar '◂') :: (ParseTree parsed-ows) :: _::_(ParseTree (parsed-type x0)) rest) = just (ParseTree (parsed-maybeCheckType (norm-maybeCheckType (Type x0))) ::' rest , 5)
len-dec-rewrite {- TypeArg-} ((Id "TypeArg") :: (ParseTree parsed-ows) :: (InputChar '·') :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-atype x0)) rest) = just (ParseTree (parsed-arg (norm-arg (TypeArg x0))) ::' rest , 5)
len-dec-rewrite {- UnerasedArrow-} ((Id "UnerasedArrow") :: _::_(InputChar '➔') rest) = just (ParseTree (parsed-arrowtype (norm-arrowtype UnerasedArrow)) ::' rest , 2)
len-dec-rewrite {- Var-} ((Id "Var") :: (ParseTree (parsed-posinfo x0)) :: _::_(ParseTree (parsed-qvar x1)) rest) = just (ParseTree (parsed-pterm (norm-term (Var x0 x1))) ::' rest , 3)
len-dec-rewrite {- VarsNext-} ((Id "VarsNext") :: (ParseTree (parsed-var x0)) :: (ParseTree parsed-ws) :: _::_(ParseTree (parsed-vars x1)) rest) = just (ParseTree (parsed-vars (norm-vars (VarsNext x0 x1))) ::' rest , 4)
len-dec-rewrite {- VarsStart-} ((Id "VarsStart") :: _::_(ParseTree (parsed-var x0)) rest) = just (ParseTree (parsed-vars (norm-vars (VarsStart x0))) ::' rest , 2)
len-dec-rewrite {- embed-} ((Id "embed") :: (ParseTree (parsed-aterm x0)) :: _::_(Id "embed_end") rest) = just (ParseTree (parsed-term x0) ::' rest , 3)
len-dec-rewrite {- embed-} ((Id "embed") :: _::_(ParseTree (parsed-lterm x0)) rest) = just (ParseTree (parsed-aterm x0) ::' rest , 2)
len-dec-rewrite {- embed-} ((Id "embed") :: (ParseTree (parsed-pterm x0)) :: _::_(Id "embed_end") rest) = just (ParseTree (parsed-lterm x0) ::' rest , 3)
len-dec-rewrite {- embed-} ((Id "embed") :: (ParseTree (parsed-ltype x0)) :: _::_(Id "embed_end") rest) = just (ParseTree (parsed-type x0) ::' rest , 3)
len-dec-rewrite {- embed-} ((Id "embed") :: _::_(ParseTree (parsed-atype x0)) rest) = just (ParseTree (parsed-ltype x0) ::' rest , 2)
len-dec-rewrite {- embed-} ((Id "embed") :: _::_(ParseTree (parsed-lliftingType x0)) rest) = just (ParseTree (parsed-liftingType x0) ::' rest , 2)
len-dec-rewrite {- Both-} (_::_(Id "Both") rest) = just (ParseTree (parsed-leftRight (norm-leftRight Both)) ::' rest , 1)
len-dec-rewrite {- CmdsStart-} (_::_(Id "CmdsStart") rest) = just (ParseTree (parsed-cmds (norm-cmds CmdsStart)) ::' rest , 1)
len-dec-rewrite {- EpsHnf-} (_::_(Id "EpsHnf") rest) = just (ParseTree (parsed-maybeMinus (norm-maybeMinus EpsHnf)) ::' rest , 1)
len-dec-rewrite {- ImportsStart-} (_::_(Id "ImportsStart") rest) = just (ParseTree (parsed-imports (norm-imports ImportsStart)) ::' rest , 1)
len-dec-rewrite {- NoAtype-} (_::_(Id "NoAtype") rest) = just (ParseTree (parsed-maybeAtype (norm-maybeAtype NoAtype)) ::' rest , 1)
len-dec-rewrite {- NoCheckType-} (_::_(Id "NoCheckType") rest) = just (ParseTree (parsed-maybeCheckType (norm-maybeCheckType NoCheckType)) ::' rest , 1)
len-dec-rewrite {- NoClass-} (_::_(Id "NoClass") rest) = just (ParseTree (parsed-optClass (norm-optClass NoClass)) ::' rest , 1)
len-dec-rewrite {- NoOptAs-} (_::_(Id "NoOptAs") rest) = just (ParseTree (parsed-optAs (norm-optAs NoOptAs)) ::' rest , 1)
len-dec-rewrite {- NoTerm-} (_::_(Id "NoTerm") rest) = just (ParseTree (parsed-optTerm (norm-optTerm NoTerm)) ::' rest , 1)
len-dec-rewrite {- NoType-} (_::_(Id "NoType") rest) = just (ParseTree (parsed-optType (norm-optType NoType)) ::' rest , 1)
len-dec-rewrite {- NotErased-} (_::_(Id "NotErased") rest) = just (ParseTree (parsed-maybeErased (norm-maybeErased NotErased)) ::' rest , 1)
len-dec-rewrite {- P105-} (_::_(Id "P105") rest) = just (ParseTree (parsed-kvar-star-20 empty-string) ::' rest , 1)
len-dec-rewrite {- P214-} (_::_(Id "P214") rest) = just (ParseTree parsed-comment-star-73 ::' rest , 1)
len-dec-rewrite {- P227-} (_::_(Id "P227") rest) = just (ParseTree parsed-ows-star-78 ::' rest , 1)
len-dec-rewrite {- P84-} (_::_(Id "P84") rest) = just (ParseTree (parsed-var-star-12 empty-string) ::' rest , 1)
len-dec-rewrite {- P98-} (_::_(Id "P98") rest) = just (ParseTree (parsed-fpth-star-18 empty-string) ::' rest , 1)
len-dec-rewrite {- ParamsNil-} (_::_(Id "ParamsNil") rest) = just (ParseTree (parsed-params (norm-params ParamsNil)) ::' rest , 1)
len-dec-rewrite {- Posinfo-} (_::_(Posinfo n) rest) = just (ParseTree (parsed-posinfo (ℕ-to-string n)) ::' rest , 1)
len-dec-rewrite x = nothing
rrs : rewriteRules
rrs = record { len-dec-rewrite = len-dec-rewrite }
|
(* Title: JinjaThreads/JVM/JVMExec.thy
Author: Cornelia Pusch, Gerwin Klein, Andreas Lochbihler
*)
section \<open>Program Execution in the JVM\<close>
theory JVMExec
imports
  JVMExecInstr
  JVMExceptions
  "../Common/StartConfig"
begin
abbreviation instrs_of :: "'addr jvm_prog \<Rightarrow> cname \<Rightarrow> mname \<Rightarrow> 'addr instr list"
where "instrs_of P C M == fst(snd(snd(the(snd(snd(snd(method P C M)))))))"
subsection "single step execution"
context JVM_heap_base begin
fun exception_step :: "'addr jvm_prog \<Rightarrow> 'addr \<Rightarrow> 'heap \<Rightarrow> 'addr frame \<Rightarrow> 'addr frame list \<Rightarrow> ('addr, 'heap) jvm_state"
where
  "exception_step P a h (stk, loc, C, M, pc) frs =
   (case match_ex_table P (cname_of h a) pc (ex_table_of P C M) of
      None \<Rightarrow> (\<lfloor>a\<rfloor>, h, frs)
    | Some (pc', d) \<Rightarrow> (None, h, (Addr a # drop (size stk - d) stk, loc, C, M, pc') # frs))"
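text \<open>
  \<open>exception_step\<close> looks up a handler for the exception at address \<open>a\<close> in the
  exception table of the topmost frame. If no entry matches, the frame is discarded
  and the exception propagates to the remaining call stack; otherwise the exception
  flag is cleared, the operand stack is cut down to \<open>d\<close> entries with \<open>Addr a\<close>
  pushed on top, and execution continues at the handler address \<open>pc'\<close>.
\<close>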
fun exec :: "'addr jvm_prog \<Rightarrow> 'thread_id \<Rightarrow> ('addr, 'heap) jvm_state \<Rightarrow> ('addr, 'thread_id, 'heap) jvm_ta_state set" where
"exec P t (xcp, h, []) = {}"
| "exec P t (None, h, (stk, loc, C, M, pc) # frs) = exec_instr (instrs_of P C M ! pc) P t h stk loc C M pc frs"
| "exec P t (\<lfloor>a\<rfloor>, h, fr # frs) = {(\<epsilon>, exception_step P a h fr frs)}"
subsection "relational view"
inductive exec_1 ::
"'addr jvm_prog \<Rightarrow> 'thread_id \<Rightarrow> ('addr, 'heap) jvm_state
\<Rightarrow> ('addr, 'thread_id, 'heap) jvm_thread_action \<Rightarrow> ('addr, 'heap) jvm_state \<Rightarrow> bool"
("_,_ \<turnstile>/ _ -_-jvm\<rightarrow>/ _" [61,0,61,0,61] 60)
for P :: "'addr jvm_prog" and t :: 'thread_id
where
exec_1I:
"(ta, \<sigma>') \<in> exec P t \<sigma> \<Longrightarrow> P,t \<turnstile> \<sigma> -ta-jvm\<rightarrow> \<sigma>'"
lemma exec_1_iff:
"P,t \<turnstile> \<sigma> -ta-jvm\<rightarrow> \<sigma>' \<longleftrightarrow> (ta, \<sigma>') \<in> exec P t \<sigma>"
by(auto intro: exec_1I elim: exec_1.cases)
end
text \<open>
  The start configuration of the JVM: in the start heap, we call a
  method \<open>m\<close> of class \<open>C\<close> in program \<open>P\<close> with parameters @{term "vs"}. The
  \<open>this\<close> pointer of the frame is set to \<open>Null\<close> to simulate
  a static method invocation. A small concrete instance is given after the
  definition below.
\<close>
abbreviation JVM_local_start ::
"cname \<Rightarrow> mname \<Rightarrow> ty list \<Rightarrow> ty \<Rightarrow> 'addr jvm_method \<Rightarrow> 'addr val list
\<Rightarrow> 'addr jvm_thread_state"
where
"JVM_local_start \<equiv>
\<lambda>C M Ts T (mxs, mxl0, b) vs.
(None, [([], Null # vs @ replicate mxl0 undefined_value, C, M, 0)])"
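text \<open>
  As a small illustration with hypothetical arguments: for a method with
  \<open>mxl0 = 2\<close> local registers and parameter values \<open>v1\<close> and \<open>v2\<close>,
  \<open>JVM_local_start\<close> yields the thread state
  \<open>(None, [([], [Null, v1, v2, undefined_value, undefined_value], C, M, 0)])\<close>:
  no pending exception and a single frame with an empty operand stack, \<open>Null\<close>
  in the \<open>this\<close> position, the parameter values, \<open>mxl0\<close> dummy registers, and
  program counter \<open>0\<close>.
\<close>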
context JVM_heap_base begin
abbreviation JVM_start_state ::
"'addr jvm_prog \<Rightarrow> cname \<Rightarrow> mname \<Rightarrow> 'addr val list \<Rightarrow> ('addr,'thread_id,'addr jvm_thread_state,'heap,'addr) state"
where
"JVM_start_state \<equiv> start_state JVM_local_start"
definition JVM_start_state' :: "'addr jvm_prog \<Rightarrow> cname \<Rightarrow> mname \<Rightarrow> 'addr val list \<Rightarrow> ('addr, 'heap) jvm_state"
where
  "JVM_start_state' P C M vs \<equiv>
   let (D, Ts, T, meth) = method P C M;
       (mxs, mxl0, ins, xt) = the meth
   in (None, start_heap, [([], Null # vs @ replicate mxl0 undefined_value, D, M, 0)])"
end
end
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
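(* Presumably the Thakkar gradient-corrected kinetic-energy functional:
   thakkar_f0 and thakkar_f1 together form the enhancement factor thakkar_f(x),
   which gga_kinetic applies to the spin-resolved reduced gradients xs0 and xs1. *)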
thakkar_f0 := x -> 1 + 0.0055*x^2/(1 + 0.0253*x*arcsinh(x)):
thakkar_f1 := x -> -0.072*x/(1 + 2*4^(1/3)*x):
thakkar_f := x -> thakkar_f0(x) + thakkar_f1(x):
f := (rs, zeta, xt, xs0, xs1) -> gga_kinetic(thakkar_f, rs, zeta, xs0, xs1):
|
postulate
  U V W X Y Z : Set
  u : U
  v : V
  w : W
  x : X
  y : Y
  z : Z

module Top (u : U) where

  module A (v : V) where

    module M (w : W) where

      module O (x : X) where

      postulate O : X

    postulate O : X

  module B (y : Y) where

    open A public

module Test0 where

  module C = Top.B u y
  module D = C.M.O v w x

  O₁ : X
  O₁ = C.M.O v w

  O₂ : X
  O₂ = C.O v

module Test1 where

  module C (i g n o r i n g m e : Z) = Top.B u y
  module D = C.M.O z z z z z z z z z z v w x

  O : X
  O = C.M.O z z z z z z z z z z v w

module Test2 where

  module C (y : Y) = Top.B u y
  module D = C.M.O y v w x

  O : X
  O = C.M.O y v w

module Test3 where

  module C (z : Z) = Top.B u y
  module D = C.M.O z v w x
|