postulate
A : Set
P : A → Set
record ΣAP : Set where
no-eta-equality
field
fst : A
snd : P fst
open ΣAP
test : (x : ΣAP) → P (fst x)
test x = snd {!!}
|
[STATEMENT]
lemma AsmUN:
"(\<Union>Z. {(P Z, p, Q Z,A Z)}) \<subseteq> \<Theta>
\<Longrightarrow>
\<forall>Z. \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F\<^esub> (P Z) (Call p) (Q Z),(A Z)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Union>Z. {(P Z, p, Q Z, A Z)}) \<subseteq> \<Theta> \<Longrightarrow> \<forall>Z. \<Gamma>,\<Theta>\<turnstile>\<^bsub>/F \<^esub>(P Z) Call p (Q Z),(A Z)
[PROOF STEP]
by (blast intro: hoarep.Asm) |
/*! \file deleter.h
\brief Header file that declares and defines deleters for gsl_interp_accel and gsl_spline
Copyright © 2015 @dc1394 All Rights Reserved.
This software is released under the BSD 2-Clause License.
*/
#ifndef _DELETER_H_
#define _DELETER_H_
#pragma once
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
namespace getdata {
//! A function.
/*!
Lambda expression that frees a pointer to a gsl_interp_accel
\param acc pointer to the gsl_interp_accel
*/
static auto const gsl_interp_accel_deleter = [](gsl_interp_accel * acc) {
gsl_interp_accel_free(acc);
};
//! A function.
/*!
Lambda expression that frees a pointer to a gsl_spline
\param spline pointer to the gsl_spline
*/
static auto const gsl_spline_deleter = [](gsl_spline * spline) {
gsl_spline_free(spline);
};
}
#endif // _DELETER_H_
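A usage sketch for the header above (illustrative only: the sample data and the choice of gsl_interp_cspline are assumptions). The lambdas plug into std::unique_ptr as custom deleters, so the GSL objects are freed automatically on scope exit.
#include <memory>
#include <gsl/gsl_spline.h>
#include "deleter.h"
int main() {
    double x[] = {0.0, 1.0, 2.0, 3.0};
    double y[] = {0.0, 1.0, 4.0, 9.0};
    // RAII wrappers: the header's lambdas act as the custom deleters.
    std::unique_ptr<gsl_interp_accel, decltype(getdata::gsl_interp_accel_deleter)>
        acc(gsl_interp_accel_alloc(), getdata::gsl_interp_accel_deleter);
    std::unique_ptr<gsl_spline, decltype(getdata::gsl_spline_deleter)>
        spline(gsl_spline_alloc(gsl_interp_cspline, 4), getdata::gsl_spline_deleter);
    gsl_spline_init(spline.get(), x, y, 4);
    double value = gsl_spline_eval(spline.get(), 1.5, acc.get());
    return value > 0.0 ? 0 : 1;  // both GSL objects are freed here
}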
|
-- Andreas, 2014-01-10, reported by fredrik.forsberg
record ⊤ : Set where
record Σ (A : Set) (B : A → Set) : Set where
syntax Σ A (λ x → B) = Σ[ x ∈ A ] B
test : Set
test = {! Σ[ x ∈ ⊤ ] ?!}
-- Fredrik's report:
-- Using the darcs version of Agda from today 10 January,
-- if I load the file and give (or refine) in the hole, I end up with
--
-- test = Σ ⊤ (λ x → {!!})
--
-- i.e. Agda has translated away the syntax Σ[ x ∈ ⊤ ] {!!} for me.
-- I would of course expect
--
-- test = Σ[ x ∈ ⊤ ] {!!}
--
-- instead, and I think this used to be the behaviour? (Using the Σ from
-- the standard library, this is more annoying, as one gets
--
-- test = Σ-syntax ⊤ (λ x → {!!})
--
-- as a result.)
--
-- This might be related to issue 994?
-- Expected test case behavior:
--
-- Bad (at the time of the report):
--
-- (agda2-give-action 0 "Σ ⊤ (λ x → ?)")
--
-- Good:
--
-- (agda2-give-action 0 'no-paren)
|
#ifndef SVGP_H
#define SVGP_H
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#include "Vector.h"
#include "Matrix.h"
#include "GaussianProcess.h"
#include <gsl/gsl_math.h>
class SVGP //: public GaussianProcess
{
protected:
Matrix X; ///< Samples (N x M)
Vector Y; ///< Output (N x 1)
int N; ///< Number of samples
int d; ///< Dimension of each sample (M)
// Kernel parameters
real noise_var;
real sig_var;
Vector scale_length;
/// SVI parameters
int num_inducing; ///< Number of inducing inputs (J)
Matrix Z; ///< Hidden variables (N x J)
Vector u; ///< Global variables (N x 1) (targets)
// variational distribution parametrization
Matrix S;
Vector m;
real l; //step length
int samples; // not currently used, but it should be
Vector p_mean;
Matrix p_var;
Matrix Kmm;
Matrix Knm;
Matrix Kmn;
Matrix Knn;
Matrix invKmm;
Matrix K_tilde;
real Beta;
real KL;
real L;
Matrix currentSample;
Vector currentObservation;
//preferably these could be done in minibatches but only with single examples for now
//the example is repeated <subsamples> times as in Hoffman et al [2013]
virtual void local_update(); //updating Z (local/latent variables)
virtual void global_update(const Matrix& X_samples, const Vector& Y_samples); //updating m, S (which parametrizes q(u) and in turn gives global param u)
//virtual Vector optimize_Z(int max_iters);
virtual void init();
//virtual real LogLikelihood(double* data); //computes the likelihood given a Z*, to use for optimizing the position of Z
virtual void getSamples(Matrix& X_, Vector& Y_);
public:
SVGP(Matrix& X, Vector& Y, Matrix& Z, real noise_var, real sig_var, Vector scale_length, int samples);
//initializes Z through k-means
SVGP(Matrix& X, Vector& Y, int num_inducing, real noise_var, real sig_var, Vector scale_length, int samples);
virtual Matrix Kernel(const Matrix& A, const Matrix& B);
virtual void Prediction(const Vector& x, real& mean, real& var);
virtual void UpdateGaussianProcess(); //update
virtual void FullUpdateGaussianProcess();
virtual real LogLikelihood();
//virtual real LogLikelihood(const gsl_vector *v);
virtual void AddObservation(const Vector& x, const real& y);
virtual void AddObservation(const std::vector<Vector>& x, const std::vector<real>& y);
virtual void Clear();
};
#endif
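A hypothetical driver for the class above (a sketch only: the Matrix/Vector constructor signatures and all hyperparameter values are assumptions, not taken from this codebase).
#include "SVGP.h"
int main() {
    Matrix X(100, 3);   // assumed ctor: 100 samples with 3 features each
    Vector Y(100);      // assumed ctor: 100 scalar targets
    Vector scale(3);    // one length-scale per input dimension
    // Second constructor: 20 inducing points initialized via k-means.
    SVGP gp(X, Y, 20, /*noise_var=*/0.1, /*sig_var=*/1.0, scale, /*samples=*/1);
    Vector x(3);        // a query point
    real mean, var;
    gp.Prediction(x, mean, var);   // posterior mean and variance at x
    gp.AddObservation(x, 0.5);     // stream in a new (x, y) pair
    return 0;
}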
|
-- Andreas, 2018-10-29, issue #3246
-- More things are now allowed in mutual blocks.
-- @mutual@ just pushes the definition parts to the bottom.
-- Definitions exist for data, record, functions, and pattern synonyms.
{-# BUILTIN FLOAT Float #-} -- not (yet) allowed in mutual block
mutual
import Agda.Builtin.Bool
open Agda.Builtin.Bool
f : Bool
f = g -- pushed to bottom
-- module M where -- not (yet) allowed in mutual block
module B = Agda.Builtin.Bool
primitive
primFloatEquality : Float → Float → Bool
{-# INJECTIVE primFloatEquality #-} -- certainly a lie
open import Agda.Builtin.Equality
{-# DISPLAY primFloatEquality x y = x ≡ y #-}
postulate A : Set
{-# COMPILE GHC A = type Integer #-}
variable x : Bool
g : Bool
g = true -- pushed to bottom
{-# STATIC g #-}
record R : Set where
coinductive
field foo : R
{-# ETA R #-}
|
[STATEMENT]
lemma dverts_child_if_not_root:
"\<lbrakk>v \<in> dverts (Node r xs); v \<noteq> r\<rbrakk> \<Longrightarrow> \<exists>t\<in>fst ` fset xs. v \<in> dverts t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>v \<in> dverts (Node r xs); v \<noteq> r\<rbrakk> \<Longrightarrow> \<exists>t\<in>fst ` fset xs. v \<in> dverts t
[PROOF STEP]
by force |
module Convertibility where
open import Syntax public
-- β-η-equivalence.
infix 4 _≡ᵝᵑ_
data _≡ᵝᵑ_ : ∀ {A Γ} → Γ ⊢ A → Γ ⊢ A → Set where
refl≡ᵝᵑ : ∀ {A Γ} {d : Γ ⊢ A} → d ≡ᵝᵑ d
trans≡ᵝᵑ : ∀ {A Γ} {d d′ d″ : Γ ⊢ A} → d ≡ᵝᵑ d′ → d′ ≡ᵝᵑ d″ → d ≡ᵝᵑ d″
sym≡ᵝᵑ : ∀ {A Γ} {d d′ : Γ ⊢ A} → d ≡ᵝᵑ d′ → d′ ≡ᵝᵑ d
-- Congruences.
cong≡ᵝᵑlam : ∀ {A B Γ} {d d′ : Γ , A ⊢ B} →
d ≡ᵝᵑ d′ → lam d ≡ᵝᵑ lam d′
cong≡ᵝᵑapp : ∀ {A B Γ} {d d′ : Γ ⊢ A ⇒ B} {e e′ : Γ ⊢ A} →
d ≡ᵝᵑ d′ → e ≡ᵝᵑ e′ → app d e ≡ᵝᵑ app d′ e′
cong≡ᵝᵑpair : ∀ {A B Γ} {d d′ : Γ ⊢ A} {e e′ : Γ ⊢ B} →
d ≡ᵝᵑ d′ → e ≡ᵝᵑ e′ → pair d e ≡ᵝᵑ pair d′ e′
cong≡ᵝᵑfst : ∀ {A B Γ} {d d′ : Γ ⊢ A ⩕ B} →
d ≡ᵝᵑ d′ → fst d ≡ᵝᵑ fst d′
cong≡ᵝᵑsnd : ∀ {A B Γ} {d d′ : Γ ⊢ A ⩕ B} →
d ≡ᵝᵑ d′ → snd d ≡ᵝᵑ snd d′
-- Reductions, or β-conversions.
reduce⇒ : ∀ {A B Γ} → (d : Γ , A ⊢ B) (e : Γ ⊢ A) →
app (lam d) e ≡ᵝᵑ [ top ≔ e ] d
reduce⩕₁ : ∀ {A B Γ} → (d : Γ ⊢ A) (e : Γ ⊢ B) →
fst (pair d e) ≡ᵝᵑ d
reduce⩕₂ : ∀ {A B Γ} → (d : Γ ⊢ A) (e : Γ ⊢ B) →
snd (pair d e) ≡ᵝᵑ e
-- Expansions, or η-conversions.
expand⇒ : ∀ {A B Γ} → (d : Γ ⊢ A ⇒ B) →
d ≡ᵝᵑ lam (app (mono⊢ weak⊆ d) v₀)
expand⩕ : ∀ {A B Γ} → (d : Γ ⊢ A ⩕ B) →
d ≡ᵝᵑ pair (fst d) (snd d)
expand⫪ : ∀ {Γ} → (d : Γ ⊢ ⫪) →
d ≡ᵝᵑ unit
≡→≡ᵝᵑ : ∀ {A Γ} {d d′ : Γ ⊢ A} → d ≡ d′ → d ≡ᵝᵑ d′
≡→≡ᵝᵑ refl = refl≡ᵝᵑ
-- Syntax for equational reasoning with β-η-equivalence.
module ≡ᵝᵑ-Reasoning where
infix 1 begin≡ᵝᵑ_
begin≡ᵝᵑ_ : ∀ {A Γ} {d d′ : Γ ⊢ A} → d ≡ᵝᵑ d′ → d ≡ᵝᵑ d′
begin≡ᵝᵑ_ p = p
infixr 2 _≡→≡ᵝᵑ⟨⟩_
_≡→≡ᵝᵑ⟨⟩_ : ∀ {A Γ} (d {d′} : Γ ⊢ A) → d ≡ d′ → d ≡ᵝᵑ d′
d ≡→≡ᵝᵑ⟨⟩ p = ≡→≡ᵝᵑ p
infixr 2 _≡ᵝᵑ⟨⟩_
_≡ᵝᵑ⟨⟩_ : ∀ {A Γ} (d {d′} : Γ ⊢ A) → d ≡ᵝᵑ d′ → d ≡ᵝᵑ d′
d ≡ᵝᵑ⟨⟩ p = p
infixr 2 _≡ᵝᵑ⟨_⟩_
_≡ᵝᵑ⟨_⟩_ : ∀ {A Γ} (d {d′ d″} : Γ ⊢ A) → d ≡ᵝᵑ d′ → d′ ≡ᵝᵑ d″ → d ≡ᵝᵑ d″
d ≡ᵝᵑ⟨ p ⟩ q = trans≡ᵝᵑ p q
infix 3 _∎≡ᵝᵑ
_∎≡ᵝᵑ : ∀ {A Γ} (d : Γ ⊢ A) → d ≡ᵝᵑ d
_∎≡ᵝᵑ _ = refl≡ᵝᵑ
open ≡ᵝᵑ-Reasoning public
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Convenient syntax for "equational reasoning" using a preorder
------------------------------------------------------------------------
-- Example uses:
--
-- u∼y : u ∼ y
-- u∼y = begin
-- u ≈⟨ u≈v ⟩
-- v ≡⟨ v≡w ⟩
-- w ∼⟨ w∼y ⟩
-- y ≈⟨ z≈y ⟩
-- z ∎
--
-- u≈w : u ≈ w
-- u≈w = begin-equality
-- u ≈⟨ u≈v ⟩
-- v ≡⟨ v≡w ⟩
-- w ≡˘⟨ x≡w ⟩
-- x ∎
{-# OPTIONS --without-K --safe #-}
open import Relation.Binary
module Relation.Binary.Reasoning.Preorder
{p₁ p₂ p₃} (P : Preorder p₁ p₂ p₃) where
open Preorder P
------------------------------------------------------------------------
-- Publicly re-export the contents of the base module
open import Relation.Binary.Reasoning.Base.Double isPreorder public
------------------------------------------------------------------------
-- DEPRECATED NAMES
------------------------------------------------------------------------
-- Please use the new names as continuing support for the old names is
-- not guaranteed.
-- Version 1.0
infixr 2 _≈⟨⟩_
_≈⟨⟩_ = _≡⟨⟩_
{-# WARNING_ON_USAGE _≈⟨⟩_
"Warning: _≈⟨⟩_ was deprecated in v1.0.
Please use _≡⟨⟩_ instead."
#-}
|
lemma cnj_in_Ints_iff [simp]: "cnj x \<in> \<int> \<longleftrightarrow> x \<in> \<int>" |
lemma csqrt_1 [simp]: "csqrt 1 = 1" |
module Syntax where
data S
(A₁ : Set)
(A₂ : A₁ → Set)
: Set
where
_,_
: (x₁ : A₁)
→ A₂ x₁
→ S A₁ A₂
syntax S A₁ (λ x → A₂)
= x ∈ A₁ × A₂
module M where
data S'
(A₁ : Set)
(A₂ : A₁ → Set)
: Set
where
_,'_
: (x₁ : A₁)
→ A₂ x₁
→ S' A₁ A₂
syntax S' A₁ (λ x → A₂)
= x ∈' A₁ ×' A₂
open M
using (S')
postulate
p1
: {A₁ : Set}
→ {A₂ : A₁ → Set}
→ x₁ ∈ A₁ × A₂ x₁
→ A₁
p1'
: {A₁ : Set}
→ {A₂ : A₁ → Set}
→ x₁ ∈' A₁ ×' A₂ x₁
→ A₁
|
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE FlexibleContexts #-}
module GrenadeExtras.GradNorm (
GradNorm(..)
) where
import Grenade (FullyConnected'(FullyConnected'), FullyConnected(FullyConnected), Tanh,
Logit, Gradient, Relu, Gradients(GNil, (:/>)))
import Numeric.LinearAlgebra.Static ((<.>), toColumns)
import GHC.TypeLits (KnownNat)
class GradNorm x where
normSquared :: x -> Double
instance (KnownNat i, KnownNat o) => GradNorm (FullyConnected' i o) where
normSquared (FullyConnected' r l) = r <.> r + (sum . fmap (\c->c<.>c) . toColumns $ l)
instance (KnownNat i, KnownNat o) => GradNorm (FullyConnected i o) where
normSquared (FullyConnected w _) = normSquared w
instance GradNorm () where
normSquared _ = 0
instance (GradNorm a, GradNorm b) => GradNorm (a, b) where
normSquared (a, b) = normSquared a + normSquared b
instance GradNorm Logit where
normSquared _ = 0
instance GradNorm Tanh where
normSquared _ = 0
instance GradNorm Relu where
normSquared _ = 0
instance GradNorm (Gradients '[]) where
normSquared GNil = 0
instance (GradNorm l, GradNorm (Gradient l), GradNorm (Gradients ls)) => GradNorm (Gradients (l ': ls)) where
normSquared (grad :/> grest) = normSquared grad + normSquared grest
|
lemmas scaleR = scale |
MODULE time_I
INTERFACE
!...Generated by Pacific-Sierra Research 77to90 4.4G 10:47:16 03/09/06
INTEGER FUNCTION time ( )
!VAST...Calls: SECNDS
END FUNCTION
END INTERFACE
END MODULE
|
If $f \in L(F, \lambda x. t(x, g(x)))$ and $g \in \Theta(h)$, then $f \in L(F, \lambda x. t(x, h(x)))$. |
If $c$ is a vector of nonzero real numbers, then the Lebesgue measure of the image of a measurable set under the map $T(x) = t + \sum_{j \in \text{Basis}} c_j x_j$ is $\prod_j |c_j|$ times the Lebesgue measure of the set. |
------------------------------------------------------------------------
-- The Agda standard library
--
-- Converting reflection machinery to strings
------------------------------------------------------------------------
-- Note that Reflection.termErr can also be used directly in tactic
-- error messages.
{-# OPTIONS --without-K --safe #-}
module Reflection.Show where
import Data.Char as Char
import Data.Float as Float
open import Data.List hiding (_++_; intersperse)
import Data.Nat as ℕ
import Data.Nat.Show as ℕ
open import Data.String as String
import Data.Word as Word
open import Relation.Nullary using (yes; no)
open import Function.Base using (_∘′_)
open import Reflection.Abstraction hiding (map)
open import Reflection.Argument hiding (map)
open import Reflection.Argument.Relevance
open import Reflection.Argument.Visibility
open import Reflection.Argument.Information
open import Reflection.Definition
open import Reflection.Literal
open import Reflection.Pattern
open import Reflection.Term
------------------------------------------------------------------------
-- Re-export primitive show functions
open import Agda.Builtin.Reflection public
using () renaming
( primShowMeta to showMeta
; primShowQName to showName
)
------------------------------------------------------------------------
-- Non-primitive show functions
showRelevance : Relevance → String
showRelevance relevant = "relevant"
showRelevance irrelevant = "irrelevant"
showRel : Relevance → String
showRel relevant = ""
showRel irrelevant = "."
showVisibility : Visibility → String
showVisibility visible = "visible"
showVisibility hidden = "hidden"
showVisibility instance′ = "instance"
showLiteral : Literal → String
showLiteral (nat x) = ℕ.show x
showLiteral (word64 x) = ℕ.show (Word.toℕ x)
showLiteral (float x) = Float.show x
showLiteral (char x) = Char.show x
showLiteral (string x) = String.show x
showLiteral (name x) = showName x
showLiteral (meta x) = showMeta x
mutual
showPatterns : List (Arg Pattern) → String
showPatterns [] = ""
showPatterns (a ∷ ps) = showArg a <+> showPatterns ps
where
showArg : Arg Pattern → String
showArg (arg (arg-info visible r) p) = showRel r ++ showPattern p
showArg (arg (arg-info hidden r) p) = braces (showRel r ++ showPattern p)
showArg (arg (arg-info instance′ r) p) = braces (braces (showRel r ++ showPattern p))
showPattern : Pattern → String
showPattern (con c []) = showName c
showPattern (con c ps) = parens (showName c <+> showPatterns ps)
showPattern dot = "._"
showPattern (var s) = s
showPattern (lit l) = showLiteral l
showPattern (proj f) = showName f
showPattern absurd = "()"
private
-- add appropriate parens depending on the given visibility
visibilityParen : Visibility → String → String
visibilityParen visible s = parensIfSpace s
visibilityParen hidden s = braces s
visibilityParen instance′ s = braces (braces s)
mutual
showTerms : List (Arg Term) → String
showTerms [] = ""
showTerms (arg i t ∷ ts) = visibilityParen (visibility i) (showTerm t) <+> showTerms ts
showTerm : Term → String
showTerm (var x args) = "var" <+> ℕ.show x <+> showTerms args
showTerm (con c args) = showName c <+> showTerms args
showTerm (def f args) = showName f <+> showTerms args
showTerm (lam v (abs s x)) = "λ" <+> visibilityParen v s <+> "→" <+> showTerm x
showTerm (pat-lam cs args) =
"λ {" <+> showClauses cs <+> "}" <+> showTerms args
showTerm (Π[ x ∶ arg i a ] b) =
"Π (" ++ visibilityParen (visibility i) x <+> ":" <+>
parensIfSpace (showTerm a) ++ ")" <+> parensIfSpace (showTerm b)
showTerm (sort s) = showSort s
showTerm (lit l) = showLiteral l
showTerm (meta x args) = showMeta x <+> showTerms args
showTerm unknown = "unknown"
showSort : Sort → String
showSort (set t) = "Set" <+> parensIfSpace (showTerm t)
showSort (lit n) = "Set" ++ ℕ.show n -- no space to disambiguate from set t
showSort unknown = "unknown"
showClause : Clause → String
showClause (clause ps t) = showPatterns ps <+> "→" <+> showTerm t
showClause (absurd-clause ps) = showPatterns ps
showClauses : List Clause → String
showClauses [] = ""
showClauses (c ∷ cs) = showClause c <+> ";" <+> showClauses cs
showDefinition : Definition → String
showDefinition (function cs) = "function" <+> braces (showClauses cs)
showDefinition (data-type pars cs) =
"datatype" <+> ℕ.show pars <+> braces (intersperse ", " (map showName cs))
showDefinition (record′ c fs) =
"record" <+> showName c <+> braces (intersperse ", " (map (showName ∘′ unArg) fs))
showDefinition (constructor′ d) = "constructor" <+> showName d
showDefinition axiom = "axiom"
showDefinition primitive′ = "primitive"
|
/-
Copyright (c) 2023 David Loeffler. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Loeffler
! This file was ported from Lean 3 source module analysis.fourier.fourier_transform
! leanprover-community/mathlib commit 3353f3371120058977ce1e20bf7fc8986c0fb042
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Analysis.SpecialFunctions.Complex.Circle
import Mathbin.MeasureTheory.Group.Integration
import Mathbin.MeasureTheory.Integral.IntegralEqImproper
/-!
# The Fourier transform
We set up the Fourier transform for complex-valued functions on finite-dimensional spaces.
## Design choices
In namespace `vector_fourier`, we define the Fourier integral in the following context:
* `𝕜` is a commutative ring.
* `V` and `W` are `𝕜`-modules.
* `e` is a unitary additive character of `𝕜`, i.e. a homomorphism `(multiplicative 𝕜) →* circle`.
* `μ` is a measure on `V`.
* `L` is a `𝕜`-bilinear form `V × W → 𝕜`.
* `E` is a complete normed `ℂ`-vector space.
With these definitions, we define `fourier_integral` to be the map from functions `V → E` to
functions `W → E` that sends `f` to
`λ w, ∫ v in V, e [-L v w] • f v ∂μ`,
where `e [x]` is notational sugar for `(e (multiplicative.of_add x) : ℂ)` (available in locale
`fourier_transform`). This includes the cases `W` is the dual of `V` and `L` is the canonical
pairing, or `W = V` and `L` is a bilinear form (e.g. an inner product).
In namespace `fourier`, we consider the more familiar special case when `V = W = 𝕜` and `L` is the
multiplication map (but still allowing `𝕜` to be an arbitrary ring equipped with a measure).
The most familiar case of all is when `V = W = 𝕜 = ℝ`, `L` is multiplication, `μ` is volume, and
`e` is `real.fourier_char`, i.e. the character `λ x, exp ((2 * π * x) * I)`. The Fourier integral
in this case is defined as `real.fourier_integral`.
## Main results
At present the only nontrivial lemma we prove is `continuous_fourier_integral`, stating that the
Fourier transform of an integrable function is continuous (under mild assumptions).
-/
noncomputable section
-- mathport name: expr𝕊
local notation "𝕊" => circle
open MeasureTheory Filter
open Topology
-- mathport name: «expr [ ]»
-- To avoid messing around with multiplicative vs. additive characters, we make a notation.
scoped[FourierTransform] notation e "[" x "]" => (e (Multiplicative.ofAdd x) : ℂ)
/-! ## Fourier theory for functions on general vector spaces -/
namespace VectorFourier
variable {𝕜 : Type _} [CommRing 𝕜] {V : Type _} [AddCommGroup V] [Module 𝕜 V] [MeasurableSpace V]
{W : Type _} [AddCommGroup W] [Module 𝕜 W] {E : Type _} [NormedAddCommGroup E] [NormedSpace ℂ E]
section Defs
variable [CompleteSpace E]
/-- The Fourier transform integral for `f : V → E`, with respect to a bilinear form `L : V × W → 𝕜`
and an additive character `e`. -/
def fourierIntegral (e : Multiplicative 𝕜 →* 𝕊) (μ : Measure V) (L : V →ₗ[𝕜] W →ₗ[𝕜] 𝕜) (f : V → E)
(w : W) : E :=
∫ v, e[-L v w] • f v ∂μ
#align vector_fourier.fourier_integral VectorFourier.fourierIntegral
theorem fourierIntegral_smul_const (e : Multiplicative 𝕜 →* 𝕊) (μ : Measure V)
(L : V →ₗ[𝕜] W →ₗ[𝕜] 𝕜) (f : V → E) (r : ℂ) :
fourierIntegral e μ L (r • f) = r • fourierIntegral e μ L f :=
by
ext1 w
simp only [Pi.smul_apply, fourier_integral, smul_comm _ r, integral_smul]
#align vector_fourier.fourier_integral_smul_const VectorFourier.fourierIntegral_smul_const
/-- The uniform norm of the Fourier integral of `f` is bounded by the `L¹` norm of `f`. -/
theorem fourierIntegral_norm_le (e : Multiplicative 𝕜 →* 𝕊) {μ : Measure V} (L : V →ₗ[𝕜] W →ₗ[𝕜] 𝕜)
{f : V → E} (hf : Integrable f μ) (w : W) : ‖fourierIntegral e μ L f w‖ ≤ ‖hf.toL1 f‖ :=
by
rw [L1.norm_of_fun_eq_integral_norm]
refine' (norm_integral_le_integral_norm _).trans (le_of_eq _)
simp_rw [norm_smul, Complex.norm_eq_abs, abs_coe_circle, one_mul]
#align vector_fourier.fourier_integral_norm_le VectorFourier.fourierIntegral_norm_le
/-- The Fourier integral converts right-translation into scalar multiplication by a phase factor.-/
theorem fourierIntegral_comp_add_right [HasMeasurableAdd V] (e : Multiplicative 𝕜 →* 𝕊)
(μ : Measure V) [μ.IsAddRightInvariant] (L : V →ₗ[𝕜] W →ₗ[𝕜] 𝕜) (f : V → E) (v₀ : V) :
fourierIntegral e μ L (f ∘ fun v => v + v₀) = fun w => e[L v₀ w] • fourierIntegral e μ L f w :=
by
ext1 w
dsimp only [fourier_integral, Function.comp_apply]
conv in L _ => rw [← add_sub_cancel v v₀]
rw [integral_add_right_eq_self fun v : V => e[-L (v - v₀) w] • f v]
swap; infer_instance
dsimp only
rw [← integral_smul]
congr 1 with v
rw [← smul_assoc, smul_eq_mul, ← Submonoid.coe_mul, ← e.map_mul, ← ofAdd_add, ←
LinearMap.neg_apply, ← sub_eq_add_neg, ← LinearMap.sub_apply, LinearMap.map_sub, neg_sub]
#align vector_fourier.fourier_integral_comp_add_right VectorFourier.fourierIntegral_comp_add_right
end Defs
section Continuous
/- In this section we assume 𝕜, V, W have topologies, and L, e are continuous (but f needn't be).
This is used to ensure that `e [-L v w]` is (ae strongly) measurable. We could get away with
imposing only a measurable-space structure on 𝕜 (it doesn't have to be the Borel sigma-algebra of
a topology); but it seems hard to imagine cases where this extra generality would be useful, and
allowing it would complicate matters in the most important use cases.
-/
variable [TopologicalSpace 𝕜] [TopologicalRing 𝕜] [TopologicalSpace V] [BorelSpace V]
[TopologicalSpace W] {e : Multiplicative 𝕜 →* 𝕊} {μ : Measure V} {L : V →ₗ[𝕜] W →ₗ[𝕜] 𝕜}
/-- If `f` is integrable, then the Fourier integral is convergent for all `w`. -/
theorem fourierIntegralConvergent (he : Continuous e) (hL : Continuous fun p : V × W => L p.1 p.2)
{f : V → E} (hf : Integrable f μ) (w : W) : Integrable (fun v : V => e[-L v w] • f v) μ :=
by
rw [continuous_induced_rng] at he
have c : Continuous fun v => e[-L v w] :=
by
refine' he.comp (continuous_of_add.comp (Continuous.neg _))
exact hL.comp (continuous_prod_mk.mpr ⟨continuous_id, continuous_const⟩)
rw [← integrable_norm_iff (c.ae_strongly_measurable.smul hf.1)]
convert hf.norm
ext1 v
rw [norm_smul, Complex.norm_eq_abs, abs_coe_circle, one_mul]
#align vector_fourier.fourier_integral_convergent VectorFourier.fourierIntegralConvergent
variable [CompleteSpace E]
theorem fourierIntegral_add (he : Continuous e) (hL : Continuous fun p : V × W => L p.1 p.2)
{f g : V → E} (hf : Integrable f μ) (hg : Integrable g μ) :
fourierIntegral e μ L f + fourierIntegral e μ L g = fourierIntegral e μ L (f + g) :=
by
ext1 w
dsimp only [Pi.add_apply, fourier_integral]
simp_rw [smul_add]
rw [integral_add]
· exact fourier_integral_convergent he hL hf w
· exact fourier_integral_convergent he hL hg w
#align vector_fourier.fourier_integral_add VectorFourier.fourierIntegral_add
/-- The Fourier integral of an `L^1` function is a continuous function. -/
theorem fourierIntegral_continuous [TopologicalSpace.FirstCountableTopology W] (he : Continuous e)
(hL : Continuous fun p : V × W => L p.1 p.2) {f : V → E} (hf : Integrable f μ) :
Continuous (fourierIntegral e μ L f) :=
by
apply continuous_of_dominated
· exact fun w => (fourier_integral_convergent he hL hf w).1
· refine' fun w => ae_of_all _ fun v => _
· exact fun v => ‖f v‖
· rw [norm_smul, Complex.norm_eq_abs, abs_coe_circle, one_mul]
· exact hf.norm
· rw [continuous_induced_rng] at he
refine' ae_of_all _ fun v => (he.comp (continuous_of_add.comp _)).smul continuous_const
refine' (hL.comp (continuous_prod_mk.mpr ⟨continuous_const, continuous_id⟩)).neg
#align vector_fourier.fourier_integral_continuous VectorFourier.fourierIntegral_continuous
end Continuous
end VectorFourier
/-! ## Fourier theory for functions on `𝕜` -/
namespace Fourier
variable {𝕜 : Type _} [CommRing 𝕜] [MeasurableSpace 𝕜] {E : Type _} [NormedAddCommGroup E]
[NormedSpace ℂ E]
section Defs
variable [CompleteSpace E]
/-- The Fourier transform integral for `f : 𝕜 → E`, with respect to the measure `μ` and additive
character `e`. -/
def fourierIntegral (e : Multiplicative 𝕜 →* 𝕊) (μ : Measure 𝕜) (f : 𝕜 → E) (w : 𝕜) : E :=
VectorFourier.fourierIntegral e μ (LinearMap.mul 𝕜 𝕜) f w
#align fourier.fourier_integral Fourier.fourierIntegral
theorem fourierIntegral_def (e : Multiplicative 𝕜 →* 𝕊) (μ : Measure 𝕜) (f : 𝕜 → E) (w : 𝕜) :
fourierIntegral e μ f w = ∫ v : 𝕜, e[-(v * w)] • f v ∂μ :=
rfl
#align fourier.fourier_integral_def Fourier.fourierIntegral_def
theorem fourierIntegral_smul_const (e : Multiplicative 𝕜 →* 𝕊) (μ : Measure 𝕜) (f : 𝕜 → E) (r : ℂ) :
fourierIntegral e μ (r • f) = r • fourierIntegral e μ f :=
VectorFourier.fourierIntegral_smul_const _ _ _ _ _
#align fourier.fourier_integral_smul_const Fourier.fourierIntegral_smul_const
/-- The uniform norm of the Fourier transform of `f` is bounded by the `L¹` norm of `f`. -/
theorem fourierIntegral_norm_le (e : Multiplicative 𝕜 →* 𝕊) {μ : Measure 𝕜} {f : 𝕜 → E}
(hf : Integrable f μ) (w : 𝕜) : ‖fourierIntegral e μ f w‖ ≤ ‖hf.toL1 f‖ :=
VectorFourier.fourierIntegral_norm_le _ _ _ _
#align fourier.fourier_integral_norm_le Fourier.fourierIntegral_norm_le
/-- The Fourier transform converts right-translation into scalar multiplication by a phase factor.-/
theorem fourierIntegral_comp_add_right [HasMeasurableAdd 𝕜] (e : Multiplicative 𝕜 →* 𝕊)
(μ : Measure 𝕜) [μ.IsAddRightInvariant] (f : 𝕜 → E) (v₀ : 𝕜) :
fourierIntegral e μ (f ∘ fun v => v + v₀) = fun w => e[v₀ * w] • fourierIntegral e μ f w :=
VectorFourier.fourierIntegral_comp_add_right _ _ _ _ _
#align fourier.fourier_integral_comp_add_right Fourier.fourierIntegral_comp_add_right
end Defs
end Fourier
open Real
namespace Real
/-- The standard additive character of `ℝ`, given by `λ x, exp (2 * π * x * I)`. -/
def fourierChar : Multiplicative ℝ →* 𝕊
where
toFun z := expMapCircle (2 * π * z.toAdd)
map_one' := by rw [toAdd_one, MulZeroClass.mul_zero, expMapCircle_zero]
map_mul' x y := by rw [toAdd_mul, mul_add, expMapCircle_add]
#align real.fourier_char Real.fourierChar
theorem fourierChar_apply (x : ℝ) : Real.fourierChar[x] = Complex.exp (↑(2 * π * x) * Complex.I) :=
by rfl
#align real.fourier_char_apply Real.fourierChar_apply
@[continuity]
theorem continuous_fourierChar : Continuous Real.fourierChar :=
(map_continuous expMapCircle).comp (continuous_const.mul continuous_toAdd)
#align real.continuous_fourier_char Real.continuous_fourierChar
variable {E : Type _} [NormedAddCommGroup E] [CompleteSpace E] [NormedSpace ℂ E]
theorem vector_fourierIntegral_eq_integral_exp_smul {V : Type _} [AddCommGroup V] [Module ℝ V]
[MeasurableSpace V] {W : Type _} [AddCommGroup W] [Module ℝ W] (L : V →ₗ[ℝ] W →ₗ[ℝ] ℝ)
(μ : Measure V) (f : V → E) (w : W) :
VectorFourier.fourierIntegral fourierChar μ L f w =
∫ v : V, Complex.exp (↑(-2 * π * L v w) * Complex.I) • f v ∂μ :=
by simp_rw [VectorFourier.fourierIntegral, Real.fourierChar_apply, mul_neg, neg_mul]
#align real.vector_fourier_integral_eq_integral_exp_smul Real.vector_fourierIntegral_eq_integral_exp_smul
/-- The Fourier integral for `f : ℝ → E`, with respect to the standard additive character and
measure on `ℝ`. -/
def fourierIntegral (f : ℝ → E) (w : ℝ) :=
Fourier.fourierIntegral fourierChar volume f w
#align real.fourier_integral Real.fourierIntegral
theorem fourierIntegral_def (f : ℝ → E) (w : ℝ) :
fourierIntegral f w = ∫ v : ℝ, fourierChar[-(v * w)] • f v :=
rfl
#align real.fourier_integral_def Real.fourierIntegral_def
-- mathport name: fourier_integral
scoped[FourierTransform] notation "𝓕" => Real.fourierIntegral
theorem fourierIntegral_eq_integral_exp_smul {E : Type _} [NormedAddCommGroup E] [CompleteSpace E]
[NormedSpace ℂ E] (f : ℝ → E) (w : ℝ) :
𝓕 f w = ∫ v : ℝ, Complex.exp (↑(-2 * π * v * w) * Complex.I) • f v := by
simp_rw [fourier_integral_def, Real.fourierChar_apply, mul_neg, neg_mul, mul_assoc]
#align real.fourier_integral_eq_integral_exp_smul Real.fourierIntegral_eq_integral_exp_smul
end Real
|
using MAT
include("../src/hypergraphs.jl")
include("../src/SparseCard.jl")
## Load
datasets = ["020_smallplant","001_oct"]
for numsuperpix = [500; 200]
if numsuperpix == 200
lambdas = [0.3; 0.1]
lambda2 = 0.002
elseif numsuperpix == 500
lambdas = [0.08; 0.01]
lambda2 = 0.008
end
for jj = 1:2
dataset = datasets[jj]
lambda1 = lambdas[jj]
M = matread("../data/data_reflections/$(dataset)_p$numsuperpix.mat")
# The graph is a grid: each node is a pixel in a picture
# This gives superpixel regions
sup_img = M["sup_img"]
# Load
M2 = matread("../data/data_reflections/$(dataset)_wts.mat")
W1 = M2["W1"] # edge weights for vertical edges
W2 = M2["W2"] # edge weights for horizontal edges
unary = M2["unary"] # this is the s-t edge weights, already linearized
# number of columns
col = size(sup_img,2)
# number of rows
row = size(sup_img,1)
# go from (i,j) node ID to linear node ID
node = (i,j)->i + (j-1)*row
## Build edge information
I = Vector{Int64}()
J = Vector{Int64}()
W = Vector{Float64}()
# Type 1 edges: terminal edges
tvec = abs.((unary .> 0).* unary)
svec = abs.((unary .< 0).* unary)
# Type 2 edges: vertical edges, organized into rows
for i = 1:row-1
for j = 1:col
v1 = node(i,j) # first node in edge
v2 = node(i+1,j) # second node in edge
push!(I,v1)
push!(J,v2)
# Use edge weights as defined by Jegelka et al.
push!(W,lambda1*W1[i,j])
end
end
# Type 3 edges: horizontal edges, organized into columns
for i = 1:col-1
for j = 1:row
v1 = node(j,i) # first node in edge
v2 = node(j,i+1) # second node in edge
push!(I,v1)
push!(J,v2)
# Use edge weights as defined by Jegelka et al.
push!(W,lambda1*W2[j,i])
end
end
N = length(unary)
A = sparse(I,J,W,N,N)
A = A+sparse(A')
matwrite("Graph_noclique_$(dataset)_$numsuperpix.mat",Dict("A"=>A,"svec"=>svec, "tvec"=>tvec))
end
end
|
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
! This file was ported from Lean 3 source module category_theory.sites.closed
! leanprover-community/mathlib commit 4cfc30e317caad46858393f1a7a33f609296cc30
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.CategoryTheory.Sites.SheafOfTypes
import Mathbin.Order.Closure
/-!
# Closed sieves
A natural closure operator on sieves is a closure operator on `sieve X` for each `X` which commutes
with pullback.
We show that a Grothendieck topology `J` induces a natural closure operator, and define what the
closed sieves are. The collection of `J`-closed sieves forms a presheaf which is a sheaf for `J`,
and further this presheaf can be used to determine the Grothendieck topology from the sheaf
predicate.
Finally we show that a natural closure operator on sieves induces a Grothendieck topology, and hence
that natural closure operators are in bijection with Grothendieck topologies.
## Main definitions
* `category_theory.grothendieck_topology.close`: Sends a sieve `S` on `X` to the set of arrows
which it covers. This has all the usual properties of a closure operator, as well as commuting
with pullback.
* `category_theory.grothendieck_topology.closure_operator`: The bundled `closure_operator` given
by `category_theory.grothendieck_topology.close`.
* `category_theory.grothendieck_topology.closed`: A sieve `S` on `X` is closed for the topology `J`
if it contains every arrow it covers.
* `category_theory.functor.closed_sieves`: The presheaf sending `X` to the collection of `J`-closed
sieves on `X`. This is additionally shown to be a sheaf for `J`, and if this is a sheaf for a
different topology `J'`, then `J' ≤ J`.
* `category_theory.grothendieck_topology.topology_of_closure_operator`: A closure operator on the
set of sieves on every object which commutes with pullback additionally induces a Grothendieck
topology, giving a bijection with `category_theory.grothendieck_topology.closure_operator`.
## Tags
closed sieve, closure, Grothendieck topology
## References
* [S. MacLane, I. Moerdijk, *Sheaves in Geometry and Logic*][MM92]
-/
universe v u
namespace CategoryTheory
variable {C : Type u} [Category.{v} C]
variable (J₁ J₂ : GrothendieckTopology C)
namespace GrothendieckTopology
/-- The `J`-closure of a sieve is the collection of arrows which it covers. -/
@[simps]
def close {X : C} (S : Sieve X) : Sieve X
where
arrows Y f := J₁.Covers S f
downward_closed' Y Z f hS := J₁.arrow_stable _ _ hS
#align category_theory.grothendieck_topology.close CategoryTheory.GrothendieckTopology.close
/-- Any sieve is smaller than its closure. -/
theorem le_close {X : C} (S : Sieve X) : S ≤ J₁.close S := fun Y g hg =>
J₁.covering_of_eq_top (S.pullback_eq_top_of_mem hg)
#align category_theory.grothendieck_topology.le_close CategoryTheory.GrothendieckTopology.le_close
/-- A sieve is closed for the Grothendieck topology if it contains every arrow it covers.
In the case of the usual topology on a topological space, this means that the open cover contains
every open set which it covers.
Note this has no relation to a closed subset of a topological space.
-/
def IsClosed {X : C} (S : Sieve X) : Prop :=
∀ ⦃Y : C⦄ (f : Y ⟶ X), J₁.Covers S f → S f
#align category_theory.grothendieck_topology.is_closed CategoryTheory.GrothendieckTopology.IsClosed
/-- If `S` is `J₁`-closed, then `S` covers exactly the arrows it contains. -/
theorem covers_iff_mem_of_closed {X : C} {S : Sieve X} (h : J₁.IsClosed S) {Y : C} (f : Y ⟶ X) :
J₁.Covers S f ↔ S f :=
⟨h _, J₁.arrow_max _ _⟩
#align category_theory.grothendieck_topology.covers_iff_mem_of_closed CategoryTheory.GrothendieckTopology.covers_iff_mem_of_closed
/-- Being `J`-closed is stable under pullback. -/
theorem isClosed_pullback {X Y : C} (f : Y ⟶ X) (S : Sieve X) :
J₁.IsClosed S → J₁.IsClosed (S.pullback f) := fun hS Z g hg =>
hS (g ≫ f) (by rwa [J₁.covers_iff, sieve.pullback_comp])
#align category_theory.grothendieck_topology.is_closed_pullback CategoryTheory.GrothendieckTopology.isClosed_pullback
/-- The closure of a sieve `S` is the largest closed sieve which contains `S` (justifying the name
"closure").
-/
theorem le_close_of_isClosed {X : C} {S T : Sieve X} (h : S ≤ T) (hT : J₁.IsClosed T) :
J₁.close S ≤ T := fun Y f hf => hT _ (J₁.superset_covering (Sieve.pullback_monotone f h) hf)
#align category_theory.grothendieck_topology.le_close_of_is_closed CategoryTheory.GrothendieckTopology.le_close_of_isClosed
/-- The closure of a sieve is closed. -/
theorem close_isClosed {X : C} (S : Sieve X) : J₁.IsClosed (J₁.close S) := fun Y g hg =>
J₁.arrow_trans g _ S hg fun Z h hS => hS
#align category_theory.grothendieck_topology.close_is_closed CategoryTheory.GrothendieckTopology.close_isClosed
/-- The sieve `S` is closed iff its closure is equal to itself. -/
theorem isClosed_iff_close_eq_self {X : C} (S : Sieve X) : J₁.IsClosed S ↔ J₁.close S = S :=
by
constructor
· intro h
apply le_antisymm
· intro Y f hf
rw [← J₁.covers_iff_mem_of_closed h]
apply hf
· apply J₁.le_close
· intro e
rw [← e]
apply J₁.close_is_closed
#align category_theory.grothendieck_topology.is_closed_iff_close_eq_self CategoryTheory.GrothendieckTopology.isClosed_iff_close_eq_self
theorem close_eq_self_of_isClosed {X : C} {S : Sieve X} (hS : J₁.IsClosed S) : J₁.close S = S :=
(J₁.isClosed_iff_close_eq_self S).1 hS
#align category_theory.grothendieck_topology.close_eq_self_of_is_closed CategoryTheory.GrothendieckTopology.close_eq_self_of_isClosed
/-- Closing under `J` is stable under pullback. -/
theorem pullback_close {X Y : C} (f : Y ⟶ X) (S : Sieve X) :
J₁.close (S.pullback f) = (J₁.close S).pullback f :=
by
apply le_antisymm
· refine' J₁.le_close_of_is_closed (sieve.pullback_monotone _ (J₁.le_close S)) _
apply J₁.is_closed_pullback _ _ (J₁.close_is_closed _)
· intro Z g hg
change _ ∈ J₁ _
rw [← sieve.pullback_comp]
apply hg
#align category_theory.grothendieck_topology.pullback_close CategoryTheory.GrothendieckTopology.pullback_close
@[mono]
theorem monotone_close {X : C} : Monotone (J₁.close : Sieve X → Sieve X) := fun S₁ S₂ h =>
J₁.le_close_of_isClosed (h.trans (J₁.le_close _)) (J₁.close_isClosed S₂)
#align category_theory.grothendieck_topology.monotone_close CategoryTheory.GrothendieckTopology.monotone_close
@[simp]
theorem close_close {X : C} (S : Sieve X) : J₁.close (J₁.close S) = J₁.close S :=
le_antisymm (J₁.le_close_of_isClosed le_rfl (J₁.close_isClosed S))
(J₁.monotone_close (J₁.le_close _))
#align category_theory.grothendieck_topology.close_close CategoryTheory.GrothendieckTopology.close_close
/--
The sieve `S` is in the topology iff its closure is the maximal sieve. This shows that the closure
operator determines the topology.
-/
theorem close_eq_top_iff_mem {X : C} (S : Sieve X) : J₁.close S = ⊤ ↔ S ∈ J₁ X :=
by
constructor
· intro h
apply J₁.transitive (J₁.top_mem X)
intro Y f hf
change J₁.close S f
rwa [h]
· intro hS
rw [eq_top_iff]
intro Y f hf
apply J₁.pullback_stable _ hS
#align category_theory.grothendieck_topology.close_eq_top_iff_mem CategoryTheory.GrothendieckTopology.close_eq_top_iff_mem
/-- A Grothendieck topology induces a natural family of closure operators on sieves. -/
@[simps (config := { rhsMd := semireducible })]
def closureOperator (X : C) : ClosureOperator (Sieve X) :=
ClosureOperator.mk' J₁.close
(fun S₁ S₂ h => J₁.le_close_of_isClosed (h.trans (J₁.le_close _)) (J₁.close_isClosed S₂))
J₁.le_close fun S => J₁.le_close_of_isClosed le_rfl (J₁.close_isClosed S)
#align category_theory.grothendieck_topology.closure_operator CategoryTheory.GrothendieckTopology.closureOperator
@[simp]
theorem closed_iff_closed {X : C} (S : Sieve X) :
S ∈ (J₁.ClosureOperator X).closed ↔ J₁.IsClosed S :=
(J₁.isClosed_iff_close_eq_self S).symm
#align category_theory.grothendieck_topology.closed_iff_closed CategoryTheory.GrothendieckTopology.closed_iff_closed
end GrothendieckTopology
/--
The presheaf sending each object to the set of `J`-closed sieves on it. This presheaf is a `J`-sheaf
(and will turn out to be a subobject classifier for the category of `J`-sheaves).
-/
@[simps]
def Functor.closedSieves : Cᵒᵖ ⥤ Type max v u
where
obj X := { S : Sieve X.unop // J₁.IsClosed S }
map X Y f S := ⟨S.1.pullback f.unop, J₁.isClosed_pullback f.unop _ S.2⟩
#align category_theory.functor.closed_sieves CategoryTheory.Functor.closedSieves
/-- The presheaf of `J`-closed sieves is a `J`-sheaf.
The proof of this is adapted from [MM92], Chapter III, Section 7, Lemma 1.
-/
theorem classifier_isSheaf : Presieve.IsSheaf J₁ (Functor.closedSieves J₁) :=
by
intro X S hS
rw [← presieve.is_separated_for_and_exists_is_amalgamation_iff_sheaf_for]
refine' ⟨_, _⟩
· rintro x ⟨M, hM⟩ ⟨N, hN⟩ hM₂ hN₂
ext
dsimp only [Subtype.coe_mk]
rw [← J₁.covers_iff_mem_of_closed hM, ← J₁.covers_iff_mem_of_closed hN]
have q : ∀ ⦃Z : C⦄ (g : Z ⟶ X) (hg : S g), M.pullback g = N.pullback g :=
by
intro Z g hg
apply congr_arg Subtype.val ((hM₂ g hg).trans (hN₂ g hg).symm)
have MSNS : M ⊓ S = N ⊓ S := by
ext (Z g)
rw [sieve.inter_apply, sieve.inter_apply, and_comm' (N g), and_comm']
apply and_congr_right
intro hg
rw [sieve.pullback_eq_top_iff_mem, sieve.pullback_eq_top_iff_mem, q g hg]
constructor
· intro hf
rw [J₁.covers_iff]
apply J₁.superset_covering (sieve.pullback_monotone f inf_le_left)
rw [← MSNS]
apply J₁.arrow_intersect f M S hf (J₁.pullback_stable _ hS)
· intro hf
rw [J₁.covers_iff]
apply J₁.superset_covering (sieve.pullback_monotone f inf_le_left)
rw [MSNS]
apply J₁.arrow_intersect f N S hf (J₁.pullback_stable _ hS)
· intro x hx
rw [presieve.compatible_iff_sieve_compatible] at hx
let M := sieve.bind S fun Y f hf => (x f hf).1
have : ∀ ⦃Y⦄ (f : Y ⟶ X) (hf : S f), M.pullback f = (x f hf).1 :=
by
intro Y f hf
apply le_antisymm
· rintro Z u ⟨W, g, f', hf', hg : (x f' hf').1 _, c⟩
rw [sieve.pullback_eq_top_iff_mem, ←
show (x (u ≫ f) _).1 = (x f hf).1.pullback u from congr_arg Subtype.val (hx f u hf)]
simp_rw [← c]
rw [show (x (g ≫ f') _).1 = _ from congr_arg Subtype.val (hx f' g hf')]
apply sieve.pullback_eq_top_of_mem _ hg
· apply sieve.le_pullback_bind S fun Y f hf => (x f hf).1
refine' ⟨⟨_, J₁.close_is_closed M⟩, _⟩
· intro Y f hf
ext1
dsimp
rw [← J₁.pullback_close, this _ hf]
apply le_antisymm (J₁.le_close_of_is_closed le_rfl (x f hf).2) (J₁.le_close _)
#align category_theory.classifier_is_sheaf CategoryTheory.classifier_isSheaf
/-- If presheaf of `J₁`-closed sieves is a `J₂`-sheaf then `J₁ ≤ J₂`. Note the converse is true by
`classifier_is_sheaf` and `is_sheaf_of_le`.
-/
theorem le_topology_of_closedSieves_isSheaf {J₁ J₂ : GrothendieckTopology C}
(h : Presieve.IsSheaf J₁ (Functor.closedSieves J₂)) : J₁ ≤ J₂ := fun X S hS =>
by
rw [← J₂.close_eq_top_iff_mem]
have : J₂.is_closed (⊤ : sieve X) := by
intro Y f hf
trivial
suffices (⟨J₂.close S, J₂.close_is_closed S⟩ : Subtype _) = ⟨⊤, this⟩
by
rw [Subtype.ext_iff] at this
exact this
apply (h S hS).IsSeparatedFor.ext
· intro Y f hf
ext1
dsimp
rw [sieve.pullback_top, ← J₂.pullback_close, S.pullback_eq_top_of_mem hf,
J₂.close_eq_top_iff_mem]
apply J₂.top_mem
#align category_theory.le_topology_of_closed_sieves_is_sheaf CategoryTheory.le_topology_of_closedSieves_isSheaf
/-- If being a sheaf for `J₁` is equivalent to being a sheaf for `J₂`, then `J₁ = J₂`. -/
theorem topology_eq_iff_same_sheaves {J₁ J₂ : GrothendieckTopology C} :
J₁ = J₂ ↔ ∀ P : Cᵒᵖ ⥤ Type max v u, Presieve.IsSheaf J₁ P ↔ Presieve.IsSheaf J₂ P :=
by
constructor
· rintro rfl
intro P
rfl
· intro h
apply le_antisymm
· apply le_topology_of_closed_sieves_is_sheaf
rw [h]
apply classifier_is_sheaf
· apply le_topology_of_closed_sieves_is_sheaf
rw [← h]
apply classifier_is_sheaf
#align category_theory.topology_eq_iff_same_sheaves CategoryTheory.topology_eq_iff_same_sheaves
/--
A closure (increasing, inflationary and idempotent) operation on sieves that commutes with pullback
induces a Grothendieck topology.
In fact, such operations are in bijection with Grothendieck topologies.
-/
@[simps]
def topologyOfClosureOperator (c : ∀ X : C, ClosureOperator (Sieve X))
(hc : ∀ ⦃X Y : C⦄ (f : Y ⟶ X) (S : Sieve X), c _ (S.pullback f) = (c _ S).pullback f) :
GrothendieckTopology C where
sieves X := { S | c X S = ⊤ }
top_mem' X := top_unique ((c X).le_closure _)
pullback_stable' X Y S f hS := by
rw [Set.mem_setOf_eq] at hS
rw [Set.mem_setOf_eq, hc, hS, sieve.pullback_top]
transitive' X S hS R hR := by
rw [Set.mem_setOf_eq] at hS
rw [Set.mem_setOf_eq, ← (c X).idempotent, eq_top_iff, ← hS]
apply (c X).Monotone fun Y f hf => _
rw [sieve.pullback_eq_top_iff_mem, ← hc]
apply hR hf
#align category_theory.topology_of_closure_operator CategoryTheory.topologyOfClosureOperator
/--
The topology given by the closure operator `J.close` on a Grothendieck topology is the same as `J`.
-/
theorem topologyOfClosureOperator_self :
(topologyOfClosureOperator J₁.ClosureOperator fun X Y => J₁.pullback_close) = J₁ :=
by
ext (X S)
apply grothendieck_topology.close_eq_top_iff_mem
#align category_theory.topology_of_closure_operator_self CategoryTheory.topologyOfClosureOperator_self
theorem topologyOfClosureOperator_close (c : ∀ X : C, ClosureOperator (Sieve X))
(pb : ∀ ⦃X Y : C⦄ (f : Y ⟶ X) (S : Sieve X), c Y (S.pullback f) = (c X S).pullback f) (X : C)
(S : Sieve X) : (topologyOfClosureOperator c pb).close S = c X S :=
by
ext
change c _ (sieve.pullback f S) = ⊤ ↔ c _ S f
rw [pb, sieve.pullback_eq_top_iff_mem]
#align category_theory.topology_of_closure_operator_close CategoryTheory.topologyOfClosureOperator_close
end CategoryTheory
|
Formal statement is: lemma ksimplex_replace_2: assumes s: "ksimplex p n s" and "a \<in> s" and "n \<noteq> 0" and lb: "\<forall>j<n. \<exists>x\<in>s - {a}. x j \<noteq> 0" and ub: "\<forall>j<n. \<exists>x\<in>s - {a}. x j \<noteq> p" shows "card {s'. ksimplex p n s' \<and> (\<exists>b\<in>s'. s' - {b} = s - {a})} = 2" Informal statement is: If $s$ is a simplex with $n \neq 0$ and $a$ is a point of $s$, and for every coordinate $j < n$ some point of $s - \{a\}$ has $j$-th coordinate different from $0$ and some point has $j$-th coordinate different from $p$, then there are exactly two simplices $s'$ satisfying $s' - \{b\} = s - \{a\}$ for some $b \in s'$. |
function[dir]=whichdir(name)
% WHICHDIR Returns directory name containing file in search path.
%
% WHICHDIR NAME returns the directory containing a file called NAME,
% i.e. the full path of the file excluding NAME and the trailing '/'.
%
% If more than one directory contains a file called NAME, then
% WHICHDIR returns a cell array of directories names.
%
% If NAME does not include an extension, then it is interpreted as
% the name of an m-file, i.e. having extension '.m' .
% _________________________________________________________________
% This is part of JLAB --- type 'help jlab' for more information
% (C) 2006--2015 J.M. Lilly --- type 'help jlab_license' for details
if isempty(strfind(name,'.'))
name=[name '.m'];
end
dir=which(name,'-all');
for i=1:length(dir)
dir{i}=dir{i}(1:end-length(name)-1);
end
if length(dir)==1
dir=dir{1};
end
|
/*
*/
#ifndef _aXe_GRISM_H
#define _aXe_GRISM_H
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
#include "spc_trace_functions.h"
//#include "aXe_errors.h"
#define RELEASE "See github"
#define gsl_matrix gsl_matrix_float
#define gsl_matrix_get gsl_matrix_float_get
#define gsl_matrix_set gsl_matrix_float_set
#define gsl_matrix_set_all gsl_matrix_float_set_all
#define gsl_matrix_alloc gsl_matrix_float_alloc
#define gsl_matrix_free gsl_matrix_float_free
#define gsl_matrix_fprintf gsl_matrix_float_fprintf
#define gsl_matrix_add_constant gsl_matrix_float_add_constant
#define gsl_matrix_scale gsl_matrix_float_scale
#define gsl_matrix_min gsl_matrix_float_min
#define gsl_matrix_max gsl_matrix_float_max
/* WARNING: If you ever change PIXEL_T, make sure you also change
the FITS loader */
#define PIXEL_T float /* type of pixel data -- this should be in
sync with the GSL definitions */
#define MAX_BEAMS 27 /* Max. number of spectrum orders we handle */
// now equals the alphabet...
#define BEAM(x) (char)((int)x+65) /* Maps a beam index to its letter name (0 -> 'A', 1 -> 'B', ...) */
#define MAXCHAR 255 /* Number of characters in small strings and FITS keywords */
/**
A set of two relative (pixel) offsets.
*/
typedef struct
{
int dx0, dx1;
}
px_offset;
/**
A set of two relative double offsets.
*/
typedef struct
{
double dx0, dx1;
}
dx_offset;
/**
A point with integer (pixel) coordinates.
*/
typedef struct
{
int x, y;
}
px_point;
/**
A point with double coordinates.
*/
typedef struct
{
double x, y;
}
d_point;
/**
An aggregation of the grism exposure, its errors, the background,
and flatfields
*/
typedef struct
{
gsl_matrix *grism; /* The grism image */
gsl_matrix *pixerrs; /* (Absolute) errors associated with the grism image */
gsl_matrix *dq; /* Data quality associated with grism image */
}
observation;
/**
A beam, i.e., a description of the image of a spectrum of a given order,
including a reference point, the corners of a convex quadrangle
bounding the image, and the parametrization of the spectrum trace
*/
typedef struct
{
int ID; /* An integer ID for this beam
(see enum beamtype */
d_point refpoint; /* Pixel coordinates of the reference point */
px_point corners[4]; /* Bounding box of spectrum as an arbitrary
quadrangle */
px_point bbox[2]; /* Bounding box of corners, will be filled
in by spc_extract */
trace_func *spec_trace; /* Parametrization of the spectrum trace */
double width; /* largest axis of object in pixels */
double orient; /* orientation of largest axis in radian,
0==horizontal */
int modspec;
int modimage;
double awidth; /* the object width along the extraction direction */
double bwidth; /* the object width perpendicular to awidth
(which is closer to the dispersion direction) */
double aorient; /* orientation of awidth (CCW) =0 along the x-axis */
double slitgeom[4]; /* contains the values for the slit length, angle, and
so on */
gsl_vector *flux; /* the flux of the object at various wavelengths */
int ignore; /* This beam should be ignored if this is not set to 0 */
}
beam;
/**
An object, describing the width and orientation of the object,
the beams in which the spectrum images lie, and the images
comprising an observation of the object.
*/
typedef struct
{
int ID; /* An integer ID number for this object */
beam beams[MAX_BEAMS]; /* table of beams (orders of spectrum) */
int nbeams; /* number of beams in beams member */
observation *grism_obs;
}
object;
/**
An aperture pixel, i.e., a pixel within a beam. The structure
contains the coordinates, the abscissa of the section point between
the object axis and the spectrum trace, the distance between
the pixel and the section point along the object axis, the path
length of the section point along the trace, and the photon count for
this pixel. A table of these is produced by the
make_spc_table function.
*/
typedef struct
{
int p_x, p_y; /* the absolute coordinates of the
source pixel. A -1 in p_x signifies
the end of a list of ap_pixels */
double x, y; /* the logical coordinates of this (logical)
pixel relative to the beam's reference
point */
double dist; /* distance between point and section */
double xs; /* abscissa of section point relative to reference point */
double ys; /* y coord of section point relative to reference point */
// Traditionally (at least in aXe 1.3-1.6) "dxs" was called "projected size of pixel along the trace";
// however, in the code (spc_extract.c, function "handle_one_pixel()") it becomes clear
// that this quantity is in fact the local trace angle.
double dxs;
double xi; /* path length of the section along the
spectrum trace (computed from xs) */
double lambda; /* wavelength at the section point
(computed from xi) */
double dlambda; /* wavelength interval covered by
this pixel */
double count; /* intensity for this pixel */
double weight; /* extraction weight for this pixel */
double error; /* absolute error of count */
double contam; /* flag whether this pixel is contaminated by another spectrum */
double model; /* the pixel flux computed for one of the emission models */
long dq; /* DQ bit information */
}
ap_pixel;
typedef struct
{
ap_pixel *ap; /* A pointer to an ap_pixel array */
object *obj; /* A pointer to the corresponding object
structure */
beam *b; /* A pointer to the beam in the object
that this group of pixel correspond to */
}
PET_entry;
/* See comment to spectrum structure */
typedef enum
{
SPC_W_CONTAM,
SPC_W_BORDER
}
spec_warning;
/**
A point within a spectrum, consisting of the minimum, maximum, and
(weighted) mean lambda of the points that contributed to the
bin, as well as an error estimate for the value (that's in there,
too, of course)
*/
typedef struct
{
double lambda_min, lambda_mean;
double lambda_max, dlambda;/* wavelength information */
double error; /* error in counts in this bin */
double count; /* number of counts in this bin */
double weight; /* weight of this bin */
double flux; /* flux in physical units in this bin */
double ferror; /* error in physical units in this bin */
double contam; /* spectral contamination flag */
// int contam; /* spectral contamination flag */
long dq; /* DQ information */
}
spc_entry;
/**
A spectrum, consisting of a gsl_array containing the data, a start
value (center wave length of lowest bin), a bin size, and warn flags,
which may be <ul><li>SPC_W_CONTAM -- Spectrum is contaminated</li>
<li>SPC_W_BORDER -- Spectrum lies partially beyond the image borders.
</li></ul>
*/
typedef struct
{
spc_entry *spec; /* the actual spectrum */
double lambdamin;
double lambdamax;
int spec_len; /* Number of items in spec */
spec_warning warning; /* problems with this spectrum */
}
spectrum;
#define DEBUG_FILE 0x01
#define DEBUG 0x0000
extern void
print_ap_pixel_table (const ap_pixel * ap_p);
extern ap_pixel *
make_spc_table (object * const ob,
const int beamorder, int *const flags);
extern ap_pixel *
make_gps_table (object * const ob,
const int beamorder, int *const flags,
int xval, int yval);
#endif /* !_aXe_GRISM_H */
|
func $cvtu64tof32 (
var %i u64
) f32 {
return (
cvt f32 u64(dread u64 %i))}
# EXEC: %irbuild Main.mpl
# EXEC: %irbuild Main.irb.mpl
# EXEC: %cmp Main.irb.mpl Main.irb.irb.mpl
|
%!TEX root = da2020-05.tex
\Chapter{5}{\tCONGEST{}~Model: Bandwidth~Limitations}
\noindent
In the previous chapter, we learned about the $\LOCAL$ model. We saw that with the help of unique identifiers, it is possible to gather the full information on a connected input graph in $O(\diam(G))$ rounds. To achieve this, we heavily abused the fact that we can send arbitrarily large messages. In this chapter we will see what can be done if we are only allowed to send small messages. With this restriction, we arrive at a model that is commonly known as the ``$\CONGEST$ model''.
\section{Definitions}\label{sec:congest}
Let $A$ be a distributed algorithm that solves a problem $\Pi$ on a graph family $\calF$ in the $\LOCAL$ model. Assume that $\Msg_A$ is a countable set; without loss of generality, we can then assume that
\[
\Msg_A = \NN,
\]
that is, the messages are encoded as natural numbers. Now we say that $A$ solves problem $\Pi$ on graph family $\calF$ in the $\CONGEST$ model if the following holds for some constant $C$: for any graph $G = (V,E) \in \calF$, algorithm $A$ only sends messages from the set $\{0, 1, \dotsc, |V|^C\}$.
Put otherwise, we have the following \emph{bandwidth restriction}: in each communication round, over each edge, we only send $O(\log n)$-bit messages, where $n$ is the total number of nodes.
\section{Examples}
Assume that we have an algorithm $A$ that is designed for the $\LOCAL$ model. Moreover, assume that during the execution of $A$ on a graph $G = (V,E)$, in each communication round, we only need to send the following pieces of information over each edge:
\begin{itemize}[noitemsep]
\item $O(1)$ node identifiers,
\item $O(1)$ edges, encoded as a pair of node identifiers,
\item $O(1)$ counters that take values from $0$ to $\diam(G)$,
\item $O(1)$ counters that take values from $0$ to $|V|$,
\item $O(1)$ counters that take values from $0$ to $|E|$.
\end{itemize}
Now it is easy to see that we can encode all of this as a binary string with $O(\log n)$ bits. Hence $A$ is not just an algorithm for the $\LOCAL$ model, but it is also an algorithm for the $\CONGEST$ model.
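For instance, a single counter with values in $\{0, 1, \dotsc, |E|\}$ fits in
\[
\lceil \log_2 (|E|+1) \rceil \le \lceil \log_2 (n^2+1) \rceil = O(\log n)
\]
bits, since $|E| \le n^2$, and a constant number of such fields together still takes only $O(\log n)$ bits.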
Many algorithms that we have encountered in this book so far are of the above form, and hence they are also $\CONGEST$ algorithms (see Exercise~\ref{ex:congest-prior}). However, there is a notable exception: the algorithm for gathering the entire network from Section~\longref{4.2}{sec:gather}. In this algorithm, we need to send messages of size up to $\Theta(n^2)$ bits:
\begin{itemize}
\item To encode the set of nodes, we may need up to $\Theta(n \log n)$ bits (a list of $n$ identifiers, each of which is $\Theta(\log n)$ bits long).
\item To encode the set of edges, we may need up to $\Theta(n^2)$ bits (the adjacency matrix).
\end{itemize}
While algorithms with a running time of $O(\diam(G))$ or $O(n)$ are trivial in the $\LOCAL$ model, this is no longer the case in the $\CONGEST$ model. Indeed, there are graph problems that \emph{cannot} be solved in time $O(n)$ in the $\CONGEST$ model (see Exercise~\ref{ex:congest-gather-lb}).
In this chapter, we will learn techniques that can be used to design efficient algorithms in the $\CONGEST$ model. We will use the all-pairs shortest path problem as the running example.
\section{All-Pairs Shortest Path Problem}
Throughout this chapter, we will assume that the input graph $G = (V,E)$ is connected, and as usual, we have $n = |V|$. In the \emph{all-pairs shortest path} problem (APSP in brief), the goal is to find the distances between all pairs of nodes. More precisely, the local output of node $v \in V$ is
\[
f(v) = \bigl\{ (u, d) : u \in V,\ d = \dist_G(v, u) \bigr\}.
\]
That is, $v$ has to know the identities of all other nodes, as well as the shortest-path distance between itself and all other nodes.
Note that to represent the local output of a single node we need $\Theta(n \log n)$ bits, and just to transmit this information over a single edge we would need $\Theta(n)$ communication rounds. Indeed, we can prove that any algorithm that solves the APSP problem in the $\CONGEST$ model takes $\Omega(n)$ rounds\mydash see Exercise~\ref{ex:apsp-lb}.
In this chapter, we will present an optimal distributed algorithm for the APSP problem: it solves the problem in $O(n)$ rounds in the $\CONGEST$ model.
\newcommand{\BFS}{\algo{BFS}}
\newcommand{\Wave}{\algo{Wave}}
\section{Single-Source Shortest Paths}\label{sec:wave}
As a warm-up, we will start with a much simpler problem. Assume that we have elected a leader $s \in V$, that is, there is precisely one node $s$ with input $1$ and all other nodes have input $0$. We will design an algorithm such that each node $v \in V$ outputs
\[
f(v) = \dist_G(s,v),
\]
i.e., its shortest-path distance to leader $s$.
The algorithm proceeds as follows. In the first round, the leader will send message \msg{wave} to all neighbors, switch to state $0$, and stop. In round $i$, each node $v$ proceeds as follows: if $v$ has not stopped, and if it receives message \msg{wave} from some ports, it will send message \msg{wave} to all other ports, switch to state $i$, and stop; otherwise it does nothing. See Figure~\ref{fig:wave}.
\begin{figure}
\centering
\includegraphics[page=\PWave]{figs.pdf}
\caption{(a)~Graph $G$ and leader~$s$. (b)~Execution of algorithm $\Wave$ on graph $G$. The arrows denote \msg{wave} messages, and the dotted lines indicate the communication round during which these messages were sent.}\label{fig:wave}
\end{figure}
The analysis of the algorithm is simple. By induction, all nodes at distance $i$ from $s$ will receive message \msg{wave} from at least one port in round $i$, and they will hence output the correct value~$i$. The running time of the algorithm is $O(\diam(G))$ rounds in the $\CONGEST$ model.
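To make the execution concrete, the following is a minimal centralized simulation of algorithm $\Wave$, written as a Python sketch (the adjacency-list representation and function names are illustrative, not part of the model):
\begin{verbatim}
def wave(adj, s):
    # adj: dict mapping each node to its list of neighbors; s: the leader.
    # Returns the state (= round number) in which each node stopped,
    # which equals dist_G(s, v) for every node v.
    state = {s: 0}        # the leader switches to state 0 and stops
    frontier = [s]        # nodes that sent "wave" in the previous round
    i = 0
    while frontier:
        i += 1
        new_frontier = []
        for u in frontier:
            for v in adj[u]:
                if v not in state:      # v has not stopped yet
                    state[v] = i        # v receives "wave" in round i
                    new_frontier.append(v)
        frontier = new_frontier
    return state
\end{verbatim}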
\section{Breadth-First Search Tree}\label{sec:bfs}
Algorithm $\Wave$ finds the shortest-path distances from a single source $s$. Now we will do something slightly more demanding: calculate not just the distances but also the shortest paths.
More precisely, our goal is to construct a \emph{breadth-first search tree} (BFS tree) $T$ rooted at $s$. This is a spanning subgraph $T = (V,E')$ of $G$ such that $T$ is a tree, and for each node $v \in V$, the shortest path from $s$ to $v$ in tree $T$ is also a shortest path from $s$ to $v$ in graph $G$. We will also label each node $v \in V$ with a \emph{distance label} $d(v)$, so that for each node $v \in V$ we have
\[
d(v) = \dist_T(s,v) = \dist_G(s,v).
\]
See Figure~\ref{fig:bfs} for an illustration. We will interpret $T$ as a directed graph, so that each edge is of form $(u,v)$, where $d(u) > d(v)$, that is, the edges point towards the root~$s$.
\begin{figure}
\centering
\includegraphics[page=\PBFS]{figs.pdf}
\caption{(a)~Graph $G$ and leader~$s$. (b)~BFS tree $T$ (arrows) and distance labels $d(v)$ (numbers).}\label{fig:bfs}
\end{figure}
There is a simple centralized algorithm that constructs the BFS tree and distance labels: breadth-first search. We start with an empty tree and unlabeled nodes. First we label the leader $s$ with $d(s) = 0$. Then in step $i = 0, 1, \dotsc$, we visit each node $u$ with distance label $d(u) = i$, and check each neighbor $v$ of $u$. If we have not labeled $v$ yet, we will label it with $d(v) = i+1$, and add the edge $(v,u)$ to the BFS tree. This way all nodes that are at distance $i$ from $s$ in $G$ will be labeled with the distance label $i$, and they will also be at distance $i$ from $s$ in~$T$.
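The same procedure as a Python sketch (an illustration only; the graph is assumed to be given as an adjacency list):
\begin{verbatim}
def bfs_tree(adj, s):
    # Returns distance labels d and parent pointers encoding the BFS tree.
    d = {s: 0}
    parent = {}
    current = [s]          # nodes with distance label i
    i = 0
    while current:
        nxt = []
        for u in current:
            for v in adj[u]:
                if v not in d:        # v is not labeled yet
                    d[v] = i + 1
                    parent[v] = u     # add the edge (v, u) to the tree
                    nxt.append(v)
        current, i = nxt, i + 1
    return d, parent
\end{verbatim}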
We can implement the same idea as a distributed algorithm in the $\CONGEST$ model. We will call this algorithm $\BFS$. In the algorithm, each node $v$ maintains the following variables:
\begin{itemize}
\item $d(v)$: distance to the root.
\item $p(v)$: pointer to the parent of node $v$ in tree $T$ (port number).
\item $C(v)$: the set of children of node $v$ in tree $T$ (port numbers).
\item $a(v)$: acknowledgment\mydash set to $1$ when the subtree rooted at $v$ has been constructed.
\end{itemize}
Here $a(v) = 1$ denotes a stopping state. When the algorithm stops, variables $d(v)$ will be distance labels, tree $T$ is encoded in variables $p(v)$ and $C(v)$, and all nodes will have $a(v) = 1$.
Initially, we set $d(v) \gets \bot$, $p(v) \gets \bot$, $C(v) \gets \bot$, and $a(v) \gets 0$ for each node $v$, except for the root which has $d(s) = 0$. We will grow tree $T$ from $s$ by iterating the following steps:
\begin{itemize}
\item Each node $v$ with $d(v) \ne \bot$ and $C(v) = \bot$ will send a \emph{proposal} with value $d(v)$ to all neighbors.
\item If a node $u$ with $d(u) = \bot$ receives some proposals with value $j$, it will \emph{accept} one of them and \emph{reject} all other proposals. It will set $p(u)$ to point to the node whose proposal it accepted, and it will set $d(u) \gets j+1$.
\item Each node $v$ that sent some proposals will set $C(v)$ to be the set of neighbors that accepted proposals.
\end{itemize}
This way $T$ will grow towards the leaf nodes. Once we reach a leaf node, we will send acknowledgments back towards the root:
\begin{itemize}
\item Each node $v$ with $a(v) = 1$ and $p(v) \ne \bot$ will send an \emph{acknowledgment} to port $p(v)$.
\item Each node $v$ with $a(v) = 0$ and $C(v) \ne \bot$ will set $a(v) \gets 1$ when it has received acknowledgments from each port of $C(v)$. In particular, if a node has $C(v) = \emptyset$, it can set $a(v) \gets 1$ without waiting for any acknowledgments.
\end{itemize}
It is straightforward to verify that the algorithm works correctly and constructs a BFS tree in $O(\diam(G))$ rounds in the $\CONGEST$ model.
Note that the acknowledgments would not be strictly necessary in order to construct the tree. However, they will be very helpful in the next section when we use algorithm $\BFS$ as a subroutine.
\section{Leader Election}\label{sec:leader}
Algorithm $\BFS$ constructs a BFS tree rooted at a single leader, assuming that we have already elected a leader. Now we will show how to elect a leader. Surprisingly, we can use algorithm $\BFS$ to do it!
We will design an algorithm $\algo{Leader}$ that finds the node with the smallest identifier; this node will be the leader. The basic idea is very simple:
\begin{enumerate}
\item We modify algorithm $\BFS$ so that we can run multiple copies of it in parallel, with different root nodes. We augment the messages with the identity of the root node, and each node keeps track of the variables $d$, $p$, $C$, and $a$ separately for each possible root.
\item Then we pretend that all nodes are leaders and start running $\BFS$. In essence, we will run $n$ copies of $\BFS$ in parallel, and hence we will construct $n$ BFS trees, one rooted at each node. We will denote by $\BFS_v$ the $\BFS$ process rooted at node $v \in V$, and we will write $T_v$ for the output of this process.
\end{enumerate}
However, there are two problems: First, it is not yet obvious how all this would help with leader election. Second, we cannot implement this idea directly in the $\CONGEST$ model\mydash nodes would need to send up to $n$ distinct messages per communication round, one per each $\BFS$ process, and there is not enough bandwidth for all those messages.
\begin{figure}
\centering
\includegraphics[page=\PLeader]{figs.pdf}
\caption{Leader election. Each node $v$ will launch a process $\BFS_v$ that attempts to construct a BFS tree $T_v$ rooted at $v$. Other nodes will happily follow $\BFS_v$ if $v$ is the smallest leader they have seen so far; otherwise they will start to ignore messages related to $\BFS_v$. Eventually, precisely one of the processes will complete successfully, while all other processes will get stuck at some point. In this example, node $1$ will be the leader, as it has the smallest identifier. Process $\BFS_2$ will never succeed, as node $1$ (as well as all other nodes that are aware of node $1$) will ignore all messages related to $\BFS_2$. Node $1$ is the only root that will receive acknowledgments from every child.}\label{fig:leader}
\end{figure}
Fortunately, we can solve both of these issues very easily; see Figure~\ref{fig:leader}:
\begin{enumerate}[resume]
\item Each node will only send messages related to the tree that has the \emph{smallest identifier as the root}. More precisely, for each node $v$, let $U(v) \subseteq V$ denote the set of nodes $u$ such that $v$ has received messages related to process $\BFS_u$, and let $\ell(v) = \min U(v)$ be the smallest of these nodes. Then $v$ will ignore messages related to process $\BFS_u$ for all $u \ne \ell(v)$, and it will only send messages related to process $\BFS_{\ell(v)}$.
\end{enumerate}
We make the following observations:
\begin{itemize}
\item In each round, each node will only send messages related to at most one $\BFS$ process. Hence we have solved the second problem\mydash this algorithm can be implemented in the $\CONGEST$ model.
\item Let $s = \min V$ be the node with the smallest identifier. When messages related to $\BFS_s$ reach a node $v$, it will set $\ell(v) = s$ and never change it again. Hence all nodes will follow process $\BFS_s$ from start to end, and thanks to the acknowledgments, node $s$ will eventually know that we have successfully constructed a BFS tree $T_s$ rooted at it.
\item Let $u \ne \min V$ be any other node. Now there is at least one node, $s$, that will ignore all messages related to process $\BFS_u$. Hence $\BFS_u$ will never finish; node $u$ will never receive the acknowledgments related to tree $T_u$ from all neighbors.
\end{itemize}
That is, we now have an algorithm with the following properties: after $O(\diam(G))$ rounds, there is precisely one node $s$ that knows that it is the unique node $s = \min V$. To finish the leader election process, node $s$ will inform all other nodes that leader election is over; node $s$ will output $1$ and all other nodes will output $0$ and stop.
\section{All-Pairs Shortest Paths}\label{sec:apsp}
Now we are ready to design algorithm $\algo{APSP}$ that solves the all-pairs shortest path problem (APSP) in time $O(n)$.
We already know how to find the shortest-path distances from a single source; this is efficiently solved with algorithm $\Wave$. Just like we did with the $\BFS$ algorithm, we can also augment $\Wave$ with the root identifier and hence have a separate process $\Wave_v$ for each possible root $v \in V$. If we could somehow run all these processes in parallel, then each node would receive a wave from every other node, and hence each node would learn the distance to every other node, which is precisely what we need to do in the APSP problem. However, it is not obvious how to achieve a good performance in the $\CONGEST$ model:
\begin{itemize}
\item If we try to run all $\Wave_v$ processes simultaneously in parallel, we may need to send messages related to several waves simultaneously over a single edge, and there is not enough bandwidth to do that.
\item If we try to run all $\Wave_v$ processes sequentially, it will take a lot of time: the running time would be $O(n \diam(G))$ instead of $O(n)$.
\end{itemize}
The solution is to \emph{pipeline} the $\Wave_v$ processes so that we can have many of them running simultaneously in parallel, without congestion. In essence, we want to have multiple wavefronts active simultaneously so that they never collide with each other.
To achieve this, we start with the leader election and the construction of a BFS tree rooted at the leader; let $s$ be the leader, and let $T_s$ be the BFS tree. Then we do a \emph{depth-first traversal} of $T_s$. This is a walk $w_s$ in $T_s$ that starts at $s$, ends at $s$, and traverses each edge precisely twice; see Figure~\ref{fig:dfs}.
\begin{figure}
\centering
\includegraphics[page=\PDFS]{figs.pdf}
\caption{(a)~BFS tree $T_s$ rooted at $s$. (b)~A depth-first traversal $w_s$ of $T_s$.}\label{fig:dfs}
\end{figure}
More concretely, we move a \emph{token} along walk $w_s$. We move the token \emph{slowly}: we always spend 2 communication rounds before we move the token to an adjacent node. Whenever the token reaches a new node $v$ that we have not encountered previously during the walk, we launch process $\Wave_v$. This is sufficient to avoid all congestion!
\begin{figure}
\centering
\includegraphics[page=\PPipeline]{figs.pdf}
\caption{Algorithm $\algo{APSP}$: the token walks along the BFS tree at speed $0.5$ (thick arrows), while each $\Wave_v$ moves along the original graph at speed $1$ (dashed lines). The waves are strictly nested: if $\Wave_v$ was triggered after $\Wave_u$, it will never catch up with $\Wave_u$.}\label{fig:pipeline}
\end{figure}
The key observation here is that the token moves slower than the waves. The waves move at speed $1$ edge per round (along the edges of $G$), while the token moves at speed $0.5$ edges per round (along the edges of $T_s$, which is a subgraph of $G$). This guarantees that two waves never collide. To see this, consider two waves $\Wave_u$ and $\Wave_v$, so that $\Wave_u$ was launched before $\Wave_v$. Let $d = \dist_G(u,v)$. Then it will take at least $2d$ rounds to move the token from $u$ to $v$, but only $d$ rounds for $\Wave_u$ to reach node $v$. Hence $\Wave_u$ was already past $v$ before we triggered $\Wave_v$, and $\Wave_v$ will never catch up with $\Wave_u$ as both of them travel at the same speed. See Figure~\ref{fig:pipeline} for an illustration.
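For instance, if $\dist_G(u,v) = 3$, then $\Wave_u$ reaches node $v$ after $3$ rounds, while the token needs at least $2 \cdot 3 = 6$ rounds to travel from $u$ to $v$; hence $\Wave_v$ starts at least $3$ rounds behind the wavefront of $\Wave_u$, and this gap never shrinks.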
Hence we have an algorithm $\algo{APSP}$ that is able to trigger all $\Wave_v$ processes in $O(n)$ time, without collisions, and each of them completes within $O(\diam(G))$ rounds after it is launched. Overall, it takes $O(n)$ rounds for all nodes to learn the distances to all other nodes. Finally, the leader can inform everyone else when it is safe to stop and announce the local outputs (e.g., with the help of another wave).
\section{Quiz}
Give an example of a graph problem that \emph{can} be solved in $O(1)$ rounds in the $\LOCAL$ model and \emph{cannot} be solved in $O(n)$ rounds in the $\CONGEST$ model. The input has to be an unlabeled graph; that is, the nodes will not have any inputs (beyond their own degree and their unique identifier). The output can be anything; you are free to construct an artificial graph problem. It is enough to give a brief definition of the graph problem; no further explanations are needed.
\section{Exercises}
\begin{ex}[prior algorithms]\label{ex:congest-prior}
In Chapters \chapterref{3} and \chapterref{4} we have seen examples of algorithms that were designed for the $\PN$ and $\LOCAL$ models. Many of these algorithms use only small messages\mydash they can be used directly in the $\CONGEST$ model. Give at least four concrete examples of such algorithms, and prove that they indeed use only small messages.
\end{ex}
\begin{ex}[edge counting]
The \emph{edge counting} problem is defined as follows: each node has to output the value $|E|$, i.e., it has to indicate how many edges there are in the graph.
Assume that the input graph is connected. Design an algorithm that solves the edge counting problem in the $\CONGEST$ model in time $O(\diam(G))$.
\end{ex}
\begin{ex}[detecting bipartite graphs]
Assume that the input graph is connected. Design an algorithm that solves the following problem in the $\CONGEST$ model in time $O(\diam(G))$:
\begin{itemize}[noitemsep]
\item If the input graph is bipartite, all nodes output $1$.
\item Otherwise all nodes output $0$.
\end{itemize}
\end{ex}
\begin{ex}[detecting complete graphs]
We say that a graph $G = (V,E)$ is \emph{complete} if for all nodes $u, v \in V$, $u \ne v$, there is an edge $\{u,v\} \in E$.
Assume that the input graph is connected. Design an algorithm that solves the following problem in the $\CONGEST$ model in time $O(1)$:
\begin{itemize}[noitemsep]
\item If the input graph is a complete graph, all nodes output $1$.
\item Otherwise all nodes output $0$.
\end{itemize}
\end{ex}
\begin{ex}[gathering]
Assume that the input graph is connected. In Section~\longref{4.2}{sec:gather} we saw how to gather full information on the input graph in time $O(\diam(G))$ in the $\LOCAL$ model. Design an algorithm that solves the problem in time $O(|E|)$ in the $\CONGEST$ model.
\end{ex}
\begin{exs}[gathering lower bounds]\label{ex:congest-gather-lb}
Assume that the input graph is connected. Prove that there is no algorithm that gathers full information on the input graph in time $O(|V|)$ in the $\CONGEST$ model.
\hint{To reach a contradiction, assume that $A$ is an algorithm that solves the problem. For each $n$, let $\calF(n)$ consist of all graphs with the following properties: there are $n$ nodes with unique identifiers $1,2,\dotsc,n$, the graph is connected, and the degree of node $1$ is $1$. Then compare the following two quantities as a function of~$n$:
\begin{enumerate}
\item $f(n) = {}$how many different graphs there are in family $\calF(n)$.
\item $g(n) = {}$how many different message sequences node number $1$ may receive during the execution of algorithm~$A$ if we run it on any graph $G \in \calF(n)$.
\end{enumerate}
Argue that for a sufficiently large $n$, we will have $f(n) > g(n)$. Then there are at least two different graphs $G_1, G_2 \in \calF(n)$ such that node $1$ receives the same information when we run $A$ on either of these graphs.}
\end{exs}
\begin{exs}[APSP lower bounds]\label{ex:apsp-lb}
Assume that the input graph is connected. Prove that there is no algorithm that solves the APSP problem in time $o(|V|)$ in the $\CONGEST$ model.
\end{exs}
\section{Bibliographic Notes}
The name $\CONGEST$ is from Peleg's~\cite{peleg00distributed} book. Algorithm $\algo{APSP}$ is due to Holzer and Wattenhofer \cite{holzer12apsp}\mydash surprisingly, it was published as recently as 2012.
|
(**************************************************************)
(* Copyright Dominique Larchey-Wendling [*] *)
(* *)
(* [*] Affiliation LORIA -- CNRS *)
(**************************************************************)
(* This file is distributed under the terms of the *)
(* CeCILL v2 FREE SOFTWARE LICENSE AGREEMENT *)
(**************************************************************)
From Undecidability.Synthetic
Require Import Definitions ReducibilityFacts.
From Undecidability.TM Require Import SBTM.
From Undecidability.MinskyMachines Require Import MMA.
From Undecidability.TM
Require SBTM_HALT_to_PCTM_HALT.
From Undecidability.StackMachines
Require PCTM_HALT_to_BSM_HALTING.
From Undecidability.MinskyMachines
Require MMA BSM_to_MMA_HALTING MMA3_to_MMA2_HALTING.
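(* The reduction below chains four previously verified reductions:
   SBTM halting ⪯ PCTM halting ⪯ BSM halting ⪯ MMA halting,
   and finally from three-counter to two-counter Minsky machines
   (MMA3_to_MMA2_HALTING). *)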
Theorem reduction : SBTM_HALT ⪯ MMA2_HALTING.
Proof.
eapply reduces_transitive. apply SBTM_HALT_to_PCTM_HALT.reduction.
eapply reduces_transitive. apply PCTM_HALT_to_BSM_HALTING.reduction.
eapply reduces_transitive. apply BSM_to_MMA_HALTING.reduction.
apply MMA3_to_MMA2_HALTING.reduction.
Qed.
|
lemmas isCont_of_real [simp] = bounded_linear.isCont [OF bounded_linear_of_real] |
{-# LANGUAGE ScopedTypeVariables #-}
module Types
( Planet(..)
, mass
, diameter
, pullOn
, distance
, distanceScale
, alterSpeed
, newPoint
)
where
import Numeric.LinearAlgebra
data Planet = Earth | Jupiter deriving (Eq, Show)
type Point = (Float, Float)
mass :: Planet -> Int
mass Earth = 5972 -- presumably in units of 10^21 kg (5.972e24 kg)
mass Jupiter = 1898000 -- 1.898e27 kg
distanceScale :: Float
distanceScale = 1000000
diameter :: Planet -> Integer
diameter Earth = 12756000 -- m
diameter Jupiter = 142984000
distance :: Point -> Point -> Float
distance (aX, aY) (bX, bY) =
let height = abs (bY - aY)
width = abs (bX - aX)
height2 = height ^ 2
width2 = width ^ 2
in sqrt (height2 + width2) * distanceScale * 100 -- planets are rendered at actual size, so scale the distance up accordingly
pullOn :: Planet -> Point -> Planet -> Point -> Vector Float -- an acceleration pointing out from a
pullOn on a from b =
let
gConstant = 15 * 10 ^ 12
massA = mass on
massB = mass from
dist = distance b a ^ 2
forceScalar =
(-gConstant * fromIntegral massA * fromIntegral massB) / dist
accScalar = forceScalar / fromIntegral massA
in
scale accScalar (vectorize b a)
vectorize :: Point -> Point -> Vector Float
vectorize (ax, ay) (bx, by) = fromList [bx - ax, by - ay]
alterSpeed :: Vector Float -> Vector Float -> Vector Float
alterSpeed current pullForce = add current pullForce -- simple Euler step: add the pull (acceleration) to the current velocity
newPoint :: Point -> Vector Float -> Point
newPoint (ax, ay) v = case toList v of
[bx, by] -> (ax + bx, ay + by)
x ->
error
$ "cannot create new point from speed vector with /= 2 elements "
++ show x
|
[GOAL]
ι : Type u_1
I J J₁ J₂ : Box ι
π : TaggedPrepartition I
x : ι → ℝ
⊢ x ∈ iUnion π ↔ ∃ J, J ∈ π ∧ x ∈ J
[PROOFSTEP]
convert Set.mem_iUnion₂
[GOAL]
case h.e'_2.h.e'_2.h.a
ι : Type u_1
I J J₁ J₂ : Box ι
π : TaggedPrepartition I
x : ι → ℝ
x✝ : Box ι
⊢ x✝ ∈ π ∧ x ∈ x✝ ↔ ∃ j, x ∈ ↑x✝
[PROOFSTEP]
rw [Box.mem_coe, mem_toPrepartition, exists_prop]
[GOAL]
ι : Type u_1
I J : Box ι
π : Prepartition I
πi : (J : Box ι) → TaggedPrepartition J
hJ : J ∈ π
J' : Box ι
hJ' : J' ∈ πi J
⊢ TaggedPrepartition.tag (biUnionTagged π πi) J' = TaggedPrepartition.tag (πi J) J'
[PROOFSTEP]
rw [← π.biUnionIndex_of_mem (πi := fun J => (πi J).toPrepartition) hJ hJ']
[GOAL]
ι : Type u_1
I J : Box ι
π : Prepartition I
πi : (J : Box ι) → TaggedPrepartition J
hJ : J ∈ π
J' : Box ι
hJ' : J' ∈ πi J
⊢ TaggedPrepartition.tag (biUnionTagged π πi) J' =
TaggedPrepartition.tag (πi (biUnionIndex π (fun J => (πi J).toPrepartition) J')) J'
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
I J : Box ι
p : (ι → ℝ) → Box ι → Prop
π : Prepartition I
πi : (J : Box ι) → TaggedPrepartition J
⊢ (∀ (J : Box ι), J ∈ biUnionTagged π πi → p (TaggedPrepartition.tag (biUnionTagged π πi) J) J) ↔
∀ (J : Box ι), J ∈ π → ∀ (J' : Box ι), J' ∈ πi J → p (TaggedPrepartition.tag (πi J) J') J'
[PROOFSTEP]
simp only [mem_biUnionTagged]
[GOAL]
ι : Type u_1
I J : Box ι
p : (ι → ℝ) → Box ι → Prop
π : Prepartition I
πi : (J : Box ι) → TaggedPrepartition J
⊢ (∀ (J : Box ι), (∃ J', J' ∈ π ∧ J ∈ πi J') → p (TaggedPrepartition.tag (biUnionTagged π πi) J) J) ↔
∀ (J : Box ι), J ∈ π → ∀ (J' : Box ι), J' ∈ πi J → p (TaggedPrepartition.tag (πi J) J') J'
[PROOFSTEP]
refine' ⟨fun H J hJ J' hJ' => _, fun H J' ⟨J, hJ, hJ'⟩ => _⟩
[GOAL]
case refine'_1
ι : Type u_1
I J✝ : Box ι
p : (ι → ℝ) → Box ι → Prop
π : Prepartition I
πi : (J : Box ι) → TaggedPrepartition J
H : ∀ (J : Box ι), (∃ J', J' ∈ π ∧ J ∈ πi J') → p (TaggedPrepartition.tag (biUnionTagged π πi) J) J
J : Box ι
hJ : J ∈ π
J' : Box ι
hJ' : J' ∈ πi J
⊢ p (TaggedPrepartition.tag (πi J) J') J'
[PROOFSTEP]
rw [← π.tag_biUnionTagged hJ hJ']
[GOAL]
case refine'_1
ι : Type u_1
I J✝ : Box ι
p : (ι → ℝ) → Box ι → Prop
π : Prepartition I
πi : (J : Box ι) → TaggedPrepartition J
H : ∀ (J : Box ι), (∃ J', J' ∈ π ∧ J ∈ πi J') → p (TaggedPrepartition.tag (biUnionTagged π πi) J) J
J : Box ι
hJ : J ∈ π
J' : Box ι
hJ' : J' ∈ πi J
⊢ p (TaggedPrepartition.tag (biUnionTagged π πi) J') J'
[PROOFSTEP]
exact H J' ⟨J, hJ, hJ'⟩
[GOAL]
case refine'_2
ι : Type u_1
I J✝ : Box ι
p : (ι → ℝ) → Box ι → Prop
π : Prepartition I
πi : (J : Box ι) → TaggedPrepartition J
H : ∀ (J : Box ι), J ∈ π → ∀ (J' : Box ι), J' ∈ πi J → p (TaggedPrepartition.tag (πi J) J') J'
J' : Box ι
x✝ : ∃ J'_1, J'_1 ∈ π ∧ J' ∈ πi J'_1
J : Box ι
hJ : J ∈ π
hJ' : J' ∈ πi J
⊢ p (TaggedPrepartition.tag (biUnionTagged π πi) J') J'
[PROOFSTEP]
rw [π.tag_biUnionTagged hJ hJ']
[GOAL]
case refine'_2
ι : Type u_1
I J✝ : Box ι
p : (ι → ℝ) → Box ι → Prop
π : Prepartition I
πi : (J : Box ι) → TaggedPrepartition J
H : ∀ (J : Box ι), J ∈ π → ∀ (J' : Box ι), J' ∈ πi J → p (TaggedPrepartition.tag (πi J) J') J'
J' : Box ι
x✝ : ∃ J'_1, J'_1 ∈ π ∧ J' ∈ πi J'_1
J : Box ι
hJ : J ∈ π
hJ' : J' ∈ πi J
⊢ p (TaggedPrepartition.tag (πi J) J') J'
[PROOFSTEP]
exact H J hJ J' hJ'
[GOAL]
ι : Type u_1
I J : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
⊢ J ∈ infPrepartition π₁ π₂.toPrepartition ↔ J ∈ infPrepartition π₂ π₁.toPrepartition
[PROOFSTEP]
simp only [← mem_toPrepartition, infPrepartition_toPrepartition, inf_comm]
[GOAL]
ι : Type u_1
I J : Box ι
π π₁ π₂ : TaggedPrepartition I
x✝ : ι → ℝ
inst✝ : Fintype ι
h : IsHenstock π
x : ι → ℝ
⊢ Finset.card (Finset.filter (fun J => tag π J = x) π.boxes) ≤
Finset.card (Finset.filter (fun J => x ∈ ↑Box.Icc J) π.boxes)
[PROOFSTEP]
refine' Finset.card_le_of_subset fun J hJ => _
[GOAL]
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x✝ : ι → ℝ
inst✝ : Fintype ι
h : IsHenstock π
x : ι → ℝ
J : Box ι
hJ : J ∈ Finset.filter (fun J => tag π J = x) π.boxes
⊢ J ∈ Finset.filter (fun J => x ∈ ↑Box.Icc J) π.boxes
[PROOFSTEP]
rw [Finset.mem_filter] at hJ ⊢
[GOAL]
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x✝ : ι → ℝ
inst✝ : Fintype ι
h : IsHenstock π
x : ι → ℝ
J : Box ι
hJ : J ∈ π.boxes ∧ tag π J = x
⊢ J ∈ π.boxes ∧ x ∈ ↑Box.Icc J
[PROOFSTEP]
rcases hJ with ⟨hJ, rfl⟩
[GOAL]
case intro
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
inst✝ : Fintype ι
h : IsHenstock π
J : Box ι
hJ : J ∈ π.boxes
⊢ J ∈ π.boxes ∧ tag π J ∈ ↑Box.Icc J
[PROOFSTEP]
exact ⟨hJ, h J hJ⟩
[GOAL]
ι : Type u_1
I J : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
p : (ι → ℝ) → Box ι → Prop
hJ : J ≤ I
h : x ∈ ↑Box.Icc I
⊢ (∀ (J' : Box ι), J' ∈ single I J hJ x h → p (tag (single I J hJ x h) J') J') ↔ p x J
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
I J✝ : Box ι
π π₁✝ π₂✝ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
π₁ π₂ : TaggedPrepartition I
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
⊢ Finset.piecewise π₁.boxes π₁.tag π₂.tag J ∈ ↑Box.Icc I
[PROOFSTEP]
dsimp only [Finset.piecewise]
[GOAL]
ι : Type u_1
I J✝ : Box ι
π π₁✝ π₂✝ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
π₁ π₂ : TaggedPrepartition I
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
⊢ (if J ∈ π₁.boxes then tag π₁ J else tag π₂ J) ∈ ↑Box.Icc I
[PROOFSTEP]
split_ifs
[GOAL]
case pos
ι : Type u_1
I J✝ : Box ι
π π₁✝ π₂✝ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
π₁ π₂ : TaggedPrepartition I
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
h✝ : J ∈ π₁.boxes
⊢ tag π₁ J ∈ ↑Box.Icc I
case neg
ι : Type u_1
I J✝ : Box ι
π π₁✝ π₂✝ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
π₁ π₂ : TaggedPrepartition I
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
h✝ : ¬J ∈ π₁.boxes
⊢ tag π₂ J ∈ ↑Box.Icc I
[PROOFSTEP]
exacts [π₁.tag_mem_Icc J, π₂.tag_mem_Icc J]
[GOAL]
ι : Type u_1
I J : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
inst✝ : Fintype ι
h₁ : IsSubordinate π₁ r
h₂ : IsSubordinate π₂ r
h : Disjoint (iUnion π₁) (iUnion π₂)
⊢ IsSubordinate (TaggedPrepartition.disjUnion π₁ π₂ h) r
[PROOFSTEP]
refine' fun J hJ => (Finset.mem_union.1 hJ).elim (fun hJ => _) fun hJ => _
[GOAL]
case refine'_1
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
inst✝ : Fintype ι
h₁ : IsSubordinate π₁ r
h₂ : IsSubordinate π₂ r
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
hJ✝ : J ∈ TaggedPrepartition.disjUnion π₁ π₂ h
hJ : J ∈ π₁.boxes
⊢ ↑Box.Icc J ⊆
closedBall (tag (TaggedPrepartition.disjUnion π₁ π₂ h) J) ↑(r (tag (TaggedPrepartition.disjUnion π₁ π₂ h) J))
[PROOFSTEP]
rw [disjUnion_tag_of_mem_left _ hJ]
[GOAL]
case refine'_1
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
inst✝ : Fintype ι
h₁ : IsSubordinate π₁ r
h₂ : IsSubordinate π₂ r
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
hJ✝ : J ∈ TaggedPrepartition.disjUnion π₁ π₂ h
hJ : J ∈ π₁.boxes
⊢ ↑Box.Icc J ⊆ closedBall (tag π₁ J) ↑(r (tag π₁ J))
[PROOFSTEP]
exact h₁ _ hJ
[GOAL]
case refine'_2
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
inst✝ : Fintype ι
h₁ : IsSubordinate π₁ r
h₂ : IsSubordinate π₂ r
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
hJ✝ : J ∈ TaggedPrepartition.disjUnion π₁ π₂ h
hJ : J ∈ π₂.boxes
⊢ ↑Box.Icc J ⊆
closedBall (tag (TaggedPrepartition.disjUnion π₁ π₂ h) J) ↑(r (tag (TaggedPrepartition.disjUnion π₁ π₂ h) J))
[PROOFSTEP]
rw [disjUnion_tag_of_mem_right _ hJ]
[GOAL]
case refine'_2
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
inst✝ : Fintype ι
h₁ : IsSubordinate π₁ r
h₂ : IsSubordinate π₂ r
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
hJ✝ : J ∈ TaggedPrepartition.disjUnion π₁ π₂ h
hJ : J ∈ π₂.boxes
⊢ ↑Box.Icc J ⊆ closedBall (tag π₂ J) ↑(r (tag π₂ J))
[PROOFSTEP]
exact h₂ _ hJ
[GOAL]
ι : Type u_1
I J : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
h₁ : IsHenstock π₁
h₂ : IsHenstock π₂
h : Disjoint (iUnion π₁) (iUnion π₂)
⊢ IsHenstock (TaggedPrepartition.disjUnion π₁ π₂ h)
[PROOFSTEP]
refine' fun J hJ => (Finset.mem_union.1 hJ).elim (fun hJ => _) fun hJ => _
[GOAL]
case refine'_1
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
h₁ : IsHenstock π₁
h₂ : IsHenstock π₂
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
hJ✝ : J ∈ TaggedPrepartition.disjUnion π₁ π₂ h
hJ : J ∈ π₁.boxes
⊢ tag (TaggedPrepartition.disjUnion π₁ π₂ h) J ∈ ↑Box.Icc J
[PROOFSTEP]
rw [disjUnion_tag_of_mem_left _ hJ]
[GOAL]
case refine'_1
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
h₁ : IsHenstock π₁
h₂ : IsHenstock π₂
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
hJ✝ : J ∈ TaggedPrepartition.disjUnion π₁ π₂ h
hJ : J ∈ π₁.boxes
⊢ tag π₁ J ∈ ↑Box.Icc J
[PROOFSTEP]
exact h₁ _ hJ
[GOAL]
case refine'_2
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
h₁ : IsHenstock π₁
h₂ : IsHenstock π₂
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
hJ✝ : J ∈ TaggedPrepartition.disjUnion π₁ π₂ h
hJ : J ∈ π₂.boxes
⊢ tag (TaggedPrepartition.disjUnion π₁ π₂ h) J ∈ ↑Box.Icc J
[PROOFSTEP]
rw [disjUnion_tag_of_mem_right _ hJ]
[GOAL]
case refine'_2
ι : Type u_1
I J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
h₁ : IsHenstock π₁
h₂ : IsHenstock π₂
h : Disjoint (iUnion π₁) (iUnion π₂)
J : Box ι
hJ✝ : J ∈ TaggedPrepartition.disjUnion π₁ π₂ h
hJ : J ∈ π₂.boxes
⊢ tag π₂ J ∈ ↑Box.Icc J
[PROOFSTEP]
exact h₂ _ hJ
[GOAL]
ι : Type u_1
I✝ J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I✝
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
I J : Box ι
h : I ≤ J
⊢ Injective fun π =>
{
toPrepartition :=
{ boxes := π.boxes, le_of_mem' := (_ : ∀ (J' : Box ι), J' ∈ π.boxes → J' ≤ J),
pairwiseDisjoint := (_ : Set.Pairwise (↑π.boxes) (Disjoint on Box.toSet)) },
tag := π.tag, tag_mem_Icc := (_ : ∀ (J_1 : Box ι), tag π J_1 ∈ ↑Box.Icc J) }
[PROOFSTEP]
rintro ⟨⟨b₁, h₁le, h₁d⟩, t₁, ht₁⟩ ⟨⟨b₂, h₂le, h₂d⟩, t₂, ht₂⟩ H
[GOAL]
case mk.mk.mk.mk
ι : Type u_1
I✝ J✝ : Box ι
π π₁ π₂ : TaggedPrepartition I✝
x : ι → ℝ
r r₁ r₂ : (ι → ℝ) → ↑(Ioi 0)
I J : Box ι
h : I ≤ J
t₁ : Box ι → ι → ℝ
ht₁ : ∀ (J : Box ι), t₁ J ∈ ↑Box.Icc I
b₁ : Finset (Box ι)
h₁le : ∀ (J : Box ι), J ∈ b₁ → J ≤ I
h₁d : Set.Pairwise (↑b₁) (Disjoint on Box.toSet)
t₂ : Box ι → ι → ℝ
ht₂ : ∀ (J : Box ι), t₂ J ∈ ↑Box.Icc I
b₂ : Finset (Box ι)
h₂le : ∀ (J : Box ι), J ∈ b₂ → J ≤ I
h₂d : Set.Pairwise (↑b₂) (Disjoint on Box.toSet)
H :
(fun π =>
{
toPrepartition :=
{ boxes := π.boxes, le_of_mem' := (_ : ∀ (J' : Box ι), J' ∈ π.boxes → J' ≤ J),
pairwiseDisjoint := (_ : Set.Pairwise (↑π.boxes) (Disjoint on Box.toSet)) },
tag := π.tag, tag_mem_Icc := (_ : ∀ (J_1 : Box ι), tag π J_1 ∈ ↑Box.Icc J) })
{ toPrepartition := { boxes := b₁, le_of_mem' := h₁le, pairwiseDisjoint := h₁d }, tag := t₁,
tag_mem_Icc := ht₁ } =
(fun π =>
{
toPrepartition :=
{ boxes := π.boxes, le_of_mem' := (_ : ∀ (J' : Box ι), J' ∈ π.boxes → J' ≤ J),
pairwiseDisjoint := (_ : Set.Pairwise (↑π.boxes) (Disjoint on Box.toSet)) },
tag := π.tag, tag_mem_Icc := (_ : ∀ (J_1 : Box ι), tag π J_1 ∈ ↑Box.Icc J) })
{ toPrepartition := { boxes := b₂, le_of_mem' := h₂le, pairwiseDisjoint := h₂d }, tag := t₂, tag_mem_Icc := ht₂ }
⊢ { toPrepartition := { boxes := b₁, le_of_mem' := h₁le, pairwiseDisjoint := h₁d }, tag := t₁, tag_mem_Icc := ht₁ } =
{ toPrepartition := { boxes := b₂, le_of_mem' := h₂le, pairwiseDisjoint := h₂d }, tag := t₂, tag_mem_Icc := ht₂ }
[PROOFSTEP]
simpa using H
|
lemma additive_right: "additive (\<lambda>b. prod a b)" |
import Leanhello
def main : IO Unit := do
IO.println s!"Hello, {hello}!"
IO.println s!"Hello, {hello}!"
inductive Palindrome: List α -> Prop where
| nil : Palindrome []
| single : (a : α) -> Palindrome [a]
| sandwich : (a : α) -> Palindrome as -> Palindrome ([a] ++ as ++ [a])
theorem palindrome_reverse (h: Palindrome as) : Palindrome as.reverse := by
induction h with
| nil => exact Palindrome.nil
| single a => exact Palindrome.single a
| sandwich a h ih => simp; exact Palindrome.sandwich _ ih
theorem reverse_eq_of_palindrome (hh: Palindrome as) : as.reverse = as := by
induction hh with
| nil => rfl
| single a => rfl
| sandwich a _ ih => simp [ih]
example (h: Palindrome as) : as.reverse = as := by
simp [reverse_eq_of_palindrome h, h]
def List.last : (as : List α) → as ≠ [] → α
| [a], _ => a
| _::a₂::as, _ => (a₂::as).last (by simp)
@[simp] theorem List.dropLast_append_last (h : as ≠ []) : as.dropLast ++ [as.last h] = as := by
match as with
| [] => contradiction
| [a] => simp_all [last, dropLast]
| a₁ :: a₂ :: as =>
simp [last ,dropLast]
exact dropLast_append_last (as := a₂ :: as) (by simp)
theorem List.palindrome_ind (motive : List α → Prop)
(h₁ : motive [])
(h₂ : (a : α) → motive [a])
(h₃ : (a b : α) → (as : List α) → motive as → motive ([a] ++ as ++ [b]))
(as : List α)
: motive as :=
match as with
| [] => h₁
| [a] => h₂ a
| a₁::a₂::as' =>
have ih := palindrome_ind motive h₁ h₂ h₃ (a₂::as').dropLast
have : [a₁] ++ (a₂::as').dropLast ++ [(a₂::as').last (by simp)] = a₁::a₂::as' := by simp
this ▸ h₃ _ _ _ ih
termination_by _ as => as.length
theorem List.palindrome_of_eq_reverse (h : as.reverse = as) : Palindrome as := by
induction as using palindrome_ind
next => exact Palindrome.nil
next a => exact Palindrome.single a
next a b as ih =>
have : a = b := by simp_all
subst this
have : as.reverse = as := by simp_all
exact Palindrome.sandwich a (ih this)
def List.isPalindrome [DecidableEq α] (as : List α) : Bool :=
as.reverse = as
theorem List.isPalindrome_correct [DecidableEq α] (as : List α) : as.isPalindrome ↔ Palindrome as := by
simp [isPalindrome]
exact Iff.intro (fun h => palindrome_of_eq_reverse h) (fun h => reverse_eq_of_palindrome h)
#eval [1, 2, 1].isPalindrome
#eval [1, 2, 3, 1].isPalindrome
example : [1, 2, 1].isPalindrome := rfl
|
The European Systemic Risk Board (ESRB) has today published a new recommendation on US dollar-denominated funding of banks addressed to the national supervisory authorities of the EU Member States (Document reference: ESRB/2011/2).
The US dollar funding markets are of significant importance to EU banks. In the periods 2007-08 and 2010-11, EU banks funding themselves in US dollars were exposed to vulnerabilities, owing, on the one hand, to maturity mismatches between long-term assets and short-term liabilities in US dollars and, on the other, to the risk aversion of US money market funds at times of enhanced market tensions.
The ESRB considers that action should be taken to avoid a recurrence of these strains in US dollar-denominated funding of EU banks in the medium term. Even if the recommendations – by their nature – are not necessarily aimed at addressing recent market developments, national authorities are called on to report to the ESRB on their implementation by June 2012. To achieve this goal within a relatively short deadline, the recommendation predominantly involves measures to strengthen the use of already existing supervisory tools; but this is also partly because banks have already taken action in order to reduce the risk.
The ESRB recommends that the competent authorities intensify their monitoring action to prevent EU credit institutions from accumulating future excessive funding risks in US dollars. In particular, national supervisory authorities are requested to closely monitor maturity mismatches, funding concentration, the use of US dollar currency swaps and intra-group exposures. Moreover, the supervisory authorities should encourage credit institutions to take action before these risks reach an excessive level. At the same time, any measure to limit exposures should avoid having an adverse effect on existing financing in US dollars.
Moreover, authorities should make sure that EU credit institutions enhance their resilience to strains in the US dollar funding markets. To achieve this, the ESRB recommends that national supervisory authorities ensure that EU banks include management actions in their contingency funding plans for handling a shock in US dollar funding. In addition, the national supervisory authorities should assess the feasibility of these plans at the level of the banking sector. Based on this assessment, if there were to be a risk of simultaneous and similar responses by several banks in the face of a crisis, the supervisory authorities should consider action to diminish a potential systemic impact. |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Cubic Interpolation with Scipy
# %% pycharm={"name": "#%%\n"}
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import CubicHermiteSpline
from HARK.interpolation import CubicInterp, CubicHermiteInterp
# %% [markdown]
# ### Creating a HARK wrapper for scipy's CubicHermiteSpline
#
# The class CubicHermiteInterp in HARK.interpolation implements a HARK wrapper for scipy's CubicHermiteSpline. A HARK wrapper is needed due to the way interpolators are used in solution methods across HARK, and in particular due to the `distance_criteria` attribute used for VFI convergence.
# %% pycharm={"name": "#%%\n"}
x = np.linspace(0, 10, num=11, endpoint=True)
y = np.cos(-(x ** 2) / 9.0)
dydx = 2.0 * x / 9.0 * np.sin(-(x ** 2) / 9.0)
f = CubicInterp(x, y, dydx, lower_extrap=True)
f2 = CubicHermiteSpline(x, y, dydx)
f3 = CubicHermiteInterp(x, y, dydx, lower_extrap=True)
# %% [markdown]
# Above are 3 interpolators, which are:
# 1. **CubicInterp** from HARK.interpolation
# 2. **CubicHermiteSpline** from scipy.interpolate
# 3. **CubicHermiteInterp** hybrid newly implemented in HARK.interpolation
#
# Below we see that they behave in much the same way.
# %% pycharm={"name": "#%%\n"}
xnew = np.linspace(0, 10, num=41, endpoint=True)
plt.plot(x, y, "o", xnew, f(xnew), "-", xnew, f2(xnew), "--", xnew, f3(xnew), "-.")
plt.legend(["data", "hark", "scipy", "hark_new"], loc="best")
plt.show()
# %% [markdown]
# We can also verify that **CubicHermiteInterp** works as intended when extrapolating. Scipy's **CubicHermiteSpline** behaves differently when extrapolating, as it extrapolates using the last polynomial, whereas HARK implements linear decay extrapolation, so it is not shown below.
# %% pycharm={"name": "#%%\n"}
x_out = np.linspace(-1, 11, num=41, endpoint=True)
plt.plot(x, y, "o", x_out, f(x_out), "-", x_out, f3(x_out), "-.")
plt.legend(["data", "hark", "hark_new"], loc="best")
plt.show()
# %% [markdown]
# ### Timings
#
# Below we can compare timings for interpolation and extrapolation among the 3 interpolators. As expected, `scipy`'s CubicHermiteSpline (`f2` below) is the fastest, but it's not HARK compatible. `HARK.interpolation`'s CubicInterp (`f`) is the slowest, and `HARK.interpolation`'s new CubicHermiteInterp (`f3`) is somewhere in between.
# %% pycharm={"name": "#%%\n"}
# %timeit f(xnew)
# %timeit f(x_out)
# %% pycharm={"name": "#%%\n"}
# %timeit f2(xnew)
# %timeit f2(x_out)
# %% pycharm={"name": "#%%\n"}
# %timeit f3(xnew)
# %timeit f3(x_out)
# %% [markdown] pycharm={"name": "#%%\n"}
# Notice in particular the difference between interpolating and extrapolating for the new **CubicHermiteInterp**. The difference comes from having to calculate the extrapolation "by hand", since `HARK` uses linear decay extrapolation, whereas for interpolation it returns `scipy`'s result directly.
# %%
|
import xenalib.nat_stuff
import chris_hughes_various.zmod
/- Here is my memory of the situation Clara and Jason were faced with -/
--set_option pp.all true
--set_option pp.notation false
example (a : ℕ) (p : ℕ) [pos_nat p] (Hodd : 2 ∣ (p - 1)) (x : ℤ)
(H1 : ↑(a ^ (p - 1)) - 1 ≡ 0 [ZMOD ↑p])
(H2 : x ≡ (↑a)^2 [ZMOD ↑p]) :
x ^ ((p-1) / 2) ≡ 1 [ZMOD ↑p] :=
-- A mathematician would just say "substitute x = a^2 and we're done"
-- Lean says "naturals aren't integers, and congruence is not equality, so no"
-- I say "that's OK, we just explain to Lean exactly why things which are
-- "obviously equal" are equal"
begin
rw nat.cast_pow' a 2 at H2,
rw ←zmod.eq_iff_modeq_int at H2,
-- H2 now an equality in the ring Z/pZ
rw ←zmod.eq_iff_modeq_int,
rw ←int.cast_pow,
-- we can finally rewrite!
rw H2,
-- we now have to convince Lean that this is just H1
rw int.cast_pow,
rw nat.cast_pow',
rw nat.pow_pow,
rw nat.mul_div (show 2 ≠ 0, from dec_trivial) Hodd,
apply eq_of_sub_eq_zero,
rw ←int.cast_sub,
rw ←zmod.eq_iff_modeq_int at H1,
exact H1
end
|
module Test.Suite
import IdrTest.Test
import Test.Test
suite : IO ()
suite = do
runSuites
[ Test.Test.suite
]
|
The sets of a restricted space are the same as the sets of the vimage algebra of the intersection of the space and the set. |
The new version includes many new exciting functions (RBL, RWL, automatic white listing) and gives you very granular control over how the filter works. And it's still available in a free version!
Go and have a look!.
Since I have had a lot of problems with false positives with the black lists that I'm using on my Exchange 2003 server, I started looking into another way of filtering spam.
The obvious choice of additional protection fell on greylisting (you can read more about what it is here).
The problem with this is that there don't seem to be any free products out there for Exchange, and as I don't want to set up a Linux box (yet another box in the rack) I decided to write one myself.
Usually I receive 3500-4000 spam attempts per day, so that means that 70 mails a day are slipping through. These 70 get matched to a blacklist that is not that aggressive, and the result of this is that my spam level has gone down to almost 0% while I haven't had a single false positive yet.
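To give an idea of what the filter does on every delivery attempt, here is a rough sketch of the core greylisting decision in Python (purely illustrative; the real implementation described below is an SMTP event sink, and these names are made up):

    import time

    BLOCK_MINUTES = 15   # continue blocking for X minutes
    seen = {}            # (source ip, sender, recipient) -> first-seen time

    def on_rcpt(source_ip, sender, recipient):
        key = (source_ip, sender, recipient)
        now = time.time()
        if key not in seen:
            seen[key] = now                                 # first attempt: greylist it
            return "451 Greylisted, please try again later"
        if now - seen[key] < BLOCK_MINUTES * 60:
            return "451 Greylisted, please try again later"  # still inside the block window
        return "250 OK"                                      # a real MTA retried: let it through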
About the program. It consists of two parts.
Greylist installs as a .dll and connects to the SMTP service's OnInboundCommand RCPT. It reads its configuration from Greylist.cfg and uses Greylist.mdb for logging entries. It also produces a log file in the log directory.
Greylist admin creates and configures the above files as well as controls the settings and the white list.
Continue blocking for X minutes.
Block by Source IP, Sender email address, Recipient address all together or in any combination.
White list (always allow) by Source IP, Sender email address, Recipient address or in any combination.
Clean out entries older than X days on the first session of the day.
Stores data in a Microsoft Access database (.mdb) or in an MsSQL db.
Configures: Block for X minutes, Max age in X days, White list.
Configures which items to use when blocking by Source IP, Sender email address, Recipient address all together or in any combination.
Displays blocked items and passed items in totals.
Displays current items in database.
Displays block rate in % according to all entries in the database.
Registered users can now set how Greylist will behave on a blocked item. It can either send the 451 message and then disconnect (default) or keep the session alive and accept more recipients.
Registered users can now change the message Greylist sends out. This is especially great for corporate usage where each company can have a custom message.
For the rest Greylist stays the same for unregistered users.
Added new tab with controls for Disconnect on 451 and 451 message.
Even though Greylist has been succeeded by JEP(S), the download links remain here for reference.
For comments like ‘Hey – great app!’ use the form at the end of the page.
Nada. Nothing. It’s for free!
See it as a contribution to a better world. A free contribution! I've released this under a Creative Commons license, which comes down to this: you can use it and redistribute it as long as you refer to me and this site while using any part of my program. The full license is available in the readme file.
Register it! It will cost you 50 euro (about 65 USD) and will support the continued development and you’ll get access to the customization options for how Greylist behaves on the communication level.
The registration license will be mailed to you as soon as I’ve registered the payment.
And if your boss wants an invoice – no problem! I’ll mail that to you upon request.
The program is distributed ‘as-is’ and I don’t intend to provide any support for it.
But feel free to send me any suggestions for improvements or your own modifications.
remote SSE install when the 3rd party vendors couldn’t get their own product to work.
they released SSE to market – so there isn't any documentation!! Lucky us.
are onto a terrific little applet here.
I have been using Greylist for a couple of weeks now and have found it very useful, it has blocked 180,000 messages since we’ve started using it and has stopped a lot of emails that our GFI MailEssentials was failing to block.
One thing I think could be changed is the way the database cleanup is applied. As I don't have the IP address checking enabled, over time you will get spam from/to the same sender/recipient, causing all future spam from such pairs to get through.
I think it’s save to assume that if the First Seen and Last Seen timestamps are the same and over 24 hours old then it is not going to be resent and is probably spam. The clean up function could look for this and delete those entries, the database would be much smaller then. I have added a query to the Access database to do this for me.
Following on from this, is there an easy way to compact the database? Access is especially wasteful with regards to deleted records and I find the database can grow to 100MB in a couple of days. To run a compact on the database I am having to disable Greylist to remove the file locks and then enable it again afterwards.
I came across this tool and it looks to be a good tool. There is one issue I faced. After applying this on the Exchange server, I had socket errors when trying the email test on dnsreport.com, so I restarted the server and it went fine. Maybe it is a mandatory step and has to be in the manual for the product.
Usually my own email account got about 50-100 spam emails each day. I used to run with the GFI solution, but it kept crashing our server, and let quite a few spam-emails through. This has let 3 spam-mails through during a 3 week period! I am running with the built-in Exchange "intelligent filter" alongside greylisting, and have made a short "whitelist" of domains, and it works perfectly!
If you are using the SQL DSN and you lose connection to your server, what happens? Does the software fail back to the Jet DB locally? Does it stop greylisting until the connection is restored?
If that happens then it will fall back to not blocking / greylisting emails until the database connection is restored. It will try to restore the connection every couple of minutes automatically. Then it will log these error sessions with code 999 in the logfile.
This has been tested quite extensively to make sure that it works correctly – and it does.
What’s changed from 1.3.0 to 1.3.1?
Gmail accounts seem to be blocked completely. When it resends, the first and last timestamps are always the same. I'll update later if this changes for me. Also, is there a way to track which emails are passed?
Micheal: I’ve posted a reply to your question in the community forum –> here.
Ahh, never mind. Gmail uses multiple servers; blocking by IP address kills these for a while.
1) from the /23 part, you generate the subnet mask. This is an unsigned 32 bit integer with the 23 uppermost bits set.
Thanks! I’ve choosen another path though.
I’m not converting the IP’s to serials and then do a more then less then comparison between the source ip.
This will be implemented in the next major version which ‘might’ come in a month or so.
1,000,000 spams stopped and counting!
Hi Chris, just thought I’d pop you a quick note. We’ve got the registered version running on a dozen or so servers now – just on our 8 top servers we have prevented over 1,000,000 spam emails reaching the servers.
As for the latest (couple of days ago) Trojan that is trying to spread by email – well, all we’ve noticed is the greylist logs are a little bigger.
Thank-you very much for your wonderfull tool: it really works as expecded stopping tons of spam mails per day.
Can this be configured so that it only works on one domain? As I host mail, I do not want to annoy customers while I test this.
Heyden Kirk: Unfortunately not. The next version (2.0), which is still under development, will have a function for 'learning'. This means that it does all the processing while no mails are blocked, so that the filter gets to know your senders' email patterns.
I had a slight issue with the greylist program recently. It was running amazingly for about a week and then all of a sudden it stopped allowing email to get through. I disabled and then re-enabled it to see if that made a difference; then I disabled it, deleted the directory, downloaded a new copy and started it up, and the same thing happened: full blocking of all email to the server. Is there anything I can check? Because while it worked, it ran like a champ.
The problem is, in this way I am not able to identify the original user who is sending email to my gmail account. For example, if Sue at [email protected] sends an email to my gmail account and it gets forwarded to my test domain account, it will have the same format in the database as I mentioned earlier. Since from the database I would never know that the email came from Sue, what would be the way to identify the original user of the email?
My guess was that since my gmail account has already been whitelisted, when Sue sends me an email it really doesn't matter if I whitelist her email separately, because my gmail account is already in the whitelist.
I apologize if my language here is a bit confusing. Any help on this would be appreciated. I would love to implement this in my real environment.
I have a problem getting e-mail through while having greylist enabled. I am running version 1.3.1.
I have tried disabling and then enabling it again. I don't know what to do. I have also tried downloading a new copy.
Greylist has worked great for about 6 months, but now something has happened.
I have had some problems with this on certain customers' servers. Here are some tips to help those of you in trouble.
After creating a database and enabling the Greylist sink, sometimes all mail will be blocked. Greylist admin will also report nothing.
There is another problem where you have run the enable command too many times. This creates more than one sink. I have found this to be a huge problem without even realising it.
Hi. Just added this great tool to a site. It looks fine, but I realized after 1-2 hours that it was not so happy to talk to some mail servers. In the log it said "SMTP – 250", but on the sending server it said "host dropped connection". I tried everything of the above things. At last I had to stop using it. Any ideas??
P.S. Really sorry about that, because greylisting is a smashing good technique.
I also noticed that messages with status 250 in the log file are dropped.
Does anyone have any ideas to fix this bug?
These mails are "return receipt, delivered or read" messages, and JEP(S) is blocking these messages.
I think that is a huge bug.
Lars: Regarding the 'interesting problem'; it's not a lost message you see, but the autowhitelist in action.
When you have it active, then when an outbound mail is sent, the recipient email address gets added to the inbound sender whitelist. This gets logged in the listener like that.
After x hours, the sender email address gets automatically removed.
Thus, not a bug – it’s a feature.
I would just like to say thanks for taking the time to write one of the most amazing programs I’ve ever seen.
I’m planning on registering our copy as soon as I can get it approved.
I have just installed this on a server running 2003 and Exchange 2003. Emails are coming straight in from everywhere with no delay at all. The system is also running Symantec's Mail Security for Exchange; do I need to uninstall this first?? Also, when I telnet to port 25 on the server it still appears that the Exchange service is listening. Should the SMTP banner look different?
Seems like it’s not activated. Have you enabled it?
Normally Greylist works fine with other spam products, but of course there's no guarantee. I know that other people use Symantec's products together with Greylist.
The banner should not change as it’s not replacing the IIS SMTP service.
More questions? –> Look in the Greylist forum.
You rock! Thanks for the great tool. A solid Directory Harvesting Attack killer is what I was looking for.
This looks great. Thanks, and I’m looking forward to trying this out.
I have been using version 2.0 for over 3 weeks and it works great. It blocks tons of spam and is very, very easy to configure. I did not see anything that I would say is wrong at the moment. Can't wait for the final version.
I know you said you are 95% done, but what time span are we looking at?
Enterprise Sales Partner, Corel and Novell Business Partner.).
directly from you. Please write us the terms of reselling and prices too.
We could pay with credit card. Please write me your fax number too.
If we order, could you make out the invoice to us, NOT for the enduser?
The forum is not accepting new registrations, as the process fails when it tries to send you your confirmation email. It looks like the author's greylist package is stopping the mail?
the Hungarian direct partner of ACDSystems, Ulead, WinZip, StarNet etc.).
This is important for us.
I used greylist in Linux, http://www.logistic-china.com but now Exchange can use it. Thanks for your job.
Will there be an 'upgrade license'? We've paid for 1.3 just some weeks ago and were pretty satisfied with the solution. Now we're wondering if everybody has to pay these €150.
Christian: No worries – if you already have a Greylist license then you will receive a coupon code that can be used for getting a €50 reduction on JEP(S). These codes will be sent out in the next coming weeks.
(1) If you get a 250 in your logs *and* mail is not coming in, try stopping any other antispam solution and/or running “iisreset” again. On two servers, I found the following to be the case.
This last step suggests to me that something is awry with either Trend Micro’s antispam solution and/or this greylisting dll.
(2) 200 replies in the log might be the result of blank entries in the whitelist portion of your MS Access DB. When you get a 200 error, you still seem to get mail, even spam. To fix this, try deleting the blank entries and run "iisreset" from the command line. (200 reply = "nonstandard success response", see RFC 876).
(3) I created a “greylist-bounce.bat” file in my greylisting folder to “fix” things when mail seemed to mysteriously stop working. For whatever reason, that seemed to work.
(4) Incoming mail from Gmail and Hotmail (dunno about Y!) might be a huge problem in some environments. On one server, new mail from Hotmail got bounced a couple of times, and new mail from Gmail would range from like 5 minutes to several hours (like 12). I have my suspicions why this is the case (different IPs, different policies on different servers, etc), but the bottom line is that this could be very frustrating for users and possibly lethal to your job if, say, the CEO sent a super important email from his gmail account to someone on the Exchange server. This criticism is more directed at greylisting as a whole, rather than GRYNX in particular.
(5) I have found this program to be 90% to 99 to 100% effective. On the server where it was 99 to 100% effective, most of the spam seemed to be dictionary attacks and/or email sent to an old domain that the Exchange server had in its recipient policy (e.g. [email protected], [email protected], [email protected], [email protected], [email protected], etc). On the server where it was 90% effective, a lot of the crap spam seemed like stuff that users had subscribed to (coupons, job board sites, etc). This type of spam is much harder to deal with, as it seems to adhere to proper RFC standards.
(6) This solution is a good quick fix if you're in a pinch and need something fast and free. Ultimately, if you're running Exchange and need a "free" greylisting spam solution that is enterprise-worthy, you'll probably want to put some sort of postfix/tumsgreyspf-like box in front of your Exchange server. GRYNX is a very cool project, and while I thoroughly appreciate the hard work that has gone into it, I think that something like tumsgreyspf is much better suited for mission critical environments (the admins at RealityKings.com use it, in fact). GRYNX "works", but only if you're willing to fiddle and goof around a bit before you roll it out on your production servers. If you've got that much time to burn, then perhaps consider implementing postfix/tumsgreyspf to begin with.
As the saying goes, fast, free, and good — pick any two!
Thanks for your feedback – this is very valuable to us in developing the new versions.
We’ve released a new version under a new name, JEP(S), and under another website, http://www.Proxmea.com, which addresses all of the issues in your feedback. Be sure to check it out!
(1) This is no longer an issue, as JEP(S) handles all installation of the sinks. Furthermore, there is a smart tool included (View sinks) which helps in positioning JEP(S) correctly together with other mail add-ons.
(2) Also no longer an issue, fixed in JEP(S).
(3) No longer an issue, see 1.
(4) No longer an issue. JEP(S) can use Realtime White Lists (RWLs), which identify especially large companies. When in use, the SourceIP is excluded from the processing.
(5) We also include tarpitting in JEP(S) which protects against harvesting attacks.
(6) I hope that you, after reviewing, see that JEP(S) is a lot more mature and that it’s more aimed at the enterprise market – without letting go of all installations in smaller systems. |
% SOLVE_LINEAR_ELASTICITY: Solve a linear elasticity problem on a NURBS domain.
% For a planar domain it is the plane strain model.
%
% The function solves the linear elasticity problem
%
% - div (sigma(u)) = f in Omega = F((0,1)^n)
% sigma(u) \cdot n = g on Gamma_N
% u = h on Gamma_D
%
% with sigma(u) = mu*(grad(u) + grad(u)^t) + lambda*div(u)*I.
%
% u: displacement vector
% sigma: Cauchy stress tensor
% lambda, mu: Lame' parameters
% I: identity tensor
%
% USAGE:
%
% [geometry, msh, space, u] = solve_linear_elasticity (problem_data, method_data)
%
% INPUT:
%
% problem_data: a structure with data of the problem. It contains the fields:
% - geo_name: name of the file containing the geometry
% - nmnn_sides: sides with Neumann boundary condition (may be empty)
% - drchlt_sides: sides with Dirichlet boundary condition
% - press_sides: sides with pressure boundary condition (may be empty)
% - symm_sides: sides with symmetry boundary condition (may be empty)
% - lambda_lame: first Lame' parameter
% - mu_lame: second Lame' parameter
% - f: source term
% - h: function for Dirichlet boundary condition
% - g: function for Neumann condition (if nmnn_sides is not empty)
%
% method_data : a structure with discretization data. Its fields are:
% - degree: degree of the spline functions.
% - regularity: continuity of the spline functions.
% - nsub: number of subelements with respect to the geometry mesh
% (nsub=1 leaves the mesh unchanged)
% - nquad: number of points for Gaussian quadrature rule
%
% OUTPUT:
%
% geometry: geometry structure (see geo_load)
% msh: mesh object that defines the quadrature rule (see msh_cartesian)
% space: space object that defines the discrete basis functions (see sp_vector)
% u: the computed degrees of freedom
%
% See also EX_LIN_ELAST_HORSESHOE for an example.
%
% Copyright (C) 2010 Carlo de Falco
% Copyright (C) 2011, 2015 Rafael Vazquez
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
function [geometry, msh, sp, u] = ...
solve_linear_elasticity (problem_data, method_data)
% Extract the fields from the data structures into local variables
data_names = fieldnames (problem_data);
for iopt = 1:numel (data_names)
eval ([data_names{iopt} '= problem_data.(data_names{iopt});']);
end
data_names = fieldnames (method_data);
for iopt = 1:numel (data_names)
eval ([data_names{iopt} '= method_data.(data_names{iopt});']);
end
% Construct geometry structure
geometry = geo_load (geo_name);
degelev = max (degree - (geometry.nurbs.order-1), 0);
nurbs = nrbdegelev (geometry.nurbs, degelev);
[rknots, zeta, nknots] = kntrefine (nurbs.knots, nsub-1, nurbs.order-1, regularity);
nurbs = nrbkntins (nurbs, nknots);
geometry = geo_load (nurbs);
% Construct msh structure
rule = msh_gauss_nodes (nquad);
[qn, qw] = msh_set_quad_nodes (geometry.nurbs.knots, rule);
msh = msh_cartesian (geometry.nurbs.knots, qn, qw, geometry);
% Construct space structure
space_scalar = sp_nurbs (nurbs, msh);
scalar_spaces = repmat ({space_scalar}, 1, msh.rdim);
sp = sp_vector (scalar_spaces, msh);
clear space_scalar scalar_spaces
% Assemble the matrices
mat = op_su_ev_tp (sp, sp, msh, lambda_lame, mu_lame);
rhs = op_f_v_tp (sp, msh, f);
% Apply Neumann boundary conditions
for iside = nmnn_sides
% Restrict the function handle to the specified side, in any dimension, gside = @(x,y) g(x,y,iside)
gside = @(varargin) g(varargin{:},iside);
dofs = sp.boundary(iside).dofs;
rhs(dofs) = rhs(dofs) + op_f_v_tp (sp.boundary(iside), msh.boundary(iside), gside);
end
% Apply pressure conditions
for iside = press_sides
msh_side = msh_eval_boundary_side (msh, iside);
sp_side = sp_eval_boundary_side (sp, msh_side);
x = cell (msh_side.rdim, 1);
for idim = 1:msh_side.rdim
x{idim} = reshape (msh_side.geo_map(idim,:,:), msh_side.nqn, msh_side.nel);
end
pval = reshape (p (x{:}, iside), msh_side.nqn, msh_side.nel);
rhs(sp_side.dofs) = rhs(sp_side.dofs) - op_pn_v (sp_side, msh_side, pval);
end
% Apply symmetry conditions
symm_dofs = [];
for iside = symm_sides
if (~strcmpi (sp.transform, 'grad-preserving'))
error ('The symmetry condition is only implemented for spaces with grad-preserving transform')
end
msh_side = msh_eval_boundary_side (msh, iside);
for idim = 1:msh.rdim
normal_comp(idim,:) = reshape (msh_side.normal(idim,:,:), 1, msh_side.nqn*msh_side.nel);
end
parallel_to_axes = false;
for ind = 1:msh.rdim
ind2 = setdiff (1:msh.rdim, ind);
if (all (all (abs (normal_comp(ind2,:)) < 1e-10)))
symm_dofs = union (symm_dofs, sp.boundary(iside).dofs(sp.boundary(iside).comp_dofs{ind}));
parallel_to_axes = true;
break
end
end
if (~parallel_to_axes)
error ('solve_linear_elasticity: We have only implemented the symmetry condition for boundaries parallel to the axes')
end
end
% Apply Dirichlet boundary conditions
u = zeros (sp.ndof, 1);
[u_drchlt, drchlt_dofs] = sp_drchlt_l2_proj (sp, msh, h, drchlt_sides);
u(drchlt_dofs) = u_drchlt;
int_dofs = setdiff (1:sp.ndof, [drchlt_dofs, symm_dofs]);
rhs(int_dofs) = rhs(int_dofs) - mat (int_dofs, drchlt_dofs) * u_drchlt;
% Solve the linear system
u(int_dofs) = mat(int_dofs, int_dofs) \ rhs(int_dofs);
end
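% A minimal usage sketch (illustrative only: the geometry file, unit Lame'
% parameters, zero body force and homogeneous Dirichlet data below are
% placeholder assumptions, not values prescribed by this function):
% problem_data.geo_name = 'geo_square.txt';
% problem_data.nmnn_sides = [];
% problem_data.drchlt_sides = [1 2 3 4];
% problem_data.press_sides = [];
% problem_data.symm_sides = [];
% problem_data.lambda_lame = @(x, y) ones (size (x));
% problem_data.mu_lame = @(x, y) ones (size (x));
% problem_data.f = @(x, y) zeros ([2, size(x)]);
% problem_data.h = @(x, y, iside) zeros ([2, size(x)]);
% method_data.degree = [3 3];
% method_data.regularity = [2 2];
% method_data.nsub = [8 8];
% method_data.nquad = [4 4];
% [geometry, msh, space, u] = solve_linear_elasticity (problem_data, method_data);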
|
If $I$ is a finite set and $A_i \in M$ for all $i \in I$, then $\bigcup_{i \in I} A_i \in M$. |
/-
Copyright (c) 2022 Jannis Limperg. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jannis Limperg
-/
import Aesop
set_option aesop.check.all true
@[aesop 50% constructors]
inductive I₁
| ofI₁ : I₁ → I₁
| ofTrue : True → I₁
example : I₁ := by
aesop
example : I₁ := by
aesop (options := { strategy := .bestFirst })
example : I₁ := by
aesop (options := { strategy := .breadthFirst })
example : I₁ := by
fail_if_success
aesop (options :=
{ strategy := .depthFirst,
maxRuleApplicationDepth := 0,
maxRuleApplications := 10,
terminal := true })
aesop (options :=
{ strategy := .depthFirst,
maxRuleApplicationDepth := 10 })
|
State Before: p m n : ℕ
pp : Prime p
Hm : ¬p ∣ m
Hn : ¬p ∣ n
⊢ ¬(p ∣ m ∨ p ∣ n) State After: no goals Tactic: simp [Hm, Hn] |
using GraphUtilities, GenericTensorNetworks, Graphs
using Test
@testset "json solve" begin
for property in ["SizeMax", "SizeMin",
"SizeMax3", "SizeMin3", "CountingAll",
"CountingMax", "CountingMin", "CountingMax3", "CountingMin3", "GraphPolynomial",
"SingleConfigMax", "SingleConfigMin", "SingleConfigMax3", "SingleConfigMin3",
"ConfigsMaxTree", "ConfigsMin",
"ConfigsMaxTree3", "ConfigsMin3",
"ConfigsAll", "ConfigsAllTree"
]
res = GraphUtilities.application(Dict(
"api"=>"solve",
"api.solve"=>Dict(
"graph"=>Dict("nv"=>10, "edges"=>[[e.src, e.dst] for e in edges(smallgraph(:petersen))]),
"problem"=>"MaximalIS",
"property"=>property,
"openvertices"=>[],
"fixedvertices"=>Dict(),
"optimizer"=>Dict(
"method"=>"TreeSA",
"TreeSA"=>Dict(
"sc_target"=>20,
"sc_weight"=>1.0,
"ntrials"=>1,
"niters"=>5,
"openvertices"=>[],
"fixedvertices"=>Dict(),
"nslices"=>0,
"rw_weight"=>1.0,
"betas"=>collect(0.01:0.1:30)
)
),
"cudadevice"=>-1
)
))
@test res isa Union{Dict, Vector}
end
res = GraphUtilities.application(Dict(
"api"=>"solve",
"api.solve"=>Dict(
"graph"=>Dict("nv"=>10, "edges"=>[[e.src, e.dst] for e in edges(smallgraph(:petersen))]),
"problem"=>"MaximalIS",
"property"=>"SizeMin",
)
))
@test res == Dict("size"=>3.0)
@test_throws GraphUtilities.VerificationError GraphUtilities.application(Dict(
"api"=>"solve",
"api.solve"=>Dict(
"graph"=>Dict("nv"=>10, "edges"=>[[e.src, e.dst] for e in edges(smallgraph(:petersen))]),
"problem"=>"MaximalIS",
"property"=>"SizeMi",)
))
end
@testset "json graph" begin
d_graph = Dict(
"api"=>"graph",
"api.graph" => Dict(
"type"=>"kings",
"type.kings"=> Dict(
"m"=>8,
"n"=>8,
"filling"=>0.8,
"seed"=>2
)
)
)
res = GraphUtilities.application(d_graph)
@test res isa Dict
end
@testset "json opteinsum" begin
d_opteinsum = Dict("api"=>"opteinsum",
"api.opteinsum" => Dict(
"inputs"=>[[1,2], [2,3], [3,4], [5,4]],
"output"=>[1,6],
"method"=>"TreeSA",
"sizes"=>[1=>2, 2=>2, 3=>2,
4=>2, 5=>2]
)
)
res = GraphUtilities.application(d_opteinsum)
@test res isa Dict
@test GraphUtilities.application(Dict("api"=>"help")) isa Dict
end |
** Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
** See https://llvm.org/LICENSE.txt for license information.
** SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
* DATA statements with implied DO loops.
BLOCKDATA DO
integer a(2, 3:4, -5:-4)
real*4 b(4)
integer * 2 c(4, 4), d(3,3), e(0:2, 0:2)
character*2 ch(-3:2), g(1:3)*3
double precision f(3)
common /DO2/ i, a, b, c, d, e, ch, f, g
c ----------------------------------------- tests 1 - 8:
c initialize: offset value
c a(1, 3, -5) : 0 -1
c a(2, 3, -5) : 4 -2
c a(1, 4, -5) : 8 -5
c a(2, 4, -5) : 12 -6
c a(1, 3, -4) : 16 -3
c a(2, 3, -4) : 20 -4
c a(1, 4, -4) : 24 -7
c a(2, 4, -4) : 28 -8
data i /-100/,
' (((a(i, j, k), i = 1, 1+1), k = -5, -4), j = 1*3, 4)/
' -1, -2, -3, -4, -5, -6, -7, -8 /
c ----------------------------------------- tests 9 - 14:
c initialize:
c b(2) : 4 7.0
c b(4) : 12 8.0
c c(1, 1) : 0 1
c c(2, 2) : 10 1
c c(3, 3) : 20 1
c c(4, 4) : 30 1
data (b(i), i = 2, 5, 2) / 7, 8/,
: (c(i, i), i = 1, 4) / 4 * 1/
c ------------------------------------------ tests 15 - 20:
c initialize:
c d(1, 1) : 0 : 2
c d(1, 2) : 6 : 2
c d(2, 2) : 8 : 4
c d(1, 3) : 12 : 4
c d(2, 3) : 14 : 4
c d(3, 3) : 16 : 4
data ((d((i), j+i-i), i = 1, j), j = 1, 3) / 2*2, 4*4/
c ------------------------------------------- tests 21 - 26:
c ch(-3, -2, ... 1, 2) '1 ' '2 ' '3 ' '3 ' '4 ' '55'
data (ch(j),ch(j+1), j = -3,2,2) /'1','2',2*'3','4 ','55'/
c ------------------------------------------- tests 27 - 33:
c initialize:
c e(0, 0) : 0 : 1
c e(1, 1) : 8 : 2
c e(2, 2) : 16 : 3
c e(0, 1) : 6 : 4
c e(0, 2) : 12 : 5
c e(2, 1) : 10 : 6
c e(1, 2) : 14 : 7
data (e(i-1,i-1), i=1,3), (e(i/i-1,i), i = 1, 2),
+ (e(i*2, 1 + i - 1), i = 1, 1, 1),
+ (e(i/4, i/2+0), i = 4, 4, 99)
+ / 1, 2, 3, 4, 5, 6, 7/
c -------------------------------------------- test 34:
c pad word between eresults and dresults.
c ------------------------------------------- tests 35 - 40:
c loop with negative step:
data (f(i), i = 3, 1, -1) / 1.0D0, 2.0D0, 3.0D0/
c ------------------------------------------- tests 41 - 43:
c substring expression in implied do:
c initialize last two bytes of g(1), g(2), g(3):
data (g((i-1) * 2 - i)(2:3) , i = 3, 5) / '33', '44', '55' /
end
C ---- main program -----:
integer a(8)
real*4 b(4)
integer * 2 c(4, 4), d(3,3), e(0:2, 0:2)
character*2 ch(-3:2), g(1:3)*3
double precision f(3)
common /DO2/ i, a, b, c, d, e, ch, f, g
parameter (N = 43)
common/rslts/rslts(20),chrslts(6),erslts(8),drslts(3),grslts(3)
integer rslts, erslts
character*4 chrslts, grslts
double precision drslts
c ---- set up expected array:
integer expect(N)
c ---------------- tests 1 - 8:
data expect / -1, -2, -5, -6, -3, -4, -7, -8,
c ---------------- tests 9 - 14:
+ 7, 8, 1, 1, 1, 1,
c ---------------- tests 15 - 20:
+ 2, 2, 4, 4, 4, 4,
c ---------------- tests 21 - 26:
+ '1 ', '2 ', '3 ', '3 ', '4 ', '55 ',
c ---------------- tests 27 - 34:
+ 1, 2, 3, 4, 5, 6, 7, -99,
c ---------------- tests 35 - 40: (3 d. p. values: 3.0, 2.0, 1.0):
c BIG ENDIAN
c + '40080000'x, 0, '40000000'x, 0, '3ff00000'x, 0,
c LITTLE ENDIAN
+ 0, '40080000'x, 0, '40000000'x, 0, '3ff00000'x,
c ---------------- tests 41 - 43:
+ '33 ', '44 ', '55 ' /
c ---- assign values to results array:
c -- tests 1 - 8:
do 10 i = 1, 8
10 rslts(i) = a(i)
c -- tests 9 - 14:
rslts(9) = b(2)
rslts(10) = b(4)
rslts(11) = c(1, 1)
rslts(12) = c(2, 2)
rslts(13) = c(3, 3)
rslts(14) = c(4, 4)
c -- tests 15 - 20:
rslts(15) = d(1, 1)
rslts(16) = d(1, 2)
rslts(17) = d(2, 2)
rslts(18) = d(1, 3)
rslts(19) = d(2, 3)
rslts(20) = d(3, 3)
c -- tests 21 - 26:
do 20 i = 1, 6
20 chrslts(i) = ch(i - 4)
c -- tests 27 - 34:
erslts(1) = e(0, 0)
erslts(2) = e(1, 1)
erslts(3) = e(2, 2)
erslts(4) = e(0, 1)
erslts(5) = e(0, 2)
erslts(6) = e(2, 1)
erslts(7) = e(1, 2)
erslts(8) = -99
c -- tests 35 - 40:
drslts(1) = f(1)
drslts(2) = f(2)
drslts(3) = f(3)
c -- tests 41 - 43:
do 30 i = 1, 3
30 grslts(i) = g(i)(2:3)
c ---- check results:
call check(rslts, expect, N)
end
|
/*
* This file is part of MXE. See LICENSE.md for licensing information.
*/
/* taken from http://www.netlib.org/lapack/lapacke.html */
/* Calling CGEQRF and CUNGQR to compute Q with workspace querying */
#include <stdio.h>
#include <stdlib.h>
#include <lapacke_utils.h>
#include <cblas.h>
int main (int argc, const char * argv[])
{
(void)argc;
(void)argv;
lapack_complex_float *a,*tau,*r,*work,one,zero,query;
lapack_int info,m,n,lda,lwork;
int i,j;
float err;
m = 10; n = 5; lda = m;
one = lapack_make_complex_float(1.0,0.0);
zero= lapack_make_complex_float(0.0,0.0);
a = calloc(m*n,sizeof(lapack_complex_float));
r = calloc(n*n,sizeof(lapack_complex_float));
tau = calloc(m,sizeof(lapack_complex_float));
for(j=0;j<n;j++)
for(i=0;i<m;i++)
a[i+j*m] = lapack_make_complex_float(i+1,j+1);
info = LAPACKE_cgeqrf_work(LAPACK_COL_MAJOR,m,n,a,lda,tau,&query,-1);
lwork = (lapack_int)query;
info = LAPACKE_cungqr_work(LAPACK_COL_MAJOR,m,n,n,a,lda,tau,&query,-1);
lwork = MAX(lwork,(lapack_int)query);
work = calloc(lwork,sizeof(lapack_complex_float));
info = LAPACKE_cgeqrf_work(LAPACK_COL_MAJOR,m,n,a,lda,tau,work,lwork);
info = LAPACKE_cungqr_work(LAPACK_COL_MAJOR,m,n,n,a,lda,tau,work,lwork);
for(j=0;j<n;j++)
for(i=0;i<n;i++)
r[i+j*n]=(i==j)?-one:zero;
cblas_cgemm(CblasColMajor,CblasConjTrans,CblasNoTrans,
n,n,m,&one,a,lda,a,lda,&one,r,n);
err=0.0;
for(i=0;i<n;i++)
for(j=0;j<n;j++)
err=MAX(err,cabs(r[i+j*n]));
printf("error=%e\n",err);
free(work);
free(tau);
free(r);
free(a);
return(info);
}
|
(*
Title: Characterization of Dyck Paths (Part 1)
Author: Jose Manuel Rodriguez Caballero
We prove that DyckPath (defined in DyckPathsDef) satisfies the following properties.
proposition DyckA: \<open>DyckPath [] = True\<close>
proposition DyckB: \<open>DyckPath w \<Longrightarrow> DyckPath ([1] @ w @ [-1])\<close>
proposition DyckC: \<open>DyckPath v \<Longrightarrow> DyckPath w \<Longrightarrow> DyckPath (v @ w)\<close>
(This code was verified in Isabelle2018)
*)
theory DyckPathsCharacterization1
imports Complex_Main DyckPathsDef
begin
section {* Proposition DyckA *}
proposition DyckA: \<open>DyckPath [] = True\<close>
by simp
section {* Proposition DyckB *}
lemma DyckBDyckLetters: \<open>DyckLetters w \<Longrightarrow> DyckLetters ([1] @ w @ [-1])\<close>
proof(induction w)
case Nil
then show ?case
by simp
next
case (Cons a w)
then show ?case
by (metis DyckLetters.simps(2) append.left_neutral append_Cons)
qed
fun incr :: \<open>int list \<Rightarrow> int \<Rightarrow> int list\<close> where
\<open>incr [] c = []\<close> |
\<open>incr (x#w) c = (x+c)#(incr w c)\<close>
lemma CocatIncr: \<open> incr (v @ w) c = (incr v c) @ (incr w c)\<close>
proof(induction v)
case Nil
then show ?case
by simp
next
case (Cons a v)
then show ?case
by simp
qed
lemma IteractionIncr: \<open> incr (incr w c) d = incr w (c+d) \<close>
proof(induction w)
case Nil
then show ?case
by simp
next
case (Cons a w)
then show ?case
by simp
qed
lemma ConcatHeightSumUS1:
assumes \<open>w \<noteq> [] \<longrightarrow> (\<forall> x. x + last (HeightList w) = last (HeightList (x#w)) )\<close>
shows \<open>x + last (HeightList (y#w)) = last (HeightList (x#(y#w)))\<close>
proof(cases \<open>w = []\<close>)
case True
then show ?thesis
by simp
next
case False
then have \<open>w \<noteq> []\<close> by blast
then have \<open>y + last (HeightList w) = last (HeightList (y#w))\<close>
by (simp add: assms)
then have \<open>x + y + last (HeightList w) = x + last (HeightList (y#w))\<close>
by simp
then have \<open>(x + y) + last (HeightList w) = x + last (HeightList (y#w))\<close>
by simp
then have \<open>last (HeightList ( (x + y) # w) ) = x + last (HeightList (y#w))\<close>
by (simp add: False assms)
then show ?thesis
using HeightList.elims by auto
qed
lemma ConcatHeightSumUS: \<open>w \<noteq> [] \<Longrightarrow> (\<forall> x. x + last (HeightList w) = last (HeightList (x#w)) )\<close>
proof(induction w)
case Nil
then show ?case
by simp
next
case (Cons a w)
then show ?case using ConcatHeightSumUS1 by blast
qed
lemma ConcatHeightSumU: \<open>w \<noteq> [] \<Longrightarrow> (\<forall> x. last (HeightList [x]) + last (HeightList w) = last (HeightList ([x]@w)) )\<close>
using ConcatHeightSumUS by auto
lemma ConcatHeightSumQRec:
assumes \<open> \<forall> w. v \<noteq> [] \<and> w \<noteq> [] \<longrightarrow> last (HeightList v) + last (HeightList w) = last (HeightList (v@w)) \<close>
and \<open>w \<noteq> []\<close>
shows \<open>last (HeightList (x#v)) + last (HeightList w) = last (HeightList ((x#v)@w))\<close>
proof(cases \<open>v = []\<close>)
case True
then show ?thesis
using ConcatHeightSumU assms(2) by blast
next
case False
then have \<open>v \<noteq> []\<close> by blast
have \<open>last (HeightList v) + last (HeightList w) = last (HeightList (v@w))\<close>
by (simp add: False assms(1) assms(2))
then have \<open>last (HeightList [x]) + last (HeightList v) + last (HeightList w) = last (HeightList [x]) + last (HeightList (v@w))\<close>
by simp
then have \<open>last (HeightList ([x]@v)) + last (HeightList w) = last (HeightList [x]) + last (HeightList (v@w))\<close>
using ConcatHeightSumU \<open>v \<noteq> []\<close>
by metis
then have \<open>last (HeightList ([x]@v)) + last (HeightList w) = last (HeightList ([x]@(v@w)))\<close>
using ConcatHeightSumU \<open>v \<noteq> []\<close>
by auto
then show ?thesis by auto
qed
lemma ConcatHeightSumQ: \<open> \<forall> w. v \<noteq> [] \<and> w \<noteq> [] \<longrightarrow> last (HeightList v) + last (HeightList w) = last (HeightList (v@w)) \<close>
proof(induction v)
case Nil
then show ?case
by simp
next
case (Cons a v)
then show ?case using ConcatHeightSumQRec
by blast
qed
lemma ConcatHeightSum: \<open>v \<noteq> [] \<Longrightarrow> w \<noteq> [] \<Longrightarrow> last (HeightList v) + last (HeightList w) = last (HeightList (v@w)) \<close>
using ConcatHeightSumQ by blast
lemma ConcatHeightComm: \<open>v \<noteq> [] \<Longrightarrow> w \<noteq> [] \<Longrightarrow> last (HeightList (v@w)) = last (HeightList (w@v)) \<close>
using ConcatHeightSum
by smt
lemma Lastsumchar: \<open>last (HeightList ((a+b)#v)) = last (HeightList (a#b#v))\<close>
using HeightList.elims by auto
lemma ConcatHeightLX2Rec:
assumes \<open>\<forall> a. HeightList ((a#v)@[x]) = (HeightList (a#v)) @ [x+(last (HeightList (a#v)))] \<close>
shows \<open>HeightList ((a#(b#v))@[x]) = (HeightList (a#(b#v))) @ [x+(last (HeightList (a#(b#v))))] \<close>
proof-
have \<open>HeightList ((a#(b#v))@[x]) = HeightList (a#b#v@[x])\<close>
by simp
then have \<open>HeightList ((a#(b#v))@[x]) = a#HeightList ((a+b)#v@[x])\<close>
by auto
then have \<open>HeightList ((a#(b#v))@[x]) = a# (HeightList ((a+b)#v)) @ [x+(last (HeightList ((a+b)#v)))]\<close>
using assms by auto
have \<open>last (HeightList ((a+b)#v)) = last (HeightList (a#b#v))\<close>
by (simp add: Lastsumchar)
then have \<open>HeightList ((a#(b#v))@[x]) = a# (HeightList ((a+b)#v)) @ [x+(last (HeightList (a#b#v)))]\<close>
using \<open>HeightList ((a#(b#v))@[x]) = a# (HeightList ((a+b)#v)) @ [x+(last (HeightList ((a+b)#v)))]\<close>
by auto
have \<open>a# (HeightList ((a+b)#v)) = (HeightList (a#b#v))\<close>
by simp
then have \<open>HeightList ((a#(b#v))@[x]) = (HeightList (a#b#v)) @ [x+(last (HeightList (a#b#v)))]\<close>
using \<open>HeightList ((a#(b#v))@[x]) = a# (HeightList ((a+b)#v)) @ [x+(last (HeightList (a#b#v)))]\<close>
by simp
then show ?thesis
by blast
qed
lemma ConcatHeightLX2: \<open>\<forall> a. HeightList ((a#v)@[x]) = (HeightList (a#v)) @ [x+(last (HeightList (a#v)))] \<close>
proof(induction v)
case Nil
then show ?case by simp
next
case (Cons a v)
then show ?case using ConcatHeightLX2Rec by auto
qed
lemma ConcatHeightLX1: \<open>v \<noteq> [] \<Longrightarrow>
HeightList (v@[x]) = (HeightList v) @ [x+(last (HeightList v))] \<close>
using ConcatHeightLX2
by (metis neq_Nil_conv)
lemma ConcatHeightLX: \<open>v \<noteq> [] \<Longrightarrow>
HeightList (v@[x]) = (HeightList v)@( incr (HeightList [x]) (last (HeightList v)) )\<close>
using ConcatHeightLX1
by simp
lemma IncreHeight: \<open>v \<noteq> [] \<Longrightarrow>
( (HeightList v)@( incr (HeightList u) (last (HeightList v)) ) )
@ ( incr (HeightList [x]) (last (HeightList (v@u))) )
= (HeightList v)@( incr (HeightList (u @ [x])) (last (HeightList v)) )\<close>
proof(cases \<open>u = []\<close>)
case True
then show ?thesis
by simp
next
case False
then have \<open>u \<noteq> []\<close> by blast
assume \<open>v \<noteq> []\<close>
then have \<open>HeightList (u @ [x]) = (HeightList u)@( incr (HeightList [x]) (last (HeightList u)) )\<close>
using ConcatHeightLX \<open>u \<noteq> []\<close> by blast
then have \<open>incr (HeightList (u @ [x])) (last (HeightList v)) = incr ((HeightList u)@( incr (HeightList [x]) (last (HeightList u)) )) (last (HeightList v))\<close>
by simp
then have \<open>incr (HeightList (u @ [x])) (last (HeightList v)) = (incr (HeightList u) (last (HeightList v)) ) @ (incr ( incr (HeightList [x]) (last (HeightList u)) ) (last (HeightList v)))\<close>
by (simp add: CocatIncr)
then have \<open>incr (HeightList (u @ [x])) (last (HeightList v)) = (incr (HeightList u) (last (HeightList v)) ) @ ( incr (HeightList [x]) (last (HeightList u)+last (HeightList v)) ) \<close>
by auto
then have \<open>incr (HeightList (u @ [x])) (last (HeightList v)) = (incr (HeightList u) (last (HeightList v)) ) @ ( incr (HeightList [x]) (last (HeightList (u@v))) ) \<close>
using \<open>u \<noteq> []\<close> \<open>v \<noteq> []\<close>
by (simp add: ConcatHeightSum )
then have \<open>(HeightList v)@( incr (HeightList (u @ [x])) (last (HeightList v)) ) = (HeightList v)@ ((incr (HeightList u) (last (HeightList v)) ) @ ( incr (HeightList [x]) (last (HeightList (u@v))) ) ) \<close>
by simp
then have \<open>(HeightList v)@( incr (HeightList (u @ [x])) (last (HeightList v)) ) = ( (HeightList v)@( incr (HeightList u) (last (HeightList v)) ) ) @ ( incr (HeightList [x]) (last (HeightList (v@u))) )\<close>
using ConcatHeightComm
by (simp add: False \<open>v \<noteq> []\<close>)
then show ?thesis
by auto
qed
lemma ConcatHeightQRec:
assumes \<open>\<forall> v w. v \<noteq> [] \<and> length w = n \<longrightarrow> HeightList (v@w) = (HeightList v)@( incr (HeightList w) (last (HeightList v)) )\<close>
and \<open>v \<noteq> []\<close> and \<open>length w = Suc n\<close>
shows \<open>HeightList (v@w) = (HeightList v)@( incr (HeightList w) (last (HeightList v)) )\<close>
proof-
from \<open>length w = Suc n\<close> obtain u x where \<open>w = u @ [x]\<close>
by (metis append_butlast_last_id length_0_conv nat.simps(3))
have \<open>length u = n\<close>
using \<open>w = u @ [x]\<close> assms(3) by auto
have \<open>HeightList (v@u) = (HeightList v)@( incr (HeightList u) (last (HeightList v)) )\<close>
by (simp add: \<open>length u = n\<close> assms(1) assms(2))
have \<open>HeightList ((v@u)@[x]) = (HeightList (v@u)) @ ( incr (HeightList [x]) (last (HeightList (v@u))) )\<close>
using ConcatHeightLX assms(2) by blast
then have \<open>HeightList ((v@u)@[x]) = ( (HeightList v)@( incr (HeightList u) (last (HeightList v)) ) ) @ ( incr (HeightList [x]) (last (HeightList (v@u))) )\<close>
using \<open>HeightList (v @ u) = HeightList v @ incr (HeightList u) (last (HeightList v))\<close> by auto
then have \<open>HeightList (v @ w) = ( (HeightList v)@( incr (HeightList u) (last (HeightList v)) ) ) @ ( incr (HeightList [x]) (last (HeightList (v@u))) )\<close>
using \<open>w = u @ [x]\<close> by auto
then have \<open>HeightList (v @ w) = (HeightList v)@( incr (HeightList (u @ [x])) (last (HeightList v)) )\<close>
using IncreHeight assms(2) by auto
then show ?thesis
using \<open>w = u @ [x]\<close> by blast
qed
lemma ConcatHeightQ: \<open>\<forall> v w. v \<noteq> [] \<and> length w = n \<longrightarrow> HeightList (v@w) = (HeightList v)@( incr (HeightList w) (last (HeightList v)) )\<close>
proof(induction n)
case 0
then show ?case
by simp
next
case (Suc n)
then show ?case using ConcatHeightQRec
by blast
qed
lemma ConcatHeight: \<open>v \<noteq> [] \<Longrightarrow> HeightList (v@w) = (HeightList v)@( incr (HeightList w) (last (HeightList v)) )\<close>
using ConcatHeightQ by blast
lemma lastHeightListPrefix: \<open>w \<noteq> [] \<Longrightarrow> last (HeightList (x#w)) = x+(last (HeightList w))\<close>
by (simp add: ConcatHeightSumUS)
lemma HeightListPar: \<open>w \<noteq> [] \<Longrightarrow> HeightList ([1] @ w @ [-1]) = [1] @ (incr (HeightList w) 1) @ [last (HeightList w)]\<close>
proof-
assume \<open>w \<noteq> []\<close>
have \<open>[1] \<noteq> []\<close> by simp
have \<open>HeightList ([1] @ w) = [1] @ (incr (HeightList w) 1)\<close>
by (metis ConcatHeight HeightList.simps(2) \<open>[1] \<noteq> []\<close> last.simps)
have \<open>[1] @ w \<noteq> []\<close>
by simp
have \<open>HeightList ([1] @ w @ [-1]) = (HeightList ([1] @ w)) @ (incr [-1] (last (HeightList ([1] @ w))))\<close>
by (metis ConcatHeight HeightList.simps(2) \<open>[1] @ w \<noteq> []\<close> append.assoc)
then have \<open>HeightList ([1] @ w @ [-1]) = [1] @ (incr (HeightList w) 1) @ (incr [-1] (last (HeightList ([1] @ w))))\<close>
using \<open>HeightList ([1] @ w) = [1] @ incr (HeightList w) 1\<close> by auto
then have \<open>HeightList ([1] @ w @ [-1]) = [1] @ (incr (HeightList w) 1) @ (incr [-1] (last (HeightList w)+1))\<close>
by (simp add: \<open>w \<noteq> []\<close> lastHeightListPrefix)
then have \<open>HeightList ([1] @ w @ [-1]) = [1] @ (incr (HeightList w) 1) @ [-1+(last (HeightList w)+1)]\<close>
by auto
then have \<open>HeightList ([1] @ w @ [-1]) = [1] @ (incr (HeightList w) 1) @ [last (HeightList w)]\<close>
by simp
then show ?thesis by blast
qed
lemma NonNegPathincr: \<open>NonNeg w \<Longrightarrow> c \<ge> 0 \<Longrightarrow> NonNeg (incr w c)\<close>
proof(induction w)
case Nil
then show ?case
by simp
next
case (Cons a w)
then show ?case
by (smt NonNeg.simps(2) incr.simps(2))
qed
lemma ConcatNonNeg: \<open>NonNeg v \<Longrightarrow> NonNeg w \<Longrightarrow> NonNeg (v@w)\<close>
proof(induction v)
case Nil
then show ?case
by simp
next
case (Cons a v)
then show ?case
by (metis Cons_eq_appendI NonNeg.simps(2))
qed
lemma NonNegRecLast: \<open>NonNeg (w@[x]) = (if x \<ge> 0 then NonNeg w else False)\<close>
proof(induction w)
case Nil
then show ?case
by simp
next
case (Cons a w)
then show ?case
by auto
qed
lemma NonNegPathlast: \<open>w \<noteq> [] \<Longrightarrow> NonNegPath w \<Longrightarrow> last (HeightList w) \<ge> 0\<close>
using NonNegRecLast
by (metis HeightList.elims NonNegPath.elims(2) list.discI snoc_eq_iff_butlast)
lemma DyckBNonNegPath: \<open>NonNegPath w \<Longrightarrow> NonNegPath ([1] @ w @ [-1])\<close>
proof(cases \<open>w = []\<close>)
case True
then show ?thesis
by simp
next
case False
then have \<open>w \<noteq> []\<close> by blast
assume \<open>NonNegPath w\<close>
then have \<open>NonNeg (HeightList w)\<close>
by simp
then have \<open>NonNeg (incr (HeightList w) 1)\<close>
by (simp add: NonNegPathincr)
have \<open>HeightList ([1] @ w @ [-1]) = [1] @ (incr (HeightList w) 1) @ [last (HeightList w)]\<close>
using HeightListPar \<open>w \<noteq> []\<close> by blast
have \<open>last (HeightList w) \<ge> 0\<close>
using NonNegPathlast \<open>NonNegPath w\<close> \<open>w \<noteq> []\<close> by blast
have \<open>(1::int) \<ge> (0::int)\<close>
by simp
have \<open>NonNeg ([1] @ (incr (HeightList w) 1))\<close>
by (simp add: \<open>NonNeg (incr (HeightList w) 1)\<close>)
have \<open>NonNeg [last (HeightList w)]\<close>
by (simp add: \<open>0 \<le> last (HeightList w)\<close>)
have \<open>NonNeg ( ([1] @ (incr (HeightList w) 1)) @ [last (HeightList w)] )\<close>
using ConcatNonNeg \<open>NonNeg ([1] @ (incr (HeightList w) 1))\<close> \<open>NonNeg [last (HeightList w)]\<close>
by blast
then show ?thesis
using \<open>HeightList ([1] @ w @ [- 1]) = [1] @ incr (HeightList w) 1 @ [last (HeightList w)]\<close> by auto
qed
lemma DyckBlast: \<open>(w \<noteq> [] \<longrightarrow> last (HeightList w) = 0) \<Longrightarrow> (([1] @ w @ [-1]) \<noteq> [] \<longrightarrow> last (HeightList ([1] @ w @ [-1])) = 0)\<close>
by (metis (no_types, hide_lams) ConcatHeightComm ConcatHeightSumUS HeightList.simps(2) HeightList.simps(3) Nil_is_append_conv add_cancel_right_right append.left_neutral append_eq_Cons_conv eq_neg_iff_add_eq_0 last.simps last_ConsL not_Cons_self)
proposition DyckB: \<open>DyckPath w \<Longrightarrow> DyckPath ([1] @ w @ [-1])\<close>
using DyckBDyckLetters DyckBNonNegPath DyckBlast by auto
section {* Proposition DyckC *}
lemma DyckCNonNegPath: \<open>NonNegPath v \<Longrightarrow> NonNegPath w \<Longrightarrow> NonNegPath (v @ w)\<close>
by (metis ConcatHeightQ ConcatNonNeg NonNegPath.elims(2) NonNegPath.elims(3) NonNegPathincr NonNegPathlast append.left_neutral)
lemma DyckCDyckLetters: \<open>DyckLetters v \<Longrightarrow> DyckLetters w \<Longrightarrow> DyckLetters (v @ w)\<close>
proof(induction v)
case Nil
then show ?case
by simp
next
case (Cons a v)
then show ?case
by (metis DyckLetters.simps(2) append_Cons)
qed
lemma DyckCDyckLetterslastHeightList: \<open>(v \<noteq> [] \<longrightarrow> last (HeightList v) = 0) \<Longrightarrow> (w \<noteq> [] \<longrightarrow> last (HeightList w) = 0) \<Longrightarrow> (v@w \<noteq> [] \<longrightarrow> last (HeightList (v@w)) = 0)\<close>
by (metis ConcatHeightSumQ add_cancel_left_right append_self_conv2 self_append_conv)
proposition DyckC: \<open>DyckPath v \<Longrightarrow> DyckPath w \<Longrightarrow> DyckPath (v @ w)\<close>
using DyckCNonNegPath DyckCDyckLetters DyckCDyckLetterslastHeightList by auto
end
|
/-
Copyright (c) 2021 Praneeth Kolichala. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Praneeth Kolichala
-/
import topology.homotopy.basic
import topology.constructions
import topology.homotopy.path
import category_theory.groupoid
import topology.homotopy.fundamental_groupoid
import topology.category.Top.limits
import category_theory.limits.preserves.shapes.products
/-!
# Product of homotopies
In this file, we introduce definitions for the product of
homotopies. We show that the products of relative homotopies
are still relative homotopies. Finally, we specialize to the case
of path homotopies, and provide the definition for the product of path classes.
We show various lemmas associated with these products, such as the fact that
path products commute with path composition, and that projection is the inverse
of products.
## Definitions
### General homotopies
- `continuous_map.homotopy.pi homotopies`: Let f and g be a family of functions
indexed on I, such that for each i ∈ I, fᵢ and gᵢ are maps from A to Xᵢ.
Let `homotopies` be a family of homotopies from fᵢ to gᵢ for each i.
Then `homotopy.pi homotopies` is the canonical homotopy
from ∏ f to ∏ g, where ∏ f is the product map from A to Πi, Xᵢ,
and similarly for ∏ g.
- `continuous_map.homotopy_rel.pi homotopies`: Same as `continuous_map.homotopy.pi`, but
all homotopies are done relative to some set S ⊆ A.
- `continuous_map.homotopy.prod F G` is the product of homotopies F and G,
where F is a homotopy between f₀ and f₁, G is a homotopy between g₀ and g₁.
The result F × G is a homotopy between (f₀ × g₀) and (f₁ × g₁).
- `continuous_map.homotopy_rel.prod F G`: Same as `continuous_map.homotopy.prod`, but
all homotopies are done relative to some set S ⊆ A.
### Path products
- `path.homotopic.pi` The product of a family of path classes, where a path class is an equivalence
class of paths up to path homotopy.
- `path.homotopic.prod` The product of two path classes.
## Fundamental groupoid preserves products
- `fundamental_groupoid_functor.pi_iso` An isomorphism between Π i, (π Xᵢ) and π (Πi, Xᵢ), whose
inverse is precisely the product of the maps π (Π i, Xᵢ) → π (Xᵢ), each induced by
the projection in `Top` Π i, Xᵢ → Xᵢ.
- `fundamental_groupoid_functor.prod_iso` An isomorphism between πX × πY and π (X × Y), whose
inverse is precisely the product of the maps π (X × Y) → πX and π (X × Y) → Y, each induced by
the projections X × Y → X and X × Y → Y
- `fundamental_groupoid_functor.preserves_product` A proof that the fundamental groupoid functor
preserves all products.
-/
noncomputable theory
namespace continuous_map
open continuous_map
section pi
variables {I : Type*} {X : I → Type*} [∀i, topological_space (X i)]
{A : Type*} [topological_space A]
{f g : Π i, C(A, X i)} {S : set A}
/-- The product homotopy of `homotopies` between functions `f` and `g` -/
@[simps]
def homotopy.pi (homotopies : Π i, homotopy (f i) (g i)) :
homotopy (pi f) (pi g) :=
{ to_fun := λ t i, homotopies i t,
to_fun_zero := by { intro t, ext i, simp only [pi_eval, homotopy.apply_zero], },
to_fun_one := by { intro t, ext i, simp only [pi_eval, homotopy.apply_one], } }
/-- The relative product homotopy of `homotopies` between functions `f` and `g` -/
@[simps]
def homotopy_rel.pi (homotopies : Π i : I, homotopy_rel (f i) (g i) S) :
homotopy_rel (pi f) (pi g) S :=
{ prop' :=
begin
intros t x hx,
dsimp only [coe_mk, pi_eval, to_fun_eq_coe, homotopy_with.coe_to_continuous_map],
simp only [function.funext_iff, ← forall_and_distrib],
intro i,
exact (homotopies i).prop' t x hx,
end,
..(homotopy.pi (λ i, (homotopies i).to_homotopy)), }
end pi
section prod
variables {α β : Type*} [topological_space α] [topological_space β]
{A : Type*} [topological_space A]
{f₀ f₁ : C(A, α)} {g₀ g₁ : C(A, β)} {S : set A}
/-- The product of homotopies `F` and `G`,
where `F` takes `f₀` to `f₁` and `G` takes `g₀` to `g₁` -/
@[simps]
def homotopy.prod (F : homotopy f₀ f₁) (G : homotopy g₀ g₁) :
homotopy (prod_mk f₀ g₀) (prod_mk f₁ g₁) :=
{ to_fun := λ t, (F t, G t),
to_fun_zero := by { intro, simp only [prod_eval, homotopy.apply_zero], },
to_fun_one := by { intro, simp only [prod_eval, homotopy.apply_one], } }
/-- The relative product of homotopies `F` and `G`,
where `F` takes `f₀` to `f₁` and `G` takes `g₀` to `g₁` -/
@[simps]
def homotopy_rel.prod (F : homotopy_rel f₀ f₁ S) (G : homotopy_rel g₀ g₁ S) :
homotopy_rel (prod_mk f₀ g₀) (prod_mk f₁ g₁) S :=
{ prop' :=
begin
intros t x hx,
have hF := F.prop' t x hx,
have hG := G.prop' t x hx,
simp only [coe_mk, prod_eval, prod.mk.inj_iff, homotopy.prod] at hF hG ⊢,
exact ⟨⟨hF.1, hG.1⟩, ⟨hF.2, hG.2⟩⟩,
end,
..(homotopy.prod F.to_homotopy G.to_homotopy) }
end prod
end continuous_map
namespace path.homotopic
local attribute [instance] path.homotopic.setoid
local infix ` ⬝ `:70 := quotient.comp
section pi
variables {ι : Type*} {X : ι → Type*} [∀ i, topological_space (X i)]
{as bs cs : Π i, X i}
/-- The product of a family of path homotopies. This is just a specialization of `homotopy_rel` -/
def pi_homotopy (γ₀ γ₁ : Π i, path (as i) (bs i)) (H : ∀ i, path.homotopy (γ₀ i) (γ₁ i)) :
path.homotopy (path.pi γ₀) (path.pi γ₁) := continuous_map.homotopy_rel.pi H
/-- The product of a family of path homotopy classes -/
def pi (γ : Π i, path.homotopic.quotient (as i) (bs i)) : path.homotopic.quotient as bs :=
(quotient.map path.pi
(λ x y hxy, nonempty.map (pi_homotopy x y) (classical.nonempty_pi.mpr hxy)))
(quotient.choice γ)
lemma pi_lift (γ : Π i, path (as i) (bs i)) : path.homotopic.pi (λ i, ⟦γ i⟧) = ⟦path.pi γ⟧ :=
by { unfold pi, simp, }
/-- Composition and products commute.
This is `path.trans_pi_eq_pi_trans` descended to path homotopy classes -/
lemma comp_pi_eq_pi_comp
(γ₀ : Π i, path.homotopic.quotient (as i) (bs i))
(γ₁ : Π i, path.homotopic.quotient (bs i) (cs i)) :
pi γ₀ ⬝ pi γ₁ = pi (λ i, γ₀ i ⬝ γ₁ i) :=
begin
apply quotient.induction_on_pi γ₁,
apply quotient.induction_on_pi γ₀,
intros,
simp only [pi_lift],
rw [← path.homotopic.comp_lift,
path.trans_pi_eq_pi_trans,
← pi_lift],
refl,
end
/-- Abbreviation for projection onto the ith coordinate -/
@[reducible]
def proj (i : ι) (p : path.homotopic.quotient as bs) : path.homotopic.quotient (as i) (bs i) :=
p.map_fn ⟨_, continuous_apply i⟩
/-- Lemmas showing projection is the inverse of pi -/
@[simp] lemma proj_pi (i : ι) (paths : Π i, path.homotopic.quotient (as i) (bs i)) :
proj i (pi paths) = paths i :=
begin
apply quotient.induction_on_pi paths,
intro, unfold proj,
rw [pi_lift, ← path.homotopic.map_lift],
congr, ext, refl,
end
@[simp] lemma pi_proj (p : path.homotopic.quotient as bs) : pi (λ i, proj i p) = p :=
begin
apply quotient.induction_on p,
intro, unfold proj,
simp_rw ← path.homotopic.map_lift,
rw pi_lift,
congr, ext, refl,
end
end pi
section prod
variables {α β : Type*} [topological_space α] [topological_space β]
{a₁ a₂ a₃ : α} {b₁ b₂ b₃ : β}
{p₁ p₁' : path a₁ a₂} {p₂ p₂' : path b₁ b₂}
(q₁ : path.homotopic.quotient a₁ a₂) (q₂ : path.homotopic.quotient b₁ b₂)
/-- The product of homotopies h₁ and h₂.
This is `homotopy_rel.prod` specialized for path homotopies. -/
def prod_homotopy (h₁ : path.homotopy p₁ p₁') (h₂ : path.homotopy p₂ p₂') :
path.homotopy (p₁.prod p₂) (p₁'.prod p₂') := continuous_map.homotopy_rel.prod h₁ h₂
/-- The product of path classes q₁ and q₂. This is `path.prod` descended to the quotient -/
def prod (q₁ : path.homotopic.quotient a₁ a₂) (q₂ : path.homotopic.quotient b₁ b₂) :
path.homotopic.quotient (a₁, b₁) (a₂, b₂) :=
quotient.map₂ path.prod (λ p₁ p₁' h₁ p₂ p₂' h₂, nonempty.map2 prod_homotopy h₁ h₂) q₁ q₂
variables (p₁ p₁' p₂ p₂')
lemma prod_lift : prod ⟦p₁⟧ ⟦p₂⟧ = ⟦p₁.prod p₂⟧ := rfl
variables (r₁ : path.homotopic.quotient a₂ a₃) (r₂ : path.homotopic.quotient b₂ b₃)
/-- Products commute with path composition.
This is `trans_prod_eq_prod_trans` descended to the quotient.-/
lemma comp_prod_eq_prod_comp : (prod q₁ q₂) ⬝ (prod r₁ r₂) = prod (q₁ ⬝ r₁) (q₂ ⬝ r₂) :=
begin
apply quotient.induction_on₂ q₁ q₂,
apply quotient.induction_on₂ r₁ r₂,
intros,
simp only [prod_lift, ← path.homotopic.comp_lift, path.trans_prod_eq_prod_trans],
end
variables {c₁ c₂ : α × β}
/-- Abbreviation for projection onto the left coordinate of a path class -/
@[reducible]
def proj_left (p : path.homotopic.quotient c₁ c₂) : path.homotopic.quotient c₁.1 c₂.1 :=
p.map_fn ⟨_, continuous_fst⟩
/-- Abbreviation for projection onto the right coordinate of a path class -/
@[reducible]
def proj_right (p : path.homotopic.quotient c₁ c₂) : path.homotopic.quotient c₁.2 c₂.2 :=
p.map_fn ⟨_, continuous_snd⟩
/-- Lemmas showing projection is the inverse of product -/
@[simp] lemma proj_left_prod : proj_left (prod q₁ q₂) = q₁ :=
begin
apply quotient.induction_on₂ q₁ q₂,
intros p₁ p₂,
unfold proj_left,
rw [prod_lift, ← path.homotopic.map_lift],
congr, ext, refl,
end
@[simp] lemma proj_right_prod : proj_right (prod q₁ q₂) = q₂ :=
begin
apply quotient.induction_on₂ q₁ q₂,
intros p₁ p₂,
unfold proj_right,
rw [prod_lift, ← path.homotopic.map_lift],
congr, ext, refl,
end
@[simp] lemma prod_proj_left_proj_right (p : path.homotopic.quotient (a₁, b₁) (a₂, b₂))
: prod (proj_left p) (proj_right p) = p :=
begin
apply quotient.induction_on p,
intro p',
unfold proj_left, unfold proj_right,
simp only [← path.homotopic.map_lift, prod_lift],
congr, ext; refl,
end
end prod
end path.homotopic
namespace fundamental_groupoid_functor
open_locale fundamental_groupoid
universes u
section pi
variables {I : Type u} (X : I → Top.{u})
/--
The projection map Π i, X i → X i induces a map π(Π i, X i) ⟶ π(X i).
-/
def proj (i : I) : (πₓ (Top.of (Π i, X i))).α ⥤ (πₓ (X i)).α := πₘ ⟨_, continuous_apply i⟩
/-- The projection map is precisely path.homotopic.proj interpreted as a functor -/
@[simp] lemma proj_map (i : I) (x₀ x₁ : (πₓ (Top.of (Π i, X i))).α) (p : x₀ ⟶ x₁) :
(proj X i).map p = (@path.homotopic.proj _ _ _ _ _ i p) := rfl
/--
The map taking the pi product of a family of fundamental groupoids to the fundamental
groupoid of the pi product. This is actually an isomorphism (see `pi_iso`)
-/
@[simps]
def pi_to_pi_Top : (Π i, (πₓ (X i)).α) ⥤ (πₓ (Top.of (Π i, X i))).α :=
{ obj := λ g, g,
map := λ v₁ v₂ p, path.homotopic.pi p,
map_id' :=
begin
intro x,
change path.homotopic.pi (λ i, 𝟙 (x i)) = _,
simp only [fundamental_groupoid.id_eq_path_refl, path.homotopic.pi_lift],
refl,
end,
map_comp' := λ x y z f g, (path.homotopic.comp_pi_eq_pi_comp f g).symm, }
/--
Shows `pi_to_pi_Top` is an isomorphism, whose inverse is precisely the pi product
of the induced projections. This shows that `fundamental_groupoid_functor` preserves products.
-/
@[simps]
def pi_iso : category_theory.Groupoid.of (Π i : I, (πₓ (X i)).α) ≅ (πₓ (Top.of (Π i, X i))) :=
{ hom := pi_to_pi_Top X,
inv := category_theory.functor.pi' (proj X),
hom_inv_id' :=
begin
change pi_to_pi_Top X ⋙ (category_theory.functor.pi' (proj X)) = 𝟭 _,
apply category_theory.functor.ext; intros,
{ ext, simp, }, { refl, },
end,
inv_hom_id' :=
begin
change (category_theory.functor.pi' (proj X)) ⋙ pi_to_pi_Top X = 𝟭 _,
apply category_theory.functor.ext; intros,
{ suffices : path.homotopic.pi ((category_theory.functor.pi' (proj X)).map f) = f, { simpa, },
change (category_theory.functor.pi' (proj X)).map f
with λ i, (category_theory.functor.pi' (proj X)).map f i,
simp, }, { refl, }
end }
section preserves
open category_theory
/-- Equivalence between the categories of cones over the objects `π Xᵢ` written in two ways -/
def cone_discrete_comp : limits.cone (discrete.functor X ⋙ π) ≌
limits.cone (discrete.functor (λ i, πₓ (X i))) :=
limits.cones.postcompose_equivalence (discrete.comp_nat_iso_discrete X π)
lemma cone_discrete_comp_obj_map_cone :
(cone_discrete_comp X).functor.obj ((π).map_cone (Top.pi_fan X))
= limits.fan.mk (πₓ (Top.of (Π i, X i))) (proj X) := rfl
/-- This is `pi_iso.inv` as a cone morphism (in fact, isomorphism) -/
def pi_Top_to_pi_cone : (limits.fan.mk (πₓ (Top.of (Π i, X i))) (proj X)) ⟶
Groupoid.pi_limit_fan (λ i : I, (πₓ (X i))) := { hom := category_theory.functor.pi' (proj X) }
instance : is_iso (pi_Top_to_pi_cone X) :=
begin
haveI : is_iso (pi_Top_to_pi_cone X).hom := (infer_instance : is_iso (pi_iso X).inv),
exact limits.cones.cone_iso_of_hom_iso (pi_Top_to_pi_cone X),
end
/-- The fundamental groupoid functor preserves products -/
def preserves_product : limits.preserves_limit (discrete.functor X) π :=
begin
apply limits.preserves_limit_of_preserves_limit_cone (Top.pi_fan_is_limit X),
apply (limits.is_limit.of_cone_equiv (cone_discrete_comp X)).to_fun,
simp only [cone_discrete_comp_obj_map_cone],
apply limits.is_limit.of_iso_limit _ (as_iso (pi_Top_to_pi_cone X)).symm,
exact (Groupoid.pi_limit_cone _).is_limit,
end
end preserves
end pi
section prod
variables (A B : Top.{u})
/-- The induced map of the left projection map X × Y → X -/
def proj_left : (πₓ (Top.of (A × B))).α ⥤ (πₓ A).α := πₘ ⟨_, continuous_fst⟩
/-- The induced map of the right projection map X × Y → Y -/
def proj_right : (πₓ (Top.of (A × B))).α ⥤ (πₓ B).α := πₘ ⟨_, continuous_snd⟩
@[simp] lemma proj_left_map (x₀ x₁ : (πₓ (Top.of (A × B))).α) (p : x₀ ⟶ x₁) :
(proj_left A B).map p = path.homotopic.proj_left p := rfl
@[simp] lemma proj_right_map (x₀ x₁ : (πₓ (Top.of (A × B))).α) (p : x₀ ⟶ x₁) :
(proj_right A B).map p = path.homotopic.proj_right p := rfl
/--
The map taking the product of two fundamental groupoids to the fundamental groupoid of the product
of the two topological spaces. This is in fact an isomorphism (see `prod_iso`).
-/
@[simps]
def prod_to_prod_Top : (πₓ A).α × (πₓ B).α ⥤ (πₓ (Top.of (A × B))).α :=
{ obj := λ g, g,
map := λ x y p, match x, y, p with
| (x₀, x₁), (y₀, y₁), (p₀, p₁) := path.homotopic.prod p₀ p₁
end,
map_id' :=
begin
rintro ⟨x₀, x₁⟩,
simp only [category_theory.prod_id, fundamental_groupoid.id_eq_path_refl],
unfold_aux, rw path.homotopic.prod_lift, refl,
end,
map_comp' := λ x y z f g, match x, y, z, f, g with
| (x₀, x₁), (y₀, y₁), (z₀, z₁), (f₀, f₁), (g₀, g₁) :=
(path.homotopic.comp_prod_eq_prod_comp f₀ f₁ g₀ g₁).symm
end }
/--
Shows `prod_to_prod_Top` is an isomorphism, whose inverse is precisely the product
of the induced left and right projections.
-/
@[simps]
def prod_iso : category_theory.Groupoid.of ((πₓ A).α × (πₓ B).α) ≅ (πₓ (Top.of (A × B))) :=
{ hom := prod_to_prod_Top A B,
inv := (proj_left A B).prod' (proj_right A B),
hom_inv_id' :=
begin
change prod_to_prod_Top A B ⋙ ((proj_left A B).prod' (proj_right A B)) = 𝟭 _,
apply category_theory.functor.hext, { intros, ext; simp; refl, },
rintros ⟨x₀, x₁⟩ ⟨y₀, y₁⟩ ⟨f₀, f₁⟩,
have := and.intro (path.homotopic.proj_left_prod f₀ f₁) (path.homotopic.proj_right_prod f₀ f₁),
simpa,
end,
inv_hom_id' :=
begin
change ((proj_left A B).prod' (proj_right A B)) ⋙ prod_to_prod_Top A B = 𝟭 _,
apply category_theory.functor.hext, { intros, ext; simp; refl, },
rintros ⟨x₀, x₁⟩ ⟨y₀, y₁⟩ f,
have := path.homotopic.prod_proj_left_proj_right f,
simpa,
end }
end prod
end fundamental_groupoid_functor
|
(**
Classical logic in MathComp
======
2020/01/25
The source code of this document is available at:
https://github.com/suharahiromichi/coq/blob/master/pearl/ssr_classically.v
*)
(**
OCaml 4.07.1, Coq 8.9.1, MathComp 1.9.0
*)
From mathcomp Require Import all_ssreflect.
(**
----------------
# Does MathComp assume the law of excluded middle?
There was a question to this effect on Stack Overflow (English version) ([1.]).
The gist was a request for a proof of
``forall A: Prop, A \/ ~A``.
The question seems to be closed, but as its answer explains, the MathComp
library does not define the axiom of excluded middle, so this cannot be
proved for an arbitrary ``A : Prop``.
You could define the axiom of excluded middle yourself, or import
Classical.v from Standard Coq; but in the first place the MathComp
library contains no axioms at all, that is, no propositions that are
introduced without proof.
This is explained in Section 3.3 of [2.]:
The Mathematical Components library is axiom free. This makes the
library compatible with any combination of axioms that is known to be
consistent with the Calculus of Inductive Constructions.
In short, unlike Standard Coq, no proposition whose compatibility with
CIC might not be preserved is ever added (note 1). This is what is
called "axiom free".
So, is the absence of excluded middle ever a problem?
The short answer: if a proposition can be converted (reflected) into an
equivalent bool-valued expression (note 2), then the proposition is
decidable, i.e. it is determined to be either true or false (note 3),
and the law of excluded middle and double-negation elimination should
then be provable for it as theorems.
Hence there is no need to introduce excluded middle as an axiom.
(Note 1) In fact, the Axiom command is used inside the MathComp library.
(Note 2) Because a bool-typed expression, once computed, evaluates to either true or false, it is decidable.
(Note 3) More precisely, one should say: either it can be proved to be true, or its negation can be proved.
*)
(**
----------------
# The case of Standard Coq
In Standard Coq, Classical.v provides the following definitions.
*)
Require Import Classical.
(**
First, the law of excluded middle (classic) is defined as an axiom,
*)
Check classic : forall P : Prop, P \/ ~ P. (* law of excluded middle *)
(**
and then double-negation elimination (NNPP) is proved from it.
*)
Lemma NNPP : forall P : Prop, ~ ~ P -> P. (* double-negation elimination *)
Proof.
intro P.
now case (classic P).
Qed.
(**
----------------
# The case of MathComp
## An arbitrary proposition P
In MathComp, the fact that a Prop-typed proposition P can be reflected
into a bool-typed expression b (note 4) is written ``reflect P b``.
Let us derive the law of excluded middle under the premise that
``reflect P b`` holds (as a hypothesis, not as an axiom).
(Note 4) When b is read as the Prop-typed proposition ``b = true``, it is equivalent to P.
*)
(**
If there is a bool-typed expression b that reflects the proposition P,
then the law of excluded middle holds for P.
The proof itself is simple: we do a case analysis (case) on b being true or false.
If b is true, take the left disjunct of the goal; reflecting to the bool level gives true.
If b is false, take the right disjunct; reflecting to the bool level gives ~~ false.
Since the negation of the bool value false is of course true, both cases hold.
*)
Lemma ssr_em_p (P : Prop) (b : bool) : reflect P b -> P \/ ~ P.
Proof.
case: b => Hr.
- left.
apply/Hr.
done.
- right.
apply/Hr.
done.
Restart.
by case: b => Hr; [left | right]; apply/Hr.
Qed.
(**
As in Standard Coq, double-negation elimination can be proved from the law of excluded middle.
*)
Lemma ssr_nnpp_p (P : Prop) (b : bool) : reflect P b -> ~ ~ P -> P.
move=> Hr.
by case: (ssr_em_p P b Hr).
Qed.
(**
Section 3.3 of [2.] introduces
```
Definition classically P := forall b : bool, (P -> b) -> b
```
and proves several lemmas about it, but as the "shape" of classically
suggests, this is a monadic definition. We do not use it here.
The monadic approach will be explained in a separate article.
*)
(**
## A concrete P (the example of equality on natural numbers)
So what kind of proposition P has a bool-typed expression b that reflects it?
Equality between natural numbers is one such proposition.
*)
(**
In MathComp, a type carrying such a decidable equality is called an eqType.
Since nat is an eqType (note 5), this works for nat.
(Note 5) That is, the eqType instance nat_eqType is registered as the canonical structure for nat.
*)
(**
The proposition m = n, an equality on natural numbers, can be reflected
into the bool-typed expression m == n.
Concretely, the following lemma eqP is defined inside MathComp.
*)
Check @eqP nat_eqType : forall (m n : nat), reflect (m = n) (m == n).
(**
Using eqP, we can prove the law of excluded middle and double-negation
elimination for equalities on natural numbers.
*)
Lemma ssr_em_eq (m n : nat) : m = n \/ m <> n.
Proof.
apply: ssr_em_p.
by apply: eqP.
Qed.
Lemma ssr_nnpp_eq (m n : nat) : ~ m <> n -> m = n.
Proof.
apply: ssr_nnpp_p.
by apply: eqP.
Qed.
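(**
As a quick sanity check (this example is an illustrative addition, not
part of the original pearl), the lemma applies to concrete numbers:
*)
Goal 2 = 3 \/ 2 <> 3.
Proof. exact: (ssr_em_eq 2 3). Qed.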
(**
# Summary
Indeed, an equality m = n between two natural numbers m and n either
holds or does not hold, so it is decidable, and the negation of m <> n
should be exactly m = n.
MathComp is, in this way, a framework for doing ordinary "mathematics" on top of Coq.
*)
(**
---------------
# References
[1.] Does ssreflect assume excluded middle?
https://stackoverflow.com/questions/34520944/does-ssreflect-assume-excluded-middle
[2.] Mathematical Components Book
https://math-comp.github.io/mcb/
*)
(* END *)
|
!
! The Laboratory of Algorithms
!
! The MIT License
!
! Copyright 2011-2015 Andrey Pudov.
!
! Permission is hereby granted, free of charge, to any person obtaining a copy
! of this software and associated documentation files (the 'Software'), to deal
! in the Software without restriction, including without limitation the rights
! to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
! copies of the Software, and to permit persons to whom the Software is
! furnished to do so, subject to the following conditions:
!
! The above copyright notice and this permission notice shall be included in
! all copies or substantial portions of the Software.
!
! THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
! THE SOFTWARE.
!
module MFInheritance
use MFAnimal
use MFCat
use MFDog
use MFShape
use MFCircle
use MUAsserts
use MUReport
implicit none
private
type, public :: TFInheritance
contains
procedure :: present
end type
contains
subroutine present(instance)
class(TFInheritance), intent(in) :: instance
type(TFCat) cat
type(TFCircle) circle
! construct the circle
circle = TFCircle(17.0)
call say(cat)
call area(circle)
end subroutine
subroutine say(animal)
class(TFAnimal), intent(in) :: animal
character(len=80) :: word
real start
call cpu_time(start)
word = animal%say()
call report('Inheritance', 'Say', '', start)
call assert_equals(trim(word), 'Myaw')
end subroutine
subroutine area(shape)
class(TFShape), intent(in) :: shape
real value
real start
call cpu_time(start)
value = shape%getArea()
call report('Inheritance', 'Area', '', start)
call assert_equals(value, 17 * 17 * 3.14159265 / 4.0)
end subroutine
end module
|
lemma closed_INT [continuous_intros, intro]: "\<forall>x\<in>A. closed (B x) \<Longrightarrow> closed (\<Inter>x\<in>A. B x)" |
||| Implementation of ordering relations for `Fin`ite numbers
module Data.Fin.Order
import Control.Relation
import Control.Order
import Data.Fin
import Data.Fun
import Data.Rel
import Data.Nat
import Data.Nat.Order
import Decidable.Decidable
%default total
using (k : Nat)
public export
data FinLTE : Fin k -> Fin k -> Type where
FromNatPrf : {m, n : Fin k} -> LTE (finToNat m) (finToNat n) -> FinLTE m n
public export
Transitive (Fin k) FinLTE where
transitive (FromNatPrf xy) (FromNatPrf yz) =
FromNatPrf $ transitive {rel = LTE} xy yz
public export
Reflexive (Fin k) FinLTE where
reflexive = FromNatPrf $ reflexive {rel = LTE}
public export
Preorder (Fin k) FinLTE where
public export
Antisymmetric (Fin k) FinLTE where
antisymmetric {x} {y} (FromNatPrf xy) (FromNatPrf yx) =
finToNatInjective x y $
antisymmetric {rel = LTE} xy yx
public export
PartialOrder (Fin k) FinLTE where
public export
Connex (Fin k) FinLTE where
connex {x = FZ} _ = Left $ FromNatPrf LTEZero
connex {y = FZ} _ = Right $ FromNatPrf LTEZero
connex {x = FS k} {y = FS j} prf =
case connex {rel = FinLTE} $ prf . (cong FS) of
Left (FromNatPrf p) => Left $ FromNatPrf $ LTESucc p
Right (FromNatPrf p) => Right $ FromNatPrf $ LTESucc p
public export
Decidable 2 [Fin k, Fin k] FinLTE where
decide m n with (decideLTE (finToNat m) (finToNat n))
decide m n | Yes prf = Yes (FromNatPrf prf)
decide m n | No disprf = No (\ (FromNatPrf prf) => disprf prf)
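||| A small illustrative value (an addition, not part of the original
||| module): `FZ` is less than or equal to `FS FZ` inside `Fin 2`.
finLTE01 : FinLTE (the (Fin 2) FZ) (FS FZ)
finLTE01 = FromNatPrf LTEZero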
|
# Read file
statesInfo <- read.csv("stateData.csv") # file name is an assumption; the original call had no argument
# subset data by region if region is 1
subset(statesInfo, state.region == 1)
stateSubset <- statesInfo[statesInfo$illiteracy == 0.5, ]
library(ggplot2)
library(plyr)
library(twitteR)
# Attributes and dimensions of data
dim(stateSubset)
str(stateSubset)
stateSubset # print out stateSubset
|
-- https://www.youtube.com/watch?v=8upRv9wjHp0
inductive A
| i : ℤ → A
| pred : A → A
| succ : A → A
| plus : A → A → A
| mult : A → A → A
.
open A
instance : has_one A := ⟨ A.i 1 ⟩
instance : has_add A := ⟨ A.plus ⟩
-- Natural semantics -- Big-step == Binary relation ↓ ⊆ A × ℤ
inductive natsem_A : A → ℤ → Prop
| r1 : ∀ i : ℤ, natsem_A (A.i i) i
| r2 : ∀ e i, natsem_A e i → natsem_A (pred e) (i - 1)
| r3 : ∀ e i, natsem_A e i → natsem_A (succ e) (i + 1)
| r4 : ∀ e₁ e₂ i j, (natsem_A e₁ i) → (natsem_A e₂ j) → natsem_A (plus e₁ e₂) (i + j)
| r5 : ∀ e₁ e₂ i j, (natsem_A e₁ i) → (natsem_A e₂ j) → natsem_A (mult e₁ e₂) (i * j)
notation a ` ⇓ ` b := natsem_A a b
example : (plus 4 (succ 2)) ⇓ 7 := sorry
--Evaluator semantics of A
def eval : A → ℤ
| (i x) := x
| (pred e) := (eval e) - 1
| (succ e) := (eval e) + 1
| (plus e₁ e₂) := (eval e₁) + (eval e₂)
| (mult e₁ e₂) := (eval e₁) * (eval e₂)
#reduce eval (plus 4 (succ 2))
-- SOS semantics of A == Binary relation ⟶ ⊆ A × A
inductive sossem_A : A → A → Prop
--axioms
| sos1 : ∀ i, sossem_A (pred (A.i i)) (A.i (i - 1)) -- i:ℤ
| sos2 : ∀ i, sossem_A (succ (A.i i)) (A.i (i + 1))
| sos3 : ∀ i j, sossem_A (plus (A.i i) (A.i j)) (A.i (i + j))
| sos4 : ∀ i j, sossem_A (mult (A.i i) (A.i j)) (A.i (i * j))
--contexts -- these contexts are known are the compatible closure
| sos05 : ∀ e₁ e₂, sossem_A e₁ e₂ → sossem_A (pred e₁) (pred e₂)
| sos06 : ∀ e₁ e₂, sossem_A e₁ e₂ → sossem_A (succ e₁) (succ e₂)
| sos07 : ∀ e₁ e₂ e₃, sossem_A e₁ e₂ → sossem_A (plus e₁ e₃) (plus e₂ e₃)
| sos08 : ∀ e₁ e₂ e₃, sossem_A e₁ e₂ → sossem_A (plus e₃ e₁) (plus e₃ e₂)
| sos09 : ∀ e₁ e₂ e₃, sossem_A e₁ e₂ → sossem_A (mult e₁ e₃) (mult e₂ e₃)
| sos10 : ∀ e₁ e₂ e₃, sossem_A e₁ e₂ → sossem_A (mult e₃ e₁) (mult e₃ e₂)
notation a ` ⟶ ` b := sossem_A a b
inductive sos_closure : A → A → Prop
| step : ∀ e₁ e₂, (e₁ ⟶ e₂) → sos_closure e₁ e₂
| reflexive : ∀ e, sos_closure e e
| transitive : ∀ e₁ e₂ e₃, sos_closure e₁ e₂ → sos_closure e₂ e₃ → sos_closure e₁ e₃
notation a ` ⟶* ` b := sos_closure a b
--I need something that extracts the value from A.i i in SOS
lemma natural_imp_sos : ∀ e i, (e ⇓ i) → (e ⟶* (A.i i)) := sorry
lemma sos_imp_natural : ∀ e i, (e ⟶* (A.i i)) → (e ⇓ i) := sorry
theorem natural_equivalent_sos : ∀ e i, (e ⇓ i) ↔ (e ⟶* (A.i i)) :=
begin
intros, split; apply natural_imp_sos <|> apply sos_imp_natural
end
-- Reduction semantics of A
inductive axiom_redsem_A : A → A → Prop
--axioms
| red1 : ∀ i, axiom_redsem_A (pred (A.i i)) (A.i (i - 1)) -- i:ℤ
| red2 : ∀ i, axiom_redsem_A (succ (A.i i)) (A.i (i + 1))
| red3 : ∀ i j, axiom_redsem_A (plus (A.i i) (A.i j)) (A.i (i + j))
| red4 : ∀ i j, axiom_redsem_A (mult (A.i i) (A.i j)) (A.i (i * j))
inductive ctx
| hole
| pred : ctx → ctx
| succ : ctx → ctx
| plus : ctx → A → ctx | Plus : A → ctx → ctx
| mult : ctx → A → ctx | Mult : A → ctx → ctx |
[STATEMENT]
lemma (in dp_consistency_heap) map\<^sub>T_transfer[transfer_rule]:
"crel_vs ((R0 ===>\<^sub>T R1) ===>\<^sub>T list_all2 R0 ===>\<^sub>T list_all2 R1) map map\<^sub>T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. crel_vs ((R0 ===>\<^sub>T R1) ===>\<^sub>T list_all2 R0 ===>\<^sub>T list_all2 R1) map map\<^sub>T
[PROOF STEP]
apply memoize_combinator_init
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x uu_ xa uua_. \<lbrakk>(R0 ===>\<^sub>T R1) x uu_; list_all2 R0 xa uua_\<rbrakk> \<Longrightarrow> crel_vs (list_all2 R1) (map x xa) (map\<^sub>T' uu_ uua_)
[PROOF STEP]
apply (erule list_all2_induct)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x uu_ xa _. (R0 ===>\<^sub>T R1) x uu_ \<Longrightarrow> crel_vs (list_all2 R1) (map x []) (map\<^sub>T' uu_ [])
2. \<And>x uu_ xa _ xb xs y ys. \<lbrakk>(R0 ===>\<^sub>T R1) x uu_; R0 xb y; list_all2 R0 xs ys; crel_vs (list_all2 R1) (map x xs) (map\<^sub>T' uu_ ys)\<rbrakk> \<Longrightarrow> crel_vs (list_all2 R1) (map x (xb # xs)) (map\<^sub>T' uu_ (y # ys))
[PROOF STEP]
subgoal premises [transfer_rule]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. crel_vs (list_all2 R1) (map x_ []) (map\<^sub>T' uu_ [])
[PROOF STEP]
by memoize_unfold_defs transfer_prover
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x uu_ xa _ xb xs y ys. \<lbrakk>(R0 ===>\<^sub>T R1) x uu_; R0 xb y; list_all2 R0 xs ys; crel_vs (list_all2 R1) (map x xs) (map\<^sub>T' uu_ ys)\<rbrakk> \<Longrightarrow> crel_vs (list_all2 R1) (map x (xb # xs)) (map\<^sub>T' uu_ (y # ys))
[PROOF STEP]
subgoal premises [transfer_rule]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. crel_vs (list_all2 R1) (map x_ (xb_ # xs_)) (map\<^sub>T' uu_ (y_ # ys_))
[PROOF STEP]
by memoize_unfold_defs transfer_prover
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
import Mathlib.Tactic.Basic
import Mathlib.Tactic.Cases
import Basics.FunctionDefinitions
namespace hglv
/-!
## Proofs by Mathematical Induction
The `induction'` tactic performs structural induction on an inductive type. _Structural induction_
simply means that the induction follows the structure of the inductive type. For natural numbers
constructed from `Nat.zero` and `Nat.succ`, structural induction corresponds to standard mathematical
induction: To prove `p n`, it suffices to prove `p 0` and `∀k, p k → p (k + 1)`. Equipped with `induction'`,
we can reason about the addition and multiplication operations we defined by recursion in
[Function Definitions](../Basics/FunctionDefinitions.lean.md). Addition is defined by recursion on its
second argument. We will prove two lemmas, `add_zero` and `add_succ`, that give us alternative equations
that recurse on the first argument. We start with `add_zero`:
-/
lemma add_zero (n : ℕ) :
add 0 n = n := by
induction' n with n ih
{ rfl }
{ simp [add, ih] }
/-!
The first `{ }` encloses the base case `⊢ add 0 0 = 0`. The second block corresponds
to the induction step
```lean
n : ℕ, ih : add 0 n = n ⊢ add 0 (Nat.succ n) = Nat.succ n
```
The local variable `n` in the induction step should not be confused with the `n` in
the lemma statement. Like mathematicians, `induction'` tries to reuse variable
names that are no longer needed. The name `ih` for the induction hypothesis in
the induction step is also introduced by the `induction'` tactic.
We can keep on proving lemmas by structural induction:
-/
lemma add_succ (m n : ℕ) :
add (Nat.succ m) n = Nat.succ (add m n) := by
induction' n with n ih
{ rfl }
{ simp [add, ih] }
lemma add_comm (m n : ℕ) :
add m n = add n m := by
induction' n with n ih
{ simp [add, add_zero] }
{ simp [add, add_succ, ih] }
lemma add_assoc (l m n : ℕ) :
add (add l m) n = add l (add m n) := by
induction' n with n ih -- ih: add (add l m) n = add l (add m n)
{ rfl }
{ simp [add, ih] }
/-!
Once we have proved that a binary operator is commutative and associative, it
is a good idea to let Lean's automation, notably `cc`, know about this
using type classes. See [Type Classes](../Basics/TypeClasses.lean.md).
The following example reasons up to associativity and commutativity of `add`
(it was originally written with the `cc` tactic; see the note after the proof):
-/
set_option trace.Meta.Tactic.simp.rewrite true
lemma mul_add (l m n : ℕ) :
mul l (add m n) = add (mul l m) (mul l n) := by
induction' n with n ih -- ih: mul l (add m n) = add (mul l m) (mul l n)
{ rfl }
{
simp [add_comm, add_assoc, add_succ]
}
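/-!
A hedged sketch of the type-class registration mentioned above (assuming core
Lean's `Std.Associative`/`Std.Commutative` classes and the `ac_rfl` tactic;
none of these names are part of the original development):
-/

instance : Std.Associative add := ⟨add_assoc⟩
instance : Std.Commutative add := ⟨add_comm⟩

-- `ac_rfl` now closes goals that hold up to associativity and commutativity.
example (a b c : ℕ) : add a (add b c) = add c (add b a) := by ac_rfl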
/-!
-- BUGBUG: 'cc' is still missing from mathlib... trying to write it without cc.
Here are a few hints on how to carry out proofs by induction:
- It is usually beneficial to perform induction following the structure of the definition of one of
the functions appearing in the goal. In particular, if a function is defined by recursion on its
_n_-th argument, it usually makes sense to perform the induction on that argument.
- If the base case of an induction is difficult, this is often a sign that the wrong variable was
chosen or that some lemmas should be proved first.
-/
|
(* Title: HOL/Proofs/ex/XML_Data.thy
Author: Makarius
Author: Stefan Berghofer
XML data representation of proof terms.
*)
theory XML_Data
imports "~~/src/HOL/Isar_Examples/Drinker"
begin
subsection \<open>Export and re-import of global proof terms\<close>
ML \<open>
fun export_proof ctxt thm =
let
val thy = Proof_Context.theory_of ctxt;
val (_, prop) =
Logic.unconstrainT (Thm.shyps_of thm)
(Logic.list_implies (Thm.hyps_of thm, Thm.prop_of thm));
val prf =
Proofterm.proof_of (Proofterm.strip_thm (Thm.proof_body_of thm)) |>
Reconstruct.reconstruct_proof ctxt prop |>
Reconstruct.expand_proof ctxt [("", NONE)] |>
Proofterm.rew_proof thy |>
Proofterm.no_thm_proofs;
in Proofterm.encode prf end;
fun import_proof thy xml =
let
val prf = Proofterm.decode xml;
val (prf', _) = Proofterm.freeze_thaw_prf prf;
in Drule.export_without_context (Proof_Checker.thm_of_proof thy prf') end;
\<close>
subsection \<open>Examples\<close>
ML \<open>val thy1 = @{theory}\<close>
lemma ex: "A \<longrightarrow> A" ..
ML_val \<open>
val xml = export_proof @{context} @{thm ex};
val thm = import_proof thy1 xml;
\<close>
ML_val \<open>
val xml = export_proof @{context} @{thm de_Morgan};
val thm = import_proof thy1 xml;
\<close>
ML_val \<open>
val xml = export_proof @{context} @{thm Drinker's_Principle};
val thm = import_proof thy1 xml;
\<close>
text \<open>Some fairly large proof:\<close>
ML_val \<open>
val xml = export_proof @{context} @{thm abs_less_iff};
val thm = import_proof thy1 xml;
@{assert} (size (YXML.string_of_body xml) > 1000000);
\<close>
end
|
Timing for all models of the simulation is controlled by the Temporal Discretization (TDIS) Package. Input to the TDIS Package is read from the filename specified for TDIS in the TIMING input block of the simulation name file.
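
For reference, the corresponding TIMING block of the simulation name file has the form shown below (the file name is illustrative):

\begin{verbatim}
BEGIN TIMING
  TDIS6  sim.tdis
END TIMING
\end{verbatim}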
\vspace{5mm}
\subsection{Structure of Blocks}
\lstinputlisting[style=blockdefinition]{./mf6ivar/tex/sim-tdis-options.dat}
\lstinputlisting[style=blockdefinition]{./mf6ivar/tex/sim-tdis-dimensions.dat}
\lstinputlisting[style=blockdefinition]{./mf6ivar/tex/sim-tdis-perioddata.dat}
\vspace{5mm}
\subsection{Explanation of Variables}
\begin{description}
\input{./mf6ivar/tex/sim-tdis-desc.tex}
\end{description}
\vspace{5mm}
\subsection{Example Input File}
\lstinputlisting[style=inputfile]{./mf6ivar/examples/sim-tdis-example.dat}
|
{-# OPTIONS --cubical #-}
module TranspComputing where
open import Agda.Builtin.Cubical.Path
open import Agda.Primitive.Cubical
open import Agda.Builtin.List
transpList : ∀ (φ : I) (A : Set) x xs → primTransp (λ _ → List A) φ (x ∷ xs) ≡ (primTransp (λ i → A) φ x ∷ primTransp (λ i → List A) φ xs)
transpList φ A x xs = \ _ → primTransp (λ _ → List A) φ (x ∷ xs)
data S¹ : Set where
base : S¹
loop : base ≡ base
-- This should be refl.
transpS¹ : ∀ (φ : I) (u0 : S¹) → primTransp (λ _ → S¹) φ u0 ≡ u0
transpS¹ φ u0 = \ _ → u0
|
lemma pow_add (a m n : mynat) : a ^ (m + n) = a ^ m * a ^ n :=
begin
induction n with k Pk,
rw add_zero,
rw pow_zero,
rw mul_one,
refl,
rw pow_succ,
rw ← mul_assoc,
rw ← Pk,
rw ← pow_succ,
rw add_succ,
refl,
end
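-- A hedged usage sketch (not in the original): `pow_add` specialised to
-- concrete exponents.
example (a : mynat) : a ^ (1 + 1) = a ^ 1 * a ^ 1 := pow_add a 1 1

|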
A set $S$ is simply connected if and only if for every path $p$ with image in $S$ and every point $a \in S$, the path $p$ is homotopic to the constant path at $a$ in $S$.
|
I am having trouble adding other layouts to the iPad. I can see that I am connected on the same network because, with the Mix2 iPad layout, I see the MIDI flashing in OSCulator. However, when I try to add a layout, my iPad does not find hosts. If I press Edit on the Add Layout screen, it gives the option to add a host, but I don't know how to do that. Can someone please help? I would love to get the Logic or Traktor templates working. Thanks.
Have you tried rebooting your computer and iPad?
As for the manual Editor Host setting, I am sorry but I don't know how it works.
|
//
// libieeep1788
//
// An implementation of the preliminary IEEE P1788 standard for
// interval arithmetic
//
//
// Copyright 2013 - 2015
//
// Marco Nehmeier ([email protected])
// Department of Computer Science,
// University of Wuerzburg, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#define BOOST_TEST_MODULE "Flavor: IO [p1788/flavor/infsup/setbased/mpfr_bin_ieee754_flavor]"
#include "test/util/boost_test_wrapper.hpp"
#include "p1788/exception/exception.hpp"
#include "p1788/decoration/decoration.hpp"
#include "p1788/flavor/infsup/setbased/mpfr_bin_ieee754_flavor.hpp"
#include "test/util/mpfr_bin_ieee754_flavor_io_test_util.hpp"
#include <boost/test/output_test_stream.hpp>
#include <limits>
#include <sstream>
template<typename T>
using F = p1788::flavor::infsup::setbased::mpfr_bin_ieee754_flavor<T>;
template<typename T>
using REP = typename F<T>::representation;
template<typename T>
using REP_DEC = typename F<T>::representation_dec;
typedef p1788::decoration::decoration DEC;
const double INF_D = std::numeric_limits<double>::infinity();
const double MAX_D = std::numeric_limits<double>::max();
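// Note (added for readability): the p1788::io manipulators used below are
// sticky -- once inserted into a stream they remain in effect for every
// subsequent output operation until they are changed again, which is what
// the sequences of checks in these test cases rely on.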
BOOST_AUTO_TEST_CASE(minimal_empty_output_test)
{
boost::test_tools::output_test_stream output;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[empty]" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[EMPTY]" ) );
output << p1788::io::hex;
output << p1788::io::lower_case;
output << p1788::io::string_width(12);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ empty ]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( " EMPTY " ) );
output << p1788::io::string_width(11);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( " EMPTY " ) );
output << p1788::io::decimal;
output << p1788::io::punctuation;
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ empty ]" ) );
output << p1788::io::dec_numeric;
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::hex;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(9);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( " EMPTY " ) );
output << p1788::io::decimal;
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
output << p1788::io::special_bounds;
output << p1788::io::punctuation;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[inf,-inf]" ) );
output << p1788::io::dec_alpha;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[INF,-INF]" ) );
output << p1788::io::hex;
output << p1788::io::lower_case;
output << p1788::io::string_width(9);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[inf,-inf]" ) );
output << p1788::io::string_width(15);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::string_width(16);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::decimal;
output << p1788::io::string_width(17);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ INF, -INF]" ) );
output << p1788::io::string_width(18);
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( " INF -INF" ) );
output << p1788::io::hex;
output << p1788::io::upper_case;
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "INF -INF" ) );
output << p1788::io::special_text;
output << p1788::io::punctuation;
output << p1788::io::width(10);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ EMPTY ]" ) );
output << p1788::io::special_bounds;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ INF, -INF]" ) );
output << p1788::io::string_width(23);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ INF, -INF]" ) );
output << p1788::io::string_width(25);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ INF, -INF]" ) );
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::special_bounds;
output << p1788::io::precision(27);
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
}
BOOST_AUTO_TEST_CASE(minimal_empty_dec_output_test)
{
boost::test_tools::output_test_stream output;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[empty]" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[EMPTY]" ) );
output << p1788::io::hex;
output << p1788::io::lower_case;
output << p1788::io::string_width(12);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ empty ]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( " EMPTY " ) );
output << p1788::io::string_width(11);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( " EMPTY " ) );
output << p1788::io::decimal;
output << p1788::io::punctuation;
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ empty ]" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::dec_numeric;
output << p1788::io::hex;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(9);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( " EMPTY " ) );
output << p1788::io::decimal;
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
output << p1788::io::special_bounds;
output << p1788::io::punctuation;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[inf,-inf]" ) );
output << p1788::io::dec_alpha;
output << p1788::io::hex;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[INF,-INF]" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(9);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[inf,-inf]" ) );
output << p1788::io::string_width(15);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::string_width(16);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::decimal;
output << p1788::io::string_width(17);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ INF, -INF]" ) );
output << p1788::io::hex;
output << p1788::io::string_width(18);
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( " INF -INF" ) );
output << p1788::io::decimal;
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "inf -inf" ) );
output << p1788::io::hex;
output << p1788::io::special_text;
output << p1788::io::punctuation;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[empty]" ) );
output << p1788::io::inf_sup_form;
output << p1788::io::width(10);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ empty ]" ) );
output << p1788::io::special_bounds;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::string_width(23);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::string_width(25);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ inf, -inf]" ) );
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ ]" ) );
output << p1788::io::special_bounds;
output << p1788::io::precision(27);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[ INF, -INF]" ) );
}
BOOST_AUTO_TEST_CASE(minimal_entire_output_test)
{
boost::test_tools::output_test_stream output;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[entire]" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[ENTIRE]" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(12);
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[ entire ]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( " ENTIRE " ) );
output << p1788::io::hex;
output << p1788::io::string_width(11);
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( " ENTIRE " ) );
output << p1788::io::punctuation;
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[ entire ]" ) );
output << p1788::io::decimal;
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[,]" ) );
output << p1788::io::dec_numeric;
output << p1788::io::hex;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[,]" ) );
output << p1788::io::decimal;
output << p1788::io::lower_case;
output << p1788::io::string_width(9);
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[ , ]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( " ENTIRE " ) );
output << p1788::io::hex;
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
output << p1788::io::special_bounds;
output << p1788::io::punctuation;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[-inf,inf]" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[-INF,INF]" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(6);
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[-inf,inf]" ) );
output << p1788::io::dec_alpha;
output << p1788::io::decimal;
output << p1788::io::string_width(15);
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[ -inf, inf]" ) );
output << p1788::io::string_width(16);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[ -INF, INF]" ) );
output << p1788::io::hex;
output << p1788::io::string_width(17);
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[ -INF, INF]" ) );
output << p1788::io::string_width(18);
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "[ -inf, inf]" ) );
output << p1788::io::decimal;
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( " -INF INF" ) );
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, F<double>::entire() );
BOOST_CHECK( output.is_equal( "-INF INF" ) );
output << p1788::io::special_text;
output << p1788::io::punctuation;
output << p1788::io::inf_sup_form;
output << p1788::io::width(10);
F<double>::operator_interval_to_text(output, F<double>::entire());
BOOST_CHECK( output.is_equal( "[ ENTIRE ]" ) );
output << p1788::io::special_bounds;
F<double>::operator_interval_to_text(output, F<double>::entire());
BOOST_CHECK( output.is_equal( "[ -INF, INF]" ) );
output << p1788::io::string_width(23);
F<double>::operator_interval_to_text(output, F<double>::entire());
BOOST_CHECK( output.is_equal( "[ -INF, INF]" ) );
output << p1788::io::string_width(25);
F<double>::operator_interval_to_text(output, F<double>::entire());
BOOST_CHECK( output.is_equal( "[ -INF, INF]" ) );
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, F<double>::entire());
BOOST_CHECK( output.is_equal( "[ , ]" ) );
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, F<double>::entire());
BOOST_CHECK( output.is_equal( "[ , ]" ) );
output << p1788::io::special_bounds;
output << p1788::io::precision(27);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::entire());
BOOST_CHECK( output.is_equal( "[ -INF, INF]" ) );
}
BOOST_AUTO_TEST_CASE(minimal_entire_dec_output_test)
{
boost::test_tools::output_test_stream output;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[entire]_trv" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "[ENTIRE]_DAC" ) );
output << p1788::io::hex;
output << p1788::io::lower_case;
output << p1788::io::string_width(13);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[entire ]_trv" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( " ENTIRE DEF" ) );
output << p1788::io::decimal;
output << p1788::io::string_width(11);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "ENTIRE DAC" ) );
output << p1788::io::hex;
output << p1788::io::punctuation;
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[entire]_trv" ) );
output << p1788::io::dec_numeric;
output << p1788::io::decimal;
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "[,]_8" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[,]_4" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(9);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "[ , ]_8" ) );
output << p1788::io::hex;
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "ENTIRE 12" ) );
output << p1788::io::dec_alpha;
output << p1788::io::decimal;
output << p1788::io::lower_case;
output << p1788::io::string_width(0);
output << p1788::io::special_bounds;
output << p1788::io::punctuation;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[-inf,inf]_trv" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "[-INF,INF]_DAC" ) );
output << p1788::io::hex;
output << p1788::io::lower_case;
output << p1788::io::string_width(6);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[-inf,inf]_trv" ) );
output << p1788::io::string_width(15);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "[-inf, inf]_def" ) );
output << p1788::io::decimal;
output << p1788::io::string_width(16);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "[ -INF, INF]_DAC" ) );
output << p1788::io::string_width(17);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[ -INF, INF]_TRV" ) );
output << p1788::io::hex;
output << p1788::io::string_width(18);
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "[ -inf, inf]_def" ) );
output << p1788::io::decimal;
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( " -INF INF TRV" ) );
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "-INF INF DAC" ) );
output << p1788::io::lower_case;
output << p1788::io::special_text;
output << p1788::io::punctuation;
output << p1788::io::inf_sup_form;
output << p1788::io::width(10);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[ entire ]_trv" ) );
output << p1788::io::special_bounds;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "[ -inf, inf]_def" ) );
output << p1788::io::string_width(27);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "[ -inf, inf]_dac" ) );
output << p1788::io::string_width(29);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[ -inf, inf]_trv" ) );
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "[ , ]_def" ) );
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "[ , ]_dac" ) );
output << p1788::io::special_bounds;
output << p1788::io::precision(27);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "[ -INF, INF]_TRV" ) );
}
BOOST_AUTO_TEST_CASE(minimal_nai_output_test)
{
boost::test_tools::output_test_stream output;
F<double>::operator_interval_to_text(output, F<double>::nai());
BOOST_CHECK( output.is_equal( "[nai]" ) );
output << p1788::io::hex;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, F<double>::nai());
BOOST_CHECK( output.is_equal( "[NAI]" ) );
output << p1788::io::decimal;
output << p1788::io::lower_case;
output << p1788::io::string_width(12);
F<double>::operator_interval_to_text(output, F<double>::nai());
BOOST_CHECK( output.is_equal( "[ nai ]" ) );
output << p1788::io::dec_numeric;
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
output << p1788::io::hex;
F<double>::operator_interval_to_text(output, F<double>::nai());
BOOST_CHECK( output.is_equal( " NAI " ) );
output << p1788::io::string_width(11);
output << p1788::io::decimal;
F<double>::operator_interval_to_text(output, F<double>::nai());
BOOST_CHECK( output.is_equal( " NAI " ) );
output << p1788::io::dec_alpha;
output << p1788::io::punctuation;
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::nai());
BOOST_CHECK( output.is_equal( "[ nai ]" ) );
output << p1788::io::upper_case;
output << p1788::io::special_text;
output << p1788::io::no_punctuation;
output << p1788::io::inf_sup_form;
output << p1788::io::punctuation;
output << p1788::io::width(10);
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "[ NAI ]" ) );
output << p1788::io::special_bounds;
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "[ NAI ]" ) );
output << p1788::io::string_width(23);
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "[ NAI ]" ) );
output << p1788::io::string_width(25);
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "[ NAI ]" ) );
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "[ NAI ]" ) );
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "[ NAI ]" ) );
output << p1788::io::special_bounds;
output << p1788::io::precision(27);
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "[ nai ]" ) );
}
BOOST_AUTO_TEST_CASE(minimal_bare_interval_output_test)
{
boost::test_tools::output_test_stream output;
F<double>::operator_interval_to_text(output, REP<double>(0.1,0.1) );
BOOST_CHECK( output.is_equal( "[0.1,0.100001]" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP<double>(-0.1,0.1) );
BOOST_CHECK( output.is_equal( "[-0.100001,0.100001]" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(22);
F<double>::operator_interval_to_text(output, REP<double>(-0.1,0.1) );
BOOST_CHECK( output.is_equal( "[ -0.100001, 0.100001]" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP<double>(-0.1,0.1) );
BOOST_CHECK( output.is_equal( " -0.100001 0.100001" ) );
output << p1788::io::hex;
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, REP<double>(-0.1,1.3) );
BOOST_CHECK( output.is_equal( "-0X1.999999999999AP-4 0X1.4CCCCCCCCCCCDP+0" ) );
output << p1788::io::punctuation;
output << p1788::io::lower_case;
output << p1788::io::precision(5);
output << p1788::io::width(15);
F<double>::operator_interval_to_text(output, REP<double>(-0.1,1.3) );
BOOST_CHECK( output.is_equal( "[ -0x1.9999ap-4, 0x1.4cccdp+0]" ) );
output << p1788::io::string_width(35);
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, REP<double>(0.1,1.3) );
BOOST_CHECK( output.is_equal( "[ 0x1.99999p-4, 0x1.4cccdp+0]" ) );
output << p1788::io::string_width(0);
output << p1788::io::precision(20);
F<double>::operator_interval_to_text(output, REP<double>(0.1,1.3) );
BOOST_CHECK( output.is_equal( "[0x1.999999999999a0000000p-4,0x1.4cccccccccccd0000000p+0]" ) );
output << p1788::io::decimal;
output << p1788::io::precision(0);
output << p1788::io::width(10);
F<double>::operator_interval_to_text(output, REP<double>(0.1,0.1) );
BOOST_CHECK( output.is_equal( "[ 0.100000, 0.100001]" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP<double>(-0.1,0.1) );
BOOST_CHECK( output.is_equal( "[ -0.100001, 0.100001]" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(25);
output << p1788::io::precision(7);
F<double>::operator_interval_to_text(output, REP<double>(0.1,0.1) );
BOOST_CHECK( output.is_equal( "[ 0.1000000, 0.1000001]" ) );
output << p1788::io::decimal_scientific;
output << p1788::io::string_width(0);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP<double>(0.1,0.1) );
BOOST_CHECK( output.is_equal( "[ 0.1, 0.1000001]" ) );
output << p1788::io::scientific;
F<double>::operator_interval_to_text(output, REP<double>(0.1,1.3) );
BOOST_CHECK( output.is_equal( "[1.0000000E-01,1.3000001E+00]" ) );
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,0.1) );
BOOST_CHECK( output.is_equal( "[ -inf,1.0000001e-01]" ) );
output << p1788::io::decimal_scientific;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP<double>(0.1,INF_D) );
BOOST_CHECK( output.is_equal( "[ 0.1, INF]" ) );
output << p1788::io::hex;
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,0.1) );
BOOST_CHECK( output.is_equal( "[ -INF,0X1.999999AP-4]" ) );
output << p1788::io::decimal;
F<double>::operator_interval_to_text(output, REP<double>(0.1,INF_D) );
BOOST_CHECK( output.is_equal( "[ 0.1000000, INF]" ) );
}
BOOST_AUTO_TEST_CASE(minimal_decorated_interval_output_test)
{
boost::test_tools::output_test_stream output;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,0.1), DEC::trv) );
BOOST_CHECK( output.is_equal( "[0.1,0.100001]_trv" ) );
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.1,0.1), DEC::def) );
BOOST_CHECK( output.is_equal( "[-0.100001,0.100001]_DEF" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(26);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.1,0.1), DEC::dac) );
BOOST_CHECK( output.is_equal( "[ -0.100001, 0.100001]_dac" ) );
output << p1788::io::no_punctuation;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.1,0.1), DEC::com) );
BOOST_CHECK( output.is_equal( " -0.100001 0.100001 COM" ) );
output << p1788::io::dec_numeric;
output << p1788::io::hex;
output << p1788::io::string_width(0);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.1,1.3), DEC::trv) );
BOOST_CHECK( output.is_equal( "-0X1.999999999999AP-4 0X1.4CCCCCCCCCCCDP+0 4" ) );
output << p1788::io::punctuation;
output << p1788::io::lower_case;
output << p1788::io::precision(5);
output << p1788::io::width(15);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.1,1.3), DEC::def) );
BOOST_CHECK( output.is_equal( "[ -0x1.9999ap-4, 0x1.4cccdp+0]_8" ) );
output << p1788::io::string_width(35);
output << p1788::io::special_no_bounds;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,1.3), DEC::dac) );
BOOST_CHECK( output.is_equal( "[ 0x1.99999p-4, 0x1.4cccdp+0]_12" ) );
output << p1788::io::string_width(0);
output << p1788::io::precision(20);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,1.3), DEC::com) );
BOOST_CHECK( output.is_equal( "[0x1.999999999999a0000000p-4,0x1.4cccccccccccd0000000p+0]_16" ) );
output << p1788::io::decimal;
output << p1788::io::precision(0);
output << p1788::io::width(10);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,0.1), DEC::trv) );
BOOST_CHECK( output.is_equal( "[ 0.100000, 0.100001]_4" ) );
output << p1788::io::dec_alpha;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.1,0.1), DEC::def) );
BOOST_CHECK( output.is_equal( "[ -0.100001, 0.100001]_DEF" ) );
output << p1788::io::lower_case;
output << p1788::io::string_width(29);
output << p1788::io::precision(7);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,0.1), DEC::dac) );
BOOST_CHECK( output.is_equal( "[ 0.1000000, 0.1000001]_dac" ) );
output << p1788::io::decimal_scientific;
output << p1788::io::string_width(0);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,0.1), DEC::com) );
BOOST_CHECK( output.is_equal( "[ 0.1, 0.1000001]_COM" ) );
output << p1788::io::scientific;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,1.3), DEC::trv) );
BOOST_CHECK( output.is_equal( "[1.0000000E-01,1.3000001E+00]_TRV" ) );
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,0.1), DEC::def) );
BOOST_CHECK( output.is_equal( "[ -inf,1.0000001e-01]_def" ) );
output << p1788::io::decimal_scientific;
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,INF_D), DEC::dac) );
BOOST_CHECK( output.is_equal( "[ 0.1, INF]_DAC" ) );
output << p1788::io::hex;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,0.1), DEC::trv) );
BOOST_CHECK( output.is_equal( "[ -INF,0X1.999999AP-4]_TRV" ) );
output << p1788::io::decimal;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "[ 0.1000000, INF]_DEF" ) );
}
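// Reminder of the uncertain form exercised below (hedged summary of the
// P1788 text form): "m?r" denotes the interval m +/- r units in the last
// place of m; a trailing 'u' ('d') makes the uncertainty one-sided upward
// (downward); "??" stands for an unbounded radius; an optional exponent part
// may follow, e.g. "1.000000??ue-01".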
BOOST_AUTO_TEST_CASE(minimal_uncertain_interval_output_test)
{
boost::test_tools::output_test_stream output;
output << p1788::io::uncertain_form;
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,INF_D) );
BOOST_CHECK( output.is_equal( "0.000000??" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.1,INF_D) );
BOOST_CHECK( output.is_equal( "0.100000??u" ) );
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,0.1) );
BOOST_CHECK( output.is_equal( "0.100001??d" ) );
output << p1788::io::uncertain_exponent;
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,INF_D) );
BOOST_CHECK( output.is_equal( "0.000000??e+00" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.1,INF_D) );
BOOST_CHECK( output.is_equal( "1.000000??ue-01" ) );
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,0.1) );
BOOST_CHECK( output.is_equal( "1.000001??de-01" ) );
output << p1788::io::precision(1);
output << p1788::io::string_width(20);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,INF_D) );
BOOST_CHECK( output.is_equal( " 0.0??E+00" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.1,INF_D) );
BOOST_CHECK( output.is_equal( " 1.0??UE-01" ) );
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,0.1) );
BOOST_CHECK( output.is_equal( " 1.1??DE-01" ) );
output << p1788::io::no_uncertain_exponent;
output << p1788::io::no_punctuation;
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,INF_D) );
BOOST_CHECK( output.is_equal( " 0.0??" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.1,INF_D) );
BOOST_CHECK( output.is_equal( " 0.1??U" ) );
F<double>::operator_interval_to_text(output, REP<double>(-INF_D,0.1) );
BOOST_CHECK( output.is_equal( " 0.2??D" ) );
output << p1788::io::precision(5);
output << p1788::io::string_width(0);
output << p1788::io::lower_case;
F<double>::operator_interval_to_text(output, REP<double>(0.1, 0.2) );
BOOST_CHECK( output.is_equal( "0.15000?5001" ) );
output << p1788::io::uncertain_down_form;
output << p1788::io::precision(7);
F<double>::operator_interval_to_text(output, REP<double>(0.1, 0.2) );
BOOST_CHECK( output.is_equal( "0.2000001?1000001d" ) );
output << p1788::io::uncertain_up_form;
output << p1788::io::precision(0);
output << p1788::io::string_width(18);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP<double>(-0.1, 0.2) );
BOOST_CHECK( output.is_equal( " -0.100001?300002U" ) );
output << p1788::io::precision(0);
output << p1788::io::string_width(0);
output << p1788::io::lower_case;
output << p1788::io::uncertain_exponent;
F<double>::operator_interval_to_text(output, REP<double>(-100, 0.2) );
BOOST_CHECK( output.is_equal( "-1.000000?1002001ue+02" ) );
F<double>::operator_interval_to_text(output, REP<double>(-100, 0.0) );
BOOST_CHECK( output.is_equal( "-1.000000?1000000ue+02" ) );
F<double>::operator_interval_to_text(output, REP<double>(-100, -0.2) );
BOOST_CHECK( output.is_equal( "-1.000000?998000ue+02" ) );
F<double>::operator_interval_to_text(output, REP<double>(-0.2, 100) );
BOOST_CHECK( output.is_equal( "-2.000001?1002000001ue-01" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.0,100) );
BOOST_CHECK( output.is_equal( "0.000000?100000000ue+00" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.2, 100) );
BOOST_CHECK( output.is_equal( "2.000000?998000000ue-01" ) );
output << p1788::io::uncertain_down_form;
F<double>::operator_interval_to_text(output, REP<double>(-0.2, 100) );
BOOST_CHECK( output.is_equal( "1.000000?1002001de+02" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.0,100) );
BOOST_CHECK( output.is_equal( "1.000000?1000000de+02" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.2, 100) );
BOOST_CHECK( output.is_equal( "1.000000?998000de+02" ) );
F<double>::operator_interval_to_text(output, REP<double>(-100, 0.2) );
BOOST_CHECK( output.is_equal( "2.000001?1002000001de-01" ) );
F<double>::operator_interval_to_text(output, REP<double>(-100, 0.0) );
BOOST_CHECK( output.is_equal( "0.000000?100000000de+00" ) );
F<double>::operator_interval_to_text(output, REP<double>(-100, -0.2) );
BOOST_CHECK( output.is_equal( "-2.000000?998000000de-01" ) );
output << p1788::io::uncertain_form;
F<double>::operator_interval_to_text(output, REP<double>(-0.2, 100) );
BOOST_CHECK( output.is_equal( "4.990000?5010001e+01" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.0,100) );
BOOST_CHECK( output.is_equal( "5.000000?5000000e+01" ) );
F<double>::operator_interval_to_text(output, REP<double>(0.2, 100) );
BOOST_CHECK( output.is_equal( "5.010000?4990000e+01" ) );
F<double>::operator_interval_to_text(output, REP<double>(-100, 0.2) );
BOOST_CHECK( output.is_equal( "-4.990000?5010001e+01" ) );
F<double>::operator_interval_to_text(output, REP<double>(-100, 0.0) );
BOOST_CHECK( output.is_equal( "-5.000000?5000000e+01" ) );
F<double>::operator_interval_to_text(output, REP<double>(-100, -0.2) );
BOOST_CHECK( output.is_equal( "-5.010000?4990000e+01" ) );
output << p1788::io::uncertain_form;
output << p1788::io::upper_case;
output << p1788::io::special_bounds;
output << p1788::io::no_punctuation;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "INF -INF" ) );
output << p1788::io::special_text;
output << p1788::io::punctuation;
F<double>::operator_interval_to_text(output, F<double>::empty());
BOOST_CHECK( output.is_equal( "[EMPTY]" ) );
}
BOOST_AUTO_TEST_CASE(minimal_uncertain_interval_dec_output_test)
{
boost::test_tools::output_test_stream output;
output << p1788::io::uncertain_form;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "0.000000??_trv" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "0.100000??u_def" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,0.1), DEC::dac) );
BOOST_CHECK( output.is_equal( "0.100001??d_dac" ) );
output << p1788::io::uncertain_exponent;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( "0.000000??e+00_trv" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( "1.000000??ue-01_def" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,0.1), DEC::dac) );
BOOST_CHECK( output.is_equal( "1.000001??de-01_dac" ) );
output << p1788::io::precision(1);
output << p1788::io::string_width(20);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( " 0.0??E+00_TRV" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( " 1.0??UE-01_DEF" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,0.1), DEC::dac) );
BOOST_CHECK( output.is_equal( " 1.1??DE-01_DAC" ) );
output << p1788::io::no_uncertain_exponent;
output << p1788::io::no_punctuation;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,INF_D), DEC::trv) );
BOOST_CHECK( output.is_equal( " 0.0?? TRV" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1,INF_D), DEC::def) );
BOOST_CHECK( output.is_equal( " 0.1??U DEF" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-INF_D,0.1), DEC::dac) );
BOOST_CHECK( output.is_equal( " 0.2??D DAC" ) );
output << p1788::io::precision(5);
output << p1788::io::string_width(0);
output << p1788::io::lower_case;
output << p1788::io::dec_numeric;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1, 0.2), DEC::trv) );
BOOST_CHECK( output.is_equal( "0.15000?5001 4" ) );
output << p1788::io::uncertain_down_form;
output << p1788::io::precision(7);
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.1, 0.2), DEC::def) );
BOOST_CHECK( output.is_equal( "0.2000001?1000001d 8" ) );
output << p1788::io::uncertain_up_form;
output << p1788::io::precision(0);
output << p1788::io::string_width(21);
output << p1788::io::upper_case;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.1, 0.2), DEC::dac) );
BOOST_CHECK( output.is_equal( " -0.100001?300002U 12" ) );
output << p1788::io::precision(0);
output << p1788::io::string_width(0);
output << p1788::io::lower_case;
output << p1788::io::uncertain_exponent;
output << p1788::io::punctuation;
output << p1788::io::dec_alpha;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, 0.2), DEC::com) );
BOOST_CHECK( output.is_equal( "-1.000000?1002001ue+02_com" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, 0.0), DEC::trv) );
BOOST_CHECK( output.is_equal( "-1.000000?1000000ue+02_trv" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, -0.2), DEC::def) );
BOOST_CHECK( output.is_equal( "-1.000000?998000ue+02_def" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.2, 100), DEC::dac) );
BOOST_CHECK( output.is_equal( "-2.000001?1002000001ue-01_dac" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.0,100), DEC::com) );
BOOST_CHECK( output.is_equal( "0.000000?100000000ue+00_com" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.2, 100), DEC::trv) );
BOOST_CHECK( output.is_equal( "2.000000?998000000ue-01_trv" ) );
output << p1788::io::uncertain_down_form;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.2, 100), DEC::def) );
BOOST_CHECK( output.is_equal( "1.000000?1002001de+02_def" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.0,100), DEC::dac) );
BOOST_CHECK( output.is_equal( "1.000000?1000000de+02_dac" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.2, 100), DEC::com) );
BOOST_CHECK( output.is_equal( "1.000000?998000de+02_com" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, 0.2), DEC::trv) );
BOOST_CHECK( output.is_equal( "2.000001?1002000001de-01_trv" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, 0.0), DEC::def) );
BOOST_CHECK( output.is_equal( "0.000000?100000000de+00_def" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, -0.2), DEC::dac) );
BOOST_CHECK( output.is_equal( "-2.000000?998000000de-01_dac" ) );
output << p1788::io::uncertain_form;
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-0.2, 100), DEC::com) );
BOOST_CHECK( output.is_equal( "4.990000?5010001e+01_com" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.0,100), DEC::trv) );
BOOST_CHECK( output.is_equal( "5.000000?5000000e+01_trv" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(0.2, 100), DEC::def) );
BOOST_CHECK( output.is_equal( "5.010000?4990000e+01_def" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, 0.2), DEC::dac) );
BOOST_CHECK( output.is_equal( "-4.990000?5010001e+01_dac" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, 0.0), DEC::com) );
BOOST_CHECK( output.is_equal( "-5.000000?5000000e+01_com" ) );
F<double>::operator_interval_to_text(output, REP_DEC<double>(REP<double>(-100, -0.2), DEC::trv) );
BOOST_CHECK( output.is_equal( "-5.010000?4990000e+01_trv" ) );
output << p1788::io::special_bounds;
output << p1788::io::no_punctuation;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "inf -inf" ) );
output << p1788::io::hex;
output << p1788::io::special_text;
output << p1788::io::punctuation;
F<double>::operator_interval_to_text(output, F<double>::empty_dec());
BOOST_CHECK( output.is_equal( "[empty]" ) );
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "[nai]" ) );
output << p1788::io::upper_case;
output << p1788::io::no_punctuation;
F<double>::operator_interval_to_text(output, F<double>::nai() );
BOOST_CHECK( output.is_equal( "NAI" ) );
}
BOOST_AUTO_TEST_CASE(minimal_decorated_interval_input_test)
{
{
REP_DEC<double> di;
std::istringstream is("[ Nai ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK( F<double>::is_nai(di) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ Nai ]_ill");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ Nai ]_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[ Empty ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK( F<double>::is_empty(di) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[ Empty ]_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK( F<double>::is_empty(di) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ Empty ]_ill");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[ ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK( F<double>::is_empty(di) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[ ]_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK( F<double>::is_empty(di) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[,]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, F<double>::entire_dec());
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[,]_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,INF_D),DEC::trv));
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[,]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[ entire ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, F<double>::entire_dec());
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[ ENTIRE ]_dac");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ Entire ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[ -inf , INF ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, F<double>::entire_dec());
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[ -inf, INF ]_def");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,INF_D),DEC::def));
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ -inf , INF ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[-1.0,1.0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-1.0,1.0),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[ -1.0 , 1.0 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-1.0,1.0),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[ -1.0 , 1.0]_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-1.0,1.0),DEC::trv));
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ -1.0 , 1.0]_ill");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ -1.0 , 1.0]_fooo");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ -1.0 , 1.0]_da c");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ -1.0 , 1.0]_da");
is.exceptions(is.eofbit);
BOOST_CHECK_THROW(F<double>::operator_text_to_interval(is, di), std::ios_base::failure);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[-1,]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-1.0,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[-1.0, +inf]_def");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-1.0,INF_D),DEC::def));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[-1.0, +infinity]_def");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-1.0,INF_D),DEC::def));
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[-1.0,]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[-Inf, 1.000 ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,1.0),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[-Infinity, 1.000 ]_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,1.0),DEC::trv));
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-15.0,27.0),DEC::trv);
std::istringstream is("[-Inf, 1.000 ]_ill");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-15.0,27.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di;
std::istringstream is("[1.0E+400 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(MAX_D,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[1.0000000000000002E+6000 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(MAX_D,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[-1.0000000000000002E+6000 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,-MAX_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[10000000000000002]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(10000000000000002,10000000000000002),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[-10000000000000002]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-10000000000000002,-10000000000000002),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[-1.0000000000000002E+6000, 1.0000000000000001E+6000]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[ -4/2, 10/5 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-2.0,2.0),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("[ -1/10, 1/10 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-0.1,0.1),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ 1/-10, 1/10 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ 1/10, 1/+10 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[ 1/0, 1/10 ]_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[-I nf, 1.000 ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[-Inf, 1.0 00 ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,12.0),DEC::trv);
std::istringstream is("[-Inf ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,12.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[1.0,-1.0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[-1.0,1.0");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("-1.0,1.0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[1.0");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("-1.0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,2.0),DEC::trv);
std::istringstream is("[Inf , INF]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,2.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[ foo ]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[1.0000000000000002,1.0000000000000001]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[1.0000000000000002E+6000,1.0000000000000001E+6000]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[10000000000000001/10000000000000000,10000000000000002/10000000000000001]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[0x1.00000000000001p0,0x1.00000000000002p0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(std::stod("0x1.0p0"),std::stod("0x1.0000000000001p0")),DEC::com) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[-0x1.00000000000001p0,0x1.00000000000002p0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(std::stod("-0x1.0000000000001p0"),std::stod("0x1.0000000000001p0")),DEC::com) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[0x1.00000000000001,0x1.00000000000002p0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[0x1.00000000000001p0,0x1.00000000000002]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[0x1.00000000000001,0x1.00000000000002]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[0x1.00000000000001p0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(std::stod("0x1.0p0"),std::stod("0x1.0000000000001p0")),DEC::com) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[-0x1.00000000000001p0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(std::stod("-0x1.0000000000001p0"),std::stod("-0x1.0p0")),DEC::com) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[0x1.00000000000001]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[0x1.00000000000002p0,0x1.00000000000001p0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[-0x1.00000000000001p0,-0x1.00000000000002p0]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(-1.0,5.0),DEC::trv) );
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[-0x1.00000000000001p0, 10/5]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(std::stod("-0x1.0000000000001p0"), 2.0),DEC::com) );
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-1.0,5.0),DEC::trv);
std::istringstream is("[0x1.00000000000001p0, 2.5]");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL( di, REP_DEC<double>(REP<double>(1.0, 2.5),DEC::com) );
BOOST_CHECK(is);
}
}
BOOST_AUTO_TEST_CASE(minimal_interval_input_test)
{
{
REP<double> i;
std::istringstream is("[ Empty ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK( F<double>::is_empty(i) );
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[ Empty ]_trv");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK( F<double>::is_empty(i) );
BOOST_CHECK(is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ Empty ]_ill");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i;
std::istringstream is("[ ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK( F<double>::is_empty(i) );
BOOST_CHECK(is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i;
std::istringstream is("[ ]_trv");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK( F<double>::is_empty(i) );
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[,]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, F<double>::entire());
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[,]_trv");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, F<double>::entire());
BOOST_CHECK(is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[,]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i;
std::istringstream is("[ entire ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, F<double>::entire());
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[ ENTIRE ]_dac");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, F<double>::entire());
BOOST_CHECK(is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ Entire ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i;
std::istringstream is("[ -inf , INF ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, F<double>::entire());
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[ -inf, INF ]_def");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, F<double>::entire());
BOOST_CHECK(is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ -inf , INF ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i;
std::istringstream is("[-1.0,1]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-1.0,1.0));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[ -1.0 , 1.0 ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-1.0,1.0));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[ -1.0 , 1.0]_trv");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-1.0,1.0));
BOOST_CHECK(is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ -1.0 , 1.0]_ill");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ -1.0 , 1.0]_fooo");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ -1.0 , 1.0]_da c");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i;
std::istringstream is("[-1.0,]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-1.0,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[-1.0, +inf]_def");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-1.0,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[-1.0, +infinity]_def");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-1.0,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[-1.0,]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i;
std::istringstream is("[-Inf, 1.000 ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-INF_D,1.0));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[-Infinity, 1.000 ]_trv");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-INF_D,1.0));
BOOST_CHECK(is);
}
{
REP<double> i(-35.0,-0.7);
std::istringstream is("[-Inf, 1.000 ]_ill");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(-35.0,-0.7) );
BOOST_CHECK(!is);
}
{
REP<double> i;
std::istringstream is("[ 0.1 ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.9999999999999p-4"),std::stod("0x1.999999999999ap-4")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[ 1/10 ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.9999999999999p-4"),std::stod("0x1.999999999999ap-4")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[1.0E+400 ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(MAX_D,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[ -4/2, 10/5 ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-2.0,2.0));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("[ -1/10, 1/10 ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-0.1,0.1));
BOOST_CHECK(is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ 1/-10, 1/10 ]_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,45.7);
std::istringstream is("1.0, 1.000]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,45.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,45.7);
std::istringstream is("[1.0, 1.000");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,45.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,45.7);
std::istringstream is("1.000]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,45.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,45.7);
std::istringstream is("[1.0");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,45.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,45.7);
std::istringstream is("[-I nf, 1.000 ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,45.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(35.0,45.7);
std::istringstream is("[-Inf, 1.0 00 ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(35.0,45.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(-1.0,2.0);
std::istringstream is("[1.0,-1.0]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(-1.0,2.0) );
BOOST_CHECK(!is);
}
{
REP<double> i(-0.5,4.7);
std::istringstream is("[-Inf ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(-0.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[Inf , INF]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[ foo ]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[1.0000000000000002,1.0000000000000001]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[10000000000000001/10000000000000000,10000000000000002/10000000000000001]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
{
REP<double> i(3.5,4.7);
std::istringstream is("[0x1.00000000000002p0,0x1.00000000000001p0]");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(3.5,4.7) );
BOOST_CHECK(!is);
}
}
BOOST_AUTO_TEST_CASE(minimal_uncertain_interval_dec_input_test)
{
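    // Shape of the "uncertain form" exercised below: m?r[u|d][e<exp>][_dec].
    // With no radius digits the radius is half a unit in the last decimal place
    // of m ("0.0?" -> [-0.05,0.05]); explicit digits after '?' give the radius
    // in units of that last place ("0.000?5" -> [-0.005,0.005]); 'u'/'d' keep
    // only the upward/downward half; "??" means an unbounded radius; an
    // optional exponent and decoration suffix follow.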
{
REP_DEC<double> di;
std::istringstream is("0.0?");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-0.05,0.05),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("0.0?u_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(0.0,0.05),DEC::trv));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("0.0?d_dac");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-0.05,0.0),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.5?");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(std::stod("0x1.3999999999999p+1"),std::stod("0x1.4666666666667p+1")),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.5?u");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(2.5,std::stod("0x1.4666666666667p+1")),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.5?d_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(std::stod("0x1.3999999999999p+1"),2.5),DEC::trv));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("0.000?5");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-0.005,0.005),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("0.000?5u_def");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(0.0,0.005),DEC::def));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("0.000?5d");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-0.005,0.0),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.500?5");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(std::stod("0x1.3f5c28f5c28f5p+1"),std::stod("0x1.40a3d70a3d70bp+1")),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.500?5u");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(2.5,std::stod("0x1.40a3d70a3d70bp+1")),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.500?5d");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(std::stod("0x1.3f5c28f5c28f5p+1"),2.5),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("0.0??_dac");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("0.0??u_trv");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(0.0,INF_D),DEC::trv));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("0.0??d");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,0.0),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("5?");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(4.5,5.5),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("5?d");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(4.5,5.0),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("5?u");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(5.0,5.5),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("-5?");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-5.5,-4.5),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("-5?d");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-5.5,-5.0),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("-5?u");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-5.0,-4.5),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.5??");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.5??u_def");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(2.5,INF_D),DEC::def));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.5??d_dac");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,2.5),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.500?5e+27");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(std::stod("0x1.01fa19a08fe7fp+91"),std::stod("0x1.0302cc4352683p+91")),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.500?5ue4_def");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(std::stod("0x1.86ap+14"),std::stod("0x1.8768p+14")),DEC::def));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("2.500?5de-5");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(std::stod("0x1.a2976f1cee4d5p-16"),std::stod("0x1.a36e2eb1c432dp-16")),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::string rep = "10?18";
rep += std::string(308, '0');
rep += "_com";
std::stringstream is(rep);
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-INF_D,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("10?3_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(7.0,13.0),DEC::com));
BOOST_CHECK(is);
}
{
REP_DEC<double> di;
std::istringstream is("10?3e380_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(MAX_D,INF_D),DEC::dac));
BOOST_CHECK(is);
}
{
REP_DEC<double> di(REP<double>(-3.3,10.01),DEC::com);
std::istringstream is("5");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-3.3,10.01),DEC::com));
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-3.7,0.01),DEC::def);
std::istringstream is("0.0??_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-3.7,0.01),DEC::def));
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(3.7,12.4),DEC::trv);
std::istringstream is("0.0??u_ill");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(3.7,12.4),DEC::trv));
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-3.3,10.01),DEC::com);
std::istringstream is("0.0??d_com");
F<double>::operator_text_to_interval(is, di);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-3.3,10.01),DEC::com));
BOOST_CHECK(!is);
}
{
REP_DEC<double> di(REP<double>(-3.3,10.01),DEC::com);
std::istringstream is("0.0?22d_cm");
is.exceptions(is.eofbit);
BOOST_CHECK_THROW(F<double>::operator_text_to_interval(is, di), std::ios_base::failure);
BOOST_CHECK_EQUAL(di, REP_DEC<double>(REP<double>(-3.3,10.01),DEC::com));
BOOST_CHECK(!is);
}
}
BOOST_AUTO_TEST_CASE(minimal_uncertain_interval_input_test)
{
{
REP<double> i;
std::istringstream is("0.0?");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-0.05,0.05));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("0.0?u_trv");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(0.0,0.05));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("0.0?d_dac");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-0.05,0.0));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.5?");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.3999999999999p+1"),std::stod("0x1.4666666666667p+1")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.5?u");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(2.5,std::stod("0x1.4666666666667p+1")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.5?d_trv");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.3999999999999p+1"),2.5));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("0.000?5");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-0.005,0.005));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("0.000?5u_def");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(0.0,0.005));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("0.000?5d");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-0.005,0.0));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.500?5");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.3f5c28f5c28f5p+1"),std::stod("0x1.40a3d70a3d70bp+1")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.500?5u");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(2.5,std::stod("0x1.40a3d70a3d70bp+1")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.500?5d");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.3f5c28f5c28f5p+1"),2.5));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("0.0??_dac");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-INF_D,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("0.0??u_trv");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(0.0,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("0.0??d");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-INF_D,0.0));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.5??");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-INF_D,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.5??u_def");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(2.5,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.5??d_dac");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-INF_D,2.5));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.500?5e+27");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.01fa19a08fe7fp+91"),std::stod("0x1.0302cc4352683p+91")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.500?5ue4_def");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.86ap+14"),std::stod("0x1.8768p+14")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("2.500?5de-5");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(std::stod("0x1.a2976f1cee4d5p-16"),std::stod("0x1.a36e2eb1c432dp-16")));
BOOST_CHECK(is);
}
{
REP<double> i;
std::string rep = "10?18";
rep += std::string(308, '0');
rep += "_com";
std::stringstream is(rep);
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(-INF_D,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("10?3_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(7.0,13.0));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("10?3e380_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(MAX_D,INF_D));
BOOST_CHECK(is);
}
{
REP<double> i;
std::istringstream is("1.0000000000000001?1");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(1.0,std::stod("0x1.0000000000001p+0")));
BOOST_CHECK(is);
}
{
REP<double> i(2.0,3.0);
std::istringstream is("12");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL(i, REP<double>(2.0,3.0));
BOOST_CHECK(!is);
}
{
REP<double> i(2.0,3.0);
std::istringstream is("0.0??_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(2.0,3.0) );
BOOST_CHECK(!is);
}
{
REP<double> i(2.5,3.4);
std::istringstream is("0.0??u_ill");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(2.5,3.4) );
BOOST_CHECK(!is);
}
{
REP<double> i(-2.0,-1.0);
std::istringstream is("0.0??d_com");
F<double>::operator_text_to_interval(is, i);
BOOST_CHECK_EQUAL( i, REP<double>(-2.0,-1.0) );
BOOST_CHECK(!is);
}
}
|
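-- A hedged reconstruction of the lemma the trace below proves (the trace's
-- context also carries unused q : ℚ and y : ℝ); the lemma names
-- `cast_coe_int` and `Irrational.rat_add` are read off the trace itself, and
-- the theorem name is illustrative:
--
--   theorem irrational_int_add {x : ℝ} (h : Irrational x) (m : ℤ) :
--       Irrational (↑m + x) := by
--     rw [← cast_coe_int]
--     exact h.rat_add m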
State Before:
  q : ℚ
  x y : ℝ
  h : Irrational x
  m : ℤ
  ⊢ Irrational (↑m + x)
State After:
  q : ℚ
  x y : ℝ
  h : Irrational x
  m : ℤ
  ⊢ Irrational (↑↑m + x)
Tactic: rw [← cast_coe_int]

State Before:
  q : ℚ
  x y : ℝ
  h : Irrational x
  m : ℤ
  ⊢ Irrational (↑↑m + x)
State After: no goals
Tactic: exact h.rat_add m
|
{-# LANGUAGE RankNTypes, BangPatterns, GADTs #-}
{-# OPTIONS -Wall #-}
module Language.Hakaru.Distribution where
import System.Random
import Language.Hakaru.Mixture
import Language.Hakaru.Types
import Data.Ix
import Data.Maybe (fromMaybe)
import Data.List (findIndex, foldl')
import Numeric.SpecFunctions
import qualified Data.Map.Strict as M
import qualified Data.Number.LogFloat as LF
mapFst :: (t -> s) -> (t, u) -> (s, u)
mapFst f (a,b) = (f a, b)
dirac :: (Eq a) => a -> Dist a
dirac theta = Dist {logDensity = (\ (Discrete x) -> if x == theta then 0 else log 0),
distSample = (\ g -> (Discrete theta,g))}
bern :: Double -> Dist Bool
bern p = Dist {logDensity = (\ (Discrete x) -> log (if x then p else 1 - p)),
distSample = (\ g -> case randomR (0, 1) g of
(t, g') -> (Discrete $ t <= p, g'))}
uniform :: Double -> Double -> Dist Double
uniform lo hi =
let uniformLogDensity lo' hi' x | lo' <= x && x <= hi' = log (recip (hi' - lo'))
uniformLogDensity _ _ _ = log 0
in Dist {logDensity = (\ (Lebesgue x) -> uniformLogDensity lo hi x),
distSample = (\ g -> mapFst Lebesgue $ randomR (lo, hi) g)}
uniformD :: (Ix a, Random a) => a -> a -> Dist a
uniformD lo hi =
let uniformLogDensity lo' hi' x | lo' <= x && x <= hi' = log density
uniformLogDensity _ _ _ = log 0
density = recip (fromInteger (toInteger (rangeSize (lo,hi))))
in Dist {logDensity = (\ (Discrete x) -> uniformLogDensity lo hi x),
distSample = (\ g -> mapFst Discrete $ randomR (lo, hi) g)}
marsaglia :: (RandomGen g, Random a, Ord a, Floating a) => g -> ((a, a), g)
marsaglia g0 = -- "Marsaglia polar method"
let (x, g1) = randomR (-1,1) g0
(y, g ) = randomR (-1,1) g1
s = x * x + y * y
q = sqrt ((-2) * log s / s)
in if 1 >= s && s > 0 then ((x * q, y * q), g) else marsaglia g
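-- The polar method rejects the roughly 21% of candidate points that fall
-- outside the unit disc, in exchange for avoiding the sin/cos calls of the
-- basic Box-Muller transform.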
choose :: (RandomGen g) => Mixture k -> g -> (k, Prob, g)
choose (Mixture m) g0 =
let peak = maximum (M.elems m)
unMix = M.map (LF.fromLogFloat . (/peak)) m
total = M.foldl' (+) (0::Double) unMix
(p, g) = randomR (0, total) g0
f !k !v b !p0 = let p1 = p0 + v in if p <= p1 then k else b p1
err p0 = error ("choose: failure p0=" ++ show p0 ++
" total=" ++ show total ++
" size=" ++ show (M.size m))
in (M.foldrWithKey f err unMix 0, LF.logFloat total * peak, g)
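-- Design note: weights are divided by the peak weight while still in log
-- space, so LF.fromLogFloat cannot underflow (or overflow) when every mixture
-- weight is extreme; the peak is multiplied back into the returned total.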
chooseIndex :: (RandomGen g) => [Double] -> g -> (Int, g)
chooseIndex probs g0 =
let (p, g) = random g0
k = fromMaybe (error ("chooseIndex: failure p=" ++ show p))
(findIndex (p <=) (scanl1 (+) probs))
in (k, g)
normal_rng :: (Real a, Floating a, Random a, RandomGen g) =>
a -> a -> g -> (a, g)
normal_rng mu sd g | sd > 0 = case marsaglia g of
((x, _), g1) -> (mu + sd * x, g1)
normal_rng _ _ _ = error "normal: invalid parameters"
normalLogDensity :: Floating a => a -> a -> a -> a
normalLogDensity mu sd x = (-tau * square (x - mu)
+ log (tau / pi / 2)) / 2
where square y = y * y
tau = 1 / square sd
normal :: Double -> Double -> Dist Double
normal mu sd = Dist {logDensity = normalLogDensity mu sd . fromLebesgue,
distSample = mapFst Lebesgue . normal_rng mu sd}
categoricalLogDensity :: (Eq b, Floating a) => [(b, a)] -> b -> a
categoricalLogDensity list x = log $ fromMaybe 0 (lookup x list)
categoricalSample :: (Num b, Ord b, RandomGen g, Random b) =>
[(t,b)] -> g -> (t, g)
categoricalSample list g = (elem', g1)
where
(p, g1) = randomR (0, total) g
elem' = fst $ head $ filter (\(_,p0) -> p <= p0) sumList
sumList = scanl1 (\acc (a, b) -> (a, b + snd(acc))) list
total = sum $ map snd list
categorical :: Eq a => [(a,Double)] -> Dist a
categorical list = Dist {logDensity = categoricalLogDensity list . fromDiscrete,
distSample = mapFst Discrete . categoricalSample list}
lnFact :: Integer -> Double
lnFact = logFactorial
-- Makes use of Atkinson's algorithm as described in:
-- Monte Carlo Statistical Methods pg. 55
--
-- Further discussion at:
-- http://www.johndcook.com/blog/2010/06/14/generating-poisson-random-values/
poisson_rng :: (RandomGen g) => Double -> g -> (Integer, g)
poisson_rng lambda g0 = make_poisson g0
where smu = sqrt lambda
b = 0.931 + 2.53*smu
a = -0.059 + 0.02483*b
vr = 0.9277 - 3.6224/(b - 2)
arep = 1.1239 + 1.1368/(b-3.4)
lnlam = log lambda
make_poisson :: (RandomGen g) => g -> (Integer,g)
make_poisson g = let (u, g1) = randomR (-0.5,0.5) g
(v, g2) = randomR (0,1) g1
us = 0.5 - abs u
k = floor $ (2*a / us + b)*u + lambda + 0.43 in
case () of
() | us >= 0.07 && v <= vr -> (k, g2)
() | k < 0 -> make_poisson g2
() | us <= 0.013 && v > us -> make_poisson g2
() | accept_region us v k -> (k, g2)
_ -> make_poisson g2
accept_region :: Double -> Double -> Integer -> Bool
accept_region us v k = log (v * arep / (a/(us*us)+b)) <=
-lambda + (fromIntegral k)*lnlam - lnFact k
poisson :: Double -> Dist Integer
poisson l =
let poissonLogDensity l' x | l' > 0 && x> 0 = (fromIntegral x)*(log l') - lnFact x - l'
poissonLogDensity l' x | x==0 = -l'
poissonLogDensity _ _ = log 0
in Dist {logDensity = poissonLogDensity l . fromDiscrete,
distSample = mapFst Discrete . poisson_rng l}
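-- Minimal usage sketch (illustrative only; assumes System.Random's mkStdGen
-- and the Discrete wrapper from Language.Hakaru.Types used throughout):
--
--   case distSample (poisson 4.2) (mkStdGen 42) of
--     (Discrete k, _) -> (k, logDensity (poisson 4.2) (Discrete k))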
-- Direct implementation of "A Simple Method for Generating Gamma Variables"
-- by George Marsaglia and Wai Wan Tsang.
gamma_rng :: (RandomGen g) => Double -> Double -> g -> (Double, g)
gamma_rng shape _ _ | shape <= 0.0 = error "gamma: got a non-positive shape parameter"
gamma_rng _ scl _ | scl <= 0.0 = error "gamma: got a non-positive scale parameter"
-- For shape < 1, boost a Gamma(shape+1) draw by U**(1/shape) (Marsaglia & Tsang);
-- the recursive draw is taken unscaled so that the scale is applied exactly once.
gamma_rng shape scl g | shape < 1.0 = (gvar2, g2)
    where (gvar1, g1) = gamma_rng (shape + 1) 1 g
          (w, g2) = randomR (0,1) g1
          gvar2 = scl * gvar1 * (w ** recip shape)
gamma_rng shape scl g =
let d = shape - 1/3
c = recip $ sqrt $ 9*d
-- Algorithm recommends inlining normal generator
n = normal_rng 1 c
(v, g2) = until (\y -> fst y > 0.0) (\ (_, g') -> normal_rng 1 c g') (n g)
x = (v - 1) / c
sqr = x * x
v3 = v * v * v
(u, g3) = randomR (0.0, 1.0) g2
accept = u < 1.0 - 0.0331*(sqr*sqr) || log u < 0.5*sqr + d*(1.0 - v3 + log v3)
in case accept of
True -> (scl*d*v3, g3)
False -> gamma_rng shape scl g3
-- Log density with scl interpreted as a *scale* parameter, consistent with
-- gamma_rng above (which multiplies its draw by scl).
gammaLogDensity :: Double -> Double -> Double -> Double
gammaLogDensity shape scl x | x >= 0 && shape > 0 && scl > 0 =
    (shape - 1) * log x - x / scl - shape * log scl - logGamma shape
gammaLogDensity _ _ _ = log 0
gamma :: Double -> Double -> Dist Double
gamma shape scl = Dist {logDensity = gammaLogDensity shape scl . fromLebesgue,
distSample = mapFst Lebesgue . gamma_rng shape scl}
beta_rng :: (RandomGen g) => Double -> Double -> g -> (Double, g)
beta_rng a b g | a <= 1.0 && b <= 1.0 =
let (u, g1) = randomR (0.0, 1.0) g
(v, g2) = randomR (0.0, 1.0) g1
x = u ** (recip a)
y = v ** (recip b)
in case (x+y) <= 1.0 of
True -> (x / (x + y), g2)
False -> beta_rng a b g2
beta_rng a b g = let (ga, g1) = gamma_rng a 1 g
(gb, g2) = gamma_rng b 1 g1
in (ga / (ga + gb), g2)
betaLogDensity :: Double -> Double -> Double -> Double
betaLogDensity _ _ x | x < 0 || x > 1 = error "beta: value must be between 0 and 1"
betaLogDensity a b _ | a <= 0 || b <= 0 = error "beta: parameters must be positive"
betaLogDensity a b x = (logGamma (a + b)
- logGamma a
- logGamma b
+ (a - 1) * log x
+ (b - 1) * log (1 - x))
beta :: Double -> Double -> Dist Double
beta a b = Dist {logDensity = betaLogDensity a b . fromLebesgue,
distSample = mapFst Lebesgue . beta_rng a b}
laplace_rng :: (RandomGen g) => Double -> Double -> g -> (Double, g)
laplace_rng mu sd g = sample (randomR (0.0, 1.0) g)
where sample (u, g1) = case u < 0.5 of
True -> (mu + sd * log (u + u), g1)
False -> (mu - sd * log (2.0 - u - u), g1)
laplaceLogDensity :: Floating a => a -> a -> a -> a
laplaceLogDensity mu sd x = - log (2 * sd) - abs (x - mu) / sd
laplace :: Double -> Double -> Dist Double
laplace mu sd = Dist {logDensity = laplaceLogDensity mu sd . fromLebesgue,
distSample = mapFst Lebesgue . laplace_rng mu sd}
-- Consider having dirichlet return Vector
-- Note: This is actually symmetric dirichlet
dirichlet_rng :: (RandomGen g) => Int -> Double -> g -> ([Double], g)
dirichlet_rng n' a g' = normalize (gammas g' n')
where gammas g 0 = ([], 0, g)
gammas g n = let (xs, total, g1) = gammas g (n-1)
( x, g2) = gamma_rng a 1 g1
in ((x : xs), x+total, g2)
normalize (b, total, h) = (map (/ total) b, h)
dirichletLogDensity :: [Double] -> [Double] -> Double
dirichletLogDensity a x | all (> 0) x = sum' (zipWith logTerm a x) + logGamma (sum a)
where sum' = foldl' (+) 0
logTerm b y = (b-1) * log y - logGamma b
dirichletLogDensity _ _ = error "dirichlet: all values must be between 0 and 1"
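-- Usage sketch (illustrative; mkStdGen assumed from System.Random): one draw
-- from the symmetric Dirichlet on the 3-simplex with concentration 0.5; the
-- components are positive and sum to 1 up to rounding:
--
--   fst (dirichlet_rng 3 0.5 (mkStdGen 7))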
|
! ##################################################################################################################################
! Begin MIT license text.
! _______________________________________________________________________________________________________
! Copyright 2019 Dr William R Case, Jr (dbcase29@gmail.com)
! Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
! associated documentation files (the "Software"), to deal in the Software without restriction, including
! without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
! copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
! the following conditions:
! The above copyright notice and this permission notice shall be included in all copies or substantial
! portions of the Software and documentation.
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
! OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
! THE SOFTWARE.
! _______________________________________________________________________________________________________
! End MIT license text.
SUBROUTINE WRITE_PCOMP_EQUIV ( PCOMP_TM, PCOMP_IB, PCOMP_TS )
! Write equiv PSHELL and MAT2's for a PCOMP used, if requested, based on user Bulk Data PARAM PCOMPEQ
USE PENTIUM_II_KIND, ONLY : BYTE, LONG, DOUBLE
USE CONSTANTS_1, ONLY : TWELVE
USE IOUNT1, ONLY : ERR, F04, F06, WRT_ERR, WRT_LOG
USE SCONTR, ONLY : BLNK_SUB_NAM, MEMATC, MID1_PCOMP_EQ, MID2_PCOMP_EQ, MID3_PCOMP_EQ, MID4_PCOMP_EQ
USE PARAMS, ONLY : EPSIL, PCOMPEQ, SUPINFO
USE MODEL_STUF, ONLY : INTL_PID, PCOMP, RHO, SHELL_ALP, SHELL_A, SHELL_B, SHELL_D, SHELL_T, SHELL_T_MOD, &
TREF, ZS
USE TIMDAT, ONLY : TSEC
USE WRITE_PCOMP_EQUIV_USE_IFs
IMPLICIT NONE
CHARACTER(LEN=LEN(BLNK_SUB_NAM)):: SUBR_NAME = 'WRITE_PCOMP_EQUIV'
CHARACTER( 8*BYTE) :: C8FLD_GIJ(3,3,4) ! Char representation of MAT2 Gij entries
CHARACTER( 8*BYTE) :: C8FLD_ALP(6,MEMATC)! Char representation of MAT2 Ai (CTE) entries
CHARACTER( 8*BYTE) :: C8FLD_RHO(4) ! Char representation of MAT2 RHO entry
CHARACTER( 8*BYTE) :: C8FLD_TREF(4) ! Char representation of MAT2 TREF entry
CHARACTER( 8*BYTE) :: C8FLD_TM ! Char representation of MAT2 TM entry
CHARACTER( 8*BYTE) :: C8FLD_ZS(2) ! Char representation of MAT2 ZS entry
CHARACTER(18*BYTE) :: NAME1(4) ! Name of a SHELL matrix (A, D, T, or B)
CHARACTER( 7*BYTE) :: NAME2(4) ! Name of a SHELL matrix (A, D, T, or B)
CHARACTER( 1*BYTE) :: FINITE_MAT_PROPS(4)! Indicator of whether any SHELL matrix had zero diag terms
INTEGER(LONG) :: I,J ! DO loop indices
INTEGER(LONG) :: ICONT ! Continuation mnemonic for PSHELL equivalent B.D. entry
INTEGER(LONG) :: IERR(4) ! Error indicator if SHELL_ALP was calculated
REAL(DOUBLE), INTENT(IN) :: PCOMP_TM ! Membrane thickness of PCOMP for equivalent PSHELL
REAL(DOUBLE), INTENT(IN) :: PCOMP_IB ! Bending MOI of PCOMP for equivalent PSHELL
REAL(DOUBLE), INTENT(IN) :: PCOMP_TS ! Transverse shear thickness of PCOMP for equivalent PSHELL
REAL(DOUBLE) :: EPS1 ! Small number
REAL(DOUBLE) :: PCOMP_IBP ! 12*IB/TM^3
REAL(DOUBLE) :: PCOMP_TSTM ! TM/TS
REAL(DOUBLE) :: PCOMP_BEN_MAT2(3,3)! MAT2 material matrix for bending for equivalent PSHELL
REAL(DOUBLE) :: PCOMP_MBC_MAT2(3,3)! MAT2 material matrix for mem/ben coupling for equivalent PSHELL
REAL(DOUBLE) :: PCOMP_MEM_MAT2(3,3)! MAT2 material matrix for membrane for equivalent PSHELL
REAL(DOUBLE) :: PCOMP_TSH_MAT2(2,2)! MAT2 material matrix for transverse shear for equivalent PSHELL
! **********************************************************************************************************************************
EPS1 = EPSIL(1)
! Determine if any SHELL_A, D, T, B matrices are null
FINITE_MAT_PROPS(1) = 'Y'
IF ((DABS(SHELL_A(1,1)) < EPS1) .OR. (DABS(SHELL_A(2,2)) < EPS1) .OR. (DABS(SHELL_A(3,3)) < EPS1)) THEN
FINITE_MAT_PROPS(1) = 'N'
ENDIF
FINITE_MAT_PROPS(2) = 'Y'
IF ((DABS(SHELL_D(1,1)) < EPS1) .OR. (DABS(SHELL_D(2,2)) < EPS1) .OR. (DABS(SHELL_D(3,3)) < EPS1)) THEN
FINITE_MAT_PROPS(2) = 'N'
ENDIF
FINITE_MAT_PROPS(3) = 'Y'
IF ((DABS(SHELL_T(1,1)) < EPS1) .OR. (DABS(SHELL_T(2,2)) < EPS1)) THEN
FINITE_MAT_PROPS(3) = 'N'
ENDIF
FINITE_MAT_PROPS(4) = 'Y'
IF ((DABS(SHELL_B(1,1)) < EPS1) .OR. (DABS(SHELL_B(2,2)) < EPS1) .OR. (DABS(SHELL_B(3,3)) < EPS1)) THEN
FINITE_MAT_PROPS(4) = 'N'
ENDIF
! Write message if any CTE's were not able to be calculated, but only if that SHELL matrix had props to be output
NAME1(1) = 'MEMBRANE' ; NAME1(2) = 'BENDING' ; NAME1(3) = 'TRANSVERSE SHEAR' ; NAME1(4) = 'MEMB/BEND COUPLING'
NAME2(1) = 'SHELL_A' ; NAME2(2) = 'SHELL_D' ; NAME2(3) = 'SHELL_T' ; NAME2(4) = 'SHELL_B'
CALL SOLVE_SHELL_ALP ( IERR ) ! Solve for equiv CTE's for this PCOMP
WRITE(F06,9900)
DO I=1,4
IF (IERR(I) == 1) THEN
IF (FINITE_MAT_PROPS(I) == 'Y') THEN
WRITE(ERR,9801) NAME1(I), PCOMP(INTL_PID,1), NAME2(I)
IF (SUPINFO == 'N') THEN
WRITE(F06,9801) NAME1(I), PCOMP(INTL_PID,1), NAME2(I)
ENDIF
ENDIF
ELSE IF (IERR(I) == 2) THEN
IF (FINITE_MAT_PROPS(I) == 'Y') THEN
WRITE(ERR,9802) NAME1(I), PCOMP(INTL_PID,1), NAME2(I)
IF (SUPINFO == 'N') THEN
WRITE(F06,9802) NAME1(I), PCOMP(INTL_PID,1), NAME2(I)
ENDIF
ENDIF
ENDIF
ENDDO
PCOMP_IBP = TWELVE*PCOMP_IB/(PCOMP_TM*PCOMP_TM*PCOMP_TM)
PCOMP_TSTM = PCOMP_TS/PCOMP_TM
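! PCOMP_IBP and PCOMP_TSTM are the two thickness ratios reported for the
! equivalent PSHELL: 12*IB/TM**3 is the bending inertia ratio (the standard
! PSHELL 12I/T**3 field) and TS/TM is the transverse shear thickness ratio.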
WRITE(ERR,9901) PCOMP(INTL_PID,1), PCOMP_TM, PCOMP_IBP, PCOMP_TSTM
IF (SUPINFO == 'N') THEN
WRITE(F06,9901) PCOMP(INTL_PID,1), PCOMP_TM, PCOMP_IBP, PCOMP_TSTM
ENDIF
DO I=1,3
DO J =1,3
PCOMP_MEM_MAT2(I,J) = SHELL_A(I,J)/PCOMP_TM
PCOMP_MBC_MAT2(I,J) = SHELL_B(I,J)
PCOMP_BEN_MAT2(I,J) = SHELL_D(I,J)/PCOMP_IB
ENDDO
ENDDO
DO I=1,2
DO J =1,2
PCOMP_TSH_MAT2(I,J) = SHELL_T(I,J)/PCOMP_TS
ENDDO
ENDDO
MID1_PCOMP_EQ = MID1_PCOMP_EQ + PCOMP(INTL_PID,1)
MID2_PCOMP_EQ = MID2_PCOMP_EQ + PCOMP(INTL_PID,1)
MID3_PCOMP_EQ = MID3_PCOMP_EQ + PCOMP(INTL_PID,1)
MID4_PCOMP_EQ = MID4_PCOMP_EQ + PCOMP(INTL_PID,1)
ICONT = MID1_PCOMP_EQ
IF (PCOMPEQ > 1) THEN
IF ((IERR(1) == 0) .OR. (IERR(2) == 0) .OR. (IERR(3) == 0) .OR. (IERR(4) == 0)) THEN
WRITE(F06,9902)
ENDIF
IF (FINITE_MAT_PROPS(1) == 'Y') THEN
IF (IERR(1) == 0) THEN
WRITE(F06,9903) MID1_PCOMP_EQ, PCOMP_MEM_MAT2(1,1), PCOMP_MEM_MAT2(1,2), PCOMP_MEM_MAT2(1,3), &
PCOMP_MEM_MAT2(2,2), PCOMP_MEM_MAT2(2,3), PCOMP_MEM_MAT2(3,3), &
SHELL_ALP(1,1), SHELL_ALP(2,1), SHELL_ALP(3,1)
ELSE
WRITE(F06,9903) MID1_PCOMP_EQ, PCOMP_MEM_MAT2(1,1), PCOMP_MEM_MAT2(1,2), PCOMP_MEM_MAT2(1,3), &
PCOMP_MEM_MAT2(2,2), PCOMP_MEM_MAT2(2,3), PCOMP_MEM_MAT2(3,3)
ENDIF
ENDIF
IF (FINITE_MAT_PROPS(2) == 'Y') THEN
IF (IERR(2) == 0) THEN
WRITE(F06,9904) MID2_PCOMP_EQ, PCOMP_BEN_MAT2(1,1), PCOMP_BEN_MAT2(1,2), PCOMP_BEN_MAT2(1,3), &
PCOMP_BEN_MAT2(2,2), PCOMP_BEN_MAT2(2,3), PCOMP_BEN_MAT2(3,3), &
SHELL_ALP(1,2), SHELL_ALP(2,2), SHELL_ALP(3,2)
ELSE
WRITE(F06,9904) MID2_PCOMP_EQ, PCOMP_BEN_MAT2(1,1), PCOMP_BEN_MAT2(1,2), PCOMP_BEN_MAT2(1,3), &
PCOMP_BEN_MAT2(2,2), PCOMP_BEN_MAT2(2,3), PCOMP_BEN_MAT2(3,3)
ENDIF
ENDIF
IF (FINITE_MAT_PROPS(3) == 'Y') THEN
IF (IERR(3) == 0) THEN
WRITE(F06,9905) MID3_PCOMP_EQ, PCOMP_TSH_MAT2(1,1), PCOMP_TSH_MAT2(1,2), PCOMP_TSH_MAT2(2,2), &
SHELL_ALP(5,3), SHELL_ALP(6,3)
ELSE
WRITE(F06,9905) MID3_PCOMP_EQ, PCOMP_TSH_MAT2(1,1), PCOMP_TSH_MAT2(1,2), PCOMP_TSH_MAT2(2,2)
ENDIF
ENDIF
IF (FINITE_MAT_PROPS(4) == 'Y') THEN
IF (IERR(4) == 0) THEN
WRITE(F06,9906) MID4_PCOMP_EQ, PCOMP_MBC_MAT2(1,1), PCOMP_MBC_MAT2(1,2), PCOMP_MBC_MAT2(1,3), &
PCOMP_MBC_MAT2(2,2), PCOMP_MBC_MAT2(2,3), PCOMP_MBC_MAT2(3,3), &
SHELL_ALP(1,4), SHELL_ALP(2,4), SHELL_ALP(3,4)
ELSE
WRITE(F06,9906) MID4_PCOMP_EQ, PCOMP_MBC_MAT2(1,1), PCOMP_MBC_MAT2(1,2), PCOMP_MBC_MAT2(1,3), &
PCOMP_MBC_MAT2(2,2), PCOMP_MBC_MAT2(2,3), PCOMP_MBC_MAT2(3,3)
ENDIF
ENDIF
WRITE(F06,*)
ENDIF
CALL GET_CHAR8_OUTPUTS ( C8FLD_GIJ, C8FLD_ALP, C8FLD_RHO, C8FLD_TREF, C8FLD_TM, C8FLD_ZS )
! Write PSHELL
IF (IERR(4) == 0) THEN
WRITE(F06,9912) PCOMP(INTL_PID,1), MID1_PCOMP_EQ, C8FLD_TM, MID2_PCOMP_EQ, MID3_PCOMP_EQ,ICONT, &
ICONT, C8FLD_ZS(1), C8FLD_ZS(2), MID4_PCOMP_EQ
ELSE
WRITE(F06,9913) PCOMP(INTL_PID,1), MID1_PCOMP_EQ, C8FLD_TM, MID2_PCOMP_EQ, MID3_PCOMP_EQ,ICONT, &
ICONT, C8FLD_ZS(1), C8FLD_ZS(2)
ENDIF
! Write MAT2 for membrane
IF (FINITE_MAT_PROPS(1) == 'Y') THEN
IF (IERR(1) == 0) THEN
WRITE(F06,9922) MID1_PCOMP_EQ, C8FLD_GIJ(1,1,1), C8FLD_GIJ(1,2,1), C8FLD_GIJ(1,3,1), &
C8FLD_GIJ(2,2,1), C8FLD_GIJ(2,3,1), C8FLD_GIJ(3,3,1), C8FLD_RHO(1),ICONT+1, &
ICONT+1, C8FLD_ALP(1,1), C8FLD_ALP(2,1), C8FLD_ALP(3,1), C8FLD_TREF(1)
ELSE
WRITE(F06,9923) MID1_PCOMP_EQ, C8FLD_GIJ(1,1,1), C8FLD_GIJ(1,2,1), C8FLD_GIJ(1,3,1), &
C8FLD_GIJ(2,2,1), C8FLD_GIJ(2,3,1), C8FLD_GIJ(3,3,1), C8FLD_RHO(1),ICONT+1, &
ICONT+1, C8FLD_TREF(1)
ENDIF
ENDIF
! Write MAT2 for bending
IF (FINITE_MAT_PROPS(2) == 'Y') THEN
IF (IERR(2) == 0) THEN
WRITE(F06,9922) MID2_PCOMP_EQ, C8FLD_GIJ(1,1,2), C8FLD_GIJ(1,2,2), C8FLD_GIJ(1,3,2), &
C8FLD_GIJ(2,2,2), C8FLD_GIJ(2,3,2), C8FLD_GIJ(3,3,2), C8FLD_RHO(2),ICONT+2, &
ICONT+2, C8FLD_ALP(1,2), C8FLD_ALP(2,2), C8FLD_ALP(3,2), C8FLD_TREF(1)
ELSE
WRITE(F06,9923) MID2_PCOMP_EQ, C8FLD_GIJ(1,1,2), C8FLD_GIJ(1,2,2), C8FLD_GIJ(1,3,2), &
C8FLD_GIJ(2,2,2), C8FLD_GIJ(2,3,2), C8FLD_GIJ(3,3,2), C8FLD_RHO(2),ICONT+2, &
ICONT+2, C8FLD_TREF(1)
ENDIF
ENDIF
! Write MAT2 for transverse shear
IF (FINITE_MAT_PROPS(3) == 'Y') THEN
IF (IERR(3) == 0) THEN
WRITE(F06,9932) MID3_PCOMP_EQ, C8FLD_GIJ(1,1,3), C8FLD_GIJ(1,2,3), '0.0000+0' , &
C8FLD_GIJ(2,2,3), '0.0000+0' , '0.0000+0', C8FLD_RHO(3), ICONT+3, &
ICONT+3, C8FLD_ALP(5,3), C8FLD_ALP(6,3), C8FLD_TREF(1)
ELSE
WRITE(F06,9933) MID3_PCOMP_EQ, C8FLD_GIJ(1,1,3), C8FLD_GIJ(1,2,3), '0.0000+0' , &
C8FLD_GIJ(2,2,3), '0.0000+0' , '0.0000+0', C8FLD_RHO(3), ICONT+3, &
ICONT+3, C8FLD_TREF(1)
ENDIF
ENDIF
! Write MAT2 for membrane/bending coupling
IF (FINITE_MAT_PROPS(4) == 'Y') THEN
IF (IERR(4) == 0) THEN
WRITE(F06,9922) MID4_PCOMP_EQ, C8FLD_GIJ(1,1,4), C8FLD_GIJ(1,2,4), C8FLD_GIJ(1,3,4), &
C8FLD_GIJ(2,2,4), C8FLD_GIJ(2,3,4), C8FLD_GIJ(3,3,4), C8FLD_RHO(4),ICONT+4, &
ICONT+4, C8FLD_ALP(1,4), C8FLD_ALP(2,4), C8FLD_ALP(3,4), C8FLD_TREF(1)
ELSE
WRITE(F06,9923) MID4_PCOMP_EQ, C8FLD_GIJ(1,1,4), C8FLD_GIJ(1,2,4), C8FLD_GIJ(1,3,4), &
C8FLD_GIJ(2,2,4), C8FLD_GIJ(2,3,4), C8FLD_GIJ(3,3,4), C8FLD_RHO(4),ICONT+4, &
ICONT+4, C8FLD_TREF(1)
ENDIF
ENDIF
! Write message if we have changed SHELL_T to reflect approx zero transverse shear flex
IF (SHELL_T_MOD == 'Y') THEN
WRITE(ERR,9995) MID3_PCOMP_EQ
IF (SUPINFO == 'N') THEN
WRITE(F06,9995) MID3_PCOMP_EQ
ENDIF
ENDIF
WRITE(F06,9900)
PCOMP(INTL_PID,6) = 1 ! Lets future calls to this subr know that PSHELL, MAT2 were written
! **********************************************************************************************************************************
9801 FORMAT(' *INFORMATION: Cannot calculate equiv CTE''s for ',A,' for PCOMP ',I8,' since the det of matrix ',A,' is zero',/)
9802 FORMAT(' *INFORMATION: Cannot calculate equiv CTE''s for ',A,' for PCOMP ',I8,' since matrix ',A,' cannot be inverted',/)
9900 FORMAT('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++', &
'+++++++++++++++++++++++++++++++++++++++++++++++++')
9901 FORMAT(' *INFORMATION: Equivalent PSHELL and MAT2 entries for PCOMP ',I8,' with:' &
,/,14X,' membrane thickness TM = ',1ES13.6,', 12*IB/(TM^3) = ',1ES13.6,', TS/TM = ',1ES13.6,':',/)
9902 FORMAT(' Type MATL ID G11 G12 G13 G22 G23 G33', &
' A1 A2 A3',/ &
' ------------------ -------- ------------- ------------- ------------- ------------- -------------', &
' ------------- ------------- ------------- -------------')
9903 FORMAT(' membrane ',I8,9(1ES15.6))
9904 FORMAT(' bending ',I8,9(1ES15.6))
9905 FORMAT(' transverse shear ',I8,2(1ES15.6),15X,1ES15.6,30X,2(1ES15.6))
9906 FORMAT(' mem/bend coupling ',I8,9(1ES15.6))
9912 FORMAT('PSHELL ,',I9,',',I9,',',A9,',',I9,', 1.0,',I9,',',' 1.0, , +',I7,/,' +',I7,2(',',A9),I9,/)
9913 FORMAT('PSHELL ,',I9,',',I9,',',A9,',',I9,', 1.0,',I9,',',' 1.0, , +',I7,/,' +',I7,2(',',A9),/)
9922 FORMAT('MAT2 ,',I9,7(',',A9),', +',I7,/,' +',I7,3(',',A9),',',A9,/)
9923 FORMAT('MAT2 ,',I9,7(',',A9),', +',I7,/,' +',I7,3(',',8X),',',A9,/)
9932 FORMAT('MAT2 ,',I9,7(',',A9),', +',I7,/,' +',I7,2(',',A9),',',9X,',',A9,/)
9933 FORMAT('MAT2 ,',I9,7(',',A9),', +',I7,/,' +',I7,2(',',8X),',',9X,',',A9,/)
9995 FORMAT(' *INFORMATION: The transverse shear modulii on the above MAT2 ',I8,' entry are the MYSTRAN calculated values to', &
' replace the zero input values by the user that',/,14X,' were meant to simulate zero transverse shear',&
' flexibility')
! ##################################################################################################################################
CONTAINS
! ##################################################################################################################################
SUBROUTINE GET_CHAR8_OUTPUTS ( C8FLD_GIJ, C8FLD_ALP, C8FLD_RHO, C8FLD_TREF, C8FLD_TM, C8FLD_ZS )
IMPLICIT NONE
CHARACTER(8*BYTE), INTENT(OUT) :: C8FLD_GIJ(3,3,4) ! Char representation of MAT2 Gij entries
CHARACTER(8*BYTE), INTENT(OUT) :: C8FLD_ALP(6,MEMATC) ! Char representation of MAT2 CTE entries
CHARACTER(8*BYTE), INTENT(OUT) :: C8FLD_RHO(4) ! Char representation of MAT2 RHO entry
CHARACTER(8*BYTE), INTENT(OUT) :: C8FLD_TREF(4) ! Char representation of MAT2 TREF entry
CHARACTER(8*BYTE), INTENT(OUT) :: C8FLD_TM ! Char representation of MAT2 TM entry
CHARACTER(8*BYTE), INTENT(OUT) :: C8FLD_ZS(2) ! Char representation of MAT2 ZS entry
INTEGER(LONG) :: II,JJ,KK ! DO loop indices
! **********************************************************************************************************************************
DO II=1,3
DO JJ=1,3
DO KK=1,4
C8FLD_GIJ(II,JJ,KK)(1:8) = ' '
ENDDO
ENDDO
ENDDO
DO II=1,6
DO JJ=1,MEMATC
C8FLD_ALP(II,JJ)(1:8) = ' '
ENDDO
ENDDO
! Put GIJ values in character format
CALL REAL_DATA_TO_C8FLD ( PCOMP_MEM_MAT2(1,1) ,C8FLD_GIJ(1,1,1) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MEM_MAT2(1,2) ,C8FLD_GIJ(1,2,1) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MEM_MAT2(1,3) ,C8FLD_GIJ(1,3,1) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MEM_MAT2(2,2) ,C8FLD_GIJ(2,2,1) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MEM_MAT2(2,3) ,C8FLD_GIJ(2,3,1) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MEM_MAT2(3,3) ,C8FLD_GIJ(3,3,1) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_BEN_MAT2(1,1) ,C8FLD_GIJ(1,1,2) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_BEN_MAT2(1,2) ,C8FLD_GIJ(1,2,2) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_BEN_MAT2(1,3) ,C8FLD_GIJ(1,3,2) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_BEN_MAT2(2,2) ,C8FLD_GIJ(2,2,2) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_BEN_MAT2(2,3) ,C8FLD_GIJ(2,3,2) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_BEN_MAT2(3,3) ,C8FLD_GIJ(3,3,2) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_TSH_MAT2(1,1) ,C8FLD_GIJ(1,1,3) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_TSH_MAT2(1,2) ,C8FLD_GIJ(1,2,3) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_TSH_MAT2(2,2) ,C8FLD_GIJ(2,2,3) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MBC_MAT2(1,1) ,C8FLD_GIJ(1,1,4) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MBC_MAT2(1,2) ,C8FLD_GIJ(1,2,4) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MBC_MAT2(1,3) ,C8FLD_GIJ(1,3,4) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MBC_MAT2(2,2) ,C8FLD_GIJ(2,2,4) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MBC_MAT2(2,3) ,C8FLD_GIJ(2,3,4) )
CALL REAL_DATA_TO_C8FLD ( PCOMP_MBC_MAT2(3,3) ,C8FLD_GIJ(3,3,4) )
! Put CTE's in character format
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(1,1), C8FLD_ALP(1,1) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(2,1), C8FLD_ALP(2,1) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(3,1), C8FLD_ALP(3,1) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(1,2), C8FLD_ALP(1,2) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(2,2), C8FLD_ALP(2,2) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(3,2), C8FLD_ALP(3,2) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(5,3), C8FLD_ALP(5,3) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(6,3), C8FLD_ALP(6,3) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(1,4), C8FLD_ALP(1,4) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(2,4), C8FLD_ALP(2,4) )
CALL REAL_DATA_TO_C8FLD ( SHELL_ALP(3,4), C8FLD_ALP(3,4) )
! Put RHO's in character format
CALL REAL_DATA_TO_C8FLD ( RHO(1), C8FLD_RHO(1) )
CALL REAL_DATA_TO_C8FLD ( RHO(2), C8FLD_RHO(2) )
CALL REAL_DATA_TO_C8FLD ( RHO(3), C8FLD_RHO(3) )
CALL REAL_DATA_TO_C8FLD ( RHO(4), C8FLD_RHO(4) )
! Put TREF's in character format
CALL REAL_DATA_TO_C8FLD ( TREF(1), C8FLD_TREF(1) )
CALL REAL_DATA_TO_C8FLD ( TREF(2), C8FLD_TREF(2) )
CALL REAL_DATA_TO_C8FLD ( TREF(3), C8FLD_TREF(3) )
CALL REAL_DATA_TO_C8FLD ( TREF(4), C8FLD_TREF(4) )
! Put TM in character format
CALL REAL_DATA_TO_C8FLD ( PCOMP_TM, C8FLD_TM )
! Put ZS's in character format
CALL REAL_DATA_TO_C8FLD ( ZS(1), C8FLD_ZS(1) )
CALL REAL_DATA_TO_C8FLD ( ZS(2), C8FLD_ZS(2) )
! **********************************************************************************************************************************
END SUBROUTINE GET_CHAR8_OUTPUTS
END SUBROUTINE WRITE_PCOMP_EQUIV
|
-- 2013-11-xx Andreas
-- Previous clauses did not reduce in later clauses under
-- a module telescope.
{-# OPTIONS --copatterns #-}
-- {-# OPTIONS -v interaction.give:20 -v tc.cc:60 -v reify.clause:60 -v tc.section.check:10 -v tc:90 #-}
-- {-# OPTIONS -v tc.lhs:20 #-}
-- {-# OPTIONS -v tc.cover:20 #-}
module Issue937 where
data Nat : Set where
zero : Nat
suc : Nat → Nat
{-# BUILTIN NATURAL Nat #-}
infixr 4 _,_
record Σ (A : Set) (B : A → Set) : Set where
constructor _,_
field
proj₁ : A
proj₂ : B proj₁
open Σ public
data _≤_ : Nat → Nat → Set where
z≤n : ∀ {n} → zero ≤ n
s≤s : ∀ {m n} (m≤n : m ≤ n) → suc m ≤ suc n
_<_ : Nat → Nat → Set
m < n = suc m ≤ n
ex : Σ Nat (λ n → zero < n)
proj₁ ex = suc zero
proj₂ ex = s≤s z≤n -- works
ex' : Σ Nat (λ n → zero < n)
proj₁ ex' = suc zero
proj₂ ex' = {! s≤s z≤n !}
module _ (A : Set) where
ex'' : Σ Nat (λ n → zero < n)
proj₁ ex'' = suc zero
proj₂ ex'' = s≤s z≤n -- works
ex''' : Σ Nat (λ n → zero < n)
proj₁ ex''' = suc zero
proj₂ ex''' = {! s≤s z≤n !}
-- The normalized goals should be printed as 1 ≤ 1
|
const DOC_ROOT_PATH = "public"
const CONFIG_PATH = "config"
const ENV_PATH = joinpath(CONFIG_PATH, "env")
const APP_PATH = "app"
const RESOURCES_PATH = joinpath(APP_PATH, "resources")
const TEST_PATH = "test"
const TEST_PATH_UNIT = joinpath(TEST_PATH, "unit")
const LIB_PATH = "lib"
const HELPERS_PATH = joinpath(APP_PATH, "helpers")
const LOG_PATH = "log"
const LAYOUTS_PATH = joinpath(APP_PATH, "layouts")
const TASKS_PATH = "task"
const BUILD_PATH = "build"
const PLUGINS_PATH = "plugins"
const SESSIONS_PATH = "sessions"
const CACHE_PATH = "cache"
const GENIE_CONTROLLER_FILE_POSTFIX = "Controller.jl"
const ROUTES_FILE_NAME = "routes.jl"
const PARAMS_REQUEST_KEY = :REQUEST
const PARAMS_RESPONSE_KEY = :RESPONSE
const PARAMS_SESSION_KEY = :SESSION
const PARAMS_FLASH_KEY = :FLASH
const PARAMS_POST_KEY = :POST
const PARAMS_GET_KEY = :GET
const PARAMS_WS_CLIENT = :WS_CLIENT
const PARAMS_JSON_PAYLOAD = :JSON_PAYLOAD
const PARAMS_RAW_PAYLOAD = :RAW_PAYLOAD
const PARAMS_FILES = :FILES
const TEST_FILE_IDENTIFIER = "_test.jl"
const VIEWS_FOLDER = "views"
const LAYOUTS_FOLDER = "layouts"
|
(* Title: FOLP/ex/Propositional_Int.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1991 University of Cambridge
*)
section {* First-Order Logic: propositional examples *}
theory Propositional_Int
imports IFOLP
begin
text "commutative laws of & and | "
schematic_lemma "?p : P & Q --> Q & P"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : P | Q --> Q | P"
by (tactic {* IntPr.fast_tac @{context} 1 *})
text "associative laws of & and | "
schematic_lemma "?p : (P & Q) & R --> P & (Q & R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : (P | Q) | R --> P | (Q | R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
text "distributive laws of & and | "
schematic_lemma "?p : (P & Q) | R --> (P | R) & (Q | R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : (P | R) & (Q | R) --> (P & Q) | R"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : (P | Q) & R --> (P & R) | (Q & R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : (P & R) | (Q & R) --> (P | Q) & R"
by (tactic {* IntPr.fast_tac @{context} 1 *})
text "Laws involving implication"
schematic_lemma "?p : (P-->R) & (Q-->R) <-> (P|Q --> R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : (P & Q --> R) <-> (P--> (Q-->R))"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : ((P-->R)-->R) --> ((Q-->R)-->R) --> (P&Q-->R) --> R"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : ~(P-->R) --> ~(Q-->R) --> ~(P&Q-->R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : (P --> Q & R) <-> (P-->Q) & (P-->R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
text "Propositions-as-types"
(*The combinator K*)
schematic_lemma "?p : P --> (Q --> P)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
(*The combinator S*)
schematic_lemma "?p : (P-->Q-->R) --> (P-->Q) --> (P-->R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
(*Converse is classical*)
schematic_lemma "?p : (P-->Q) | (P-->R) --> (P --> Q | R)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma "?p : (P-->Q) --> (~Q --> ~P)"
by (tactic {* IntPr.fast_tac @{context} 1 *})
text "Schwichtenberg's examples (via T. Nipkow)"
schematic_lemma stab_imp: "?p : (((Q-->R)-->R)-->Q) --> (((P-->Q)-->R)-->R)-->P-->Q"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma stab_to_peirce: "?p : (((P --> R) --> R) --> P) --> (((Q --> R) --> R) --> Q)
--> ((P --> Q) --> P) --> P"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma peirce_imp1: "?p : (((Q --> R) --> Q) --> Q)
--> (((P --> Q) --> R) --> P --> Q) --> P --> Q"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma peirce_imp2: "?p : (((P --> R) --> P) --> P) --> ((P --> Q --> R) --> P) --> P"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma mints: "?p : ((((P --> Q) --> P) --> P) --> Q) --> Q"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma mints_solovev: "?p : (P --> (Q --> R) --> Q) --> ((P --> Q) --> R) --> R"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma tatsuta: "?p : (((P7 --> P1) --> P10) --> P4 --> P5)
--> (((P8 --> P2) --> P9) --> P3 --> P10)
--> (P1 --> P8) --> P6 --> P7
--> (((P3 --> P2) --> P9) --> P4)
--> (P1 --> P3) --> (((P6 --> P1) --> P2) --> P9) --> P5"
by (tactic {* IntPr.fast_tac @{context} 1 *})
schematic_lemma tatsuta1: "?p : (((P8 --> P2) --> P9) --> P3 --> P10)
--> (((P3 --> P2) --> P9) --> P4)
--> (((P6 --> P1) --> P2) --> P9)
--> (((P7 --> P1) --> P10) --> P4 --> P5)
--> (P1 --> P3) --> (P1 --> P8) --> P6 --> P7 --> P5"
by (tactic {* IntPr.fast_tac @{context} 1 *})
end
|
Formal statement is: lemma dvd_imp_degree_le: "p dvd q \<Longrightarrow> q \<noteq> 0 \<Longrightarrow> degree p \<le> degree q" for p q :: "'a::{comm_semiring_1,semiring_no_zero_divisors} poly" Informal statement is: If $p$ divides $q$ and $q \neq 0$, then the degree of $p$ is less than or equal to the degree of $q$. |
For the keen-eared amongst you, there is a continuity error made by the hosts within this episode, and it isn't to do with the Blake's 7 subject matter either, so you can all join in! Simply submit your guesses to [email protected] and you'll win a prize if you get it right!
And you call me underhanded.
Amongst other things, yes! But until you include both email addresses in your GP main page ads for Shake & Blake, I think it's fair game! |
///////////////////////////////////////////////////////////////////////////////
// http_config.cpp
//
// unicomm - Unified Communication protocol C++ library.
//
// Http server & client example based on unicomm engine.
// This is just an example shows how to use unicomm to define
// different type of protocols based on tcp/ip.
// There are only few messages defined not all of HTTP.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// 2011, (c) Dmitry Timoshenko.
#include "http_config.hpp"
#include "http_response.hpp"
#include "http_request.hpp"
#include "http_message_encoder.hpp"
#include "http_message_decoder.hpp"
#include <unicomm/config.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/assign.hpp>
#include <boost/bind.hpp>
#include <iostream>
using std::cout;
using std::endl;
using std::string;
using std::flush;
namespace
{
/** Common config that holds same data. */
unicomm::config& common_config(void)
{
using namespace unicomm;
static unicomm::config conf
(
unicomm::config()
.tcp_port(uni_http::default_port())
.timeouts_enabled(true)
.message_encoder(uni_http::message_encoder::create())
.message_decoder(uni_http::message_decoder::create())
);
return conf;
}
} // unnamed namespace
/** Returns a reference to the echo configuration object. */
unicomm::config& uni_http::server_config(void)
{
using namespace unicomm;
static unicomm::config conf
(
common_config()
.dispatcher_idle_tout(10)
.message_factory
(
message_base::factory_type(&unicomm::create<uni_http::request>)
)
#ifdef UNICOMM_SSL
.ssl_server_key_password("test")
.ssl_server_cert_chain_fn("../../../../http/ssl/server.pem")
.ssl_server_key_fn("../../../../http/ssl/server.pem")
.ssl_server_dh_fn("../../../../http/ssl/dh512.pem")
#endif // UNICOMM_SSL
);
return conf;
}
//////////////////////////////////////////////////////////////////////////
// client definition
/** Returns a reference to the unicomm client configuration object. */
unicomm::config& uni_http::client_config(void)
{
using namespace unicomm;
static unicomm::config conf
(
common_config()
.message_info(message_info(uni_http::request::get(), true, 10000))
.message_factory
(
message_base::factory_type(&unicomm::create<uni_http::response>)
)
#ifdef UNICOMM_SSL
.ssl_client_verity_fn("../../../../http/ssl/ca.pem")
#endif // UNICOMM_SSL
);
return conf;
}
|
[STATEMENT]
lemma extNTA2J_iff [simp]:
"extNTA2J P (C, M, a) = ({this:Class (fst (method P C M))=\<lfloor>Addr a\<rfloor>; snd (the (snd (snd (snd (method P C M)))))}, Map.empty)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. extNTA2J P (C, M, a) = ({this:Class (fst (method P C M))=\<lfloor>Addr a\<rfloor>; snd (the (snd (snd (snd (method P C M)))))}, Map.empty)
[PROOF STEP]
by(simp add: extNTA2J_def split_beta) |
\chapter{My Chapter}
This is the first main chapter of my dissertation.
Fact 1~\cite{Schoute2021}.
|
module exampleTypeAbbreviations where
postulate A : Set
A2 : Set
A2 = A -> A
A3 : Set
A3 = A2 -> A2
a2 : A2
a2 = \x -> x
a3 : A3
a3 = \x -> x |
module Ch03.Arith
import public Ch03.Relations
%default total
%access public export
----------------------------
-- Term syntax
----------------------------
namespace Terms
data Term = True
| False
| IfThenElse Term Term Term
| Zero
| Succ Term
| Pred Term
| IsZero Term
namespace Values
mutual
Value : Type
Value = Either BoolValue NumValue
data BoolValue = True | False
data NumValue = Zero | Succ NumValue
||| Converts a boolean value to its corresponding term
bv2t : BoolValue -> Term
bv2t True = True
bv2t False = False
||| Converts a numeric value to its corresponding term
nv2t : NumValue -> Term
nv2t Zero = Zero
nv2t (Succ x) = Succ (nv2t x)
||| Converts a value to its corresponding term
v2t : Value -> Term
v2t (Left bv) = bv2t bv
v2t (Right nv) = nv2t nv
namespace IsValue
||| Propositional type describing that a term "is" indeed a value
data IsValue : Term -> Type where
ConvertedFrom : (v : Value) -> IsValue (v2t v)
namespace IsNumValue
||| Propositional type describing that a term "is" indeed a numeric value
data IsNumValue : Term -> Type where
ConvertedFrom : (nv : NumValue) -> IsNumValue (v2t (Right nv))
namespace IsBoolValue
||| Propositional type describing that a term "is" indeed a boolean value
data IsBoolValue : Term -> Type where
ConvertedFrom : (bv : BoolValue) -> IsBoolValue (v2t (Left bv))
----------------------------
-- Evaluation rules
----------------------------
||| Propositional type describing that the first term one-step-evaluates to the second
|||
||| Explicitly, an inhabitant of `EvalsTo t1 t2` is a proof that `t1` evaluates to `t2` in one step.
data EvalsTo : Term -> Term -> Type where
EIfTrue : EvalsTo (IfThenElse True t2 t3) t2
EIfFalse : EvalsTo (IfThenElse False t2 t3) t3
EIf : EvalsTo t1 t1' -> EvalsTo (IfThenElse t1 t2 t3) (IfThenElse t1' t2 t3)
ESucc : EvalsTo t1 t2 -> EvalsTo (Succ t1) (Succ t2)
EPredZero : EvalsTo (Pred Zero) Zero
EPredSucc : {pf : IsNumValue nv1} -> EvalsTo (Pred (Succ nv1)) nv1
EPred : EvalsTo t1 t2 -> EvalsTo (Pred t1) (Pred t2)
EIsZeroZero : EvalsTo (IsZero Zero) True
EIsZeroSucc : {pf : IsNumValue nv1} -> EvalsTo (IsZero (Succ nv1)) False
EIsZero : EvalsTo t1 t2 -> EvalsTo (IsZero t1) (IsZero t2)
||| Propositional type describing that the first term evaluates to the second in a finite number of steps
|||
||| Explicitly, an inhabitant of `EvalToStar t1 t2` is a proof that there is a finite sequence
|||
||| t1 = s_0, s_1, ..., s_n = t2
|||
||| of terms (where `0 <= n`), such that `s_i` one-step-evaluates to `s_{i+1}`.
EvalsToStar : Term -> Term -> Type
EvalsToStar = ReflSymmClos EvalsTo
----------------------------
-- Big Step Evaluation rules
----------------------------
||| Propositional type describing that the first term big-step evaluates to the second
data BigEvalsTo : Term -> Term -> Type where
BValue : {pf : IsValue v} -> BigEvalsTo v v
BIfTrue : {pf : IsValue v2} ->
BigEvalsTo t1 True ->
BigEvalsTo t2 v2 ->
BigEvalsTo (IfThenElse t1 t2 t3) v2
BIfFalse : {pf : IsValue v3} ->
BigEvalsTo t1 False ->
BigEvalsTo t3 v3 ->
BigEvalsTo (IfThenElse t1 t2 t3) v3
BSucc : {pf : IsNumValue nv1} ->
BigEvalsTo t1 nv1 ->
BigEvalsTo (Succ t1) (Succ nv1)
BPredZero : BigEvalsTo t1 Zero ->
BigEvalsTo (Pred t1) Zero
BPredSucc : {pf : IsNumValue nv1} ->
BigEvalsTo t1 (Succ nv1) ->
BigEvalsTo (Pred t1) nv1
BIsZeroZero : BigEvalsTo t1 Zero ->
BigEvalsTo (IsZero t1) True
BIsZeroSucc : {pf : IsNumValue nv1} ->
BigEvalsTo t1 (Succ nv1) ->
BigEvalsTo (IsZero t1) False
--------------------------------------------------------------------------------
-- Some properties of values and evaluation
--------------------------------------------------------------------------------
||| A numeric value is also a value
numValueIsValue : IsNumValue t -> IsValue t
numValueIsValue {t = (v2t (Right nv))} (ConvertedFrom nv) = IsValue.ConvertedFrom (Right nv)
||| The successor of a numeric value is a numeric value
succNumValueIsNumValue : IsNumValue t -> IsNumValue (Succ t)
succNumValueIsNumValue {t = (v2t (Right nv))} (ConvertedFrom nv) = ConvertedFrom (Succ nv)
||| A boolean value is also a value
boolValueIsValue : IsBoolValue t -> IsValue t
boolValueIsValue {t = (v2t (Left bv))} (ConvertedFrom bv) = IsValue.ConvertedFrom (Left bv)
numValueEither : IsNumValue t -> Either (t = Zero) (t' : Term ** (IsNumValue t', t = Succ t'))
numValueEither (ConvertedFrom nv) = case nv of
Zero => Left Refl
(Succ nv') => Right (nv2t nv' ** (ConvertedFrom nv', Refl))
boolValueEither : IsBoolValue t -> Either (t = True) (t = False)
boolValueEither (ConvertedFrom True) = Left Refl
boolValueEither (ConvertedFrom False) = Right Refl
zeroNotBool : IsBoolValue Zero -> Void
zeroNotBool (ConvertedFrom True) impossible
zeroNotBool (ConvertedFrom False) impossible
succNotTrue : {t : Term} -> (Succ t = True) -> Void
succNotTrue Refl impossible
succNotFalse : {t : Term} -> (Succ t = False) -> Void
succNotFalse Refl impossible
succNotBool : IsBoolValue (Succ t) -> Void
succNotBool {t} x = case boolValueEither x of
(Left l) => succNotTrue l
(Right r) => succNotFalse r
||| A value can't be both numeric and boolean at the same time.
numNotBool : IsNumValue t -> IsBoolValue t -> Void
numNotBool x y = case numValueEither x of
(Left Refl) => zeroNotBool y
(Right (_ ** (_, Refl))) => succNotBool y
||| Proof that values don't evaluate to anything in the `E`-calculus.
valuesDontEvaluate : {pf : IsValue v} -> EvalsTo v t -> Void
valuesDontEvaluate {pf = (ConvertedFrom (Left bv))} {v = (bv2t bv)} x = case bv of
True => (case x of
EIfTrue impossible
EIfFalse impossible
(EIf _) impossible
(ESucc _) impossible
EPredZero impossible
EPredSucc impossible
(EPred _) impossible
EIsZeroZero impossible
EIsZeroSucc impossible
(EIsZero _) impossible)
False => (case x of
EIfTrue impossible
EIfFalse impossible
(EIf _) impossible
(ESucc _) impossible
EPredZero impossible
EPredSucc impossible
(EPred _) impossible
EIsZeroZero impossible
EIsZeroSucc impossible
(EIsZero _) impossible)
valuesDontEvaluate {pf = (ConvertedFrom (Right nv))} {v = (nv2t nv)} x = case nv of
Zero => (case x of
EIfTrue impossible
EIfFalse impossible
(EIf _) impossible
(ESucc _) impossible
EPredZero impossible
EPredSucc impossible
(EPred _) impossible
EIsZeroZero impossible
EIsZeroSucc impossible
(EIsZero _) impossible)
(Succ nv) => (case x of
(ESucc y) => valuesDontEvaluate {pf=ConvertedFrom (Right nv)} y)
||| Proof that the only derivation of a value term in the reflexive transitive of the `E`-evaluation rules
||| is the trivial derivation.
valuesAreNormal : {pf : IsValue v} -> (r : EvalsToStar v t) -> (r = (Refl {rel=EvalsTo} {x=v}))
valuesAreNormal (Refl {x}) = Refl
valuesAreNormal {pf} (Cons x y) with (valuesDontEvaluate {pf=pf} x)
valuesAreNormal {pf} (Cons x y) | with_pat impossible
||| Proof that a value is either
|||
||| 1. `True`
||| 2. `False`
||| 3. `Zero`
||| 4. `Succ nv`, with `nv` a numeric value
valueIsEither : (v : Term) -> {pf : IsValue v} -> Either (v = True) (Either (v = False) (Either (v = Zero) (nv : Term ** ((v = Succ nv), IsNumValue nv))))
valueIsEither (bv2t x) {pf = (ConvertedFrom (Left x))} = case x of
True => Left Refl
False => Right (Left Refl)
valueIsEither (nv2t x) {pf = (ConvertedFrom (Right x))} = case x of
Zero => Right (Right (Left Refl))
(Succ y) => Right (Right (Right (nv2t y ** (Refl, ConvertedFrom y))))
||| Proof that a term of the form `Succ t` is only a value if `t` is a numeric value.
succIsValueIf : IsValue (Succ t) -> IsNumValue t
succIsValueIf (ConvertedFrom (Left Values.True)) impossible
succIsValueIf (ConvertedFrom (Left Values.False)) impossible
succIsValueIf (ConvertedFrom (Right Values.Zero)) impossible
succIsValueIf (ConvertedFrom (Right (Succ nv))) = ConvertedFrom nv
||| Proof that a term of the form `Pred t` is never a value.
predNotValue : IsValue (Pred t) -> Void
predNotValue (ConvertedFrom (Left Values.True)) impossible
predNotValue (ConvertedFrom (Left Values.False)) impossible
predNotValue (ConvertedFrom (Right Values.Zero)) impossible
predNotValue (ConvertedFrom (Right (Values.Succ nv))) impossible
||| Proof that a term of the form `IsZero t` is never a value.
isZeroNotValue : IsValue (IsZero t) -> Void
isZeroNotValue (ConvertedFrom (Left Values.True)) impossible
isZeroNotValue (ConvertedFrom (Left Values.False)) impossible
isZeroNotValue (ConvertedFrom (Right Values.Zero)) impossible
isZeroNotValue (ConvertedFrom (Right (Values.Succ nv))) impossible
||| Proof that a value only evaluates to itself under the reflexive transitive closure of
||| the `E`-evaluation rules.
valuesAreNormal' : {pf : IsValue v} ->
EvalsToStar v t ->
(t = v)
valuesAreNormal' {pf} x with (valuesAreNormal {pf=pf} x)
valuesAreNormal' {pf} x | with_pat = case with_pat of
Refl => Refl
||| Proof that a term of the form `IfThenElse x y z` is never a value.
ifThenElseNotNormal : (pf : IsValue (IfThenElse x y z)) -> Void
ifThenElseNotNormal {x} {y} {z} pf with (valueIsEither (IfThenElse x y z) {pf=pf})
ifThenElseNotNormal {x} {y} {z} pf | (Left l) = case l of
Refl impossible
ifThenElseNotNormal {x} {y} {z} pf | (Right (Left l)) = case l of
Refl impossible
ifThenElseNotNormal {x} {y} {z} pf | (Right (Right (Left l))) = case l of
Refl impossible
ifThenElseNotNormal {x} {y} {z} pf | (Right (Right (Right (nv ** (pf1, pf2))))) = case pf1 of
Refl impossible
----------------------------
-- Miscellanea
----------------------------
t1 : Term
t1 = IfThenElse False Zero (Succ Zero)
t2 : Term
t2 = IsZero (Pred (Succ Zero))
toString : Term -> String
toString True = "true"
toString False = "false"
toString (IfThenElse x y z) = "if " ++ toString x ++
" then " ++ toString y ++
" else " ++ toString z
toString Zero = "0"
toString (Succ x) = "succ (" ++ toString x ++ ")"
toString (Pred x) = "pred (" ++ toString x ++ ")"
toString (IsZero x) = "iszero (" ++ toString x ++ ")"
eval : Term -> Value
eval True = Left True
eval False = Left True
eval (IfThenElse x y z) = case eval x of
(Left r) => case r of
True => eval y
False => eval z
(Right l) => ?eval_rhs_1
eval Zero = Right Zero
eval (Succ x) = case eval x of
Left l => ?eval_rhs_4
Right r => Right (Succ r)
eval (Pred x) = case eval x of
Left l => ?eval_rhs_5
Right r => case r of
Zero => Right Zero
Succ x => Right x
eval (IsZero x) = case x of
Zero => Left True
Succ y => Left False
_ => ?eval_rhs2
||| The size of a term is the number of constructors it contains.
size : Term -> Nat
size True = 1
size False = 1
size (IfThenElse x y z) = (size x) + (size y) + (size z) + 1
size Zero = 1
size (Succ x) = S (size x)
size (Pred x) = S (size x)
size (IsZero x) = S (size x)
||| The depth of a term is depth of its derivation tree.
depth : Term -> Nat
depth True = 1
depth False = 1
depth (IfThenElse x y z) = (max (depth x) (max (depth y) (depth z))) + 1
depth Zero = 1
depth (Succ x) = S (depth x)
depth (Pred x) = S (depth x)
depth (IsZero x) = S (depth x)
|
module Issue5563 where
F : (@0 A : Set) → A → A
F A x =
let
y : A
y = x
in y
|
#include <gsl/gsl_math.h>
#include <gsl/gsl_cblas.h>
#include "cblas.h"
void
cblas_ssyr2k (const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
const float alpha, const float *A, const int lda,
const float *B, const int ldb, const float beta, float *C,
const int ldc)
{
#define BASE float
#include "source_syr2k_r.h"
#undef BASE
}
|
/-
Copyright (c) 2020 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Mario Carneiro
-/
import tactic.by_contra
import data.set.image
/-!
# Well-founded relations
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
A relation is well-founded if it can be used for induction: for each `x`, `(∀ y, r y x → P y) → P x`
implies `P x`. Well-founded relations can be used for induction and recursion, including
construction of fixed points in the space of dependent functions `Π x : α , β x`.
The predicate `well_founded` is defined in the core library. In this file we prove some extra lemmas
and provide a few new definitions: `well_founded.min`, `well_founded.sup`, and `well_founded.succ`,
and an induction principle `well_founded.induction_bot`.
-/
variables {α : Type*}
namespace well_founded
protected theorem is_asymm {α : Sort*} {r : α → α → Prop} (h : well_founded r) : is_asymm α r :=
⟨h.asymmetric⟩
instance {α : Sort*} [has_well_founded α] : is_asymm α has_well_founded.r :=
has_well_founded.wf.is_asymm
protected theorem is_irrefl {α : Sort*} {r : α → α → Prop} (h : well_founded r) : is_irrefl α r :=
(@is_asymm.is_irrefl α r h.is_asymm)
instance {α : Sort*} [has_well_founded α] : is_irrefl α has_well_founded.r :=
is_asymm.is_irrefl
/-- If `r` is a well-founded relation, then any nonempty set has a minimal element
with respect to `r`. -/
theorem has_min {α} {r : α → α → Prop} (H : well_founded r)
(s : set α) : s.nonempty → ∃ a ∈ s, ∀ x ∈ s, ¬ r x a
| ⟨a, ha⟩ := (acc.rec_on (H.apply a) $ λ x _ IH, not_imp_not.1 $ λ hne hx, hne $
⟨x, hx, λ y hy hyx, hne $ IH y hyx hy⟩) ha
/-- A minimal element of a nonempty set in a well-founded order.
If you're working with a nonempty linear order, consider defining a
`conditionally_complete_linear_order_bot` instance via
`well_founded.conditionally_complete_linear_order_with_bot` and using `Inf` instead. -/
noncomputable def min {r : α → α → Prop} (H : well_founded r)
(s : set α) (h : s.nonempty) : α :=
classical.some (H.has_min s h)
theorem min_mem {r : α → α → Prop} (H : well_founded r)
(s : set α) (h : s.nonempty) : H.min s h ∈ s :=
let ⟨h, _⟩ := classical.some_spec (H.has_min s h) in h
theorem not_lt_min {r : α → α → Prop} (H : well_founded r)
(s : set α) (h : s.nonempty) {x} (hx : x ∈ s) : ¬ r x (H.min s h) :=
let ⟨_, h'⟩ := classical.some_spec (H.has_min s h) in h' _ hx
theorem well_founded_iff_has_min {r : α → α → Prop} : (well_founded r) ↔
∀ (s : set α), s.nonempty → ∃ m ∈ s, ∀ x ∈ s, ¬ r x m :=
begin
refine ⟨λ h, h.has_min, λ h, ⟨λ x, _⟩⟩,
by_contra hx,
obtain ⟨m, hm, hm'⟩ := h _ ⟨x, hx⟩,
refine hm ⟨_, λ y hy, _⟩,
by_contra hy',
exact hm' y hy' hy
end
open set
/-- The supremum of a bounded, well-founded order -/
protected noncomputable def sup {r : α → α → Prop} (wf : well_founded r) (s : set α)
(h : bounded r s) : α :=
wf.min { x | ∀a ∈ s, r a x } h
protected lemma lt_sup {r : α → α → Prop} (wf : well_founded r) {s : set α} (h : bounded r s)
{x} (hx : x ∈ s) : r x (wf.sup s h) :=
min_mem wf { x | ∀a ∈ s, r a x } h x hx
section
open_locale classical
/-- A successor of an element `x` in a well-founded order is a minimal element `y` such that
`x < y` if one exists. Otherwise it is `x` itself. -/
protected noncomputable def succ {r : α → α → Prop} (wf : well_founded r) (x : α) : α :=
if h : ∃y, r x y then wf.min { y | r x y } h else x
protected lemma lt_succ {r : α → α → Prop} (wf : well_founded r) {x : α} (h : ∃y, r x y) :
r x (wf.succ x) :=
by { rw [well_founded.succ, dif_pos h], apply min_mem }
end
protected lemma lt_succ_iff {r : α → α → Prop} [wo : is_well_order α r] {x : α} (h : ∃y, r x y)
(y : α) : r y (wo.wf.succ x) ↔ r y x ∨ y = x :=
begin
split,
{ intro h', have : ¬r x y,
{ intro hy, rw [well_founded.succ, dif_pos] at h',
exact wo.wf.not_lt_min _ h hy h' },
rcases trichotomous_of r x y with hy | hy | hy,
exfalso, exact this hy,
right, exact hy.symm,
left, exact hy },
rintro (hy | rfl), exact trans hy (wo.wf.lt_succ h), exact wo.wf.lt_succ h
end
section linear_order
variables {β : Type*} [linear_order β] (h : well_founded ((<) : β → β → Prop))
{γ : Type*} [partial_order γ]
theorem min_le {x : β} {s : set β} (hx : x ∈ s) (hne : s.nonempty := ⟨x, hx⟩) :
h.min s hne ≤ x :=
not_lt.1 $ h.not_lt_min _ _ hx
private theorem eq_strict_mono_iff_eq_range_aux {f g : β → γ} (hf : strict_mono f)
(hg : strict_mono g) (hfg : set.range f = set.range g) {b : β} (H : ∀ a < b, f a = g a) :
f b ≤ g b :=
begin
obtain ⟨c, hc⟩ : g b ∈ set.range f := by { rw hfg, exact set.mem_range_self b },
cases lt_or_le c b with hcb hbc,
{ rw [H c hcb] at hc,
rw hg.injective hc at hcb,
exact hcb.false.elim },
{ rw ←hc,
exact hf.monotone hbc }
end
include h
theorem eq_strict_mono_iff_eq_range {f g : β → γ} (hf : strict_mono f)
(hg : strict_mono g) : set.range f = set.range g ↔ f = g :=
⟨λ hfg, begin
funext a,
apply h.induction a,
exact λ b H, le_antisymm
(eq_strict_mono_iff_eq_range_aux hf hg hfg H)
(eq_strict_mono_iff_eq_range_aux hg hf hfg.symm (λ a hab, (H a hab).symm))
end, congr_arg _⟩
theorem self_le_of_strict_mono {f : β → β} (hf : strict_mono f) : ∀ n, n ≤ f n :=
by { by_contra' h₁, have h₂ := h.min_mem _ h₁, exact h.not_lt_min _ h₁ (hf h₂) h₂ }
end linear_order
end well_founded
namespace function
variables {β : Type*} (f : α → β)
section has_lt
variables [has_lt β] (h : well_founded ((<) : β → β → Prop))
/-- Given a function `f : α → β` where `β` carries a well-founded `<`, this is an element of `α`
whose image under `f` is minimal in the sense of `function.not_lt_argmin`. -/
noncomputable def argmin [nonempty α] : α :=
well_founded.min (inv_image.wf f h) set.univ set.univ_nonempty
lemma not_lt_argmin [nonempty α] (a : α) : ¬ f a < f (argmin f h) :=
well_founded.not_lt_min (inv_image.wf f h) _ _ (set.mem_univ a)
/-- Given a function `f : α → β` where `β` carries a well-founded `<`, and a non-empty subset `s`
of `α`, this is an element of `s` whose image under `f` is minimal in the sense of
`function.not_lt_argmin_on`. -/
noncomputable def argmin_on (s : set α) (hs : s.nonempty) : α :=
well_founded.min (inv_image.wf f h) s hs
@[simp] lemma argmin_on_mem (s : set α) (hs : s.nonempty) :
argmin_on f h s hs ∈ s :=
well_founded.min_mem _ _ _
@[simp] lemma not_lt_argmin_on (s : set α) {a : α} (ha : a ∈ s)
(hs : s.nonempty := set.nonempty_of_mem ha) :
¬ f a < f (argmin_on f h s hs) :=
well_founded.not_lt_min (inv_image.wf f h) s hs ha
end has_lt
section linear_order
variables [linear_order β] (h : well_founded ((<) : β → β → Prop))
@[simp] lemma argmin_le (a : α) [nonempty α] : f (argmin f h) ≤ f a :=
not_lt.mp $ not_lt_argmin f h a
@[simp] lemma argmin_on_le (s : set α) {a : α} (ha : a ∈ s)
(hs : s.nonempty := set.nonempty_of_mem ha) : f (argmin_on f h s hs) ≤ f a :=
not_lt.mp $ not_lt_argmin_on f h s ha hs
end linear_order
end function
section induction
/-- Let `r` be a relation on `α`, let `f : α → β` be a function, let `C : β → Prop`, and
let `bot : α`. This induction principle shows that `C (f bot)` holds, given that
* some `a` that is accessible by `r` satisfies `C (f a)`, and
* for each `b` such that `f b ≠ f bot` and `C (f b)` holds, there is `c`
satisfying `r c b` and `C (f c)`. -/
lemma acc.induction_bot' {α β} {r : α → α → Prop} {a bot : α} (ha : acc r a) {C : β → Prop}
{f : α → β} (ih : ∀ b, f b ≠ f bot → C (f b) → ∃ c, r c b ∧ C (f c)) : C (f a) → C (f bot) :=
@acc.rec_on _ _ (λ x, C (f x) → C (f bot)) _ ha $ λ x ac ih' hC,
(eq_or_ne (f x) (f bot)).elim (λ h, h ▸ hC)
(λ h, let ⟨y, hy₁, hy₂⟩ := ih x h hC in ih' y hy₁ hy₂)
/-- Let `r` be a relation on `α`, let `C : α → Prop` and let `bot : α`.
This induction principle shows that `C bot` holds, given that
* some `a` that is accessible by `r` satisfies `C a`, and
* for each `b ≠ bot` such that `C b` holds, there is `c` satisfying `r c b` and `C c`. -/
lemma acc.induction_bot {α} {r : α → α → Prop} {a bot : α} (ha : acc r a)
{C : α → Prop} (ih : ∀ b, b ≠ bot → C b → ∃ c, r c b ∧ C c) : C a → C bot :=
ha.induction_bot' ih
/-- Let `r` be a well-founded relation on `α`, let `f : α → β` be a function,
let `C : β → Prop`, and let `bot : α`.
This induction principle shows that `C (f bot)` holds, given that
* some `a` satisfies `C (f a)`, and
* for each `b` such that `f b ≠ f bot` and `C (f b)` holds, there is `c`
satisfying `r c b` and `C (f c)`. -/
lemma well_founded.induction_bot' {α β} {r : α → α → Prop} (hwf : well_founded r) {a bot : α}
{C : β → Prop} {f : α → β} (ih : ∀ b, f b ≠ f bot → C (f b) → ∃ c, r c b ∧ C (f c)) :
C (f a) → C (f bot) :=
(hwf.apply a).induction_bot' ih
/-- Let `r` be a well-founded relation on `α`, let `C : α → Prop`, and let `bot : α`.
This induction principle shows that `C bot` holds, given that
* some `a` satisfies `C a`, and
* for each `b` that satisfies `C b`, there is `c` satisfying `r c b` and `C c`.
The naming is inspired by the fact that when `r` is transitive, it follows that `bot` is
the smallest element w.r.t. `r` that satisfies `C`. -/
lemma well_founded.induction_bot {α} {r : α → α → Prop} (hwf : well_founded r) {a bot : α}
{C : α → Prop} (ih : ∀ b, b ≠ bot → C b → ∃ c, r c b ∧ C c) : C a → C bot :=
hwf.induction_bot' ih
end induction
|
// (C) Copyright Raffi Enficiaud 2017.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/libs/test for the library home page.
//
//! @file
//! Customization point for printing user defined types
// *****************************************************************************
#define BOOST_TEST_MODULE user type logger customization points
#include <boost/test/unit_test.hpp>
namespace printing_test {
struct user_defined_type {
int value;
user_defined_type(int value_) : value(value_)
{}
bool operator==(int right) const {
return right == value;
}
};
std::ostream& boost_test_print_type(std::ostream& ostr, user_defined_type const& right) {
ostr << "** value of my type is " << right.value << " **";
return ostr;
}
}
//using namespace printing_test;
BOOST_AUTO_TEST_CASE(test1)
{
//using printing_test::user_defined_type;
printing_test::user_defined_type t(10);
BOOST_CHECK_EQUAL(t, 10);
#ifndef BOOST_TEST_MACRO_LIMITED_SUPPORT
BOOST_TEST(t == 10);
#endif
}
// on unary expressions as well
struct s {
operator bool() const { return true; }
};
std::ostream &boost_test_print_type(std::ostream &o, const s &) {
return o << "printed-s";
}
BOOST_AUTO_TEST_CASE( test_logs )
{
BOOST_TEST(s());
}
|
import Data.Vect
import Data.Nat
export
lemmaPlusOneRight : (n : Nat) -> n + 1 = S n
lemmaPlusOneRight n = rewrite plusCommutative n 1 in Refl
public export
consLin : (1 _ : Unit) -> (1 _ : Vect n Unit) -> Vect (S n) Unit
consLin () [] = [()]
consLin () (x :: xs) = () :: x :: xs
consNonLin : Unit -> Vect n Unit -> Vect (n+1) Unit
consNonLin u us = rewrite lemmaPlusOneRight n in u `consLin` us
consLin2 : (1 _ : Unit) -> (1 _ : Vect n Unit) -> Vect (n+1) Unit
consLin2 u us = rewrite lemmaPlusOneRight n in u `consLin` us
|
Formal statement is: lemma not_pos_poly_0 [simp]: "\<not> pos_poly 0" Informal statement is: The zero polynomial is not positive. |
From the thousands of pictures on the net regarding tropical paint colors for interior, picks the very best choices using greatest image resolution only for you, and now this pictures is actually one of images series in our greatest pictures gallery regarding Tropical Paint Colors For Interior. I am hoping you will like it.
This kind of impression (Glidden Premium 8 Oz. #hdgo02 Tropical Coral Satin Interior Paint throughout Tropical Paint Colors For Interior) earlier mentioned is usually branded together with: tropical paint colors for interior, .
submitted by Abidin from December, 2 2018. To find out just about all graphics within Tropical Paint Colors For Interior photos gallery make sure you follow this specific link.
15 Photos of "Glidden Premium 8 Oz. #hdgo02 Tropical Coral Satin Interior Paint throughout Tropical Paint Colors For Interior"
Related Posts of "Glidden Premium 8 Oz. #hdgo02 Tropical Coral Satin Interior Paint throughout Tropical Paint Colors For Interior" |
function [basis, Hw] = deconv_z(psf, fun, t)
% psf: 3-tap symmetric psf: [a _b_ a]
% H(z) = 1 / (az + b + a/z)
if numel(psf) == 1
basis = fun(t) / psf;
Hw = @(t) psf * ones(size(t));
return
end
if length(psf) ~= 3, error 'not done', end
a = psf(1);
b = psf(2);
c = b / a / 2;
p = -c + sign(c) * sqrt(c^2 - 1); % pole
scale = 1/a * 1/(p - 1/p);
basis = fun(t);
for n=1:9
basis = basis + p^n * (fun(t-n) + fun(t+n));
end
basis = scale * basis;
Hw = @(om) 1 ./ (b + 2 * a * cos(om));
|
The plain maskray or brown stingray ( Neotrygon annotata ) is a species of stingray in the family Dasyatidae . It is found in shallow , soft @-@ bottomed habitats off northern Australia . Reaching 24 cm ( 9 @.@ 4 in ) in width , this species has a diamond @-@ shaped , grayish green pectoral fin disc . Its short , whip @-@ like tail has alternating black and white bands and fin folds above and below . There are short rows of thorns on the back and the base of the tail , but otherwise the skin is smooth . While this species possesses the dark mask @-@ like pattern across its eyes common to its genus , it is not ornately patterned like other maskrays .
|
State Before: X : Type u
Y : Type v
inst✝¹ : TopologicalSpace X
inst✝ : TopologicalSpace Y
x₀ x₁ : X
t : ↑I
⊢ transAssocReparamAux t ∈ I State After: X : Type u
Y : Type v
inst✝¹ : TopologicalSpace X
inst✝ : TopologicalSpace Y
x₀ x₁ : X
t : ↑I
⊢ (if ↑t ≤ 1 / 4 then 2 * ↑t else if ↑t ≤ 1 / 2 then ↑t + 1 / 4 else 1 / 2 * (↑t + 1)) ∈ I Tactic: unfold transAssocReparamAux State Before: X : Type u
Y : Type v
inst✝¹ : TopologicalSpace X
inst✝ : TopologicalSpace Y
x₀ x₁ : X
t : ↑I
⊢ (if ↑t ≤ 1 / 4 then 2 * ↑t else if ↑t ≤ 1 / 2 then ↑t + 1 / 4 else 1 / 2 * (↑t + 1)) ∈ I State After: no goals Tactic: split_ifs <;> constructor <;> linarith [unitInterval.le_one t, unitInterval.nonneg t] |
using Distributions, Plots, LaTeXStrings; pyplot()
cUnif = Uniform(0,2π)
xGrid, N = 0:0.1:2π, 10^6
stephist( rand(N)*2π, bins=xGrid,
normed=:true, c=:blue,
label="MC Estimate")
plot!( xGrid, pdf.(cUnif,xGrid),
c=:red,ylims=(0,0.2),label="PDF", ylabel="Density",xticks=([0:π/2:2π;],
["0", L"\dfrac{\pi}{2}", L"\pi", L"\dfrac{3\pi}{2}", L"2\pi"])) |
import data.real.basic
import tactic.suggest
import game.Completeness.level01
noncomputable theory
open_locale classical
/-
# Chapter 6 : Completeness
## Level 3
The infimum, call it y, of a set A is the greatest lower bound
of A. In lean, we define the infimum as the maximum of the lwoer bound.
Prove that for any number in a lower bounded set,
there exists an element in A such that
the element will be less than the number.
Hint: it may help to prove by contrapositive.
-/
lemma inf_lt {A : set ℝ} {x : ℝ} (hx : x is_an_inf_of A) :
∀ y, x < y → ∃ a ∈ A, a < y :=
begin
-- Let `y` be any real number.
intro y,
-- Let's prove the contrapositive
contrapose,
-- The symbol `¬` means negation. Let's ask Lean to rewrite the goal without negation,
-- pushing negation through quantifiers and inequalities
push_neg,
-- Let's assume the premise, calling the assumption `h`
intro h,
-- `h` is exactly saying `y` is a lower bound of `A` so the second part of
-- the infimum assumption `hx` applied to `y` and `h` is exactly what we want.
exact hx.2 y h
end
|
module Lib.Nat where
open import Lib.Bool
open import Lib.Logic
open import Lib.Id
data Nat : Set where
zero : Nat
suc : Nat -> Nat
{-# BUILTIN NATURAL Nat #-}
{-# BUILTIN ZERO zero #-}
{-# BUILTIN SUC suc #-}
infixr 50 _*_
infixr 40 _+_
_+_ : Nat -> Nat -> Nat
zero + m = m
suc n + m = suc (n + m)
lem-plus-zero : (n : Nat) -> n + 0 ≡ n
lem-plus-zero zero = refl
lem-plus-zero (suc n) = cong suc (lem-plus-zero n)
lem-plus-suc : (n m : Nat) -> n + suc m ≡ suc (n + m)
lem-plus-suc zero m = refl
lem-plus-suc (suc n) m = cong suc (lem-plus-suc n m)
lem-plus-commute : (n m : Nat) -> n + m ≡ m + n
lem-plus-commute n zero = lem-plus-zero _
lem-plus-commute n (suc m) with n + suc m | lem-plus-suc n m
... | .(suc (n + m)) | refl = cong suc (lem-plus-commute n m)
_*_ : Nat -> Nat -> Nat
zero * m = zero
suc n * m = m + n * m
{-# BUILTIN NATPLUS _+_ #-}
{-# BUILTIN NATTIMES _*_ #-}
_==_ : Nat -> Nat -> Bool
zero == zero = true
zero == suc _ = false
suc _ == zero = false
suc n == suc m = n == m
{-# BUILTIN NATEQUALS _==_ #-}
NonZero : Nat -> Set
NonZero zero = False
NonZero (suc _) = True
|
lemma (in ring_of_sets) sets_Collect_finite_Ex: assumes "\<And>i. i \<in> S \<Longrightarrow> {x\<in>\<Omega>. P i x} \<in> M" "finite S" shows "{x\<in>\<Omega>. \<exists>i\<in>S. P i x} \<in> M" |
{-# LANGUAGE DataKinds #-}
module CannyEdgeIllusoryContour where
import Control.Monad as M
import Control.Monad.IO.Class
import Data.Array.Repa as R
import Data.Array.Unboxed as AU
import Data.Binary
import Data.ByteString as BS
import Data.Complex
import Data.List as L
import DFT.Plan
import FokkerPlanck.DomainChange
import FokkerPlanck.MonteCarlo
import FokkerPlanck.Pinwheel
import GHC.Word
import Image.IO
import Image.Transform
import Linear.V2
import OpenCV as CV hiding (Z)
import STC
import System.Directory
import System.Environment
import System.FilePath
import Text.Printf
import Types
import Utils.Array
import Utils.Parallel hiding ((.|))
import Utils.Time
main = do
args <- getArgs
let (numPointStr:numOrientationStr:numScaleStr:thetaSigmaStr:scaleSigmaStr:maxScaleStr:taoStr:numTrailStr:maxTrailStr:theta0FreqsStr:thetaFreqsStr:scale0FreqsStr:scaleFreqsStr:histFileName:histFileNameEndPoint:numIterationStr:numIterationEndPointStr:writeSourceFlagStr:cutoffRadiusEndPointStr:cutoffRadiusStr:reversalFactorStr:inputImgPath:threshold1Str:threshold2Str:writeSegmentsFlagStr:writeEndPointFlagStr:minSegLenStr:useFFTWWisdomFlagStr:fftwWisdomFileName:numThreadStr:_) =
args
numPoint = read numPointStr :: Int
numOrientation = read numOrientationStr :: Int
numScale = read numScaleStr :: Int
thetaSigma = read thetaSigmaStr :: Double
scaleSigma = read scaleSigmaStr :: Double
maxScale = read maxScaleStr :: Double
tao = read taoStr :: Double
numTrail = read numTrailStr :: Int
maxTrail = read maxTrailStr :: Int
theta0Freq = read theta0FreqsStr :: Double
theta0Freqs = [-theta0Freq .. theta0Freq]
thetaFreq = read thetaFreqsStr :: Double
thetaFreqs = [-thetaFreq .. thetaFreq]
scale0Freq = read scale0FreqsStr :: Double
scaleFreq = read scaleFreqsStr :: Double
scale0Freqs = [-scale0Freq .. scale0Freq]
scaleFreqs = [-scaleFreq .. scaleFreq]
numIteration = read numIterationStr :: Int
numIterationEndPoint = read numIterationEndPointStr :: Int
writeSourceFlag = read writeSourceFlagStr :: Bool
cutoffRadiusEndPoint = read cutoffRadiusEndPointStr :: Int
cutoffRadius = read cutoffRadiusStr :: Int
reversalFactor = read reversalFactorStr :: Double
threshold1 = read threshold1Str :: Double
threshold2 = read threshold2Str :: Double
minSegLen = read minSegLenStr :: Int
writeSegmentsFlag = read writeSegmentsFlagStr :: Bool
writeEndPointFlag = read writeEndPointFlagStr :: Bool
minimumPixelDist = 1 :: Int
useFFTWWisdomFlag = read useFFTWWisdomFlagStr :: Bool
numThread = read numThreadStr :: Int
folderPath = "output/test/CannyEdgeIllusoryContour"
histFilePath = folderPath </> histFileName
histFilePathEndPoint = folderPath </> histFileNameEndPoint
edgeFilePath = folderPath </> (takeBaseName inputImgPath L.++ "_edge.png")
segmentsFilePath =
folderPath </> (takeBaseName inputImgPath L.++ "_segments.dat")
endPointFilePath =
folderPath </>
(printf
"%s_EndPoint_%d_%d_%d_%d_%.2f_%f.dat"
(takeBaseName inputImgPath)
(numPoint * minimumPixelDist)
(round thetaFreq :: Int)
(round tao :: Int)
cutoffRadiusEndPoint
thetaSigma
reversalFactor)
fftwWisdomFilePath = folderPath </> fftwWisdomFileName
createDirectoryIfMissing True folderPath
copyFile inputImgPath (folderPath </> takeFileName inputImgPath)
-- Find edges using Canny dege detector (Opencv)
img <-
(exceptError . coerceMat . imdecode ImreadUnchanged) <$>
BS.readFile inputImgPath :: IO (Mat ('S '[ 'D, 'D]) 'D ('S GHC.Word.Word8))
let [cols, rows] = miShape . matInfo $ img
n = numPoint - cutoffRadiusEndPoint - 1 -- make sure that there is a zero wrap
(w, h) =
if rows > cols
then ( n
, round $ fromIntegral cols * fromIntegral n / fromIntegral rows)
else ( round $ fromIntegral rows * fromIntegral n / fromIntegral cols
, n)
resizedImg =
exceptError .
resize
(ResizeAbs . toSize $ V2 (fromIntegral w) (fromIntegral h))
InterCubic $
img
edge =
exceptError . canny threshold1 threshold2 (Just 3) CannyNormL2 $
resizedImg
edgeRepa =
pad [numPoint, numPoint, 1] 0 .
normalizeValueRange (0, 1) . R.map fromIntegral . toRepa $
edge
plotImageRepa edgeFilePath . ImageRepa 8 . computeS $ edgeRepa
edgeRepa <- (\(ImageRepa _ img) -> img) <$> readImageRepa edgeFilePath False
doesEndPointFileExist <- doesFileExist endPointFilePath
endPointArray <-
if doesEndPointFileExist && (not writeEndPointFlag)
then do
printCurrentTime "Read endpoint from file."
readRepaArray endPointFilePath
else do
printCurrentTime "Start computing endpoint..."
-- find segments for normalization:
-- two adjacent non-zero-valued points are connected
patchNormMethod <-
if writeSegmentsFlag
then do
printCurrentTime "Start computing segments..."
let nonzerorPoints =
createIndex2D .
L.map fst . L.filter (\(_, v) -> v /= 0) . AU.assocs $
(AU.listArray ((0, 0), (numPoint - 1, numPoint - 1)) .
R.toList $
edgeRepa :: AU.Array (Int, Int) Double)
segments =
L.map
(L.map
(\(a, b) ->
(a * minimumPixelDist, b * minimumPixelDist))) .
L.filter (\xs -> L.length xs >= minSegLen) .
pointCluster
(connectionMatrixP
(ParallelParams numThread 1)
1
nonzerorPoints) $
nonzerorPoints
encodeFile segmentsFilePath segments
removePathForcibly (folderPath </> "segments")
createDirectoryIfMissing True (folderPath </> "segments")
M.zipWithM_
(\i ->
plotImageRepa
(folderPath </> "segments" </> (printf "Cluster%03d.png" i)) .
ImageRepa 8)
[1 :: Int ..] .
cluster2Array
(numPoint * minimumPixelDist)
(numPoint * minimumPixelDist) $
segments
printCurrentTime "Done computing segments."
return . PowerMethodConnection $ segments
else do
printCurrentTime "Read segments from files."
PowerMethodConnection <$> decodeFile segmentsFilePath
-- Compute the Green's function
let numPointEndPoint = minimumPixelDist * numPoint
maxScaleEndPoint = 1.00000000001
flag <- doesFileExist histFilePathEndPoint
radialArr <-
if flag
then do
printCurrentTime "Read endpoint filter histogram from files."
R.map magnitude . getNormalizedHistogramArr <$>
decodeFile histFilePathEndPoint
else do
printCurrentTime
"Couldn't find a Green's function data. Start simulation..."
solveMonteCarloR2Z2T0S0Radial
numThread
numTrail
maxTrail
numPointEndPoint
numPointEndPoint
thetaSigma
0.0
maxScaleEndPoint
tao
theta0Freqs
thetaFreqs
[0]
[0]
histFilePathEndPoint
(emptyHistogram
[ (round . sqrt . fromIntegral $
2 * (div numPointEndPoint 2) ^ 2)
, 1
, L.length theta0Freqs
, 1
, L.length thetaFreqs
]
0)
arrR2Z2T0S0 <-
computeUnboxedP $
computeR2Z2T0S0ArrayRadial
(pinwheelHollowNonzeronCenter 12)
(cutoff cutoffRadiusEndPoint radialArr)
numPointEndPoint
numPointEndPoint
1
maxScaleEndPoint
thetaFreqs
[0]
theta0Freqs
[0]
plan <-
makeR2Z2T0S0Plan
emptyPlan
useFFTWWisdomFlag
fftwWisdomFilePath
arrR2Z2T0S0
-- Compute initial eigenvector and bias
-- increase the distance between pixels to aovid aliasing
let resizedEdgeRepa =
R.traverse
edgeRepa
(const (Z :. numPointEndPoint :. numPointEndPoint)) $ \f (Z :. i :. j) ->
if mod i minimumPixelDist == 0 && mod j minimumPixelDist == 0
then f (Z :. (0 :: Int) :. (div i minimumPixelDist) :.
(div j minimumPixelDist))
else 0
bias =
computeBiasR2T0S0FromRepa
numPointEndPoint
numPointEndPoint
(L.length theta0Freqs)
1
resizedEdgeRepa
eigenVec =
computeInitialEigenVectorR2T0S0FromRepa
numPointEndPoint
numPointEndPoint
(L.length theta0Freqs)
1
(L.length thetaFreqs)
1
resizedEdgeRepa
plotImageRepa
(folderPath </> takeBaseName inputImgPath L.++ "_resizedEdge.png") .
ImageRepa 8 . computeS . extend (Z :. (1 :: Int) :. All :. All) $
resizedEdgeRepa
endPointSource <-
computeS . R.zipWith (*) bias <$>
powerMethodR2Z2T0S0Reversal
plan
folderPath
numPointEndPoint
numPointEndPoint
numOrientation
thetaFreqs
theta0Freqs
1
[0]
[0]
0
arrR2Z2T0S0
patchNormMethod
numIterationEndPoint
writeSourceFlag
(printf
"_%d_%d_%d_%d_%.2f_%f_%s_EndPoint"
(numPoint * minimumPixelDist)
(round thetaFreq :: Int)
(round tao :: Int)
cutoffRadiusEndPoint
thetaSigma
reversalFactor
(takeBaseName inputImgPath))
0.5
reversalFactor
bias
eigenVec
writeRepaArray endPointFilePath endPointSource
return endPointSource
print "done"
|
! RUN: %f18 -fparse-only %s
! RUN: rm -rf %t && mkdir %t
! RUN: touch %t/empty.f90
! RUN: %f18 -fparse-only %t/empty.f90
|
[STATEMENT]
lemma absolute_neighbourhood_extensor_imp_ANR:
fixes S :: "'a::euclidean_space set"
assumes "\<And>f :: 'a * real \<Rightarrow> 'a.
\<And>U T. \<lbrakk>continuous_on T f; f ` T \<subseteq> S;
closedin (top_of_set U) T\<rbrakk>
\<Longrightarrow> \<exists>V g. T \<subseteq> V \<and> openin (top_of_set U) V \<and>
continuous_on V g \<and> g ` V \<subseteq> S \<and> (\<forall>x \<in> T. g x = f x)"
shows "ANR S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ANR S
[PROOF STEP]
proof (clarsimp simp: ANR_def)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>U S'. \<lbrakk>S homeomorphic S'; closedin (top_of_set U) S'\<rbrakk> \<Longrightarrow> \<exists>T. openin (top_of_set U) T \<and> S' retract_of T
[PROOF STEP]
fix U and T :: "('a * real) set"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>U S'. \<lbrakk>S homeomorphic S'; closedin (top_of_set U) S'\<rbrakk> \<Longrightarrow> \<exists>T. openin (top_of_set U) T \<and> S' retract_of T
[PROOF STEP]
assume "S homeomorphic T" and clo: "closedin (top_of_set U) T"
[PROOF STATE]
proof (state)
this:
S homeomorphic T
closedin (top_of_set U) T
goal (1 subgoal):
1. \<And>U S'. \<lbrakk>S homeomorphic S'; closedin (top_of_set U) S'\<rbrakk> \<Longrightarrow> \<exists>T. openin (top_of_set U) T \<and> S' retract_of T
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
S homeomorphic T
closedin (top_of_set U) T
[PROOF STEP]
obtain g h where hom: "homeomorphism S T g h"
[PROOF STATE]
proof (prove)
using this:
S homeomorphic T
closedin (top_of_set U) T
goal (1 subgoal):
1. (\<And>g h. homeomorphism S T g h \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (force simp: homeomorphic_def)
[PROOF STATE]
proof (state)
this:
homeomorphism S T g h
goal (1 subgoal):
1. \<And>U S'. \<lbrakk>S homeomorphic S'; closedin (top_of_set U) S'\<rbrakk> \<Longrightarrow> \<exists>T. openin (top_of_set U) T \<and> S' retract_of T
[PROOF STEP]
obtain h: "continuous_on T h" " h ` T \<subseteq> S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lbrakk>continuous_on T h; h ` T \<subseteq> S\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using hom homeomorphism_def
[PROOF STATE]
proof (prove)
using this:
homeomorphism S T g h
homeomorphism ?s ?t ?f ?g = ((\<forall>x\<in>?s. ?g (?f x) = x) \<and> ?f ` ?s = ?t \<and> continuous_on ?s ?f \<and> (\<forall>y\<in>?t. ?f (?g y) = y) \<and> ?g ` ?t = ?s \<and> continuous_on ?t ?g)
goal (1 subgoal):
1. (\<lbrakk>continuous_on T h; h ` T \<subseteq> S\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
continuous_on T h
h ` T \<subseteq> S
goal (1 subgoal):
1. \<And>U S'. \<lbrakk>S homeomorphic S'; closedin (top_of_set U) S'\<rbrakk> \<Longrightarrow> \<exists>T. openin (top_of_set U) T \<and> S' retract_of T
[PROOF STEP]
obtain V h' where "T \<subseteq> V" and opV: "openin (top_of_set U) V"
and h': "continuous_on V h'" "h' ` V \<subseteq> S"
and h'h: "\<forall>x\<in>T. h' x = h x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>V h'. \<lbrakk>T \<subseteq> V; openin (top_of_set U) V; continuous_on V h'; h' ` V \<subseteq> S; \<forall>x\<in>T. h' x = h x\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms [OF h clo]
[PROOF STATE]
proof (prove)
using this:
\<exists>V g. T \<subseteq> V \<and> openin (top_of_set U) V \<and> continuous_on V g \<and> g ` V \<subseteq> S \<and> (\<forall>x\<in>T. g x = h x)
goal (1 subgoal):
1. (\<And>V h'. \<lbrakk>T \<subseteq> V; openin (top_of_set U) V; continuous_on V h'; h' ` V \<subseteq> S; \<forall>x\<in>T. h' x = h x\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
T \<subseteq> V
openin (top_of_set U) V
continuous_on V h'
h' ` V \<subseteq> S
\<forall>x\<in>T. h' x = h x
goal (1 subgoal):
1. \<And>U S'. \<lbrakk>S homeomorphic S'; closedin (top_of_set U) S'\<rbrakk> \<Longrightarrow> \<exists>T. openin (top_of_set U) T \<and> S' retract_of T
[PROOF STEP]
have [simp]: "T \<subseteq> U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T \<subseteq> U
[PROOF STEP]
using clo closedin_imp_subset
[PROOF STATE]
proof (prove)
using this:
closedin (top_of_set U) T
closedin (subtopology ?U ?S) ?T \<Longrightarrow> ?T \<subseteq> ?S
goal (1 subgoal):
1. T \<subseteq> U
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
T \<subseteq> U
goal (1 subgoal):
1. \<And>U S'. \<lbrakk>S homeomorphic S'; closedin (top_of_set U) S'\<rbrakk> \<Longrightarrow> \<exists>T. openin (top_of_set U) T \<and> S' retract_of T
[PROOF STEP]
have "T retract_of V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T retract_of V
[PROOF STEP]
proof (simp add: retraction_def retract_of_def, intro exI conjI \<open>T \<subseteq> V\<close>)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. continuous_on V ?r8
2. ?r8 ` V \<subseteq> T
3. \<forall>x\<in>T. ?r8 x = x
[PROOF STEP]
show "continuous_on V (g \<circ> h')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on V (g \<circ> h')
[PROOF STEP]
by (meson continuous_on_compose continuous_on_subset h' hom homeomorphism_cont1)
[PROOF STATE]
proof (state)
this:
continuous_on V (g \<circ> h')
goal (2 subgoals):
1. (g \<circ> h') ` V \<subseteq> T
2. \<forall>x\<in>T. (g \<circ> h') x = x
[PROOF STEP]
show "(g \<circ> h') ` V \<subseteq> T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (g \<circ> h') ` V \<subseteq> T
[PROOF STEP]
using h'
[PROOF STATE]
proof (prove)
using this:
continuous_on V h'
h' ` V \<subseteq> S
goal (1 subgoal):
1. (g \<circ> h') ` V \<subseteq> T
[PROOF STEP]
by clarsimp (metis hom subsetD homeomorphism_def imageI)
[PROOF STATE]
proof (state)
this:
(g \<circ> h') ` V \<subseteq> T
goal (1 subgoal):
1. \<forall>x\<in>T. (g \<circ> h') x = x
[PROOF STEP]
show "\<forall>x\<in>T. (g \<circ> h') x = x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x\<in>T. (g \<circ> h') x = x
[PROOF STEP]
by clarsimp (metis h'h hom homeomorphism_def)
[PROOF STATE]
proof (state)
this:
\<forall>x\<in>T. (g \<circ> h') x = x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
T retract_of V
goal (1 subgoal):
1. \<And>U S'. \<lbrakk>S homeomorphic S'; closedin (top_of_set U) S'\<rbrakk> \<Longrightarrow> \<exists>T. openin (top_of_set U) T \<and> S' retract_of T
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
T retract_of V
[PROOF STEP]
show "\<exists>V. openin (top_of_set U) V \<and> T retract_of V"
[PROOF STATE]
proof (prove)
using this:
T retract_of V
goal (1 subgoal):
1. \<exists>V. openin (top_of_set U) V \<and> T retract_of V
[PROOF STEP]
using opV
[PROOF STATE]
proof (prove)
using this:
T retract_of V
openin (top_of_set U) V
goal (1 subgoal):
1. \<exists>V. openin (top_of_set U) V \<and> T retract_of V
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>V. openin (top_of_set U) V \<and> T retract_of V
goal:
No subgoals!
[PROOF STEP]
qed |
Elephanta Island , or Gharapuri , is about 11 km ( 6 @.@ 8 mi ) east of the Apollo Bunder ( Bunder in Marathi means a " pier for embarkation and disembarkation of passengers and goods " ) on the Mumbai Harbour and 10 km ( 6 @.@ 2 mi ) south of Pir Pal in Trombay . The island covers about 10 km2 ( 3 @.@ 9 sq mi ) at high tide and about 16 km2 ( 6 @.@ 2 sq mi ) at low tide . Gharapuri is small village on the south side of the island . The Elephanta Caves can be reached by a ferry from the Gateway of India , Mumbai , which has the nearest airport and train station . The cave is closed on Monday .
|
lemma norm_cis [simp]: "norm (cis a) = 1" |
/*
* Copyright (c) 2010-2012 frankee zhou (frankee.zhou at gmail dot com)
*
* Distributed under under the Apache License, version 2.0 (the "License").
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include <boost/program_options.hpp>
#include <cetty/util/SimpleTrie.h>
#include <cetty/config/ConfigObject.h>
#include <cetty/config/ConfigCenter.h>
#include <cetty/config/ConfigDescriptor.h>
#include <cetty/logging/LoggerHelper.h>
namespace cetty {
namespace config {
using namespace boost::program_options;
using namespace cetty::util;
static int parseField(const ConfigFieldDescriptor* field,
const variable_value& option,
const ConfigCenter::CmdlineTrie& cmdline,
ConfigObject* object) {
if (field->repeatedType == ConfigFieldDescriptor::LIST) {
switch (field->cppType) {
case ConfigFieldDescriptor::CPPTYPE_INT32:
object->add(field, option.as<std::vector<int> >());
break;
case ConfigFieldDescriptor::CPPTYPE_INT64:
object->add(field, option.as<std::vector<int64_t> >());
break;
case ConfigFieldDescriptor::CPPTYPE_DOUBLE:
object->add(field, option.as<std::vector<double> >());
break;
case ConfigFieldDescriptor::CPPTYPE_STRING:
object->add(field, option.as<std::vector<std::string> >());
break;
case ConfigFieldDescriptor::CPPTYPE_OBJECT:
// ConfigObject* obj = object->addObject(field);
//
// if (!parseConfigObject(*itr, obj)) {
// return false;
// }
break;
}
}
else {
switch (field->cppType) {
case ConfigFieldDescriptor::CPPTYPE_INT32:
object->set(field, option.as<int>());
break;
case ConfigFieldDescriptor::CPPTYPE_INT64:
object->set(field, option.as<int64_t>());
break;
case ConfigFieldDescriptor::CPPTYPE_DOUBLE:
object->set(field, option.as<double>());
break;
case ConfigFieldDescriptor::CPPTYPE_STRING:
object->set(field, option.as<std::string>());
break;
case ConfigFieldDescriptor::CPPTYPE_OBJECT: {
// Recursing into the nested object is not wired up yet; the child
// object is created so the field is at least initialized.
ConfigObject* obj = object->mutableObject(field);
(void)obj; // silence unused-variable warnings until recursion lands
//return parseConfigObject(..., obj);
break;
}
}
}
return true;
}
bool parseConfigObject(const variables_map& vm,
const ConfigCenter::CmdlineTrie& cmdline,
ConfigObject* object) {
if (!object) {
LOG_ERROR << "parsed object is NULL.";
return false;
}
const ConfigObjectDescriptor* descriptor = object->descriptor();
ConfigObjectDescriptor::ConstIterator itr = descriptor->begin();
if (cmdline.countPrefix(descriptor->className()) == 0) {
LOG_INFO << "there is no field set with cmdline in "
<< descriptor->className();
return true;
}
for (; itr != descriptor->end(); ++itr) {
const ConfigFieldDescriptor* field = *itr;
std::string* value = cmdline.getValue(field->name);
if (!value) {
continue;
}
if (vm.count(*value) == 0) {
LOG_INFO << "field " << field->name << " has none value, skip it.";
continue;
}
if (!parseField(field, vm[*value], cmdline, object)) {
return false;
}
}
return true;
}
}
}
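// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file). It shows one
// plausible way to drive parseConfigObject: build a variables_map with
// boost::program_options, then hand it over together with the ConfigCenter's
// command-line trie. The option name "server.port", the empty trie, and the
// NULL root object are assumptions for illustration only.
//
// int main(int argc, char* argv[]) {
//     namespace po = boost::program_options;
//
//     po::options_description desc("options");
//     desc.add_options()
//         ("server.port", po::value<int>(), "listen port");
//
//     po::variables_map vm;
//     po::store(po::parse_command_line(argc, argv, desc), vm);
//     po::notify(vm);
//
//     cetty::config::ConfigCenter::CmdlineTrie cmdline; // assumed filled by ConfigCenter
//     cetty::config::ConfigObject* root = NULL;         // assumed obtained from a config descriptor
//
//     // parseConfigObject guards against a NULL object and returns false.
//     return cetty::config::parseConfigObject(vm, cmdline, root) ? 0 : 1;
// }
// ---------------------------------------------------------------------------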
|
[STATEMENT]
lemma sas_plus_problem_has_serial_solution_iff_ii':
assumes "is_valid_problem_sas_plus \<Psi>"
and "SAS_Plus_Semantics.is_serial_solution_for_problem \<Psi> \<psi>"
and "length \<psi> \<le> h"
shows "\<exists>\<A>. (\<A> \<Turnstile> \<Phi>\<^sub>\<forall> (\<phi> (prob_with_noop \<Psi>)) h)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>\<A>. \<A> \<Turnstile> \<Phi>\<^sub>\<forall> \<phi> prob_with_noop \<Psi> h
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
is_valid_problem_sas_plus \<Psi>
SAS_Plus_Semantics.is_serial_solution_for_problem \<Psi> \<psi>
length \<psi> \<le> h
goal (1 subgoal):
1. \<exists>\<A>. \<A> \<Turnstile> \<Phi>\<^sub>\<forall> \<phi> prob_with_noop \<Psi> h
[PROOF STEP]
by(fastforce
intro!: assms noops_valid noops_complete
sas_plus_problem_has_serial_solution_iff_ii
[where \<psi> = "(replicate (h - length \<psi>) empty_sasp_action) @ \<psi>"] ) |
(*
Copyright 2016 Luxembourg University
Copyright 2017 Luxembourg University
This file is part of Velisarios.
Velisarios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Velisarios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velisarios. If not, see <http://www.gnu.org/licenses/>.
Authors: Vincent Rahli
Ivana Vukotic
*)
Require Export PBFTwell_formed_log.
Section PBFTwf.
Local Open Scope eo.
Local Open Scope proc.
Context { pbft_context : PBFTcontext }.
Context { pbft_auth : PBFTauth }.
Context { pbft_keys : PBFTinitial_keys }.
Context { pbft_hash : PBFThash }.
Lemma update_state_new_view_preserves_wf :
forall i s1 nv s2 msgs,
update_state_new_view i s1 nv = (s2, msgs)
-> well_formed_log (log s1)
-> well_formed_log (log s2).
Proof.
introv upd wf.
unfold update_state_new_view in upd; smash_pbft.
Qed.
Hint Resolve update_state_new_view_preserves_wf : pbft.
End PBFTwf.
Hint Resolve update_state_new_view_preserves_wf : pbft.
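(* Illustrative note (not part of the original file): with the hint in the
   [pbft] database, a goal matching the lemma's conclusion can typically be
   discharged by [eauto with pbft], assuming the same Context assumptions
   are in scope, e.g.

     Goal forall i s1 nv s2 msgs,
         update_state_new_view i s1 nv = (s2, msgs)
         -> well_formed_log (log s1)
         -> well_formed_log (log s2).
     Proof. eauto with pbft. Qed. *)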
|
[GOAL]
P : Type u_1
inst✝ : LE P
I J s✝ t✝ : Ideal P
x y : P
s t : Ideal P
x✝ : s.toLowerSet = t.toLowerSet
⊢ s = t
[PROOFSTEP]
cases s
[GOAL]
case mk
P : Type u_1
inst✝ : LE P
I J s t✝ : Ideal P
x y : P
t : Ideal P
toLowerSet✝ : LowerSet P
nonempty'✝ : Set.Nonempty toLowerSet✝.carrier
directed'✝ : DirectedOn (fun x x_1 => x ≤ x_1) toLowerSet✝.carrier
x✝ : { toLowerSet := toLowerSet✝, nonempty' := nonempty'✝, directed' := directed'✝ }.toLowerSet = t.toLowerSet
⊢ { toLowerSet := toLowerSet✝, nonempty' := nonempty'✝, directed' := directed'✝ } = t
[PROOFSTEP]
cases t
[GOAL]
case mk.mk
P : Type u_1
inst✝ : LE P
I J s t : Ideal P
x y : P
toLowerSet✝¹ : LowerSet P
nonempty'✝¹ : Set.Nonempty toLowerSet✝¹.carrier
directed'✝¹ : DirectedOn (fun x x_1 => x ≤ x_1) toLowerSet✝¹.carrier
toLowerSet✝ : LowerSet P
nonempty'✝ : Set.Nonempty toLowerSet✝.carrier
directed'✝ : DirectedOn (fun x x_1 => x ≤ x_1) toLowerSet✝.carrier
x✝ :
{ toLowerSet := toLowerSet✝¹, nonempty' := nonempty'✝¹, directed' := directed'✝¹ }.toLowerSet =
{ toLowerSet := toLowerSet✝, nonempty' := nonempty'✝, directed' := directed'✝ }.toLowerSet
⊢ { toLowerSet := toLowerSet✝¹, nonempty' := nonempty'✝¹, directed' := directed'✝¹ } =
{ toLowerSet := toLowerSet✝, nonempty' := nonempty'✝, directed' := directed'✝ }
[PROOFSTEP]
congr
[GOAL]
P : Type u_1
inst✝ : LE P
I✝ J s t : Ideal P
x y : P
I : Ideal P
p : P
nmem : ¬p ∈ I
hp : ↑I = univ
⊢ False
[PROOFSTEP]
have := mem_univ p
[GOAL]
P : Type u_1
inst✝ : LE P
I✝ J s t : Ideal P
x y : P
I : Ideal P
p : P
nmem : ¬p ∈ I
hp : ↑I = univ
this : p ∈ univ
⊢ False
[PROOFSTEP]
rw [← hp] at this
[GOAL]
P : Type u_1
inst✝ : LE P
I✝ J s t : Ideal P
x y : P
I : Ideal P
p : P
nmem : ¬p ∈ I
hp : ↑I = univ
this : p ∈ ↑I
⊢ False
[PROOFSTEP]
exact nmem this
[GOAL]
P : Type u_1
inst✝¹ : LE P
I✝ J✝ s t : Ideal P
x y : P
inst✝ : IsDirected P fun x x_1 => x ≥ x_1
I J : Ideal P
⊢ Set.Nonempty (↑I ∩ ↑J)
[PROOFSTEP]
obtain ⟨a, ha⟩ := I.nonempty
[GOAL]
case intro
P : Type u_1
inst✝¹ : LE P
I✝ J✝ s t : Ideal P
x y : P
inst✝ : IsDirected P fun x x_1 => x ≥ x_1
I J : Ideal P
a : P
ha : a ∈ ↑I
⊢ Set.Nonempty (↑I ∩ ↑J)
[PROOFSTEP]
obtain ⟨b, hb⟩ := J.nonempty
[GOAL]
case intro.intro
P : Type u_1
inst✝¹ : LE P
I✝ J✝ s t : Ideal P
x y : P
inst✝ : IsDirected P fun x x_1 => x ≥ x_1
I J : Ideal P
a : P
ha : a ∈ ↑I
b : P
hb : b ∈ ↑J
⊢ Set.Nonempty (↑I ∩ ↑J)
[PROOFSTEP]
obtain ⟨c, hac, hbc⟩ := exists_le_le a b
[GOAL]
case intro.intro.intro.intro
P : Type u_1
inst✝¹ : LE P
I✝ J✝ s t : Ideal P
x y : P
inst✝ : IsDirected P fun x x_1 => x ≥ x_1
I J : Ideal P
a : P
ha : a ∈ ↑I
b : P
hb : b ∈ ↑J
c : P
hac : c ≤ a
hbc : c ≤ b
⊢ Set.Nonempty (↑I ∩ ↑J)
[PROOFSTEP]
exact ⟨c, I.lower hac ha, J.lower hbc hb⟩
[GOAL]
P : Type u_1
inst✝² : LE P
inst✝¹ : IsDirected P fun x x_1 => x ≤ x_1
inst✝ : Nonempty P
I : Ideal P
hI : IsCoatom I
src✝ : IsProper I := IsCoatom.isProper hI
x✝ : Ideal P
hJ : I < x✝
⊢ ↑x✝ = univ
[PROOFSTEP]
simp [hI.2 _ hJ]
[GOAL]
P : Type u_1
inst✝¹ : LE P
inst✝ : OrderTop P
I : Ideal P
h : ⊤ ∈ I
⊢ I = ⊤
[PROOFSTEP]
ext
[GOAL]
case a.h
P : Type u_1
inst✝¹ : LE P
inst✝ : OrderTop P
I : Ideal P
h : ⊤ ∈ I
x✝ : P
⊢ x✝ ∈ ↑I ↔ x✝ ∈ ↑⊤
[PROOFSTEP]
exact iff_of_true (I.lower le_top h) trivial
[GOAL]
P : Type u_1
inst✝¹ : Preorder P
inst✝ : OrderBot P
⊢ ∀ (a : Ideal P), ⊥ ≤ a
[PROOFSTEP]
simp
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : IsDirected P fun x x_1 => x ≥ x_1
x✝ : P
I✝ J✝ K s t I J : Ideal P
x : P
hx : x ∈ (I.toLowerSet ⊓ J.toLowerSet).carrier
y : P
hy : y ∈ (I.toLowerSet ⊓ J.toLowerSet).carrier
⊢ (fun x x_1 => x ≤ x_1) x (x ⊔ y) ∧ (fun x x_1 => x ≤ x_1) y (x ⊔ y)
[PROOFSTEP]
simp
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : IsDirected P fun x x_1 => x ≥ x_1
x : P
I✝ J✝ K s t I J : Ideal P
⊢ Set.Nonempty
{ carrier := {x | ∃ i, i ∈ I ∧ ∃ j, j ∈ J ∧ x ≤ i ⊔ j},
lower' :=
(_ :
∀ (x y : P),
y ≤ x →
x ∈ {x | ∃ i, i ∈ I ∧ ∃ j, j ∈ J ∧ x ≤ i ⊔ j} → y ∈ {x | ∃ i, i ∈ I ∧ ∃ j, j ∈ J ∧ x ≤ i ⊔ j}) }.carrier
[PROOFSTEP]
cases' inter_nonempty I J with w h
[GOAL]
case intro
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : IsDirected P fun x x_1 => x ≥ x_1
x : P
I✝ J✝ K s t I J : Ideal P
w : P
h : w ∈ ↑I ∩ ↑J
⊢ Set.Nonempty
{ carrier := {x | ∃ i, i ∈ I ∧ ∃ j, j ∈ J ∧ x ≤ i ⊔ j},
lower' :=
(_ :
∀ (x y : P),
y ≤ x →
x ∈ {x | ∃ i, i ∈ I ∧ ∃ j, j ∈ J ∧ x ≤ i ⊔ j} → y ∈ {x | ∃ i, i ∈ I ∧ ∃ j, j ∈ J ∧ x ≤ i ⊔ j}) }.carrier
[PROOFSTEP]
exact ⟨w, w, h.1, w, h.2, le_sup_left⟩
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : IsDirected P fun x x_1 => x ≥ x_1
x : P
I J K s t : Ideal P
hx : ¬x ∈ I
h : I = I ⊔ principal x
⊢ x ∈ I
[PROOFSTEP]
simpa only [left_eq_sup, principal_le_iff] using h
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S : Set (Ideal P)
⊢ ⊥ ∈ (⨅ (s : Ideal P) (_ : s ∈ S), s.toLowerSet).carrier
[PROOFSTEP]
rw [LowerSet.carrier_eq_coe, LowerSet.coe_iInf₂, Set.mem_iInter₂]
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S : Set (Ideal P)
⊢ ∀ (i : Ideal P), i ∈ S → ⊥ ∈ ↑i.toLowerSet
[PROOFSTEP]
exact fun s _ ↦ s.bot_mem
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S : Set (Ideal P)
a : P
ha : a ∈ (⨅ (s : Ideal P) (_ : s ∈ S), s.toLowerSet).carrier
b : P
hb : b ∈ (⨅ (s : Ideal P) (_ : s ∈ S), s.toLowerSet).carrier
⊢ a ⊔ b ∈ (⨅ (s : Ideal P) (_ : s ∈ S), s.toLowerSet).carrier
[PROOFSTEP]
rw [LowerSet.carrier_eq_coe, LowerSet.coe_iInf₂, Set.mem_iInter₂] at ha hb ⊢
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S : Set (Ideal P)
a : P
ha : ∀ (i : Ideal P), i ∈ S → a ∈ ↑i.toLowerSet
b : P
hb : ∀ (i : Ideal P), i ∈ S → b ∈ ↑i.toLowerSet
⊢ ∀ (i : Ideal P), i ∈ S → a ⊔ b ∈ ↑i.toLowerSet
[PROOFSTEP]
exact fun s hs ↦ sup_mem (ha _ hs) (hb _ hs)
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S : Set (Ideal P)
⊢ x ∈ sInf S ↔ ∀ (s : Ideal P), s ∈ S → x ∈ s
[PROOFSTEP]
simp_rw [← SetLike.mem_coe, coe_sInf, mem_iInter₂]
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S✝ : Set (Ideal P)
src✝ : Lattice (Ideal P) := inferInstance
S : Set (Ideal P)
⊢ IsGLB S (sInf S)
[PROOFSTEP]
refine' ⟨fun s hs ↦ _, fun s hs ↦ by rwa [← coe_subset_coe, coe_sInf, subset_iInter₂_iff]⟩
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S✝ : Set (Ideal P)
src✝ : Lattice (Ideal P) := inferInstance
S : Set (Ideal P)
s : Ideal P
hs : s ∈ lowerBounds S
⊢ s ≤ sInf S
[PROOFSTEP]
rwa [← coe_subset_coe, coe_sInf, subset_iInter₂_iff]
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S✝ : Set (Ideal P)
src✝ : Lattice (Ideal P) := inferInstance
S : Set (Ideal P)
s : Ideal P
hs : s ∈ S
⊢ sInf S ≤ s
[PROOFSTEP]
rw [← coe_subset_coe, coe_sInf]
[GOAL]
P : Type u_1
inst✝¹ : SemilatticeSup P
inst✝ : OrderBot P
x : P
I J K : Ideal P
S✝ : Set (Ideal P)
src✝ : Lattice (Ideal P) := inferInstance
S : Set (Ideal P)
s : Ideal P
hs : s ∈ S
⊢ ⋂ (s : Ideal P) (_ : s ∈ S), ↑s ⊆ ↑s
[PROOFSTEP]
exact biInter_subset_of_mem hs
[GOAL]
P : Type u_1
inst✝ : DistribLattice P
I J : Ideal P
x i j : P
hi : i ∈ I
hj : j ∈ J
hx : x ≤ i ⊔ j
⊢ ∃ i', i' ∈ I ∧ ∃ j', j' ∈ J ∧ x = i' ⊔ j'
[PROOFSTEP]
refine' ⟨x ⊓ i, I.lower inf_le_right hi, x ⊓ j, J.lower inf_le_right hj, _⟩
[GOAL]
P : Type u_1
inst✝ : DistribLattice P
I J : Ideal P
x i j : P
hi : i ∈ I
hj : j ∈ J
hx : x ≤ i ⊔ j
⊢ x = x ⊓ i ⊔ x ⊓ j
[PROOFSTEP]
calc
x = x ⊓ (i ⊔ j) := left_eq_inf.mpr hx
_ = x ⊓ i ⊔ x ⊓ j := inf_sup_left
[GOAL]
P : Type u_1
inst✝ : BooleanAlgebra P
x : P
I : Ideal P
hI : IsProper I
hxc : xᶜ ∈ I
⊢ ¬x ∈ I
[PROOFSTEP]
intro hx
[GOAL]
P : Type u_1
inst✝ : BooleanAlgebra P
x : P
I : Ideal P
hI : IsProper I
hxc : xᶜ ∈ I
hx : x ∈ I
⊢ False
[PROOFSTEP]
apply hI.top_not_mem
[GOAL]
P : Type u_1
inst✝ : BooleanAlgebra P
x : P
I : Ideal P
hI : IsProper I
hxc : xᶜ ∈ I
hx : x ∈ I
⊢ ⊤ ∈ I
[PROOFSTEP]
have ht : x ⊔ xᶜ ∈ I := sup_mem ‹_› ‹_›
[GOAL]
P : Type u_1
inst✝ : BooleanAlgebra P
x : P
I : Ideal P
hI : IsProper I
hxc : xᶜ ∈ I
hx : x ∈ I
ht : x ⊔ xᶜ ∈ I
⊢ ⊤ ∈ I
[PROOFSTEP]
rwa [sup_compl_eq_top] at ht
[GOAL]
P : Type u_1
inst✝ : BooleanAlgebra P
x : P
I : Ideal P
hI : IsProper I
⊢ ¬x ∈ I ∨ ¬xᶜ ∈ I
[PROOFSTEP]
have h : xᶜ ∈ I → x ∉ I := hI.not_mem_of_compl_mem
[GOAL]
P : Type u_1
inst✝ : BooleanAlgebra P
x : P
I : Ideal P
hI : IsProper I
h : xᶜ ∈ I → ¬x ∈ I
⊢ ¬x ∈ I ∨ ¬xᶜ ∈ I
[PROOFSTEP]
tauto
[GOAL]
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
⊢ Monotone (sequenceOfCofinals p 𝒟)
[PROOFSTEP]
apply monotone_nat_of_le_succ
[GOAL]
case hf
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
⊢ ∀ (n : ℕ), sequenceOfCofinals p 𝒟 n ≤ sequenceOfCofinals p 𝒟 (n + 1)
[PROOFSTEP]
intro n
[GOAL]
case hf
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
n : ℕ
⊢ sequenceOfCofinals p 𝒟 n ≤ sequenceOfCofinals p 𝒟 (n + 1)
[PROOFSTEP]
dsimp only [sequenceOfCofinals, Nat.add]
[GOAL]
case hf
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
n : ℕ
⊢ sequenceOfCofinals p 𝒟 n ≤
match Encodable.decode n with
| none => sequenceOfCofinals p 𝒟 n
| some i => Cofinal.above (𝒟 i) (sequenceOfCofinals p 𝒟 n)
[PROOFSTEP]
cases (Encodable.decode n : Option ι)
[GOAL]
case hf.none
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
n : ℕ
⊢ sequenceOfCofinals p 𝒟 n ≤
match none with
| none => sequenceOfCofinals p 𝒟 n
| some i => Cofinal.above (𝒟 i) (sequenceOfCofinals p 𝒟 n)
[PROOFSTEP]
rfl
[GOAL]
case hf.some
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
n : ℕ
val✝ : ι
⊢ sequenceOfCofinals p 𝒟 n ≤
match some val✝ with
| none => sequenceOfCofinals p 𝒟 n
| some i => Cofinal.above (𝒟 i) (sequenceOfCofinals p 𝒟 n)
[PROOFSTEP]
apply Cofinal.le_above
[GOAL]
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
i : ι
⊢ sequenceOfCofinals p 𝒟 (Encodable.encode i + 1) ∈ 𝒟 i
[PROOFSTEP]
dsimp only [sequenceOfCofinals, Nat.add]
[GOAL]
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
i : ι
⊢ (match Encodable.decode (Encodable.encode i) with
| none => sequenceOfCofinals p 𝒟 (Encodable.encode i)
| some i_1 => Cofinal.above (𝒟 i_1) (sequenceOfCofinals p 𝒟 (Encodable.encode i))) ∈
𝒟 i
[PROOFSTEP]
rw [Encodable.encodek]
[GOAL]
P : Type u_1
inst✝¹ : Preorder P
p : P
ι : Type u_2
inst✝ : Encodable ι
𝒟 : ι → Cofinal P
i : ι
⊢ (match some i with
| none => sequenceOfCofinals p 𝒟 (Encodable.encode i)
| some i_1 => Cofinal.above (𝒟 i_1) (sequenceOfCofinals p 𝒟 (Encodable.encode i))) ∈
𝒟 i
[PROOFSTEP]
apply Cofinal.above_mem
|
c
c $Id$
c
SUBROUTINE print_output(istep,nprnt,ekin,ecoul,eshrt,ebond,eshel)
implicit none
include 'p_const.inc'
include 'p_input.inc'
include 'cm_temp.inc'
integer istep,nprnt
real*8 ekin,ecoul,eshrt,ebond,eshel
real*8 stptmp,stpke,stppe,stpte
logical lnew
data lnew/.true./
save lnew
c instantaneous temperature from equipartition: T = 2*KE/(boltzmann*degfree)
stptmp=2.0*ekin/boltzmann/degfree
c kinetic energy in output units, total potential energy, total energy
stpke=ekin/convfct2
stppe=ecoul+eshrt+ebond+eshel
stpte=stppe+stpke
if(lnew)write(output,'(/,a12,8a12)')' STEP ',
$ ' TOTAL E. ',' KINETIC E. ','POTENTIAL E.',' COUL. E. ',
$ ' VDW E. ',' BOND E. ',' SHELL E. ',' TEMPERATURE'
if(lnew)lnew=.false.
if(mod(istep,nprnt).eq.0)write(output,'(i12,8f12.4)')
$ istep,stpte,stpke,stppe,ecoul,eshrt,ebond,eshel,stptmp
return
END
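c illustrative call site (not part of the original file); the caller is
c assumed to accumulate the energy terms in the units used above:
c call print_output(istep,nprnt,ekin,ecoul,eshrt,ebond,eshel)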
|
lemma measurable_lfp2_coinduct[consumes 1, case_names continuity step]:
fixes F :: "('a \<Rightarrow> 'c \<Rightarrow> 'b) \<Rightarrow> ('a \<Rightarrow> 'c \<Rightarrow> 'b::{complete_lattice, countable})"
assumes "P M s"
assumes F: "sup_continuous F"
assumes *: "\<And>M A s. P M s \<Longrightarrow> (\<And>N t. P N t \<Longrightarrow> A t \<in> measurable N (count_space UNIV)) \<Longrightarrow> F A s \<in> measurable M (count_space UNIV)"
shows "lfp F s \<in> measurable M (count_space UNIV)" |