//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Types.hpp>
#include <Runtime.hpp>
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(DebugCallback)
namespace
{
using namespace armnn;
INetworkPtr CreateSimpleNetwork()
{
INetworkPtr net(INetwork::Create());
IConnectableLayer* input = net->AddInputLayer(0, "Input");
ActivationDescriptor descriptor;
descriptor.m_Function = ActivationFunction::ReLu;
IConnectableLayer* activationLayer = net->AddActivationLayer(descriptor, "Activation:ReLu");
IConnectableLayer* output = net->AddOutputLayer(0);
input->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
activationLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 5 }, DataType::Float32));
activationLayer->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 5 }, DataType::Float32));
return net;
}
BOOST_AUTO_TEST_CASE(RuntimeRegisterDebugCallback)
{
INetworkPtr net = CreateSimpleNetwork();
IRuntime::CreationOptions options;
IRuntimePtr runtime(IRuntime::Create(options));
// Optimize the network with debug option
OptimizerOptions optimizerOptions(false, true);
std::vector<BackendId> backends = { "CpuRef" };
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
NetworkId netId;
BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
// Set up callback function
int callCount = 0;
std::vector<TensorShape> tensorShapes;
std::vector<unsigned int> slotIndexes;
auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensor)
{
IgnoreUnused(guid);
slotIndexes.push_back(slotIndex);
tensorShapes.push_back(tensor->GetShape());
callCount++;
};
runtime->RegisterDebugCallback(netId, mockCallback);
std::vector<float> inputData({-2, -1, 0, 1, 2});
std::vector<float> outputData(5);
InputTensors inputTensors
{
{0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
};
OutputTensors outputTensors
{
{0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
};
runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
// Check that the callback was called twice (once per debug layer: after the input and after the activation)
BOOST_TEST(callCount == 2);
// Check that tensor handles passed to callback have correct shapes
const std::vector<TensorShape> expectedShapes({TensorShape({1, 1, 1, 5}), TensorShape({1, 1, 1, 5})});
BOOST_TEST(tensorShapes == expectedShapes);
// Check that slot indexes passed to callback are correct
const std::vector<unsigned int> expectedSlotIndexes({0, 0});
BOOST_TEST(slotIndexes == expectedSlotIndexes);
}
} // anonymous namespace
BOOST_AUTO_TEST_SUITE_END()
|
/-
Copyright (c) 2018 Keeley Hoek. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Keeley Hoek
-/
import tactic.converter.interactive
import tactic.ring
example : 0 + 0 = 0 :=
begin
conv_lhs {erw [add_zero]}
end
example : 0 + 0 = 0 :=
begin
conv_lhs {simp}
end
example : 0 = 0 + 0 :=
begin
conv_rhs {simp}
end
-- Example with ring discharging the goal
example : 22 + 7 * 4 + 3 * 8 = 0 + 7 * 4 + 46 :=
begin
conv { ring, },
end
-- Example where ring fails to discharge, so ring_nf normalizes the goal instead
example : (22 + 7 * 4 + 3 * 8 = 0 + 7 * 4 + 47) = (74 = 75) :=
begin
conv { ring_nf, },
end
-- Example with ring discharging the goal
example (x : ℕ) : 22 + 7 * x + 3 * 8 = 0 + 7 * x + 46 :=
begin
conv { ring, },
end
-- Example where ring fails to discharge, so ring_nf normalizes the goal instead
example (x : ℕ) : (22 + 7 * x + 3 * 8 = 0 + 7 * x + 46 + 1)
= (7 * x + 46 = 7 * x + 47) :=
begin
conv { ring_nf, },
end
-- norm_num examples:
example : 22 + 7 * 4 + 3 * 8 = 74 :=
begin
conv { norm_num, },
end
example (x : ℕ) : 22 + 7 * x + 3 * 8 = 7 * x + 46 :=
begin
simp [add_comm, add_left_comm],
conv { norm_num, },
end
|
Require Import String.
Require Import Bool.
Require Import core.utils.Utils.
Require Import core.modeling.Metamodel.
Require Import core.Model.
Require Import core.Engine.
Require Import core.Syntax.
Require Import core.Semantics.
Require Import core.Certification.
Require Import core.EqDec.
Require Import core.modeling.iteratetraces.IterateTracesSemantics.
Require Import Coq.Logic.FunctionalExtensionality.
Section IterateTracesCertification.
Context {tc: TransformationConfiguration} {mtc: ModelingTransformationConfiguration tc}.
(** EXECUTE TRACE *)
Lemma tr_executeTraces_in_elements :
forall (tr: Transformation) (sm : SourceModel) (te : TargetModelElement),
In te (allModelElements (executeTraces tr sm)) <->
(exists (tl : TraceLink) (sp : list SourceModelElement),
In sp (allTuples tr sm) /\
In tl (tracePattern tr sm sp) /\
te = TraceLink_getTargetElement tl).
Proof.
intros.
split.
+ intro.
assert (exists (tl : TraceLink),
In tl (trace tr sm) /\
te = (TraceLink_getTargetElement tl) ).
{ simpl in H.
induction (trace tr sm).
++ crush.
++ intros.
simpl in H.
destruct H.
+++ exists a.
crush.
+++ specialize (IHl H).
destruct IHl.
exists x.
crush. }
destruct H0.
destruct H0.
assert (exists (sp : list SourceModelElement),
In sp (allTuples tr sm) /\
In x (tracePattern tr sm sp)).
{ apply in_flat_map. crush. }
destruct H2.
destruct H2.
exists x. exists x0.
crush.
+ intros.
destruct H.
destruct H.
destruct H.
destruct H0.
rewrite H1.
apply in_map.
apply in_flat_map.
exists x0.
split.
++ exact H.
++ exact H0.
Qed.
(** Instantiate *)
(* Please double-check the statement of this lemma. *)
(* These lemmas about traces are useful when we encounter a hypothesis of the form (In e traces). *)
Lemma tr_trace_in:
forall (tr: Transformation) (sm : SourceModel) (tl : TraceLink),
In tl (trace tr sm) <->
(exists (sp : list SourceModelElement),
In sp (allTuples tr sm) /\
In tl (tracePattern tr sm sp)).
Proof.
intros.
apply in_flat_map.
Qed.
Lemma tr_tracePattern_in:
forall (tr: Transformation) (sm : SourceModel) (sp : list SourceModelElement) (tl : TraceLink),
In tl (tracePattern tr sm sp) <->
(exists (r:Rule),
In r (matchPattern tr sm sp) /\
In tl (traceRuleOnPattern r sm sp)).
Proof.
intros.
apply in_flat_map.
Qed.
Lemma tr_traceRuleOnPattern_in:
forall (r: Rule) (sm : SourceModel) (sp : list SourceModelElement) (tl : TraceLink),
In tl (traceRuleOnPattern r sm sp) <->
(exists (iter: nat),
In iter (seq 0 (evalIteratorExpr r sm sp)) /\
In tl (traceIterationOnPattern r sm sp iter)).
Proof.
intros.
apply in_flat_map.
Qed.
Lemma tr_traceIterationOnPattern_in:
forall (r: Rule) (sm : SourceModel) (sp : list SourceModelElement) (iter: nat) (tl : TraceLink),
In tl (traceIterationOnPattern r sm sp iter) <->
(exists (o: OutputPatternElement),
In o (Rule_getOutputPatternElements r) /\
In tl ((fun o => optionToList (traceElementOnPattern o sm sp iter)) o)).
Proof.
intros.
apply in_flat_map.
Qed.
(* TODO works inside TwoPhaseSemantics.v *)
Lemma tr_traceElementOnPattern_leaf:
forall (o: OutputPatternElement) (sm : SourceModel) (sp : list SourceModelElement) (iter: nat) (o: OutputPatternElement) (tl : TraceLink),
Some tl = (traceElementOnPattern o sm sp iter) <->
(exists (e: TargetModelElement),
Some e = (instantiateElementOnPattern o sm sp iter) /\
tl = (buildTraceLink (sp, iter, OutputPatternElement_getName o) e)).
Proof.
intros.
split.
- intros.
unfold traceElementOnPattern in H.
destruct (instantiateElementOnPattern o0 sm sp iter) eqn: e1.
-- exists t.
split. crush. crush.
-- crush.
- intros.
destruct H.
destruct H.
unfold traceElementOnPattern.
destruct (instantiateElementOnPattern o0 sm sp iter).
-- crush.
-- crush.
Qed.
(** * Apply **)
Lemma tr_applyPatternTraces_in:
forall (tr: Transformation) (sm : SourceModel) (sp: list SourceModelElement) (tl : TargetModelLink) (tls: list TraceLink),
In tl (applyPatternTraces tr sm sp tls) <->
(exists (r : Rule),
In r (matchPattern tr sm sp) /\
In tl (applyRuleOnPatternTraces r tr sm sp tls)).
Proof.
intros.
apply in_flat_map.
Qed.
Lemma tr_applyRuleOnPatternTraces_in :
forall (tr: Transformation) (r : Rule) (sm : SourceModel) (sp: list SourceModelElement) (tl : TargetModelLink) (tls: list TraceLink),
In tl (applyRuleOnPatternTraces r tr sm sp tls) <->
(exists (i: nat),
In i (seq 0 (evalIteratorExpr r sm sp)) /\
In tl (applyIterationOnPatternTraces r tr sm sp i tls)).
Proof.
intros.
apply in_flat_map.
Qed.
Lemma tr_applyIterationOnPatternTraces_in :
forall (tr: Transformation) (r : Rule) (sm : SourceModel) (sp: list SourceModelElement) (tl : TargetModelLink) (i:nat) (tls: list TraceLink),
In tl (applyIterationOnPatternTraces r tr sm sp i tls) <->
(exists (ope: OutputPatternElement),
In ope (Rule_getOutputPatternElements r) /\
In tl (applyElementOnPatternTraces ope tr sm sp i tls)).
Proof.
intros.
apply in_flat_map.
Qed.
Lemma tr_applyElementOnPatternTraces_in :
forall (tr: Transformation) (sm : SourceModel) (sp: list SourceModelElement) (tl : TargetModelLink)
(i:nat) (ope: OutputPatternElement) (tls: list TraceLink),
In tl (applyElementOnPatternTraces ope tr sm sp i tls) <->
(exists (oper: OutputPatternLink) (te: TargetModelElement),
In oper (OutputPatternElement_getOutputLinks ope) /\
(evalOutputPatternElementExpr sm sp i ope) = Some te /\
applyLinkOnPatternTraces oper tr sm sp i te tls = Some tl).
Proof.
split.
* intros.
apply in_flat_map in H.
destruct H.
exists x.
unfold optionToList in H.
destruct H.
destruct (evalOutputPatternElementExpr sm sp i ope) eqn: eval_ca.
- destruct (applyLinkOnPatternTraces x tr sm sp i t) eqn: ref_ca.
-- eexists t.
split; crush.
-- contradiction.
- contradiction.
* intros.
apply in_flat_map.
destruct H.
exists x.
unfold optionToList.
destruct H.
destruct H.
destruct H0.
split.
- assumption.
- crush.
Qed.
Lemma tr_applyLinkOnPatternTraces_leaf :
forall (oper: OutputPatternLink)
(tr: Transformation)
(sm: SourceModel)
(sp: list SourceModelElement) (iter: nat) (te: TargetModelElement) (tls: list TraceLink),
applyLinkOnPatternTraces oper tr sm sp iter te tls = evalOutputPatternLinkExpr sm sp te iter tls oper.
Proof.
crush.
Qed.
Lemma tr_applyTraces_in :
forall (tr: Transformation) (sm : SourceModel) (tl : TargetModelLink),
In tl (applyTraces tr sm (trace tr sm)) <->
(exists (sp : list SourceModelElement),
In sp (allTuples tr sm) /\
In tl (applyPatternTraces tr sm sp (trace tr sm))).
Proof.
split.
- intros.
apply in_flat_map in H.
destruct H.
exists x.
crush.
apply In_noDup_sp in H0.
unfold trace in H0.
induction (allTuples tr sm).
* simpl in H0. contradiction.
* simpl. simpl in H0.
rewrite map_app in H0.
Admitted. (*
apply in_app_or in H0.
destruct H0.
+ left.
unfold tracePattern in H.
induction (matchPattern tr sm a).
-- simpl in H. contradiction.
-- simpl in H.
rewrite map_app in H.
apply in_app_or in H.
destruct H.
** apply in_map_iff in H.
destruct H. destruct H.
apply tr_traceRuleOnPattern_in in H0.
destruct H0. destruct H0.
apply tr_traceIterationOnPattern_in in H2.
destruct H2. destruct H2.
unfold traceElementOnPattern in H3.
destruct (instantiateElementOnPattern x2 sm a x1) eqn:inst.
simpl in H3.
destruct H3.
*** rewrite <- H3 in H. simpl in H.
assumption.
*** contradiction.
*** contradiction.
** apply IHl0 in H. assumption.
+ auto.
- intros.
destruct H. destruct H.
unfold applyTraces.
apply in_flat_map.
exists x.
crush.
unfold trace.
unfold tracePattern.
apply tr_applyPatternTraces_in in H0.
repeat destruct H0.
apply tr_matchPattern_in in H0.
repeat destruct H0.
induction (allTuples tr sm).
+ contradiction.
+ simpl in H. simpl.
Admitted.*)
Lemma tr_executeTraces_in_links :
forall (tr: Transformation) (sm : SourceModel) (tl : TargetModelLink),
In tl (allModelLinks (executeTraces tr sm)) <->
(exists (sp : list SourceModelElement),
In sp (allTuples tr sm) /\
In tl (applyPatternTraces tr sm sp (trace tr sm))).
Proof.
apply tr_applyTraces_in.
Qed.
Theorem exe_preserv :
forall (tr: Transformation) (sm : SourceModel),
core.modeling.iteratetraces.IterateTracesSemantics.executeTraces tr sm = core.Semantics.execute tr sm.
Proof.
intros.
unfold core.Semantics.execute, executeTraces. simpl.
f_equal.
unfold trace.
rewrite flat_map_concat_map. rewrite flat_map_concat_map.
rewrite concat_map. f_equal.
rewrite map_map. f_equal.
unfold tracePattern, Semantics.instantiatePattern.
apply functional_extensionality. intros.
rewrite flat_map_concat_map. rewrite flat_map_concat_map.
rewrite concat_map. f_equal.
rewrite map_map. f_equal.
unfold traceRuleOnPattern, Semantics.instantiateRuleOnPattern.
apply functional_extensionality. intros.
rewrite flat_map_concat_map. rewrite flat_map_concat_map.
rewrite concat_map. f_equal.
rewrite map_map. f_equal.
unfold traceIterationOnPattern, Semantics.instantiateIterationOnPattern.
apply functional_extensionality. intros.
rewrite flat_map_concat_map. rewrite flat_map_concat_map.
rewrite concat_map. f_equal.
rewrite map_map. f_equal.
unfold traceElementOnPattern.
apply functional_extensionality. intros.
(* TODO FACTOR OUT *)
assert ((Semantics.instantiateElementOnPattern x2 sm x x1) = (instantiateElementOnPattern x2 sm x x1)).
{ crush. }
destruct (instantiateElementOnPattern x2 sm x x1).
reflexivity. reflexivity.
Admitted.
Lemma tr_execute_in_elements' :
forall (tr: Transformation) (sm : SourceModel) (te : TargetModelElement),
In te (allModelElements (executeTraces tr sm)) <->
(exists (sp : list SourceModelElement),
In sp (allTuples tr sm) /\
In te (instantiatePattern tr sm sp)).
Proof.
intros.
assert ((executeTraces tr sm) = (execute tr sm)). { apply exe_preserv. }
rewrite H.
specialize (Certification.tr_execute_in_elements tr sm te).
crush.
Qed.
Lemma tr_execute_in_links' :
forall (tr: Transformation) (sm : SourceModel) (tl : TargetModelLink),
In tl (allModelLinks (executeTraces tr sm)) <->
(exists (sp : list SourceModelElement),
In sp (allTuples tr sm) /\
In tl (applyPattern tr sm sp)).
Proof.
intros.
assert ((executeTraces tr sm) = (execute tr sm)). { apply exe_preserv. }
rewrite H.
specialize (Certification.tr_execute_in_links tr sm tl).
crush.
Qed.
(*
Instance CoqTLEngine :
TransformationEngine :=
{
SourceModelElement := SourceModelElement;
SourceModelClass := SourceModelClass;
SourceModelLink := SourceModelLink;
SourceModelReference := SourceModelReference;
TargetModelElement := TargetModelElement;
TargetModelClass := TargetModelClass;
TargetModelLink := TargetModelLink;
TargetModelReference := TargetModelReference;
(* syntax and accessors *)
Transformation := Transformation;
Rule := Rule;
OutputPatternElement := OutputPatternElement;
OutputPatternLink := OutputPatternLink;
TraceLink := TraceLink;
Transformation_getRules := Transformation_getRules;
Rule_getInTypes := Rule_getInTypes;
Rule_getOutputPatternElements := Rule_getOutputPatternElements;
OutputPatternElement_getOutputLinks := OutputPatternElement_getOutputLinks;
TraceLink_getSourcePattern := TraceLink_getSourcePattern;
TraceLink_getIterator := TraceLink_getIterator;
TraceLink_getName := TraceLink_getName;
TraceLink_getTargetElement := TraceLink_getTargetElement;
(* semantic functions *)
execute := executeTraces;
matchPattern := matchPattern;
matchRuleOnPattern := matchRuleOnPattern;
instantiatePattern := instantiatePattern;
instantiateRuleOnPattern := instantiateRuleOnPattern;
instantiateIterationOnPattern := instantiateIterationOnPattern;
instantiateElementOnPattern := instantiateElementOnPattern;
applyPattern := applyPattern;
applyRuleOnPattern := applyRuleOnPattern;
applyIterationOnPattern := applyIterationOnPattern;
applyElementOnPattern := applyElementOnPattern;
applyLinkOnPattern := applyLinkOnPattern;
evalOutputPatternElementExpr := evalOutputPatternElementExpr;
evalIteratorExpr := evalIteratorExpr;
evalOutputPatternLinkExpr := evalOutputPatternLinkExpr;
evalGuardExpr := evalGuardExpr;
trace := trace;
resolveAll := resolveAllIter;
resolve := resolveIter;
(* lemmas *)
tr_execute_in_elements := tr_execute_in_elements';
tr_execute_in_links := tr_execute_in_links';
tr_matchPattern_in := tr_matchPattern_in;
tr_matchRuleOnPattern_Leaf := tr_matchRuleOnPattern_Leaf;
tr_instantiatePattern_in := tr_instantiatePattern_in;
tr_instantiateRuleOnPattern_in := tr_instantiateRuleOnPattern_in;
tr_instantiateIterationOnPattern_in := tr_instantiateIterationOnPattern_in;
tr_instantiateElementOnPattern_leaf := tr_instantiateElementOnPattern_leaf;
tr_applyPattern_in := tr_applyPattern_in;
tr_applyRuleOnPattern_in := tr_applyRuleOnPattern_in;
tr_applyIterationOnPattern_in := tr_applyIterationOnPattern_in;
tr_applyElementOnPattern_in := tr_applyElementOnPattern_in;
tr_applyLinkOnPatternTraces_leaf := tr_applyLinkOnPattern_leaf;
tr_resolveAll_in := tr_resolveAllIter_in;
tr_resolve_Leaf := tr_resolveIter_leaf;
(*tr_matchPattern_None := tr_matchPattern_None;
tr_matchRuleOnPattern_None := tr_matchRuleOnPattern_None;
tr_instantiatePattern_non_None := tr_instantiatePattern_non_None;
tr_instantiatePattern_None := tr_instantiatePattern_None;
tr_instantiateRuleOnPattern_non_None := tr_instantiateRuleOnPattern_non_None;
tr_instantiateIterationOnPattern_non_None := tr_instantiateIterationOnPattern_non_None;
tr_instantiateElementOnPattern_None := tr_instantiateElementOnPattern_None;
tr_instantiateElementOnPattern_None_iterator := tr_instantiateElementOnPattern_None_iterator;
tr_applyPattern_non_None := tr_applyPattern_non_None;
tr_applyPattern_None := tr_applyPattern_None;
tr_applyRuleOnPattern_non_None := tr_applyRuleOnPattern_non_None;
tr_applyIterationOnPattern_non_None := tr_applyIterationOnPattern_non_None;
tr_applyElementOnPattern_non_None := tr_applyElementOnPattern_non_None;
tr_applyLinkOnPattern_None := tr_applyLinkOnPattern_None;
tr_applyLinkOnPattern_None_iterator := tr_applyLinkOnPattern_None_iterator;
tr_maxArity_in := tr_maxArity_in;
tr_instantiateElementOnPattern_Leaf := tr_instantiateElementOnPattern_Leaf;
tr_applyLinkOnPattern_Leaf := tr_applyLinkOnPattern_Leaf;
tr_matchRuleOnPattern_Leaf := tr_matchRuleOnPattern_Leaf;
tr_resolveAll_in := tr_resolveAllIter_in;
tr_resolve_Leaf := tr_resolveIter_Leaf';*)
}.
Instance CoqTLEngineTrace :
(TransformationEngineTrace CoqTLEngine).
Proof.
eexists.
(* tr_executeTraces_in_elements *) exact tr_executeTraces_in_elements.
(* tr_executeTraces_in_links *) exact tr_executeTraces_in_links.
(* tr_tracePattern_in *) exact tr_tracePattern_in.
(* tr_traceRuleOnPattern_in *) exact tr_traceRuleOnPattern_in.
(* tr_traceIterationOnPattern_in *) exact tr_traceIterationOnPattern_in.
(* tr_traceElementOnPattern_leaf *) exact tr_traceElementOnPattern_leaf.
(* tr_applyPatternTraces_in *) exact tr_applyPatternTraces_in.
(* tr_applyRuleOnPattern_in *) exact tr_applyRuleOnPatternTraces_in.
(* tr_applyIterationOnPattern_in *) exact tr_applyIterationOnPatternTraces_in.
(* tr_applyElementOnPatternTraces_in *) exact tr_applyElementOnPatternTraces_in.
(* tr_applyLinkOnPatternTraces_leaf *) exact tr_applyLinkOnPatternTraces_leaf.
Qed.
*)
End IterateTracesCertification.
|
import Mathbin.Data.Set.Basic
import Mathbin.Data.Complex.Exponential
import CvxLean.Lib.Missing.Mathlib
import Mathbin.Algebra.GroupWithZero.Basic
attribute [-simp] Set.inj_on_empty Set.inj_on_singleton Quot.lift_on₂_mk Quot.lift_on_mk Quot.lift₂_mk
namespace Real
def expCone (x y z : ℝ) : Prop :=
(0 < y ∧ y * exp (x / y) ≤ z) ∨ (y = 0 ∧ 0 ≤ z ∧ x ≤ 0)
def Vec.expCone (x y z : Finₓ n → ℝ) : Prop :=
∀ i, Real.expCone (x i) (y i) (z i)
theorem exp_iff_expCone (t x : ℝ) : exp x ≤ t ↔ expCone x 1 t := by
unfold expCone
rw [iff_def]
apply And.intro
· intro hexp
apply Or.intro_left
apply And.intro
apply zero_lt_one
change One.one * exp (x / One.one) ≤ t
rw [@div_one ℝ (@GroupWithZeroₓ.toDivisionMonoid Real
(@DivisionSemiring.toGroupWithZero Real (@DivisionRing.toDivisionSemiring Real Real.divisionRing)))]
rw [one_mulₓ]
assumption
· intro h
cases h with
| inl h =>
have h : One.one * exp (x / One.one) ≤ t := h.2
rwa [@div_one ℝ (@GroupWithZeroₓ.toDivisionMonoid Real
(@DivisionSemiring.toGroupWithZero Real (@DivisionRing.toDivisionSemiring Real Real.divisionRing))),
one_mulₓ] at h
| inr h =>
exfalso
apply @one_ne_zero Real
apply h.1
end Real
|
function [y] = gt(x, val)
% GT Overloaded greater-than for cell arrays: compare each cell of x against val.
%   Returns a cell array y of the same size, with y{k} = x{k} > val.
y = cell(size(x));
for k = 1:numel(x)
    y{k} = x{k} > val;
end
|
{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE UndecidableInstances #-}
module BlueRipple.Model.Preference where
import qualified Control.Foldl as FL
import qualified Control.Lens as L
import qualified Control.Monad.Except as X
import Control.Monad.IO.Class ( MonadIO(liftIO) )
import qualified Colonnade as C
import qualified Text.Blaze.Colonnade as BC
import qualified Text.Blaze.Html as BH
import qualified Text.Blaze.Html5.Attributes as BHA
import qualified Data.List as L
import qualified Data.Map as M
import qualified Data.Array as A
import qualified Data.Vector as VB
import qualified Data.Vector.Storable as VS
import qualified Text.Pandoc.Error as PA
import qualified Data.Profunctor as PF
import qualified Data.Text as T
import qualified Data.Time.Calendar as Time
import qualified Data.Time.Clock as Time
import qualified Data.Time.Format as Time
import qualified Data.Vinyl as V
import qualified Text.Printf as PF
import qualified Frames as F
import qualified Frames.Melt as F
import qualified Frames.CSV as F
import qualified Frames.InCore as F
hiding ( inCoreAoS )
import qualified Pipes as P
import qualified Pipes.Prelude as P
import qualified Statistics.Types as S
import qualified Statistics.Distribution as S
import qualified Statistics.Distribution.StudentT as S
import qualified Numeric.LinearAlgebra as LA
import qualified Graphics.Vega.VegaLite as GV
import Graphics.Vega.VegaLite.Configuration as FV
import qualified Graphics.Vega.VegaLite.Compat as FV
import qualified Frames.Visualization.VegaLite.Data
as FV
import qualified Frames.Visualization.VegaLite.StackedArea
as FV
import qualified Frames.Visualization.VegaLite.LineVsTime
as FV
import qualified Frames.Visualization.VegaLite.ParameterPlots
as FV
import qualified Frames.Visualization.VegaLite.Correlation
as FV
import qualified Frames.Transform as FT
import qualified Frames.Folds as FF
import qualified Frames.MapReduce as MR
import qualified Frames.Enumerations as FE
import qualified Knit.Report as K
import qualified Knit.Report.Input.MarkDown.PandocMarkDown as K
import Polysemy.Error (Error)
import qualified Text.Pandoc.Options as PA
import Data.String.Here ( here, i )
import qualified Relude.Extra as Relude
import BlueRipple.Configuration
import BlueRipple.Utilities.KnitUtils
import BlueRipple.Utilities.TableUtils
import BlueRipple.Data.DataFrames
import BlueRipple.Data.PrefModel
import BlueRipple.Data.PrefModel.SimpleAgeSexRace
import BlueRipple.Data.PrefModel.SimpleAgeSexEducation
import qualified BlueRipple.Model.PreferenceBayes as PB
import qualified BlueRipple.Model.TurnoutAdjustment as TA
modeledResults :: ( MonadIO (K.Sem r)
, K.KnitEffects r
, Show tr
, Show b
, Enum b
, Bounded b
, A.Ix b
, FL.Vector (F.VectorFor b) b)
=> DemographicStructure dr tr HouseElections b
-> (F.Record LocationKey -> Bool)
-> F.Frame dr
-> F.Frame tr
-> F.Frame HouseElections
-> M.Map Int Int
-> K.Sem r (M.Map Int (PreferenceResults b FV.ParameterEstimate))
modeledResults ds locFilter dFrame tFrame eFrame years = flip traverse years $ \y -> do
K.logLE K.Info $ "inferring " <> show (dsCategories ds) <> " for " <> show y
preferenceModel ds locFilter y dFrame eFrame tFrame
-- PreferenceResults to list of group names and predicted D votes
-- But we want them as a fraction of D/D+R
data VoteShare = ShareOfAll | ShareOfD
modeledDVotes :: forall b. (A.Ix b, Bounded b, Enum b, Show b)
=> VoteShare -> PreferenceResults b Double -> [(T.Text, Double)]
modeledDVotes vs pr =
let
summed = FL.fold
(votesAndPopByDistrictF @b)
(F.rcast <$> votesAndPopByDistrict pr)
popArray =
F.rgetField @(PopArray b) summed
turnoutArray =
F.rgetField @(TurnoutArray b) summed
predVoters = zipWith (*) (A.elems turnoutArray) $ fmap realToFrac (A.elems popArray)
allDVotes = F.rgetField @DVotes summed
allRVotes = F.rgetField @RVotes summed
dVotes b =
realToFrac (popArray A.! b)
* (turnoutArray A.! b)
* (modeled pr A.! b)
allPredictedD = FL.fold FL.sum $ fmap dVotes Relude.universe --[minBound..maxBound]
scale = case vs of
ShareOfAll -> (realToFrac allDVotes/realToFrac (allDVotes + allRVotes))/allPredictedD
ShareOfD -> 1/allPredictedD
in
fmap (\b -> (show b, scale * dVotes b))
[(minBound :: b) .. maxBound]
data DeltaTableRow =
DeltaTableRow
{ dtrGroup :: T.Text
, dtrPop :: Int
, dtrFromPop :: Int
, dtrFromTurnout :: Int
, dtrFromOpinion :: Int
, dtrTotal :: Int
, dtrPct :: Double
} deriving (Show)
deltaTable
:: forall dr tr e b r
. (A.Ix b
, Bounded b
, Enum b
, Show b
, MonadIO (K.Sem r)
, K.KnitEffects r
)
=> DemographicStructure dr tr e b
-> (F.Record LocationKey -> Bool)
-> F.Frame e
-> Int -- ^ year A
-> Int -- ^ year B
-> PreferenceResults b FV.ParameterEstimate
-> PreferenceResults b FV.ParameterEstimate
-> K.Sem r ([DeltaTableRow], (Int, Int), (Int, Int))
deltaTable ds locFilter electionResultsFrame yA yB trA trB = do
let
groupNames = show <$> dsCategories ds
getPopAndTurnout
:: Int -> PreferenceResults b FV.ParameterEstimate -> K.Sem r (A.Array b Int, A.Array b Double)
getPopAndTurnout y tr = do
resultsFrame <- knitX $ dsPreprocessElectionData ds y electionResultsFrame
let
totalDRVotes =
let filteredResultsF = F.filterFrame (locFilter . F.rcast) resultsFrame
in FL.fold (FL.premap (\r -> F.rgetField @DVotes r + F.rgetField @RVotes r) FL.sum) filteredResultsF
totalRec = FL.fold
votesAndPopByDistrictF
( fmap
(F.rcast
@'[PopArray b, TurnoutArray b, DVotes, RVotes]
)
$ F.filterFrame (locFilter . F.rcast)
$ F.toFrame
$ votesAndPopByDistrict tr
)
totalCounts = F.rgetField @(PopArray b) totalRec
unAdjTurnout = nationalTurnout tr
tDelta <- liftIO $ TA.findDeltaA totalDRVotes totalCounts unAdjTurnout
let adjTurnout = TA.adjTurnoutP tDelta unAdjTurnout
return (totalCounts, adjTurnout)
(popA, turnoutA) <- getPopAndTurnout yA trA
(popB, turnoutB) <- getPopAndTurnout yB trB
{- K.logLE K.Info $ (T.pack $ show yA) <> "->" <> (T.pack $ show yB)
K.logLE K.Info $ T.pack $ show turnoutA
K.logLE K.Info $ T.pack $ show turnoutB -}
let
pop = FL.fold FL.sum popA
probsArray = fmap FV.value . modeled
probA = probsArray trA
probB = probsArray trB
modeledVotes popArray turnoutArray probArray =
let dVotes b =
round
$ realToFrac (popArray A.! b)
* (turnoutArray A.! b)
* (probArray A.! b)
rVotes b =
round
$ realToFrac (popArray A.! b)
* (turnoutArray A.! b)
* (1.0 - probArray A.! b)
in FL.fold
((,) <$> FL.premap dVotes FL.sum <*> FL.premap rVotes FL.sum) Relude.universe
makeDTR b =
let pop0 = realToFrac $ popA A.! b
dPop = realToFrac $ (popB A.! b) - (popA A.! b)
turnout0 = realToFrac $ turnoutA A.! b
dTurnout = realToFrac $ (turnoutB A.! b) - (turnoutA A.! b)
prob0 = realToFrac (probA A.! b)
dProb = realToFrac $ (probB A.! b) - (probA A.! b)
dtrCombo = dPop * dTurnout * (2 * dProb) / 4 -- the rest is accounted for in other terms, we spread this among them
dtrN =
round
$ dPop
* (turnout0 + dTurnout / 2)
* (2 * (prob0 + dProb / 2) - 1)
+ (dtrCombo / 3)
dtrT =
round
$ (pop0 + dPop / 2)
* dTurnout
* (2 * (prob0 + dProb / 2) - 1)
+ (dtrCombo / 3)
dtrO =
round
$ (pop0 + dPop / 2)
* (turnout0 + dTurnout / 2)
* (2 * dProb)
+ (dtrCombo / 3)
dtrTotal = dtrN + dtrT + dtrO
in DeltaTableRow (show b)
(popB A.! b)
dtrN
dtrT
dtrO
dtrTotal
(realToFrac dtrTotal / realToFrac pop)
groupRows = fmap makeDTR [minBound ..]
addRow (DeltaTableRow g p fp ft fo t _) (DeltaTableRow _ p' fp' ft' fo' t' _)
= DeltaTableRow g
(p + p')
(fp + fp')
(ft + ft')
(fo + fo')
(t + t')
(realToFrac (t + t') / realToFrac (p + p'))
totalRow = FL.fold
(FL.Fold addRow (DeltaTableRow "Total" 0 0 0 0 0 0) id)
groupRows
dVotesA = modeledVotes popA turnoutA probA
dVotesB = modeledVotes popB turnoutB probB
return (groupRows ++ [totalRow], dVotesA, dVotesB)
deltaTableColonnade :: C.Colonnade C.Headed DeltaTableRow T.Text
deltaTableColonnade =
C.headed "Group" dtrGroup
<> C.headed "Population (k)" (show . (`div` 1000) . dtrPop)
<> C.headed "+/- From Population (k)"
(show . (`div` 1000) . dtrFromPop)
<> C.headed "+/- From Turnout (k)"
(show . (`div` 1000) . dtrFromTurnout)
<> C.headed "+/- From Opinion (k)"
(show . (`div` 1000) . dtrFromOpinion)
<> C.headed "+/- Total (k)" (show . (`div` 1000) . dtrTotal)
<> C.headed "+/- %Vote" (toText @String . PF.printf "%2.2f" . (* 100) . dtrPct)
deltaTableColonnadeBlaze :: CellStyle DeltaTableRow T.Text -> C.Colonnade C.Headed DeltaTableRow BC.Cell
deltaTableColonnadeBlaze cas =
C.headed "Group" (toCell cas "Group" "" (textToStyledHtml . dtrGroup))
<> C.headed "Population (k)" (toCell cas "Population" "Population" (textToStyledHtml . show . (`div` 1000) . dtrPop))
<> C.headed "+/- From Population (k)"
(toCell cas "FromPop" "+/- From Population" (numberToStyledHtml "%d" . (`div` 1000) . dtrFromPop))
<> C.headed "+/- From Turnout (k)"
(toCell cas "FromTurnout" "+/- From Turnout" (numberToStyledHtml "%d" . (`div` 1000) . dtrFromTurnout))
<> C.headed "+/- From Opinion (k)"
(toCell cas "FromOpinion" "+/- From Opinion" (numberToStyledHtml "%d" . (`div` 1000) . dtrFromOpinion))
-- (\tr -> (numberCell "%.0d" (opinionHighlight (dtrGroup tr)) . (`div` 1000) $ dtrFromOpinion tr))
<> C.headed "+/- Total (k)" (toCell cas "Total" "Total" (numberToStyledHtml "%d" . (`div` 1000) . dtrTotal))
<> C.headed "+/- %Vote" (toCell cas "PctVote" "% Vote" (numberToStyledHtml "%2.2f" . (* 100) . dtrPct))
type X = "X" F.:-> Double
type ScaledDVotes = "ScaledDVotes" F.:-> Int
type ScaledRVotes = "ScaledRVotes" F.:-> Int
type PopArray b = "PopArray" F.:-> A.Array b Int
type TurnoutArray b = "TurnoutArray" F.:-> A.Array b Double
votesAndPopByDistrictF
:: forall b
. (A.Ix b, Bounded b, Enum b)
=> FL.Fold
(F.Record '[PopArray b, TurnoutArray b, DVotes, RVotes])
(F.Record '[PopArray b, TurnoutArray b, DVotes, RVotes])
votesAndPopByDistrictF =
let voters r = A.listArray (minBound, maxBound)
$ zipWith (*) (A.elems $ F.rgetField @(TurnoutArray b) r) (fmap realToFrac $ A.elems $ F.rgetField @(PopArray b) r)
g r = A.listArray (minBound, maxBound)
$ zipWith (/) (A.elems $ F.rgetField @(TurnoutArray b) r) (fmap realToFrac $ A.elems $ F.rgetField @(PopArray b) r)
recomputeTurnout r = F.rputField @(TurnoutArray b) (g r) r
in PF.dimap (F.rcast @'[PopArray b, TurnoutArray b, DVotes, RVotes]) recomputeTurnout
$ FF.sequenceRecFold
$ FF.FoldRecord (PF.dimap (F.rgetField @(PopArray b)) V.Field FE.sumTotalNumArray)
V.:& FF.FoldRecord (PF.dimap voters V.Field FE.sumTotalNumArray)
V.:& FF.FoldRecord (PF.dimap (F.rgetField @DVotes) V.Field FL.sum)
V.:& FF.FoldRecord (PF.dimap (F.rgetField @RVotes) V.Field FL.sum)
V.:& V.RNil
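-- An 'Aggregation c b' wraps a function sending each coarse category 'c' to the list of fine categories 'b' it aggregates over.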
data Aggregation c b where
Aggregation :: (Enum b, Bounded b, Eq b, Ord b, A.Ix b,
Enum c, Bounded c, Eq c, Ord c, A.Ix c) => (c -> [b]) -> Aggregation c b
aggregateFold :: forall c b a x. Aggregation c b -> FL.Fold a x -> A.Array b a -> A.Array c x
aggregateFold (Aggregation children) fld arr =
let cs = Relude.universe
expanded :: [[a]]
expanded = fmap (fmap (arr A.!) . children) cs
folded = fmap (FL.fold fld) expanded
in A.listArray (minBound,maxBound) folded
aggregateFold2 :: Aggregation c b -> FL.Fold (a,w) x -> A.Array b a -> A.Array b w -> A.Array c x
aggregateFold2 (Aggregation children) fld arrA arrW =
let cs = Relude.universe
as = fmap (fmap (arrA A.!) . children) cs
ws = fmap (fmap (arrW A.!) . children) cs
folded = FL.fold fld . uncurry zip <$> zip as ws
in A.listArray (minBound, maxBound) folded
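-- Weighted mean over (value, weight) pairs: the sum of value * weight divided by the sum of weights.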
weightedFold :: (Num a, Fractional a) => FL.Fold (a,a) a
weightedFold =
let sumWeightsF = FL.premap snd FL.sum
weightedSumF = FL.premap (uncurry (*)) FL.sum
in fmap (uncurry (/)) $ (,) <$> weightedSumF <*> sumWeightsF
popWeightedAggregate :: (Num d, Fractional d) => Aggregation c b -> A.Array b Int -> A.Array b d -> A.Array c d
popWeightedAggregate agg popArray datArray = aggregateFold2 agg weightedFold datArray (fmap realToFrac popArray)
aggregateRecord
:: forall b c. Aggregation c b
-> F.Record [StateAbbreviation
, CongressionalDistrict
, PopArray b -- population by group
, TurnoutArray b -- adjusted turnout by group
, DVotes
, RVotes
]
-> F.Record [StateAbbreviation
, CongressionalDistrict
, PopArray c -- population by group
, TurnoutArray c -- adjusted turnout by group
, DVotes
, RVotes
]
aggregateRecord agg r =
let
f :: F.Record [PopArray b, TurnoutArray b] -> F.Record [PopArray c, TurnoutArray c]
f x =
let popB = F.rgetField @(PopArray b) x
turnoutB = F.rgetField @(TurnoutArray b) x
in aggregateFold agg FL.sum popB F.&: popWeightedAggregate agg popB turnoutB F.&: V.RNil
in F.rcast $ FT.transform f r
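-- 0/1 indicator matrix with one row per fine category 'b' and one column per coarse category 'c'; an entry is 1 exactly when the row's 'b' belongs to the column's 'c'.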
cByB :: Aggregation c b -> LA.Matrix Double
cByB (Aggregation children) =
let allBs = Relude.universe
toZeroOne y = if y then 1 else 0
getCol c = LA.fromList $ fmap (toZeroOne . (`elem` children c)) allBs
in LA.fromColumns $ fmap getCol Relude.universe
aggregatePreferenceResults :: Fractional a => Aggregation c b -> PreferenceResults b a -> PreferenceResults c a
aggregatePreferenceResults agg pr =
let prVBPAD' = fmap (aggregateRecord agg) (votesAndPopByDistrict pr)
prTurnout' = popWeightedAggregate agg (nationalVoters pr) (nationalTurnout pr)
prVoters' = aggregateFold agg FL.sum (nationalVoters pr)
prModeled' = popWeightedAggregate agg (nationalVoters pr) (modeled pr)
mCB = cByB agg
prCovar' = LA.tr mCB LA.<> covariances pr LA.<> mCB
in PreferenceResults prVBPAD' prTurnout' prVoters' prModeled' prCovar'
data PreferenceResults b a = PreferenceResults
{
votesAndPopByDistrict :: [F.Record [ StateAbbreviation
, CongressionalDistrict
, PopArray b -- population by group
, TurnoutArray b -- adjusted turnout by group
, DVotes
, RVotes
]]
, nationalTurnout :: A.Array b Double
, nationalVoters :: A.Array b Int
, modeled :: A.Array b a
, covariances :: LA.Matrix Double
}
instance Functor (PreferenceResults b) where
fmap f (PreferenceResults v nt nv m c) = PreferenceResults v nt nv (fmap f m) c
preferenceModel
:: forall dr tr b r
. ( Show tr
, Show b
, Enum b
, Bounded b
, A.Ix b
, FL.Vector (F.VectorFor b) b
, K.KnitEffects r
, MonadIO (K.Sem r)
)
=> DemographicStructure dr tr HouseElections b
-> (F.Record LocationKey -> Bool)
-> Int
-> F.Frame dr
-> F.Frame HouseElections
-> F.Frame tr
-> K.Sem
r
(PreferenceResults b FV.ParameterEstimate)
preferenceModel ds locFilter year identityDFrame houseElexFrame turnoutFrame =
do
-- reorganize data from loaded Frames
resultsFlattenedFrameFull <- knitX
$ dsPreprocessElectionData ds year houseElexFrame
let resultsFlattenedFrame = F.filterFrame (locFilter . F.rcast) resultsFlattenedFrameFull
filteredTurnoutFrame <- knitX
$ dsPreprocessTurnoutData ds year turnoutFrame
let year' = year --if (year == 2018) then 2017 else year -- we're using 2017 for now, until census updated ACS data
longByDCategoryFrame <- knitX
$ dsPreprocessDemographicData ds year' identityDFrame
-- turn long-format data into Arrays by demographic category, beginning with national turnout
turnoutByGroupArray <-
K.knitMaybe "Missing or extra group in turnout data?" $ FL.foldM
(FE.makeArrayMF (F.rgetField @(DemographicCategory b))
(F.rgetField @VotedPctOfAll)
(flip const)
)
filteredTurnoutFrame
-- now the populations in each district
let votersArrayMF = MR.mapReduceFoldM
(MR.generalizeUnpack MR.noUnpack)
(MR.generalizeAssign $ MR.splitOnKeys @LocationKey)
(MR.foldAndLabelM
(fmap (FT.recordSingleton @(PopArray b))
(FE.recordsToArrayMF @(DemographicCategory b) @PopCount)
)
V.rappend
)
-- F.Frame (LocationKey V.++ (PopArray b))
populationsFrame <-
K.knitMaybe "Error converting long demographic data to arrays!"
$ F.toFrame
<$> FL.foldM votersArrayMF longByDCategoryFrame
-- and the total populations in each group
let addArray :: (A.Ix k, Num a) => A.Array k a -> A.Array k a -> A.Array k a
addArray a1 a2 = A.accum (+) a1 (A.assocs a2)
zeroArray :: (A.Ix k, Bounded k, Enum k, Num a) => A.Array k a
zeroArray = A.listArray (minBound, maxBound) $ L.repeat 0
popByGroupArray = FL.fold (FL.premap (F.rgetField @(PopArray b)) (FL.Fold addArray zeroArray id)) populationsFrame
let
resultsWithPopulationsFrame =
catMaybes $ fmap F.recMaybe $ F.leftJoin @LocationKey resultsFlattenedFrame
populationsFrame
K.logLE K.Info "Computing Ghitza-Gelman turnout adjustment for each district so turnouts produce correct number D+R votes."
resultsWithPopulationsAndGGAdjFrame <- fmap F.toFrame $ flip traverse resultsWithPopulationsFrame $ \r -> do
let tVotesF x = F.rgetField @DVotes x + F.rgetField @RVotes x -- Should this be D + R or total?
ggDelta <- ggTurnoutAdj r tVotesF turnoutByGroupArray
K.logLE K.Diagnostic $
"Ghitza-Gelman turnout adj="
<> show ggDelta
<> "; Adj Turnout=" <> show (TA.adjTurnoutP ggDelta turnoutByGroupArray)
return $ FT.mutate (const $ FT.recordSingleton @(TurnoutArray b) $ TA.adjTurnoutP ggDelta turnoutByGroupArray) r
let onlyOpposed r =
(F.rgetField @DVotes r > 0) && (F.rgetField @RVotes r > 0)
opposedFrame = F.filterFrame onlyOpposed resultsWithPopulationsAndGGAdjFrame
numCompetitiveRaces = FL.fold FL.length opposedFrame
K.logLE K.Info
$ "After removing races where someone is running unopposed we have "
<> show numCompetitiveRaces
<> " contested races."
totalVoteDiagnostics @b resultsWithPopulationsAndGGAdjFrame opposedFrame
let
scaleInt s n = round $ s * realToFrac n
mcmcData =
fmap
(\r ->
( F.rgetField @DVotes r
, VB.fromList $ fmap round (adjVotersL (F.rgetField @(TurnoutArray b) r) (F.rgetField @(PopArray b) r))
)
)
$ FL.fold FL.list opposedFrame
numParams = length $ dsCategories ds
(cgRes, _, _) <- liftIO $ PB.cgOptimizeAD mcmcData (VB.fromList $ (const 0.5) <$> dsCategories ds)
let cgParamsA = A.listArray (minBound :: b, maxBound) $ VB.toList cgRes
cgVarsA = A.listArray (minBound :: b, maxBound) $ VS.toList $ PB.variances mcmcData cgRes
npe cl b =
let
x = cgParamsA A.! b
sigma = sqrt $ cgVarsA A.! b
dof = realToFrac $ numCompetitiveRaces - L.length (A.elems cgParamsA)
interval = S.quantile (S.studentTUnstandardized dof 0 sigma) (1.0 - (S.significanceLevel cl/2))
in FV.ParameterEstimate x (x - interval/2.0, x + interval/2.0)
-- in FV.NamedParameterEstimate (T.pack $ show b) pEstimate
parameterEstimatesA = A.listArray (minBound :: b, maxBound) $ fmap (npe S.cl95) Relude.universe
K.logLE K.Info $ "MLE results: " <> show (A.elems parameterEstimatesA)
-- For now this bit is diagnostic. But we should chart the correlations
-- and, perhaps, the eigenvectors of the covariance??
let cgCovar = PB.covar mcmcData cgRes -- TODO: make a chart out of this
(cgEv, cgEvs) = PB.mleCovEigens mcmcData cgRes
K.logLE K.Diagnostic $ "sigma = " <> show (fmap sqrt $ cgVarsA)
K.logLE K.Diagnostic $ "Covariances=" <> toText (PB.disps 3 cgCovar)
K.logLE K.Diagnostic $ "Correlations=" <> toText (PB.disps 3 $ PB.correlFromCov cgCovar)
K.logLE K.Diagnostic $ "Eigenvalues=" <> show cgEv
K.logLE K.Diagnostic $ "Eigenvectors=" <> toText (PB.disps 3 cgEvs)
return $ PreferenceResults
(F.rcast <$> FL.fold FL.list opposedFrame)
turnoutByGroupArray
popByGroupArray
parameterEstimatesA
cgCovar
ggTurnoutAdj :: forall b rs r. (A.Ix b
, F.ElemOf rs (PopArray b)
, MonadIO (K.Sem r)
) => F.Record rs -> (F.Record rs -> Int) -> A.Array b Double -> K.Sem r Double
ggTurnoutAdj r totalVotesF unadjTurnoutP = do
let population = F.rgetField @(PopArray b) r
totalVotes = totalVotesF r
liftIO $ TA.findDeltaA totalVotes population unadjTurnoutP
adjVotersL :: A.Array b Double -> A.Array b Int -> [Double]
adjVotersL turnoutPA popA = zipWith (*) (A.elems turnoutPA) (realToFrac <$> A.elems popA)
totalVoteDiagnostics :: forall b rs f r
. (A.Ix b
, Foldable f
, F.ElemOf rs (PopArray b)
, F.ElemOf rs (TurnoutArray b)
, F.ElemOf rs Totalvotes
, F.ElemOf rs DVotes
, F.ElemOf rs RVotes
, K.KnitEffects r
)
=> f (F.Record rs) -- ^ frame with all rows
-> f (F.Record rs) -- ^ frame with only rows from competitive races
-> K.Sem r ()
totalVoteDiagnostics allFrame opposedFrame = K.wrapPrefix "VoteSummary" $ do
let allVoters r = FL.fold FL.sum
$ zipWith (*) (A.elems $ F.rgetField @(TurnoutArray b) r) (fmap realToFrac $ A.elems $ F.rgetField @(PopArray b) r)
allVotersF = FL.premap allVoters FL.sum
allVotesF = FL.premap (F.rgetField @Totalvotes) FL.sum
allDVotesF = FL.premap (F.rgetField @DVotes) FL.sum
allRVotesF = FL.premap (F.rgetField @RVotes) FL.sum
-- allDRVotesF = FL.premap (\r -> F.rgetField @DVotes r + F.rgetField @RVotes r) FL.sum
(totalVoters, totalVotes, totalDVotes, totalRVotes) = FL.fold
((,,,) <$> allVotersF <*> allVotesF <*> allDVotesF <*> allRVotesF)
allFrame
(totalVotersCD, totalVotesCD, totalDVotesCD, totalRVotesCD) = FL.fold
((,,,) <$> allVotersF <*> allVotesF <*> allDVotesF <*> allRVotesF)
opposedFrame
K.logLE K.Info $ "voters=" <> show totalVoters
K.logLE K.Info $ "house votes=" <> show totalVotes
K.logLE K.Info
$ "D/R/D+R house votes="
<> show totalDVotes
<> "/"
<> show totalRVotes
<> "/"
<> show (totalDVotes + totalRVotes)
K.logLE K.Info
$ "voters (competitive districts)="
<> show totalVotersCD
K.logLE K.Info
$ "house votes (competitive districts)="
<> show totalVotesCD
K.logLE K.Info
$ "D/R/D+R house votes (competitive districts)="
<> show totalDVotesCD
<> "/"
<> show totalRVotesCD
<> "/"
<> show (totalDVotesCD + totalRVotesCD)
totalArrayZipWith :: (A.Ix b, Enum b, Bounded b)
=> (x -> y -> z)
-> A.Array b x
-> A.Array b y
-> A.Array b z
totalArrayZipWith f xs ys = A.listArray (minBound, maxBound) $ zipWith f (A.elems xs) (A.elems ys)
vlGroupingChart :: Foldable f
=> T.Text
-> FV.ViewConfig
-> f (F.Record ['("Group", T.Text)
,'("VotingAgePop", Int)
,'("Turnout",Double)
,'("Voters", Int)
,'("D Voter Preference", Double)
])
-> GV.VegaLite
vlGroupingChart title vc rows =
let dat = FV.recordsToVLData id FV.defaultParse rows
xLabel = "Inferred (%) Likelihood of Voting Democratic"
estimateXenc = GV.position GV.X [FV.pName @'("D Voter Preference", Double)
,GV.PmType GV.Quantitative
,GV.PAxis [GV.AxTitle xLabel]
]
estimateYenc = GV.position GV.Y [FV.pName @'("Group",T.Text)
,GV.PmType GV.Ordinal
,GV.PAxis [GV.AxTitle "Demographic Group"]
]
estimateSizeEnc = GV.size [FV.mName @'("Voters",Int)
, GV.MmType GV.Quantitative
, GV.MScale [GV.SDomain $ GV.DNumbers [5e6,30e6]]
, GV.MLegend [GV.LFormatAsNum]
]
estimateColorEnc = GV.color [FV.mName @'("Turnout", Double)
, GV.MmType GV.Quantitative
, GV.MScale [GV.SDomain $ GV.DNumbers [0.2,0.8]
,GV.SScheme "blues" [0.3,1.0]
]
, GV.MLegend [GV.LGradientLength (vcHeight vc / 3)
, GV.LFormatAsNum
, GV.LFormat "%"
]
]
estEnc = estimateXenc . estimateYenc . estimateSizeEnc . estimateColorEnc
estSpec = GV.asSpec [(GV.encoding . estEnc) [], GV.mark GV.Point [GV.MFilled True]]
in
FV.configuredVegaLite vc [FV.title title, GV.layer [estSpec], dat]
exitCompareChart :: Foldable f
=> T.Text
-> FV.ViewConfig
-> f (F.Record ['("Group", T.Text)
,'("Model Dem Pref", Double)
,'("ModelvsExit",Double)
])
-> GV.VegaLite
exitCompareChart title vc rows =
let dat = FV.recordsToVLData id FV.defaultParse rows
xLabel = "Modeled % Likelihood of Voting Democratic"
xEnc = GV.position GV.X [FV.pName @'("Model Dem Pref", Double)
,GV.PmType GV.Quantitative
,GV.PAxis [GV.AxTitle xLabel
, GV.AxFormatAsNum
, GV.AxFormat "%"
]
]
yEnc = GV.position GV.Y [FV.pName @'("ModelvsExit", Double)
,GV.PmType GV.Quantitative
,GV.PScale [GV.SDomain $ GV.DNumbers [negate 0.15,0.15]]
,GV.PAxis [GV.AxTitle "Model - Exit Poll"
, GV.AxFormatAsNum
, GV.AxFormat "%"
]
]
colorEnc = GV.color [FV.mName @'("Group", T.Text)
, GV.MmType GV.Nominal
]
enc = xEnc . yEnc . colorEnc
spec = GV.asSpec [(GV.encoding . enc) [], GV.mark GV.Point [GV.MFilled True, GV.MSize 100]]
in
FV.configuredVegaLite vc [FV.title title, GV.layer [spec], dat]
vlGroupingChartExit :: Foldable f
=> T.Text
-> FV.ViewConfig
-> f (F.Record ['("Group", T.Text)
,'("VotingAgePop", Int)
,'("Voters", Int)
,'("D Voter Preference", Double)
,'("InfMinusExit", Double)
])
-> GV.VegaLite
vlGroupingChartExit title vc rows =
let dat = FV.recordsToVLData id FV.defaultParse rows
xLabel = "Inferred Likelihood of Voting Democratic"
estimateXenc = GV.position GV.X [FV.pName @'("D Voter Preference", Double)
,GV.PmType GV.Quantitative
,GV.PAxis [GV.AxTitle xLabel]
]
estimateYenc = GV.position GV.Y [FV.pName @'("Group",T.Text)
,GV.PmType GV.Ordinal
]
estimateSizeEnc = GV.size [FV.mName @'("VotingAgePop",Int)
, GV.MmType GV.Quantitative]
estimateColorEnc = GV.color [FV.mName @'("InfMinusExit", Double)
, GV.MmType GV.Quantitative]
estEnc = estimateXenc . estimateYenc . estimateSizeEnc . estimateColorEnc
estSpec = GV.asSpec [(GV.encoding . estEnc) [], GV.mark GV.Point []]
in
FV.configuredVegaLite vc [FV.title title, GV.layer [estSpec], dat]
|
(** * Perm: Basic Techniques for Comparisons and Permutations *)
(** Consider these algorithms and data structures:
- sort a sequence of numbers
- finite maps from numbers to (arbitrary-type) data
- finite maps from any ordered type to (arbitrary-type) data
- priority queues: finding/deleting the highest number in a set
To prove the correctness of such programs, we need to reason about
comparisons, and about whether two collections have the same
contents. In this chapter, we introduce some techniques for
reasoning about:
- less-than comparisons on natural numbers, and
- permutations (rearrangements of lists).
In later chapters, we'll apply these proof techniques to reasoning
about algorithms and data structures. *)
Set Warnings "-notation-overridden,-parsing,-deprecated-hint-without-locality".
From Coq Require Import Strings.String. (* for manual grading *)
From Coq Require Export Bool.Bool.
From Coq Require Export Arith.Arith.
From Coq Require Export Arith.EqNat.
From Coq Require Export Lia.
From Coq Require Export Lists.List.
Export ListNotations.
From Coq Require Export Permutation.
(* ################################################################# *)
(** * The Less-Than Order on the Natural Numbers *)
(** In our proofs about searching and sorting algorithms, we often
have to reason about the less-than order on natural numbers, and
its converse, greater-than. Recall that the Coq standard library contains both
propositional and Boolean less-than operators on natural numbers.
We write [x < y] for the proposition that [x] is less than [y]: *)
Locate "_ < _". (* "x < y" := lt x y *)
Check lt : nat -> nat -> Prop.
(** And we write [x <? y] for the computation that returns [true] or
[false] depending on whether [x] is less than [y]: *)
Locate "_ <? _". (* x <? y := Nat.ltb x y *)
Check Nat.ltb : nat -> nat -> bool.
(** Operation [<] is a reflection of [<?], as discussed in
[Logic] and [IndProp]. The [Nat] module has a
theorem showing how they relate: *)
Check Nat.ltb_lt : forall n m : nat, (n <? m) = true <-> n < m.
(** The [Nat] module contains a synonym for [lt]. *)
Print Nat.lt. (* Nat.lt = lt *)
(** For unknown reasons, [Nat] does not define notations
for [>?] or [>=?]. So we define them here: *)
Notation "a >=? b" := (Nat.leb b a)
(at level 70) : nat_scope.
Notation "a >? b" := (Nat.ltb b a)
(at level 70) : nat_scope.
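(** As a quick sanity check, the new notations compute as expected: *)
Example gtb_example: (5 >? 3) = true.
Proof. reflexivity. Qed.
Example geb_example: (3 >=? 3) = true.
Proof. reflexivity. Qed.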
(* ================================================================= *)
(** ** The Lia Tactic *)
(** Reasoning about inequalities by hand can be a little painful. Luckily, Coq
provides a tactic called [lia] that is quite helpful. *)
Theorem lia_example1:
forall i j k,
i < j ->
~ (k - 3 <= j) ->
k > i.
Proof.
intros.
(** The hard way to prove this is by hand. *)
(* try to remember the name of the lemma about negation and [<=] *)
Search (~ _ <= _ -> _).
apply not_le in H0.
(* try to remember the name of the transitivity lemma about [>] *)
Search (_ > _ -> _ > _ -> _ > _).
apply gt_trans with j.
apply gt_trans with (k-3).
(* Is [k] greater than [k-3]? On the integers, sure. But we're working
with natural numbers, which truncate subtraction at zero. *)
Abort.
Theorem truncated_subtraction: ~ (forall k:nat, k > k - 3).
Proof.
intros contra.
(* [specialize] applies a hypothesis to an argument *)
specialize (contra 0).
simpl in contra.
inversion contra.
Qed.
(** Since subtraction is truncated, does [lia_example1] actually hold?
It does. Let's try again, the hard way, to find the proof. *)
Theorem lia_example1:
forall i j k,
i < j ->
~ (k - 3 <= j) ->
k > i.
Proof. (* try again! *)
intros.
apply not_le in H0.
unfold gt in H0.
unfold gt.
(* try to remember the name ... *)
Search (_ < _ -> _ <= _ -> _ < _).
apply lt_le_trans with j.
apply H.
apply le_trans with (k-3).
Search (_ < _ -> _ <= _).
apply lt_le_weak.
auto.
apply le_minus.
Qed.
(** That was tedious. Here's a much easier way: *)
Theorem lia_example2:
forall i j k,
i < j ->
~ (k - 3 <= j) ->
k > i.
Proof.
intros.
lia.
Qed.
(** Lia is a decision procedure for integer linear arithmetic.
The [lia] tactic was made available by importing [Lia] at the
beginning of the file. The tactic
works with Coq types [Z] and [nat], and these operators: [<] [=] [>]
[<=] [>=] [+] [-] [~], as well as multiplication by small integer
literals (such as 0,1,2,3...), and some uses of [\/], [/\], and [<->].
Lia does not "understand" other operators. It treats
expressions such as [f x y] as variables. That is, it
can prove [f x y > a * b -> f x y + 3 >= a * b], in the same way it
would prove [u > v -> u + 3 >= v].
*)
Theorem lia_example_3 : forall (f : nat -> nat -> nat) a b x y,
f x y > a * b -> f x y + 3 >= a * b.
Proof.
intros. lia.
Qed.
(* ################################################################# *)
(** * Swapping *)
(** Consider trying to sort a list of natural numbers. As a small piece of
a sorting algorithm, we might need to swap the first two elements of a list
if they are out of order. *)
Definition maybe_swap (al: list nat) : list nat :=
match al with
| a :: b :: ar => if a >? b then b :: a :: ar else a :: b :: ar
| _ => al
end.
Example maybe_swap_123:
maybe_swap [1; 2; 3] = [1; 2; 3].
Proof. reflexivity. Qed.
Example maybe_swap_321:
maybe_swap [3; 2; 1] = [2; 3; 1].
Proof. reflexivity. Qed.
(** Applying [maybe_swap] twice should give the same result as applying it once.
That is, [maybe_swap] is _idempotent_. *)
Theorem maybe_swap_idempotent: forall al,
maybe_swap (maybe_swap al) = maybe_swap al.
Proof.
intros [ | a [ | b al]]; simpl; try reflexivity.
destruct (b <? a) eqn:Hb_lt_a; simpl.
- destruct (a <? b) eqn:Ha_lt_b; simpl.
+ (** Now what? We have a contradiction in the hypotheses: it
cannot hold that [a] is less than [b] and [b] is less than
[a]. Unfortunately, [lia] cannot immediately show that
for us, because it reasons about comparisons in [Prop] not
[bool]. *)
Fail lia.
Abort.
(** Of course we could finish the proof by reasoning directly about
inequalities in [bool]. But this situation is going to occur
repeatedly in our study of sorting. *)
(** Let's set up some machinery to enable using [lia] on boolean
tests. *)
(* ================================================================= *)
(** ** Reflection *)
(** The [reflect] type, defined in the standard library (and presented
in [IndProp]), relates a proposition to a Boolean. That is,
a value of type [reflect P b] contains a proof of [P] if [b] is
[true], or a proof of [~ P] if [b] is [false]. *)
Print reflect.
(*
Inductive reflect (P : Prop) : bool -> Set :=
| ReflectT : P -> reflect P true
| ReflectF : ~ P -> reflect P false
*)
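(** For example, since [3 < 5] is provable, [ReflectT] packages a proof
    of it together with the Boolean [true]: *)
Example reflect_3_lt_5 : reflect (3 < 5) true.
Proof. apply ReflectT. lia. Qed.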
(** The standard library proves a theorem that says if [P] is provable
whenever [b = true] is provable, then [P] reflects [b]. *)
Check iff_reflect : forall (P : Prop) (b : bool),
P <-> b = true -> reflect P b.
(** Using that theorem, we can quickly prove that the propositional
(in)equality operators are reflections of the Boolean
operators. *)
Lemma eqb_reflect : forall x y, reflect (x = y) (x =? y).
Proof.
intros x y. apply iff_reflect. symmetry.
apply Nat.eqb_eq.
Qed.
Lemma ltb_reflect : forall x y, reflect (x < y) (x <? y).
Proof.
intros x y. apply iff_reflect. symmetry.
apply Nat.ltb_lt.
Qed.
Lemma leb_reflect : forall x y, reflect (x <= y) (x <=? y).
Proof.
intros x y. apply iff_reflect. symmetry.
apply Nat.leb_le.
Qed.
(** Here's an example of how you could use these lemmas. Suppose you
have this simple program, [(if a <? 5 then a else 2)], and you
want to prove that it evaluates to a number smaller than 6. You
can use [ltb_reflect] "by hand": *)
Example reflect_example1: forall a,
(if a <? 5 then a else 2) < 6.
Proof.
intros a.
(* The next two lines aren't strictly necessary, but they
help make it clear what [destruct] does. *)
assert (R: reflect (a < 5) (a <? 5)) by apply ltb_reflect.
remember (a <? 5) as guard.
destruct R as [H|H] eqn:HR.
* (* ReflectT *) lia.
* (* ReflectF *) lia.
Qed.
(** For the [ReflectT] constructor, the guard [a <? 5] must be equal
to [true]. The [if] expression in the goal has already been
simplified to take advantage of that fact. Also, for [ReflectT] to
have been used, there must be evidence [H] that [a < 5] holds.
From there, all that remains is to show [a < 5] entails [a < 6].
The [lia] tactic, which is capable of automatically proving some
theorems about inequalities, succeeds.
For the [ReflectF] constructor, the guard [a <? 5] must be equal
to [false]. So the [if] expression simplifies to [2 < 6], which is
immediately provable by [lia]. *)
(** A less didactic version of the above proof wouldn't do the
[assert] and [remember]: we can directly skip to [destruct]. *)
Example reflect_example1': forall a,
(if a <? 5 then a else 2) < 6.
Proof.
intros a. destruct (ltb_reflect a 5); lia.
Qed.
(** But even that proof is a little unsatisfactory. The original expression,
[a <? 5], is not perfectly apparent from the expression [ltb_reflect a 5]
that we pass to [destruct]. *)
(** It would be nice to be able to just say something like [destruct
(a <? 5)] and get the reflection "for free." That's what we'll
engineer, next. *)
(* ================================================================= *)
(** ** A Tactic for Boolean Destruction *)
(** We're now going to build a tactic that you'll want to _use_, but
you won't need to understand the details of how to _build_ it
yourself.
Let's put several of these [reflect] lemmas into a Hint database.
We call it [bdestruct], because we'll use it in our
boolean-destruction tactic: *)
Hint Resolve ltb_reflect leb_reflect eqb_reflect : bdestruct.
(** Here is the tactic, the body of which you do not need to
understand. Invoking [bdestruct] on Boolean expression [b] does
the same kind of reasoning we did above: reflection and
destruction. It also attempts to simplify negations involving
inequalities in hypotheses. *)
Ltac bdestruct X :=
let H := fresh in let e := fresh "e" in
evar (e: Prop);
assert (H: reflect e X); subst e;
[eauto with bdestruct
| destruct H as [H|H];
[ | try first [apply not_lt in H | apply not_le in H]]].
(** This tactic makes quick, easy-to-read work of our running example. *)
Example reflect_example2: forall a,
(if a <? 5 then a else 2) < 6.
Proof.
intros.
bdestruct (a <? 5); (* instead of: [destruct (ltb_reflect a 5)]. *)
lia.
Qed.
(* ================================================================= *)
(** ** Finishing the [maybe_swap] Proof *)
(** Now that we have [bdestruct], we can finish the proof of [maybe_swap]'s
idempotence. *)
Theorem maybe_swap_idempotent: forall al,
maybe_swap (maybe_swap al) = maybe_swap al.
Proof.
intros [ | a [ | b al]]; simpl; try reflexivity.
bdestruct (a >? b); simpl.
(** Note how [b < a] is a hypothesis, rather than [b <? a = true]. *)
- bdestruct (b >? a); simpl.
+ (** [lia] can take care of the contradictory propositional inequalities. *)
lia.
+ reflexivity.
- bdestruct (a >? b); simpl.
+ lia.
+ reflexivity.
Qed.
(** When proving theorems about a program that uses Boolean
comparisons, use [bdestruct] followed by [lia], rather than
[destruct] followed by application of various theorems about
Boolean operators. *)
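(** For example, this little fact about a conditional expression is
    dispatched in one step by that combination: *)
Example bdestruct_then_lia: forall n m,
    (if n <? m then n else m) <= m.
Proof.
  intros. bdestruct (n <? m); simpl; lia.
Qed.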
(* ################################################################# *)
(** * Permutations *)
(** Another useful fact about [maybe_swap] is that it doesn't add or
remove elements from the list: it only reorders them. That is,
the output list is a permutation of the input. List [al] is a
_permutation_ of list [bl] if the elements of [al] can be
reordered to get the list [bl]. Note that reordering does not
permit adding or removing duplicate elements. *)
(** Coq's [Permutation] library has an inductive definition of
permutations. *)
Print Permutation.
(*
Inductive Permutation {A : Type} : list A -> list A -> Prop :=
| perm_nil : Permutation [] []
| perm_skip : forall (x : A) (l l' : list A),
Permutation l l' ->
Permutation (x :: l) (x :: l')
| perm_swap : forall (x y : A) (l : list A),
Permutation (y :: x :: l) (x :: y :: l)
| perm_trans : forall l l' l'' : list A,
Permutation l l' ->
Permutation l' l'' ->
Permutation l l''.
*)
(** You might wonder, "is that really the right definition?" And
indeed, it's important that we get a right definition, because
[Permutation] is going to be used in our specifications of
searching and sorting algorithms. If we have the wrong
specification, then all our proofs of "correctness" will be
useless.
It's not obvious that this is indeed the right specification of
permutations. (It happens to be, but that's not obvious.) To gain
confidence that we have the right specification, let's use it to
prove some properties that permutations ought to have. *)
(** **** Exercise: 2 stars, standard (Permutation_properties)
Think of some desirable properties of the [Permutation] relation
and write them down informally in English, or a mix of Coq and
English. Here are four to get you started:
- 1. If [Permutation al bl], then [length al = length bl].
- 2. If [Permutation al bl], then [Permutation bl al].
- 3. [[1;1]] is NOT a permutation of [[1;2]].
- 4. [[1;2;3;4]] IS a permutation of [[3;4;2;1]].
YOUR TASK: Add three more properties. Write them here: *)
(** Now, let's examine all the theorems in the Coq library about
permutations: *)
Search Permutation. (* Browse through the results of this query! *)
(** Which of the properties that you wrote down above have already
been proved as theorems by the Coq library developers? Answer
here:
*)
(* Do not modify the following line: *)
Definition manual_grade_for_Permutation_properties : option (nat*string) := None.
(** [] *)
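(** For example (a sketch, not part of the original exercise; the name
    is ours), property 1 above is already available as the library
    theorem [Permutation_length]: *)
Example perm_length_example: forall (al bl: list nat),
  Permutation al bl -> length al = length bl.
Proof. intros al bl H. apply Permutation_length. assumption. Qed.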
(** Let's use the permutation theorems in the library to prove the
following theorem. *)
Example butterfly: forall b u t e r f l y : nat,
Permutation ([b;u;t;t;e;r]++[f;l;y]) ([f;l;u;t;t;e;r]++[b;y]).
Proof.
intros.
(** Let's group [[u;t;t;e;r]] together on both sides. Tactic
[change t with u] replaces [t] with [u]. Terms [t] and [u] must
be _convertible_, here meaning that they evaluate to the same
term. *)
change [b;u;t;t;e;r] with ([b]++[u;t;t;e;r]).
change [f;l;u;t;t;e;r] with ([f;l]++[u;t;t;e;r]).
(** We don't actually need to know the list elements in
[[u;t;t;e;r]]. Let's forget about them and just remember them
as a variable named [utter]. *)
remember [u;t;t;e;r] as utter. clear Hequtter.
(** Likewise, let's group [[f;l]] and remember it as a variable. *)
change [f;l;y] with ([f;l]++[y]).
remember [f;l] as fl. clear Heqfl.
(** Next, let's cancel [fl] from both sides. In order to do that,
we need to bring it to the beginning of each list. For the right
list, that follows easily from the associativity of [++]. *)
replace ((fl ++ utter) ++ [b;y]) with (fl ++ utter ++ [b;y])
by apply app_assoc.
(** But for the left list, we can't just use associativity.
Instead, we need to reason about permutations and use some
library theorems. *)
apply perm_trans with (fl ++ [y] ++ ([b] ++ utter)).
- replace (fl ++ [y] ++ [b] ++ utter) with ((fl ++ [y]) ++ [b] ++ utter).
+ apply Permutation_app_comm.
+ rewrite <- app_assoc. reflexivity.
- (** A library theorem will now help us cancel [fl]. *)
apply Permutation_app_head.
(** Next let's cancel [utter]. *)
apply perm_trans with (utter ++ [y] ++ [b]).
+ replace ([y] ++ [b] ++ utter) with (([y] ++ [b]) ++ utter).
* apply Permutation_app_comm.
* rewrite app_assoc. reflexivity.
+ apply Permutation_app_head.
(** Finally we're left with just [y] and [b]. *)
apply perm_swap.
Qed.
(** That example illustrates a general method for proving permutations
involving cons [::] and append [++]:
- Identify some portion appearing in both sides.
- Bring that portion to the front on each side using lemmas such
as [Permutation_app_comm] and [perm_swap], with generous use of
[perm_trans].
- Use [Permutation_app_head] to cancel an appended head. You can
also use [perm_skip] to cancel a single element. A short sketch of
the recipe follows. *)
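(** Here is that sketch (not part of the original text; the name is
    ours): cancel the head element with [perm_skip], then finish with
    [Permutation_app_comm]. *)
Example perm_recipe_sketch: forall (x: nat) (l1 l2: list nat),
  Permutation (x :: l1 ++ l2) (x :: l2 ++ l1).
Proof.
  intros. apply perm_skip. apply Permutation_app_comm.
Qed.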
(** **** Exercise: 3 stars, standard (permut_example)
Use the permutation rules in the library to prove the following
theorem. The following [Check] commands are a hint about useful
lemmas. You don't need all of them, and depending on your
approach you will find some lemmas more useful than others. Use
[Search Permutation] to find others, if you like. *)
Check perm_skip.
Check perm_trans.
Check Permutation_refl.
Check Permutation_app_comm.
Check app_assoc.
Check app_nil_r.
Check app_comm_cons.
Example permut_example: forall (a b: list nat),
Permutation (5 :: 6 :: a ++ b) ((5 :: b) ++ (6 :: a ++ [])).
Proof.
intros.
change (5 :: 6 :: a ++ b) with (5 :: (6 :: a) ++ b).
change ((5 :: b) ++ 6 :: a ++ []) with (5 :: (b ++ (6 :: a) ++ [])).
remember (6 :: a) as a'. clear Heqa'.
rewrite app_nil_r.
apply perm_skip.
apply Permutation_app_comm.
Qed.
(** [] *)
(** **** Exercise: 2 stars, standard (not_a_permutation)
Prove that [[1;1]] is not a permutation of [[1;2]].
Hints are given as [Check] commands. *)
Check Permutation_cons_inv.
Check Permutation_length_1_inv.
Example not_a_permutation:
~ Permutation [1;1] [1;2].
Proof.
unfold not. intros.
apply Permutation_cons_inv in H.
apply Permutation_length_1_inv in H.
discriminate H.
Qed.
(** [] *)
(* ================================================================= *)
(** ** Correctness of [maybe_swap] *)
(** Now we can prove that [maybe_swap] is a permutation: it reorders
elements but does not add or remove any. *)
Theorem maybe_swap_perm: forall al,
Permutation al (maybe_swap al).
Proof.
(* WORKED IN CLASS *)
unfold maybe_swap.
destruct al as [ | a [ | b al]].
- simpl. apply perm_nil.
- apply Permutation_refl.
- bdestruct (b <? a).
+ apply perm_swap.
+ apply Permutation_refl.
Qed.
(** And, we can prove that [maybe_swap] permutes elements such that
the first is less than or equal to the second. *)
Definition first_le_second (al: list nat) : Prop :=
match al with
| a :: b :: _ => a <= b
| _ => True
end.
Theorem maybe_swap_correct: forall al,
Permutation al (maybe_swap al)
/\ first_le_second (maybe_swap al).
Proof.
intros. split.
- apply maybe_swap_perm.
- (* WORKED IN CLASS *)
unfold maybe_swap.
destruct al as [ | a [ | b al]]; simpl; auto.
bdestruct (a >? b); simpl; lia.
Qed.
(* ################################################################# *)
(** * Summary: Comparisons and Permutations *)
(** To prove correctness of algorithms for sorting and searching,
we'll reason about comparisons and permutations using the tools
developed in this chapter. The [maybe_swap] program is a tiny
little example of a sorting program. The proof style in
[maybe_swap_correct] will be applied (at a larger scale) in
the next few chapters. *)
(** **** Exercise: 3 stars, standard (Forall_perm)
To close, we define a utility tactic and lemma. First, the
tactic. *)
(** Coq's [inversion H] tactic is so good at extracting
information from the hypothesis [H] that [H] sometimes becomes
completely redundant, and one might as well [clear] it from the
goal. Then, since [inversion] typically creates some equality
facts, why not follow it with [subst]? Tactic [inv] does just that. *)
Ltac inv H := inversion H; clear H; subst.
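(** A small usage sketch (not in the original; the name is ours): on an
    equation between successors, [inv] immediately extracts and
    substitutes the underlying equality. *)
Example inv_example: forall (x y: nat), S x = S y -> x = y.
Proof. intros x y H. inv H. reflexivity. Qed.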
(** Second, the lemma. You will find [inv] useful in proving it.
[Forall] is the Coq library's version of the [All] proposition defined
in [Logic], but defined as an inductive proposition rather
than a fixpoint. Prove this lemma by induction. You will need to
decide what to induct on: [al], [bl], [Permutation al bl], and
[Forall f al] are possibilities. *)
Theorem Forall_perm: forall {A} (f: A -> Prop) al bl,
Permutation al bl ->
Forall f al -> Forall f bl.
Proof.
intros A f al bl H__permutation H__forall.
induction H__permutation.
- (* nil *) constructor.
- (* x :: l *)
inv H__forall.
auto.
- (* x :: y :: l *)
inv H__forall. inv H2.
auto.
- (* l -> l' -> l'' *)
auto.
Qed.
(** [] *)
(* 2021-08-11 15:15 *)
|
/* specfunc/test_hyperg.c
*
* Copyright (C) 2007, 2009, 2010 Brian Gough
* Copyright (C) 1996, 1997, 1998, 1999, 2000, 2004 Gerard Jungman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* Author: G. Jungman */
#include <config.h>
#include <gsl/gsl_test.h>
#include <gsl/gsl_sf.h>
#include "test_sf.h"
int test_hyperg(void)
{
gsl_sf_result r;
int s = 0;
/* 0F1 */
TEST_SF(s, gsl_sf_hyperg_0F1_e, (1, 0.5, &r), 1.5660829297563505373, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_0F1_e, (5, 0.5, &r), 1.1042674404828684574, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_0F1_e, (100, 30, &r), 1.3492598639485110176, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_0F1_e, (-0.5, 3, &r), -39.29137997543434276, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_0F1_e, (-100.5, 50, &r), 0.6087930289227538496, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_0F1_e, (1, -5.0, &r), -0.3268752818235339109, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_0F1_e, (-0.5, -5.0, &r),-4.581634759005381184, TEST_TOL1, GSL_SUCCESS);
/* 1F1 for integer parameters */
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (1, 1, 0.5, &r), 1.6487212707001281468, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (1, 2, 500.0, &r), 2.8071844357056748215e+214, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (1, 2, -500.0, &r), 0.002, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (8, 1, 0.5, &r), 13.108875178030540372, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 1, 1.0, &r), 131.63017574352619931, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 1, 10.0, &r), 8.514625476546280796e+09, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 1, 100.0, &r), 1.5671363646800353320e+56, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 20, 1.0, &r), 1.6585618002669675465, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 20, 10.0, &r), 265.26686430340188871, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 20, 100.0, &r), 3.640477355063227129e+34, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, 1.0, &r), 1.1056660194025527099, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, 10.0, &r), 2.8491063634727594206, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, 40.0, &r), 133.85880835831230986, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, 80.0, &r), 310361.16228011433406, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, 100.0, &r), 8.032171336754168282e+07, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, 500.0, &r), 7.633961202528731426e+123, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 1, 1.0, &r), 6.892842729046469965e+07, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 1, 10.0, &r), 2.4175917112200409098e+28, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 1, 100.0, &r), 1.9303216896309102993e+110, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 200, 1.0, &r), 1.6497469106162459226, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 200, 10.0, &r), 157.93286197349321981, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 200, 100.0, &r), 2.1819577501255075240e+24, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 200, 400.0, &r), 3.728975529926573300e+119, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 400, 10.0, &r), 12.473087623658878813, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 400, 100.0, &r), 9.071230376818550241e+11, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 400, 150.0, &r), 7.160949515742170775e+18, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 400, 200.0, &r), 2.7406690412731576823e+26, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 400, 300.0, &r), 6.175110613473276193e+43, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 400, 400.0, &r), 1.1807417662711371440e+64, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 400, 600.0, &r), 2.4076076354888886030e+112, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 1, -1.0, &r), 0.11394854824644542810, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 1, -10.0, &r), 0.0006715506365396127863, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 1, -100.0, &r), -4.208138537480269868e-32, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 50, -1.0, &r), 0.820006196079380, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, -10.0, &r), 0.38378859043466243, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, -100.0, &r), 0.0008460143401464189061, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, -500.0, &r), 1.1090822141973655929e-08, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (10, 100, -10000.0, &r), 5.173783508088272292e-21, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (50, 1, -90.0, &r), -1.6624258547648311554e-21, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (50, 1, -100.0, &r), 4.069661775122048204e-24, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (50, 1, -110.0, &r), 1.0072444993946236025e-25, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 10, -100.0, &r), -2.7819353611733941962e-37, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 1, -90.0, &r), 7.501705041159802854e-22, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 1, -100.0, &r), 6.305128893152291187e-25, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 1, -110.0, &r), -7.007122115422439755e-26, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (100, 10, -100.0, &r), -2.7819353611733941962e-37, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (200, 50, -1.0, &r), 0.016087060191732290813, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (200, 50, -300.0, &r), -4.294975979706421471e-121, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (200, 100, -1.0, &r), 0.13397521083325179687, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (200, 100, -10.0, &r), 5.835134393749807387e-10, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (200, 100, -100.0, &r), 4.888460453078914804e-74, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (200, 100, -500.0, &r), -1.4478509059582015053e-195, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-1, 1, 2.0, &r), -1.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-1, -2, 2.0, &r), 2.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-2, -3, 2.0, &r), 3.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, 1, 1.0, &r), 0.4189459325396825397, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, 1, 10.0, &r), 27.984126984126984127, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, 1, 100.0, &r), 9.051283795429571429e+12, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-100, 20, 1.0, &r), 0.0020203016320697069566, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -20, 1.0, &r), 1.6379141878548080173, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -20, 10.0, &r), 78.65202404521289970, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -20, 100.0, &r), 4.416169713262624315e+08, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -100, 1.0, &r), 1.1046713999681950919, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -100, 10.0, &r), 2.6035952191039006838, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -100, 100.0, &r), 1151.6852040836932392, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-100, -200, 1.0, &r), 1.6476859702535324743, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-100, -200, 10.0, &r), 139.38026829540687270, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-100, -200, 100.0, &r), 1.1669433576237933752e+19, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -20, -1.0, &r), 0.6025549561148035735, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -20, -10.0, &r), 0.00357079636732993491, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -20, -100.0, &r), 1.64284868563391159e-35, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -100, -1.0, &r), 0.90442397250313899, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -100, -10.0, &r), 0.35061515251367215, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-10, -100, -100.0, &r), 8.19512187960476424e-09, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-100, -200, -1.0, &r), 0.6061497939628952629, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-100, -200, -10.0, &r), 0.0063278543908877674, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-100, -200, -100.0, &r), 4.34111795007336552e-25, TEST_TOL2, GSL_SUCCESS);
/* 1F1 */
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.5, 1, &r), 2.0300784692787049755, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.5, 10, &r), 6172.859561078406855, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.5, 100, &r), 2.3822817898485692114e+42, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.5, 500, &r), 5.562895351723513581e+215, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1.5, 2.5, 1, &r), 1.8834451238277954398, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1.5, 2.5, 10, &r), 3128.7352996840916381, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 1.1, 1, &r), 110.17623733873889579, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 1.1, 10, &r), 6.146657975268385438e+09, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 1.1, 100, &r), 9.331833897230312331e+55, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 1.1, 500, &r), 4.519403368795715843e+235, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 50.1, 2, &r), 1.5001295507968071788, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 50.1, 10, &r), 8.713385849265044908, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 50.1, 100, &r), 5.909423932273380330e+18, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 50.1, 500, &r), 9.740060618457198900e+165, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, 1, &r), 5.183531067116809033e+07, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, 10, &r), 1.6032649110096979462e+28, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, 100, &r), 1.1045151213192280064e+110, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 50.1, 1, &r), 7.222953133216603757, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 50.1, 10, &r), 1.0998696410887171538e+08, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 50.1, 100, &r), 7.235304862322283251e+63, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.5, -1, &r), 0.5380795069127684191, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.5, -10, &r), 0.05303758099290164485, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.5, -100, &r), 0.005025384718759852803, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.5, -500, &r), 0.0010010030151059555322, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1, 1.1, -500, &r), 0.00020036137599690208265, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 1.1, -1, &r), 0.07227645648935938168, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 1.1, -10, &r), 0.0003192415409695588126, TEST_TOL1, GSL_SUCCESS);
/*
sensitive to the pair_ratio hack in hyperg_1F1.c
TEST_SF_RLX(s, gsl_sf_hyperg_1F1_e, (10, 1.1, -100, &r), -8.293425316123158950e-16, 50.0*TEST_SNGL, GSL_SUCCESS);
*/
TEST_SF(s, gsl_sf_hyperg_1F1_e, (10, 1.1, -500, &r), -3.400379216707701408e-23, TEST_TOL2, GSL_SUCCESS);
TEST_SF_RLX(s, gsl_sf_hyperg_1F1_e, (50, 1.1, -90, &r), -7.843129411802921440e-22, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (50, 1.1, -100, &r), 4.632883869540640460e-24, TEST_SQRT_TOL0, GSL_SUCCESS);
/* FIXME:
tolerance is poor, but is consistent within reported error
*/
TEST_SF(s, gsl_sf_hyperg_1F1_e, (50, 1.1, -110.0, &r), 5.642684651305310023e-26, 0.03, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, -1, &r), 0.0811637344096042096, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, -10, &r), 0.00025945610092231574387, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, -50, &r), 2.4284830988994084452e-13, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, -90, &r), 2.4468224638378426461e-22, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, -99, &r), 1.0507096272617608461e-23, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, -100, &r), 1.8315497474210138602e-24, TEST_TOL2, GSL_SUCCESS);
/* FIXME:
Reported error is too small.
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, -101, &r), -2.3916306291344452490e-24, 0.04, GSL_SUCCESS);
*/
/* FIXME:
Reported error is too small.
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 1.1, -110, &r), -4.517581986037732280e-26, TEST_TOL0, GSL_SUCCESS);
*/
/* FIXME:
Result is terrible, but reported error is very large, so consistent.
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, 10.1, -220, &r), -4.296130300021696573e-64, TEST_TOL1, GSL_SUCCESS);
*/
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-10, -10.1, 10.0, &r), 10959.603204633058116, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-10, -10.1, 1000.0, &r), 2.0942691895502242831e+23, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-10, -100.1, 10.0, &r), 2.6012036337980078062, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1000, -1000.1, 10.0, &r), 22004.341698908631636, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1000, -1000.1, 200.0, &r), 7.066514294896245043e+86, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-8.1, -10.1, -10.0, &r), 0.00018469685276347199258, TEST_TOL0, GSL_SUCCESS);
/* TEST_SF(s, gsl_sf_hyperg_1F1_e, (-8.1, -1000.1, -10.0, &r), 0.9218280185080036020, TEST_TOL0, GSL_SUCCESS); */
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-10, -5.1, 1, &r), 16.936141866089601635, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-10, -5.1, 10, &r), 771534.0349543820541, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-10, -5.1, 100, &r), 2.2733956505084964469e+17, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, -50.1, -1, &r), 0.13854540373629275583, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, -50.1, -10, &r), -9.142260314353376284e+19, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, -50.1, -100, &r), -1.7437371339223929259e+87, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, -50.1, 1, &r), 7.516831748170351173, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, -50.1, 10, &r), 1.0551632286359671976e+11, TEST_SQRT_TOL0, GSL_SUCCESS);
/*
These come out way off. On the other hand, the error estimates
are also very large; so much so that the answers are consistent
within the reported error. Something will need to be done about
this eventually
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, -50.1, 50, &r), -7.564755600940346649e+36, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, -50.1, 100, &r), 4.218776962675977e+55, TEST_TOL3, GSL_SUCCESS);
*/
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-10.5, -8.1, 0.1, &r), 1.1387201443786421724, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-10.5, -11.1, 1, &r), 2.5682766147138452362, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100.5, -80.1, 10, &r), 355145.4517305220603, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100.5, -102.1, 10, &r), 18678.558725244365016, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100.5, -500.1, 10, &r), 7.342209011101454, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100.5, -500.1, 100, &r), 1.2077443075367177662e+8, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-500.5, -80.1, 2, &r), 774057.8541325341699, TEST_TOL4, GSL_SUCCESS);
/*
UNIMPL
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, -10.1, 1, &r), -2.1213846338338567395e+12, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, -10.1, 10, &r), -6.624849346145112398e+39, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, -10.1, 100, &r), -1.2413466759089171904e+129, TEST_TOL0, GSL_SUCCESS);
*/
/*
UNIMPL
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, -10.1, -1, &r), 34456.29405305551691, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, -10.1, -10, &r), -7.809224251467710833e+07, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (100, -10.1, -100, &r), -5.214065452753988395e-07, TEST_TOL0, GSL_SUCCESS);
*/
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 1.1, 1, &r), 0.21519810496314438414, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 1.1, 10, &r), 8.196123715597869948, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 1.1, 100, &r), -1.4612966715976530293e+20, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 20.1, 1, &r), 0.0021267655527278456412, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 20.1, 10, &r), 2.0908665169032186979e-11, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 20.1, 100, &r), -0.04159447537001340412, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 1.1, -1, &r), 2.1214770215694685282e+07, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 1.1, -10, &r), 1.0258848879387572642e+24, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 1.1, -100, &r), 1.1811367147091759910e+67, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 50.1, -1, &r), 6.965259317271427390, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 50.1, -10, &r), 1.0690052487716998389e+07, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-100, 50.1, -100, &r), 6.889644435777096248e+36, TEST_TOL3, GSL_SUCCESS);
/* Bug report from Fernando Pilotto */
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-2.05, 1.0, 5.05, &r), 3.79393389516785e+00, TEST_TOL3, GSL_SUCCESS);
/* Bug reports from Ivan Liu */
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-26, 2.0, 100.0, &r), 1.444786781107436954e+19, TEST_TOL3, GSL_SUCCESS);
#ifdef FIXME
/* This one is computed with a huge error; there is loss of
precision, but the error estimate flags the problem (assuming the
user looks at it). We should probably trap any return with
err>|val| and signal loss of precision */
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-26.1, 2.0, 100.0, &r), 1.341557199575986995e+19, TEST_TOL3, GSL_SUCCESS);
#endif
/* Bug report H.Moseby */
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1.2, 1.1e-15, 1.5, &r), 8254503159672429.02, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1.0, 1000000.5, 0.8e6 + 0.5, &r), 4.999922505099443804e+00, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1.0, 1000000.5, 1001000.5, &r), 3480.3699557431856166, TEST_TOL4, GSL_SUCCESS);
#ifdef FIXME /* FIX THESE NEXT RELEASE */
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1.1, 1000000.5, 1001000.5, &r), 7304.6126942641350122, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (0.9, 1000000.5, 1001000.5, &r), 1645.4879293475410982, TEST_TOL3, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1.1, 1000000.5, 1001000.5, &r), -5.30066488697455e-04, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (1.5, 1000000.5, 0.8e6 + 0.5, &r), 11.18001288977894650469927615, TEST_TOL4, GSL_SUCCESS);
/* Bug report Lorenzo Moneta <[email protected]> */
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1.5, 1.5, -100., &r), 456.44010011787485545, TEST_TOL4, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1.5, 1.5, 99., &r), 4.13360436014643309757065e36, TEST_TOL4, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1.5, 1.5, 100., &r), 1.0893724312430935129254e37, TEST_TOL4, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1.5, 1.5, 709., &r), 8.7396804160264899999692120e298, TEST_TOL4, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1.5, 1.5, 710., &r), 2.36563187217417898169834615e299, TEST_TOL4, GSL_SUCCESS);
/* Bug report from Weibin Li <[email protected]> */
#ifdef FIXME
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-37.8, 2.01, 103.58, &r), -6.21927211009e17, TEST_TOL1, GSL_SUCCESS);
#endif
/* Testing BJG */
#ifdef COMPARISON_WITH_MATHEMATICA
/* Mathematica uses a different convention for M(-m,-n,x) */
TEST_SF(s, gsl_sf_hyperg_1F1_int_e, (-1, -1, 0.1, &r), 1.1, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_1F1_e, (-1, -1, 0.1, &r), 1.1, TEST_TOL0, GSL_SUCCESS);
#endif
/* U for integer parameters */
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 1, 0.0001, &r), 8.634088070212725330, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 1, 0.01, &r), 4.078511443456425847, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 1, 0.5, &r), 0.9229106324837304688, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 1, 2.0, &r), 0.3613286168882225847, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 1, 100, &r), 0.009901942286733018406, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 1, 1000, &r), 0.0009990019940238807150, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 8, 0.01, &r), 7.272361203006010000e+16, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 8, 1, &r), 1957.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 8, 5, &r), 1.042496, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 8, 8, &r), 0.3207168579101562500, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 8, 50, &r), 0.022660399001600000000, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 8, 100, &r), 0.010631236727200000000, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 8, 1000, &r), 0.0010060301203607207200, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 20, 1, &r), 1.7403456103284421000e+16, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 20, 20, &r), 0.22597813610531052969, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 50, 1, &r), 3.374452117521520758e+61, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 50, 50, &r), 0.15394136814987651785, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 100, 0.1, &r), 1.0418325171990852858e+253, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 100, 1, &r), 2.5624945006073464385e+154, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 100, 50, &r), 3.0978624160896431391e+07, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 100, 100, &r), 0.11323192555773717475, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 100, 200, &r), 0.009715680951406713589, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 100, 1000, &r), 0.0011085142546061528661, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, 1000, 2000, &r), 0.0009970168547036318206, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -1, 1, &r), 0.29817368116159703717, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -1, 10, &r), 0.07816669698940409380, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -10, 1, &r), 0.08271753756946041959, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -10, 5, &r), 0.06127757419425055261, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -10, 10, &r), 0.04656199948873187212, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -10, 20, &r), 0.031606421847946077709, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -100, 0.01, &r), 0.009900000099999796950, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -100, 1, &r), 0.009802970197050404429, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -100, 10, &r), 0.009001648897173103447, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -100, 20, &r), 0.008253126487166557546, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -100, 50, &r), 0.006607993916432051008, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -100, 90, &r), 0.005222713769726871937, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -100, 110, &r), 0.004727658137692606210, TEST_TOL2, GSL_SUCCESS);
TEST_SF_RLX(s, gsl_sf_hyperg_U_int_e, (1, -1000, 1, &r), 0.0009980029970019970050, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (1, -1000, 1010, &r), 0.0004971408839859245170, TEST_TOL4, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (8, 1, 0.001, &r), 0.0007505359326875706975, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (8, 1, 0.5, &r), 6.449509938973479986e-06, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (8, 1, 8, &r), 6.190694573035761284e-10, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (8, 1, 20, &r), 3.647213845460374016e-12, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (8, 8, 1, &r), 0.12289755012652317578, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (8, 8, 10, &r), 5.687710359507564272e-09, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (8, 8, 20, &r), 2.8175404594901039724e-11, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (100, 100, 0.01, &r), 1.0099979491941914867e+196, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (100, 100, 0.1, &r), 1.0090713562719862833e+97, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (100, 100, 1, &r), 0.009998990209084729106, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (100, 100, 20, &r), 1.3239363905866130603e-131, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 1, 0.01, &r), 3.274012540759009536e+06, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 1, 1, &r), 1.5202710000000000000e+06, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 1, 10, &r), 1.0154880000000000000e+08, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 1, 100, &r), 3.284529863685482880e+19, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 10, 1, &r), 1.1043089864100000000e+11, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 100, 1, &r), 1.3991152402448957897e+20, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 100, 10, &r), 5.364469916567136000e+19, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 100, 100, &r), 3.909797568000000000e+12, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-10, 100, 500, &r), 8.082625576697984130e+25, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, 1, 0.01, &r), 1.6973422555823855798e+64, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, 1, 1, &r), 7.086160198304780325e+63, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, 1, 10, &r), 5.332862895528712200e+65, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, 10, 1, &r), -7.106713471565790573e+71, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, 100, 1, &r), 2.4661377199407186476e+104, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, 10, 10, &r), 5.687538583671241287e+68, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, 100, 10, &r), 1.7880761664553373445e+102, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-90, 1, 0.01, &r), 4.185245354032917715e+137, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-90, 1, 0.1, &r), 2.4234043408007841358e+137, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-90, 1, 10, &r), -1.8987677149221888807e+139, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-90, 10, 10, &r), -5.682999988842066677e+143, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-90, 100, 10, &r), 2.3410029853990624280e+189, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-90, 1000, 10, &r), 1.9799451517572225316e+271, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, -1, 10, &r), -9.083195466262584149e+64, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, -10, 10, &r), -1.4418257327071634407e+62, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, -100, 0.01, &r), 3.0838993811468983931e+93, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-50, -100, 10, &r), 4.014552630378340665e+95, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-100, -100, 10, &r), 2.0556466922347982030e+162, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-100, -200, 10, &r), 1.1778399522973555582e+219, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (-100, -200, 100, &r), 9.861313408898201873e+235, TEST_TOL3, GSL_SUCCESS);
/* U */
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 0.0001, 0.0001, &r), 1.0000576350699863577, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 0.0001, 1.0, &r), 0.9999403679233247536, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 0.0001, 100.0, &r), 0.9995385992657260887, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 1, 0.0001, &r), 1.0009210608660065989, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 1.0, 1.0, &r), 0.9999999925484179084, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 10, 1, &r), 13.567851006281412726, TEST_TOL3, GSL_SUCCESS);
TEST_SF_RLX(s, gsl_sf_hyperg_U_e, (0.0001, 10, 5, &r), 1.0006265020064596364, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 10, 10, &r), 0.9999244381454633265, TEST_TOL0, GSL_SUCCESS);
TEST_SF_RLX(s, gsl_sf_hyperg_U_e, (0.0001, 100, 1, &r), 2.5890615708804247881e+150, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF_RLX(s, gsl_sf_hyperg_U_e, (0.0001, 100, 10, &r), 2.3127845417739661466e+55, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF_RLX(s, gsl_sf_hyperg_U_e, (0.0001, 100, 50, &r), 6402.818715083582554, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 100, 98, &r), 0.9998517867411840044, TEST_TOL2, GSL_SUCCESS);
TEST_SF_RLX(s, gsl_sf_hyperg_U_e, (0.0001, 1000, 300, &r), 2.5389557274938010716e+213, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 1000, 999, &r), 0.9997195294193261604, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.0001, 1000, 1100, &r), 0.9995342990014584713, TEST_TOL1, GSL_SUCCESS);
TEST_SF_RLX(s, gsl_sf_hyperg_U_e, (0.5, 1000, 300, &r), 1.1977955438214207486e+217, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.5, 1000, 800, &r), 9.103916020464797207e+08, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.5, 1000, 998, &r), 0.21970269691801966806, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0.5, 0.5, 1.0, &r), 0.7578721561413121060, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (1, 0.0001, 0.0001, &r), 0.9992361337764090785, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (1, 0.0001, 1, &r), 0.4036664068111504538, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (1, 0.0001, 100, &r), 0.009805780851264329587, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (1, 1.2, 2.0, &r), 0.3835044780075602550, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (1, -0.0001, 1, &r), 0.4036388693605999482, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (8, 10.5, 1, &r), 27.981926466707438538, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (8, 10.5, 10, &r), 2.4370135607662056809e-8, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (8, 10.5, 100, &r), 1.1226567526311488330e-16, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (10, -2.5, 10, &r), 6.734690720346560349e-14, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (10, 2.5, 10, &r), 6.787780794037971638e-13, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (10, 2.5, 50, &r), 2.4098720076596087125e-18, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 1.1, 1, &r), -3.990841457734147e+6, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 1.1, 10, &r), 1.307472052129343e+8, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 1.1, 50, &r), 3.661978424114088e+16, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 1.1, 90, &r), 8.09469542130868e+19, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 1.1, 99, &r), 2.546328328942063e+20, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 1.1, 100, &r), 2.870463201832814e+20, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 1.1, 200, &r), 8.05143453769373e+23, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 10.1, 0.1, &r), -3.043016255306515e+20, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 10.1, 1, &r), -3.194745265896115e+12, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 10.1, 4, &r), -6.764203430361954e+07, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 10.1, 10, &r), -2.067399425480545e+09, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 10.1, 50, &r), 4.661837330822824e+14, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 100.4, 10, &r), -6.805460513724838e+66, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 100.4, 50, &r), -2.081052558162805e+18, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 100.4, 80, &r), 2.034113191014443e+14, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 100.4, 100, &r), 6.85047268436107e+13, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-10.5, 100.4, 200, &r), 1.430815706105649e+20, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-19.5, 82.1, 10, &r), 5.464313196201917432e+60, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-50.5, 100.1, 10, &r), -5.5740216266953e+126, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-50.5, 100.1, 40, &r), 5.937463786613894e+91, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-50.5, 100.1, 50, &r), -1.631898534447233e+89, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-50.5, 100.1, 70, &r), 3.249026971618851e+84, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-50.5, 100.1, 100, &r), 1.003401902126641e+85, TEST_TOL1, GSL_SUCCESS);
/* Bug report from Stefan Gerlach */
TEST_SF(s, gsl_sf_hyperg_U_e, (-2.0, 4.0, 1.0, &r), 11.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2.0, 0.5, 3.14, &r), 1.1896, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2.0, 0.5, 1.13, &r), -1.3631, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2.0, 0.5, 0.0, &r), 0.75, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2.0, 0.5, 1e-20, &r), 0.75, TEST_TOL2, GSL_SUCCESS);
/* U(a,b,x) for x<0 [bug #27859] */
/* Tests for b >= 0 */
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, 0, -0.1, &r), 1, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, 0, -0.1, &r), -0.1, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, 0, -0.1, &r), 0.21, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, 0, -0.1, &r), -0.661, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, 0, -0.1, &r), 2.7721, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, 0, -0.1, &r), -14.52201, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, 0, -0.1, &r), 91.230301, TEST_TOL0, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, 1, -0.1, &r), 1.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, 1, -0.1, &r), -1.1, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, 1, -0.1, &r), 2.41, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, 1, -0.1, &r), -7.891, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, 1, -0.1, &r), 34.3361, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, 1, -0.1, &r), -186.20251, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, 1, -0.1, &r), 1208.445361, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, ( 1, 2, -0.1, &r), -10.0, TEST_TOL0, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, 2, -0.1, &r), 1.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, 2, -0.1, &r), -2.1, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, 2, -0.1, &r), 6.61, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, 2, -0.1, &r), -27.721, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, 2, -0.1, &r), 145.2201, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, 2, -0.1, &r), -912.30301, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, 2, -0.1, &r), 6682.263421, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, ( 2, 3, -0.1, &r), 100.0, TEST_TOL0, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_U_e, ( 1, 3, -0.1, &r), 90.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, 3, -0.1, &r), 1.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, 3, -0.1, &r), -3.10, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, 3, -0.1, &r), 12.81, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, 3, -0.1, &r), -66.151, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, 3, -0.1, &r), 409.8241, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, 3, -0.1, &r), -2961.42351, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, 3, -0.1, &r), 24450.804481, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, ( 3, 4, -0.1, &r), -1000.0, TEST_TOL0, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_U_e, ( 2, 4, -0.1, &r), -1900.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, ( 1, 4, -0.1, &r), -1810.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, 4, -0.1, &r), 1.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, 4, -0.1, &r), -4.10, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, 4, -0.1, &r), 21.01, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, 4, -0.1, &r), -129.181, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, 4, -0.1, &r), 926.5481, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, 4, -0.1, &r), -7594.16401, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, 4, -0.1, &r), 70015.788541, TEST_TOL0, GSL_SUCCESS);
/* Tests for b < 0 */
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, -1, -0.1, &r), 1.0, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, -1, -0.1, &r), 0.9, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, -1, -0.1, &r), 0.01, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, -1, -0.1, &r), -0.031, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, -1, -0.1, &r), 0.1281, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, -1, -0.1, &r), -0.66151, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, -1, -0.1, &r), 4.098241, TEST_TOL0, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, -2, -0.1, &r), 1.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, -2, -0.1, &r), 1.9, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, -2, -0.1, &r), 1.81, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, -2, -0.1, &r), -0.001, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, -2, -0.1, &r), 0.0041, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, -2, -0.1, &r), -0.02101, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, -2, -0.1, &r), 0.129181, TEST_TOL0, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, -3, -0.1, &r), 1.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, -3, -0.1, &r), 2.9, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, -3, -0.1, &r), 5.61, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, -3, -0.1, &r), 5.429, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, -3, -0.1, &r), 0.0001, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, -3, -0.1, &r), -0.00051, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, -3, -0.1, &r), 0.003121, TEST_TOL0, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_U_e, ( 0, -4, -0.1, &r), 1.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, -4, -0.1, &r), 3.9, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, -4, -0.1, &r), 11.41, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, -4, -0.1, &r), 22.259, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-4, -4, -0.1, &r), 21.7161, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, (-5, -4, -0.1, &r), -1e-5, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-6, -4, -0.1, &r), 0.000061, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-7, -4, -0.1, &r), -0.0004341, TEST_TOL0, GSL_SUCCESS);
#endif
/* Tests for integer a */
TEST_SF(s, gsl_sf_hyperg_U_e, (-3, 0.5, -0.5, &r), -9.5, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-8, 0.5, -0.5, &r), 180495.0625, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-8, 1.5, -0.5, &r), 827341.0625, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-8, 1.5, -10, &r), 7.162987810253906e9, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (3, 6, -0.5, &r), -296.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (3, 7, -0.5, &r), 2824, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (5, 12, -1.7, &r), -153.262676210016018065768591104, TEST_TOL0, GSL_SUCCESS);
/* A few random tests */
TEST_SF(s, gsl_sf_hyperg_U_e, (0, 0, -0.5, &r), 1, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0, 1, -0.5, &r), 1, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (0, 1, -0.001, &r), 1, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, 0.99, -0.1, &r), -1.09, TEST_TOL0, GSL_SUCCESS);
#ifdef FIXME /* unimplemented case */
TEST_SF(s, gsl_sf_hyperg_U_e, (-1, 0, -0.5, &r), -0.5, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-2, 0, -0.5, &r), 1.25, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_e, (-7, 0, -0.1, &r), -668.2263421, TEST_TOL0, GSL_SUCCESS);
#endif
TEST_SF(s, gsl_sf_hyperg_U_int_e, (3, 6, -0.5, &r), -296.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (3, 7, -0.5, &r), 2824, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_U_int_e, (5, 12, -1.7, &r), -153.262676210016018065768591104, TEST_TOL0, GSL_SUCCESS);
/* 2F1 */
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1, 1, 1, 0.5, &r), 2.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, 8, 1, 0.5, &r), 12451584.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, -8, 1, 0.5, &r), 0.13671875, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, -8.1, 1, 0.5, &r), 0.14147385378899930422, TEST_TOL4, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, -8, 1, -0.5, &r), 4945.136718750000000, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, -8, -5.5, 0.5, &r), -906.6363636363636364, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, -8, -5.5, -0.5, &r), 24565.363636363636364, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, 8, 1, -0.5, &r), -0.006476312098196747669, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, 8, 5, 0.5, &r), 4205.714285714285714, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (8, 8, 5, -0.5, &r), 0.0028489656290296436616, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, 1, 0.99, &r), 1.2363536673577259280e+38 , TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, -1.5, 0.99, &r), 3.796186436458346579e+46, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, -1.5, -0.99, &r), 0.14733409946001025146, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, -8.5, 0.99, &r), -1.1301780432998743440e+65, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, -8.5, -0.99, &r), -8.856462606575344483, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, -21.5, 0.99, &r), 2.0712920991876073253e+95, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, -21.5, -0.99, &r), -74.30517015382249216, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, -100.5, 0.99, &r), -3.186778061428268980e+262, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (9, 9, -100.5, -0.99, &r), 2.4454358338375677520, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (25, 25, 1, -0.5, &r), -2.9995530823639545027e-06, TEST_SQRT_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, 1.0-1.0/64.0, &r), 3.17175539044729373926, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, 1.0-1.0/128.0, &r), 3.59937243502024563424, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, 1.0-1.0/256.0, &r), 4.03259299524392504369, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, 1.0-1.0/1024.0, &r), 4.90784159359675398250, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, 1.0-1.0/65536.0, &r), 7.552266033399683914, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, 1.0-1.0/16777216.0, &r), 11.08235454026043830363, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, -1.0+1.0/1024.0, &r), 0.762910940909954974527, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, -1.0+1.0/65536.0, &r), 0.762762124908845424449, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 2.0, -1.0+1.0/1048576.0, &r), 0.762759911089064738044, TEST_TOL0, GSL_SUCCESS);
/* added special handling with x == 1.0 , Richard J. Mathar, 2008-01-09 */
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, 0.5, 3.0, 1.0, &r), 1.6976527263135502482014268 , TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (1.5, -4.2, 3.0, 1.0, &r), .15583601560025710649555254 , TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (-7.4, 0.7, -1.5, 1.0, &r), -.34478866959246584996859 , TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_e, (0.1, -2.7, -1.5, 1.0, &r), 1.059766766063610122925 , TEST_TOL2, GSL_SUCCESS);
/* Taylor Binnington a = 0 */
TEST_SF(s, gsl_sf_hyperg_2F1_e, (0, -2, -4, 0.5, &r), 1.0 , TEST_TOL2, GSL_SUCCESS);
/* 2F1 conj */
TEST_SF(s, gsl_sf_hyperg_2F1_conj_e, (1, 1, 1, 0.5, &r), 3.352857095662929028, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_e, (8, 8, 1, 0.5, &r), 1.7078067538891293983e+09, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_e, (8, 8, 5, 0.5, &r), 285767.15696901140627, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_e, (8, 8, 1, -0.5, &r), 0.007248196261471276276, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_e, (8, 8, 5, -0.5, &r), 0.00023301916814505902809, TEST_TOL3, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_e, (25, 25, 1, -0.5, &r), 5.1696944096e-06, TEST_SQRT_TOL0, GSL_SUCCESS);
/* updated correct values, testing enabled, Richard J. Mathar, 2008-01-09 */
TEST_SF(s, gsl_sf_hyperg_2F0_e, (0.01, 1.0, -0.02, &r), .99980388665511730901180717 , TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F0_e, (0.1, 0.5, -0.02, &r), .99901595171179281891589794 , TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F0_e, (1, 1, -0.02, &r), .98075549650574351826538049000 , TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F0_e, (8, 8, -0.02, &r), .32990592849626965538692141 , TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F0_e, (50, 50, -0.02, &r), .2688995263772964415245902e-12 , TEST_TOL0, GSL_SUCCESS);
/* 2F1 renorm */
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (1, 1, 1, 0.5, &r), 2.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (8, 8, 1, 0.5, &r), 12451584.0, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (8, -8, 1, 0.5, &r), 0.13671875, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (8, -8, 1, -0.5, &r), 4945.13671875, TEST_TOL0, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (8, -8, -5.5, 0.5, &r), -83081.19167659493609, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (8, -8, -5.5, -0.5, &r), 2.2510895952730178518e+06, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (8, 8, 5, 0.5, &r), 175.2380952380952381, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (9, 9, -1.5, 0.99, &r), 1.6063266334913066551e+46, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (9, 9, -1.5, -0.99, &r), 0.06234327316254516616, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (5, 5, -1, 0.5, &r), 4949760.0, TEST_TOL1, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (5, 5, -10, 0.5, &r), 139408493229637632000.0, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_renorm_e, (5, 5, -100, 0.5, &r), 3.0200107544594411315e+206, TEST_TOL3, GSL_SUCCESS);
/* 2F1 conj renorm */
TEST_SF(s, gsl_sf_hyperg_2F1_conj_renorm_e, (9, 9, -1.5, 0.99, &r), 5.912269095984229412e+49, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_renorm_e, (9, 9, -1.5, -0.99, &r), 0.10834020229476124874, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_renorm_e, (5, 5, -1, 0.5, &r), 1.4885106335357933625e+08, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_renorm_e, (5, 5, -10, 0.5, &r), 7.968479361426355095e+21, TEST_TOL2, GSL_SUCCESS);
TEST_SF(s, gsl_sf_hyperg_2F1_conj_renorm_e, (5, 5, -100, 0.5, &r), 3.1113180227052313057e+208, TEST_TOL3, GSL_SUCCESS);
return s;
}
|
When Bart is talking to the boy's father on the phone he says "I think I hear a dingo eating your baby", referencing the case of Azaria Chamberlain, a ten-week-old baby who was killed by dingoes. The bullfrogs taking over Australia and destroying all the crops are a reference to the cane toad, originally introduced to Australia in order to protect sugar cane from the cane beetle, but which became a pest in the country.
|
lemma (in topological_space) at_within_empty [simp]: "at a within {} = bot" |
/-
Copyright (c) 2016 Minchao Wu. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Minchao Wu
-/
import tools.super
open classical nat function prod subtype super
noncomputable theory
theorem lt_or_eq_of_lt_succ {n m : ℕ} (H : n < succ m) : n < m ∨ n = m :=
lt_or_eq_of_le (le_of_lt_succ H)
theorem and_of_not_imp {p q : Prop} (H : ¬ (p → ¬ q)) : p ∧ q := by super
theorem not_not_elim {a : Prop} : ¬¬a → a := by_contradiction
theorem exists_not_of_not_forall {A : Type} {p : A → Prop} (H : ¬∀x, p x) : ∃x, ¬p x :=
by_contradiction (λ neg, have ∀ x, ¬ ¬ p x, from forall_not_of_not_exists neg,
show _, from H (λ x, not_not_elim (this x)))
theorem existence_of_nat_gt (n : ℕ) : ∃ m, n < m := ⟨(succ n),(lt_succ_self n)⟩
namespace kruskal
structure [class] quasiorder (A : Type) extends has_le A :=
(refl : ∀ a, le a a)
(trans : ∀ {a b c}, le a b → le b c → le a c)
structure [class] wqo (A : Type) extends quasiorder A :=
(is_good : ∀ f : ℕ → A, ∃ i j, i < j ∧ le (f i) (f j))
def is_good {A : Type} (f : ℕ → A) (o : A → A → Prop) := ∃ i j : ℕ, i < j ∧ o (f i) (f j)
def prod_order {A B : Type} (o₁ : A → A → Prop) (o₂ : B → B → Prop) (s : A × B) (t : A × B) :=
o₁ (s.1) (t.1) ∧ o₂ (s.2) (t.2)
instance qo_prod {A B: Type} [o₁ : quasiorder A] [o₂ : quasiorder B] : quasiorder (A × B) :=
let op : A × B → A × B → Prop := prod_order o₁.le o₂.le in
have refl : ∀ p : A × B, op p p, by intro; apply and.intro; repeat {apply quasiorder.refl},
have trans : ∀ a b c, op a b → op b c → op a c, from λ x y z h1 h2,
⟨(quasiorder.trans h1^.left h2^.left), quasiorder.trans h1^.right h2^.right⟩,
show _, from quasiorder.mk (has_le.mk op) refl trans
def terminal {A : Type} (o : A → A → Prop) (f : ℕ → A) (m : ℕ) :=
∀ n, m < n → ¬ o (f m) (f n)
theorem lt_of_non_terminal {A : Type} {o : A → A → Prop} {f : ℕ → A} {m : ℕ} (H : ¬ @terminal _ o f m) :
∃ n, m < n ∧ o (f m) (f n) :=
let ⟨n,h⟩ := exists_not_of_not_forall H in ⟨n,(and_of_not_imp h)⟩
section
parameter {A : Type}
parameter [o : wqo A]
parameter f : ℕ → A
section
parameter H : ∀ N, ∃ r, N < r ∧ terminal o.le f r
def terminal_index (n : ℕ) : {x : ℕ // n < x ∧ terminal o.le f x} :=
nat.rec_on n (let i := some (H 0) in ⟨i, (some_spec (H 0))⟩)
(λ a rec_call,
let i' := rec_call.1, i := some (H i') in
have p : i' < i ∧ terminal o.le f i, from some_spec (H i'),
have a < i', from (rec_call.2)^.left,
have succ a < i, from lt_of_le_of_lt this p^.left,
⟨i, ⟨this,p^.right⟩⟩)
lemma increasing_ti {n m : ℕ} : n < m → (terminal_index n).1 < (terminal_index m).1 :=
nat.rec_on m (λ H, absurd H dec_trivial)
(λ a ih lt,
have disj : n < a ∨ n = a, from lt_or_eq_of_lt_succ lt,
have (terminal_index a).1 < (terminal_index (succ a)).1, from
(some_spec $ H (terminal_index a).1)^.left,
or.elim disj (λ Hl, lt_trans (ih Hl) this) (λ Hr, by rw Hr;exact this))
private def g (n : ℕ) := f (terminal_index n).1
lemma terminal_g (n : ℕ) : terminal o.le g n :=
have ∀ n', (terminal_index n).1 < n' → ¬ (f (terminal_index n)^.1) ≤ (f n'), from ((terminal_index n).2)^.right,
λ n' h, this (terminal_index n').1 (increasing_ti h)
lemma bad_g : ¬ is_good g o.le :=
have H1 : ∀ i j, i < j → ¬ (g i) ≤ (g j), from λ i j h, (terminal_g i) j h,
suppose ∃ i j, i < j ∧ (g i) ≤ (g j),
let ⟨i,j,h⟩ := this in
have ¬ (g i) ≤ (g j), from H1 i j h^.left,
show _, from this h^.right
lemma local_contradiction : false := bad_g (wqo.is_good g)
end
theorem finite_terminal : ∃ N, ∀ r, N < r → ¬ terminal o.le f r :=
have ¬ ∀ N, ∃ r, N < r ∧ @terminal A o.le f r, by apply local_contradiction,
have ∃ N, ¬ ∃ r, N < r ∧ @terminal A o.le f r, by super,
let ⟨n,h⟩ := this in
have ∀ r, n < r → ¬ @terminal A o.le f r, by super,
⟨n,this⟩
end
section
parameters {A B : Type}
parameters [o₁ : wqo A] [o₂ : wqo B]
section
parameter f : ℕ → A × B
theorem finite_terminal_on_A : ∃ N, ∀ r, N < r → ¬ @terminal A o₁.le (fst ∘ f) r :=
finite_terminal (fst ∘ f)
def sentinel := some finite_terminal_on_A
def h_helper (n : ℕ) : {x : ℕ // sentinel < x ∧ ¬ @terminal A o₁.le (fst ∘ f) x} :=
nat.rec_on n
(have ∃ m, sentinel < m, by apply existence_of_nat_gt,
let i := some this in have ge : sentinel < i, from some_spec this,
have ¬ @terminal A o₁.le (fst ∘ f) i, from (some_spec finite_terminal_on_A) i ge,
have sentinel < i ∧ ¬ terminal o₁.le (fst ∘ f) i, from ⟨ge,this⟩,
⟨i, this⟩)
(λ a rec_call, let i' := rec_call.1 in
have lt' : sentinel < i', from (rec_call.2)^.left,
have ¬ terminal o₁.le (fst ∘ f) i', from (rec_call.2)^.right,
have ∃ n, i' < n ∧ ((fst ∘ f) i') ≤ ((fst ∘ f) n), from lt_of_non_terminal this,
let i := some this in have i' < i, from (some_spec this)^.left,
have lt : sentinel < i, from lt.trans lt' this,
have ∀ r, sentinel < r → ¬ terminal o₁.le (fst ∘ f) r, from some_spec finite_terminal_on_A,
have ¬ terminal o₁.le (fst ∘ f) i, from this i lt,
have sentinel < i ∧ ¬ terminal o₁.le (fst ∘ f) i, from ⟨lt,this⟩,
⟨i,this⟩)
private def h (n : ℕ) : ℕ := (h_helper n).1
private lemma foo (a : ℕ) : h a < h (succ a) ∧ (fst ∘ f) (h a) ≤ (fst ∘ f) (h (succ a)) :=
have ¬ terminal o₁.le (fst ∘ f) (h a), from ((h_helper a).2)^.right,
have ∃ n, (h a) < n ∧ ((fst ∘ f) (h a)) ≤ ((fst ∘ f) n), from lt_of_non_terminal this,
show _, from some_spec this
theorem property_of_h {i j : ℕ} : i < j → (fst ∘ f) (h i) ≤ (fst ∘ f) (h j) :=
nat.rec_on j (λ H, absurd H dec_trivial)
(λ a IH lt,
have H1 : (fst ∘ f) (h a) ≤ (fst ∘ f) (h (succ a)), from (foo a)^.right,
have disj : i < a ∨ i = a, from lt_or_eq_of_lt_succ lt,
or.elim disj (λ Hl, quasiorder.trans (IH Hl) H1) (λ Hr, by simp [Hr, H1]))
theorem increasing_h {i j : ℕ} : i < j → h i < h j :=
nat.rec_on j
(λ H, absurd H dec_trivial)
(λ a ih lt,
have H1 : (h a) < h (succ a), from (foo a)^.left,
have disj : i < a ∨ i = a, from lt_or_eq_of_lt_succ lt,
or.elim disj (λ Hl, lt_trans (ih Hl) H1) (λ Hr, by simp [Hr, H1]))
theorem good_f : is_good f (prod_order o₁.le o₂.le) :=
have ∃ i j : ℕ, i < j ∧ (snd ∘ f ∘ h) i ≤ (snd ∘ f ∘ h) j, from wqo.is_good (snd ∘ f ∘ h),
let ⟨i,j,H⟩ := this in
have (fst ∘ f) (h i) ≤ (fst ∘ f) (h j), from property_of_h H^.left,
have Hr : (fst ∘ f) (h i) ≤ (fst ∘ f) (h j) ∧ (snd ∘ f) (h i) ≤ (snd ∘ f) (h j), from ⟨this, H^.right⟩,
have h i < h j, from increasing_h H^.left,
⟨(h i), (h j), ⟨this,Hr⟩⟩
end
theorem good_pairs (f : ℕ → A × B) : is_good f (prod_order o₁.le o₂.le) := good_f f
end
def wqo_prod {A B : Type} [o₁ : wqo A] [o₂ : wqo B] : wqo (A × B) :=
let op : A × B → A × B → Prop := prod_order o₁.le o₂.le in
have refl : ∀ p : A × B, op p p, from λ p, ⟨quasiorder.refl p.1,quasiorder.refl p.2⟩, -- by intro; apply and.intro;repeat {apply wqo.refl},
have trans : ∀ a b c, op a b → op b c → op a c, from λ a b c h1 h2,
⟨quasiorder.trans h1^.left h2^.left, quasiorder.trans h1^.right h2^.right⟩,
show _, from wqo.mk ⟨⟨op⟩,refl,trans⟩ good_pairs
-- example {A B : Type} [o₁ : wqo A] [o₂ : wqo B] : (@wqo_prod A B _ _).le = prod_order o₁.le o₂.le := rfl
end kruskal
|
function launcher(alignmentType)
% clc; clear; close all
addpath(genpath('thirdparty'));
%%
if ~exist('alignmentType','var')
alignmentType = 1; %0 for nowarp, 1 for optical flow, 2 for homography, 3 for similarity
end
path2dataset = '../dataset/qualitative_datasets';
%%
datasets = dir(path2dataset);
dirFlags = [datasets.isdir];
datasets(~dirFlags) = [];
datasets = {datasets(3:end).name};
% permute and divide into training and testing sets
num_datasets = numel(datasets);
seed = 12;
rng(seed);
ds_idx = randperm(num_datasets);
alignment = '';
if alignmentType == 0
alignment = '_nowarp';
elseif alignmentType == 1
alignment = '_OF';
elseif alignmentType == 2
alignment = '_homography';
end
path2output = @(stage,d) sprintf('../data/testing_real_all_nostab%s/%s/image_%s',alignment,stage,d);
fn_in = @(dsi,type,fri) [path2dataset '/' datasets{ds_idx(dsi)} '/' type '/' sprintf('%05d.jpg',fri)];
fn_out = @(stage,fri,frid) sprintf('%s/%05d.jpg',path2output(stage,frid),fri);
ds_range = 1:num_datasets;
for l = -2:2
for i = 1:num_datasets
checkDir(path2output(datasets{i},num2str(l)));
end
end
%%
for ii = 1:length(ds_range)
fr_cnt = 0;
i = ds_range(ii);
% get the frame range
datasets{ds_idx(i)}
files = dir([path2dataset '/' datasets{ds_idx(i)} '/input/*.jpg']);
if isempty(files)
files = dir([path2dataset '/' datasets{ds_idx(i)} '/input/*.png']);
end
if ~isempty(files)
[~,ststr,~] = fileparts(files(1).name);
[~,enstr,~] = fileparts(files(end).name);
start_frame = str2num(ststr);
end_frame = str2num(enstr);
frame_range = start_frame:min(start_frame+99,end_frame);
num_frame = numel(frame_range);
fr_idx = floor(linspace(frame_range(1),frame_range(end),num_frame));
for j = 1:num_frame
fr_cnt = fr_cnt+1;
% save image_1 to image_5
v0 = im2double(imread(fn_in(i,'input',fr_idx(j)+0)));
v0g = single(rgb2gray(v0));
[h,w,~] = size(v0);
for l = -2:2
if l ~= 0
vi = im2double(imread(fn_in(i,'input',max(min(fr_idx(j)+l,frame_range(end)),frame_range(1)))));
vig = single(rgb2gray(vi));
if alignmentType == 0
v_i0 = vi;
elseif alignmentType == 1
flo_i0 = genFlow(v0g, vig);
[v_i0, ~] = warpToRef(v0, vi, flo_i0);
elseif alignmentType == 2
v_i0 = homographyAlignment(v0,vi,0);
elseif alignmentType == 3
v_i0 = similarityAlignment(v0,vi,0);
end
else
v_i0 = v0;
end
imwrite(v_i0, fn_out(datasets{ds_idx(i)},fr_cnt,num2str(l)));
end
end
end
end
|
{-# LANGUAGE ViewPatterns #-}
module AsteriskGaussian where
import Control.Parallel.Strategies
import Data.Array.Repa as R
import Data.Complex as C
import Data.List as L
import Data.Vector.Storable as VS
import Data.Vector.Unboxed as VU
import Graphics.Rendering.Chart.Backend.Cairo
import Graphics.Rendering.Chart.Easy
import Math.Gamma
import Pinwheel.FourierSeries2D
-- import Pinwheel.Gaussian
import qualified FourierPinwheel.Filtering as FP
import System.Directory
import System.Environment
import System.FilePath
import Text.Printf
import Utils.BLAS
import Utils.List
import Utils.Distribution
main = do
args@(numR2FreqStr:numThetaFreqStr:numRFreqStr:numPointStr:numThetaStr:numRStr:xStr:yStr:periodStr:std1Str:std2Str:stdR2Str:_) <-
getArgs
let numR2Freq = read numR2FreqStr :: Int
numThetaFreq = read numThetaFreqStr :: Int
numRFreq = read numRFreqStr :: Int
numPoint = read numPointStr :: Int
numTheta = read numThetaStr :: Int
numR = read numRStr :: Int
x = read xStr :: Double
y = read yStr :: Double
period = read periodStr :: Double
std1 = read std1Str :: Double
std2 = read std2Str :: Double
stdR2 = read stdR2Str :: Double
folderPath = "output/test/AsteriskGaussian"
let centerR2Freq = div numR2Freq 2
centerThetaFreq = div numThetaFreq 2
periodEnv = period ^ 2 / 4
a = std1
b = std2
asteriskGaussianFreqTheta =
VS.concat .
parMap
rdeepseq
(\tFreq' ->
let tFreq = tFreq' - centerThetaFreq
arr =
R.map (* (gaussian1DFreq (fromIntegral tFreq) b :+ 0)) $
analyticalFourierCoefficients1
numR2Freq
1
tFreq
0
a
period
periodEnv
in VS.convert . toUnboxed . computeS $
centerHollowArray numR2Freq arr) $
[0 .. numThetaFreq - 1]
deltaTheta = 2 * pi / Prelude.fromIntegral numTheta
harmonicsThetaD =
fromFunction (Z :. numTheta :. numThetaFreq :. numR2Freq :. numR2Freq) $ \(Z :. theta' :. tFreq' :. xFreq' :. yFreq') ->
let tFreq = fromIntegral $ tFreq' - centerThetaFreq
xFreq = fromIntegral $ xFreq' - centerR2Freq
yFreq = fromIntegral $ yFreq' - centerR2Freq
theta = fromIntegral theta' * deltaTheta
in (1 / (2 * pi) :+ 0) *
cis (2 * pi / period * (x * xFreq + y * yFreq) + tFreq * theta)
harmonicsTheta <- computeUnboxedP harmonicsThetaD
xs <-
VS.toList . VS.map magnitude <$>
gemmBLAS
numTheta
1
(numThetaFreq * numR2Freq ^ 2)
(VS.convert . toUnboxed $ harmonicsTheta)
asteriskGaussianFreqTheta
toFile def (folderPath </> "theta.png") $ do
layout_title .=
printf "(%d , %d)" (Prelude.round x :: Int) (Prelude.round y :: Int)
plot
(line
""
[ L.zip
[fromIntegral i * deltaTheta / pi | i <- [0 .. numTheta - 1]]
xs
])
let centerRFreq = div numRFreq 2
centerR2Freq = div numR2Freq 2
gaussian2D =
computeUnboxedS . fromFunction (Z :. numR2Freq :. numR2Freq) $ \(Z :. i' :. j') ->
let i = i' - centerR2Freq
j = j' - centerR2Freq
in exp
(pi * fromIntegral (i ^ 2 + j ^ 2) /
((-1) * period ^ 2 * stdR2 ^ 2)) /
(2 * pi * stdR2 ^ 2) :+
0
asteriskGaussianFreqR =
VS.concat .
parMap
rdeepseq
(\rFreq' ->
let rFreq = rFreq' - centerRFreq
arr =
R.map
(* ((gaussian1DFourierCoefficients
(fromIntegral rFreq)
(log periodEnv)
b :+
0) -- *
-- cis
-- ((-2) * pi / log periodEnv * fromIntegral rFreq *
-- log 0.25)
)) . centerHollowArray numR2Freq $
analyticalFourierCoefficients1
numR2Freq
1
0
rFreq
a
period
periodEnv
in VS.convert . toUnboxed . computeS . centerHollowArray numR2Freq $
arr -- *^ gaussian2D
) $
[0 .. numRFreq - 1]
deltaR = log periodEnv / Prelude.fromIntegral numR
harmonicsRD =
fromFunction (Z :. numR :. numRFreq :. numR2Freq :. numR2Freq) $ \(Z :. r' :. rFreq' :. xFreq' :. yFreq') ->
let rFreq = fromIntegral $ rFreq' - centerRFreq
xFreq = fromIntegral $ xFreq' - centerR2Freq
yFreq = fromIntegral $ yFreq' - centerR2Freq
r = fromIntegral r' * deltaR - log periodEnv / 2
in (1 / log periodEnv :+ 0) *
cis
(2 * pi / period * (x * xFreq + y * yFreq) +
2 * pi / log periodEnv * rFreq * r)
harmonicsR <- computeUnboxedP harmonicsRD
ys <-
VS.toList . VS.map magnitude <$>
gemmBLAS
numR
1
(numRFreq * numR2Freq ^ 2)
(VS.convert . toUnboxed $ harmonicsR)
asteriskGaussianFreqR
toFile def (folderPath </> "R.png") $ do
layout_title .= printf "(%.3f , %.3f)" x y
plot
(line
""
[ L.zip
[ fromIntegral i * deltaR - log periodEnv / 2
| i <- [0 .. numR - 1]
]
ys
])
|
# Scientific Computing with Python (Second Edition)
# Chapter 16
## 16.1 What are symbolic computations?
```python
from scipy.integrate import quad
quad(lambda x : 1/(x**2+x+1),a=0, b=4)
```
(0.9896614396122965, 1.1735663442283496e-08)
### 16.1.1 Elaborating an example in SymPy
```python
from sympy import *
init_printing()
```
```python
x = symbols('x')
f = Lambda(x, 1/(x**2 + x + 1))
```
```python
integrate(f(x),x)
```
```python
pf = Lambda(x, integrate(f(x),x))
diff(pf(x),x)
```
```python
simplify(diff(pf(x),x))
```
```python
pf(4) - pf(0)
```
```python
(pf(4)-pf(0)).evalf()
```
## 16.2 Basic elements of SymPy
### 16.2.1 Symbols – the basis of all formulas
```python
x, y, mass, torque = symbols('x y mass torque')
```
```python
symbol_list=[symbols(l) for l in 'x y mass torque'.split()]
symbol_list
```
```python
x, y, mass, torque = symbol_list
```
```python
row_index=symbols('i',integer=True)
print(row_index**2) # returns i**2
```
i**2
```python
integervariables = symbols('i:l', integer=True)
dimensions = symbols('m:n', integer=True)
realvariables = symbols('x:z', real=True)
```
```python
A = symbols('A1:3(1:4)')
A
```
### 16.2.2 Numbers
```python
1/3 # returns 0.3333333333333333
sympify(1)/sympify(3) # returns '1/3'
```
```python
Rational(1,3)
```
### 16.2.3 Functions
```python
f, g = symbols('f g', cls=Function)
```
```python
f = Function('f')
g = Function('g')
```
```python
x = symbols('x')
f, g = symbols('f g', cls=Function)
diff(f(x*g(x)),x)
```
```python
x = symbols('x:3')
f(*x)
```
```python
[f(*x).diff(xx) for xx in x]
```
```python
x = symbols('x')
f(x).series(x,0,n=4)
```
### 16.2.4 Elementary functions
```python
x = symbols('x')
simplify(cos(x)**2 + sin(x)**2) # returns 1
```
```python
atan(x).diff(x) - 1./(x**2+1) # returns 0
```
```python
import numpy as np
import sympy as sym
# working with numbers
x=3
y=np.sin(x)
y
```
```python
# working with symbols
x=sym.symbols('x')
y=sym.sin(x)
y
```
### 16.2.5 Lambda - functions
```python
C,rho,A,v=symbols('C rho A v')
# C drag coefficient, A cross-sectional area, rho density
# v speed
f_drag = Lambda(v,-Rational(1,2)*C*rho*A*v**2)
f_drag
```
```python
x = symbols('x')
f_drag(2)
```
```python
f_drag(x/3)
```
```python
x,y=symbols('x y')
t=Lambda((x,y),sin(x) + cos(2*y))
```
```python
t(pi,pi/2) # returns -1
```
```python
p=(pi,pi/2)
t(*p) # returns -1
```
```python
F=Lambda((x,y),Matrix([sin(x) + cos(2*y), sin(x)*cos(y)]))
F
```
```python
F
```
```python
F(x,y).jacobian((x,y))
```
## 16.3 Symbolic Linear Algebra
### 16.3.1 Symbolic matrices
```python
phi=symbols('phi')
rotation=Matrix([[cos(phi), -sin(phi)],
[sin(phi), cos(phi)]])
rotation
```
```python
simplify(rotation.T*rotation -eye(2)) # returns a 2 x 2 zero matrix
```
```python
simplify(rotation.T - rotation.inv())
```
```python
M = Matrix(3,3, symbols('M:3(:3)'))
M
```
```python
def toeplitz(n):
a = symbols('a:'+str(2*n))
f = lambda i,j: a[i-j+n-1]
return Matrix(n,n,f)
```
```python
toeplitz(5)
```
```python
M[0,2]=0 # changes one element
M[1,:]=Matrix(1,3,[1,2,3]) # changes an entire row
M
```
### 16.3.2 Examples for linear algebra methods in SymPy
```python
A = Matrix(3,3,symbols('A1:4(1:4)'))
b = Matrix(3,1,symbols('b1:4'))
x = A.LUsolve(b)
x
```
```python
simplify(x)
```
## 16.4 Substitutions
```python
x, a = symbols('x a')
b = x + a
b
```
```python
x, a = symbols('x a')
b = x + a
c = b.subs(x,0)
d = c.subs(a,2*a)
print(c, d) # returns (a, 2a)
```
a 2*a
```python
b.subs(x,0)
```
```python
b.subs({x:0}) # a dictionary as argument
```
```python
b.subs({x:0, a:2*a}) # several substitutions in one
```
```python
x, a, y = symbols('x a y')
b = x + a
b.subs({a:a*y, x:2*x, y:a/y})
b.subs({y:a/y, a:a*y, x:2*x})
```
```python
b.subs([(y,a/y), (a,a*y), (x,2*x)])
```
```python
n, alpha = symbols('n alpha')
b = cos(n*alpha)
b.subs(cos(n*alpha), 2*cos(alpha)*cos((n-1)*alpha)-cos((n-2)*alpha))
```
```python
T=toeplitz(5)
T
```
```python
T.subs(T[0,2],0)
```
```python
a2 = symbols('a2')
T.subs(a2,0)
```
```python
symbs = [symbols('a'+str(i)) for i in range(19) if i < 3 or i > 5]
substitutions=list(zip(symbs,len(symbs)*[0]))
T.subs(substitutions)
```
## 16.5 Evaluating symbolic expressions
```python
pi.evalf() # returns 3.14159265358979
```
```python
pi.evalf(30) # returns 3.14159265358979323846264338328
```
### 16.5.1 Example: A study on the convergence order of Newton's Method
```python
import sympy as sym
x = sym.Rational(1,2)
xns=[x]
for i in range(1,9):
x = (x - sym.atan(x)*(1+x**2)).evalf(3000)
xns.append(x)
```
```python
import numpy as np
# Test for cubic convergence
print(np.array(np.abs(np.diff(xns[1:]))/np.abs(np.diff(xns[:-1]))**3,dtype=np.float64))
```
[0.41041618 0.65747717 0.6666665 0.66666667 0.66666667 0.66666667
0.66666667]
### 16.5.2 Converting a symbolic expression into a numeric function
```python
t=symbols('t')
x=[0,t,1]
# The Vandermonde Matrix
V = Matrix([[0, 0, 1], [t**2, t, 1], [1, 1,1]])
y = Matrix([0,1,-1]) # the data vector
a = simplify(V.LUsolve(y)) # the coefficients
# the leading coefficient as a function of the parameter
a2 = Lambda(t,a[0])
a2
```
```python
leading_coefficient = lambdify(t,a2(t))
```
```python
import numpy as np
import matplotlib.pyplot as mp
t_list= np.linspace(-0.4,1.4,200)
ax=mp.subplot(111)
lc_list = [leading_coefficient(t) for t in t_list]
ax.plot(t_list, lc_list)
ax.axis([-.4,1.4,-15,10])
ax.set_xlabel('Free parameter $t$')
ax.set_ylabel('$a_2(t)$')
```
|
-- Idris2
module Sinter
import Data.List
import Data.Vect
import Data.String.Extra
import Core.Context
import Core.CompileExpr
import Compiler.LambdaLift
import Compiler.Common
import Idris.Driver
------------
-- SINTER --
------------
sqBrack : String -> String
sqBrack s = "[ " ++ s ++ " ]"
-- only literals in sinter are ints
data SinterLit
= SinInt Int Nat -- TODO: Int is value then width
| SinStr String
-- IDs are basically strings (not really, TODO)
data SinterID = MkSinterID String
genSinterID : SinterID -> String
genSinterID (MkSinterID id_) = id_
Show SinterID where
show = genSinterID
-- declared here, defined later
data SinterGlobal : Type where
-- an expression is a list of expressions, an ID, or a literal
data Sexpr : Type where
SexprList : (es : List Sexpr) -> Sexpr
SexprID : (id_ : SinterID) -> Sexpr
SexprLit : SinterLit -> Sexpr
SexprLet : (next : Sexpr) -> (defFun : SinterGlobal) -> Sexpr -- let-in for sinter
genSexpr : Sexpr -> String
genSexpr (SexprList []) = "[]"
genSexpr (SexprList es) =
let
exprs = map genSexpr es
in sqBrack $ join " " exprs
-- in "[ " ++ (concat exprs) ++ " ]"
genSexpr (SexprID id_) = show id_
genSexpr (SexprLit (SinInt v w)) =
"( " ++ show w ++ "; " ++ show v ++ " )"
genSexpr (SexprLit (SinStr s)) =
show s
-- TODO
genSexpr (SexprLet next defFun) =
genSexpr next
Show Sexpr where
show = genSexpr
-------------------------------
-- Things that can be global --
-------------------------------
-- declared earlier, defined here
data SinterGlobal = SinDef SinterID (List SinterID) Sexpr
| SinDecl SinterID (List SinterID)
| SinType SinterID (List SinterID)
genSinter : SinterGlobal -> String
genSinter (SinDef fName args body) =
let
args' = map show args
argsStr = sqBrack $ join " " args'
in sqBrack $ "def " ++ show fName ++ " " ++ argsStr ++ "\n\t"
++ genSexpr body
genSinter (SinDecl fName args) =
let
args' = map show args
argsStr = sqBrack $ join " " args'
in sqBrack $ "dec " ++ show fName ++ " " ++ argsStr
genSinter (SinType tName membs) =
let
membs' = map show membs
membsStr = sqBrack $ join " " membs'
in sqBrack $ "type " ++ show tName ++ membsStr
Show SinterGlobal where
show = genSinter
testCase : SinterGlobal
testCase = SinDef (MkSinterID "test") [(MkSinterID "arg1"), (MkSinterID "arg2")]
(SexprList [SexprLit (SinInt 1 2)])
-----------------------
-- Sinter primitives --
-----------------------
||| Primitive implemented in sinter
sinterPrim : String -> Sexpr
sinterPrim name = SexprID $ MkSinterID name
||| Primitive supplied by stdlib
sinterStdlib : String -> Sexpr
sinterStdlib name = SexprID $ MkSinterID ("stdlib_" ++ name)
||| Call the special sinter function for creating closures
sinterClosureCon : Sexpr
sinterClosureCon = sinterPrim ">makeClosure"
||| Call the special sinter function for running or adding args to closures
sinterClosureAdd : Sexpr
sinterClosureAdd = sinterPrim ">closureAddElem"
||| Crash sinter very inelegantly
sinterCrash : Sexpr
sinterCrash = SexprList [ sinterPrim "CRASH" ]
------------------
-- GORY DETAILS --
------------------
||| Symbol for indicating membership of namespace: "--|"
specialSep : String
specialSep = "--|"
||| Symbol for indicating record access: "."
recordAcc : String
recordAcc = "."
||| Separate two ids by "specialSep"
stitch : SinterID -> SinterID -> SinterID
stitch (MkSinterID x) (MkSinterID y) = MkSinterID $ x ++ specialSep ++ y
||| Turn a NS into a string separated by "specialSep"
mangleNS : Namespace -> SinterID
mangleNS ns = MkSinterID $ showNSWithSep specialSep ns
||| Sinter doesn't have a concept of NameSpaces, so define unique, but
||| identifiable names/strings instead.
mangle : Name -> SinterID
mangle (NS nameSpace name) =
let
nameSpace' = mangleNS nameSpace
name' = mangle name
in
stitch nameSpace' name'
mangle (UN x) = MkSinterID x
mangle (MN x i) = MkSinterID $ x ++ "-" ++ show i
mangle (PV n i) = MkSinterID $ (show $ mangle n) ++ "-" ++ show i
--mangle (DN x y) = MkSinterID x -- FIXME: correct? (assumes x repr.s y)
mangle (DN _ y) = mangle y -- ^ incorrect! the Name itself still
-- needs to be mangled; the way to display
-- it doesn't necessarily match our way of
-- representing names.
mangle (RF x) = MkSinterID $ recordAcc ++ x
mangle (Nested (i, j) n) =
MkSinterID $ "nested_" ++ (show i) ++ "_" ++ (show j) ++ (show $ mangle n)
-- string repr.n of case followed by unique Int (?)
mangle (CaseBlock x i) = MkSinterID $ "case_" ++ x ++ "-" ++ (show i)
mangle (WithBlock x i) = MkSinterID $ "with_" ++ x ++ "-" ++ (show i)
mangle (Resolved i) = MkSinterID $ "resolved_" ++ (show i)
idrisWorld : Sexpr
idrisWorld = SexprID $ MkSinterID "**IDRIS_WORLD**"
||| Assume < 2^31 number of args to any function (seriously, what would you do
||| with 2^31 args?...)
nArgsWidth : Nat
nArgsWidth = 32
||| Turn all the arguments (including the scope) into SinterIDs
superArgsToSinter : List Name -> List SinterID
superArgsToSinter ns = map mangle ns
-- Constants
constantToSexpr : Constant -> Sexpr
constantToSexpr (I x) = SexprLit $ SinInt (cast x) 64
constantToSexpr (BI x) = ?sexprConstBI
constantToSexpr (B8 x) = SexprLit $ SinInt (cast x) 8
constantToSexpr (B16 x) = SexprLit $ SinInt (cast x) 16
constantToSexpr (B32 x) = SexprLit $ SinInt (cast x) 32
constantToSexpr (B64 x) = SexprLit $ SinInt (cast x) 64
constantToSexpr (Str x) = SexprLit $ SinStr x
constantToSexpr (Ch x) = ?constantToSexpr_rhs_8
constantToSexpr (Db x) = ?constantToSexpr_rhs_9
constantToSexpr WorldVal = idrisWorld
constantToSexpr IntType = ?constantToSexpr_rhs_11
constantToSexpr IntegerType = ?constantToSexpr_rhs_12
constantToSexpr Bits8Type = ?constantToSexpr_rhs_13
constantToSexpr Bits16Type = ?constantToSexpr_rhs_14
constantToSexpr Bits32Type = ?constantToSexpr_rhs_15
constantToSexpr Bits64Type = ?constantToSexpr_rhs_16
constantToSexpr StringType = ?constantToSexpr_rhs_17
constantToSexpr CharType = ?constantToSexpr_rhs_18
constantToSexpr DoubleType = ?constantToSexpr_rhs_19
constantToSexpr WorldType = ?constantToSexpr_rhs_20
mutual
-- Primitive Functions
primFnToSexpr : {scope : _} -> {args : _}
-> PrimFn arity -> Vect arity (Lifted (scope ++ args)) -> Sexpr
primFnToSexpr (Add ty) [x, y] =
SexprList [ sinterStdlib "add", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (Sub ty) [x, y] =
SexprList [ sinterStdlib "sub", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (Mul ty) [x, y] =
SexprList [ sinterStdlib "mul", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (Div ty) [x, y] =
SexprList [ sinterStdlib "div", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (Mod ty) [x, y] =
SexprList [ sinterStdlib "mod", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (Neg ty) [x] =
SexprList [ sinterStdlib "neg", liftedToSexpr x ]
primFnToSexpr (ShiftL ty) [x, y] =
SexprList [ sinterStdlib "shiftl", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (ShiftR ty) [x, y] =
SexprList [ sinterStdlib "shiftr", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (BAnd ty) [x, y] =
SexprList [ sinterStdlib "bitwAnd", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (BOr ty) [x, y] =
SexprList [ sinterStdlib "bitwOr", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (BXOr ty) [x, y] =
SexprList [ sinterStdlib "bitwXor", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (LT ty) [x, y] =
SexprList [ sinterStdlib "lt", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (LTE ty) [x, y] =
SexprList [ sinterStdlib "lte", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (EQ ty) [x, y] =
SexprList [ sinterStdlib "eq", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (GTE ty) [x, y] =
SexprList [ sinterStdlib "gte", liftedToSexpr x, liftedToSexpr y ]
primFnToSexpr (GT ty) [x, y] =
SexprList [ sinterStdlib "gt", liftedToSexpr x, liftedToSexpr y ]
-- TODO
primFnToSexpr StrLength [s] = ?sinterStrLen
primFnToSexpr StrHead [s] = ?sinterStrHead
primFnToSexpr StrTail [s] = ?sinterStrTail
primFnToSexpr StrIndex [s, i] = ?sinterStrIndex
primFnToSexpr StrCons [s1, s2] = ?sinterStrCons
primFnToSexpr StrAppend [s1, s2] =
SexprList [ sinterStdlib "strAppend", liftedToSexpr s1, liftedToSexpr s2 ]
primFnToSexpr StrReverse [s] = ?sinterStrReverse
primFnToSexpr StrSubstr [i, j, s] = ?sinterSubstr
-- TODO
primFnToSexpr DoubleExp [d] = ?primFnToSexpr_rhs_25
primFnToSexpr DoubleLog [d] = ?primFnToSexpr_rhs_26
primFnToSexpr DoubleSin [d] = ?primFnToSexpr_rhs_27
primFnToSexpr DoubleCos [d] = ?primFnToSexpr_rhs_28
primFnToSexpr DoubleTan [d] = ?primFnToSexpr_rhs_29
primFnToSexpr DoubleASin [d] = ?primFnToSexpr_rhs_30
primFnToSexpr DoubleACos [d] = ?primFnToSexpr_rhs_31
primFnToSexpr DoubleATan [d] = ?primFnToSexpr_rhs_32
primFnToSexpr DoubleSqrt [d] = ?primFnToSexpr_rhs_33
primFnToSexpr DoubleFloor [d] = ?primFnToSexpr_rhs_34
primFnToSexpr DoubleCeiling [d] = ?primFnToSexpr_rhs_35
-- TODO
primFnToSexpr (Cast x y) [z] = ?primFnToSexpr_rhs_36
primFnToSexpr BelieveMe [_, _, thing] = liftedToSexpr thing
-- ^ I believe this is correct?
primFnToSexpr Crash [fc, reason] =
sinterCrash
||| Create a call to a function which evaluates `in` over `let`
||| let f x = y in z
||| is equivalent to
||| (\f . z) (\x . y)
lletToSexpr : {scope : _} -> {args : _}
-> FC
-> (n : Name)
-> (existing : Lifted (scope ++ args))
-> (in_expr : Lifted (n :: (scope ++ args)))
-> Sexpr
lletToSexpr fc n existing in_expr =
let
-- containing IN
vars = scope ++ args
cursedFuncName = show fc ++ show n
cFunName = MkSinterID cursedFuncName
cFunArgs = (mangle n) :: map mangle vars
cFunBody = liftedToSexpr {scope=(n :: scope)} {args=args} in_expr
cFunDef = SinDef cFunName cFunArgs cFunBody
-- applying this
cFunCallArgs = liftedToSexpr {scope=scope} {args=args} existing
cFunCall = SexprList $ (SexprID cFunName)
:: cFunCallArgs
:: (map (SexprID . mangle) vars)
in
SexprLet cFunCall cFunDef
-- Functions
||| Compile the definition to sexprs
liftedToSexpr : {scope : _} -> {args : _} -> Lifted (scope ++ args) -> Sexpr
-- idx points to right variable; de bruijn index
liftedToSexpr (LLocal {idx} fc p) = -- ?llocalToSinter
case take (S idx) (scope ++ args) of
-- FIXME: this is very naughty and should be handled better
[] => assert_total $ idris_crash "scope ++ args did not contain name"
(n :: _) => SexprID $ mangle n
-- complete function call
liftedToSexpr (LAppName fc _ n fArgs) =
let
funName = SexprID $ mangle n
funArgs = map (liftedToSexpr {scope=scope} {args=args}) fArgs
in
SexprList $ funName :: funArgs
-- partial function call
liftedToSexpr (LUnderApp fc n missing fArgs) =
let
sinName = SexprID $ mangle n
sinMiss = SexprLit $ SinInt (cast missing) nArgsWidth
nArgs = length args
sinNArgs = SexprLit $ SinInt (cast nArgs) nArgsWidth
-- number of args function expects
sinArity = SexprLit $ SinInt (cast (missing + nArgs)) nArgsWidth
-- list of arguments
--sinArgs = SexprList $ map liftedToSexpr fArgs
sinArgs = SexprList $ map (liftedToSexpr {scope=scope} {args=args}) fArgs
in
-- make a closure containing the above info (sinter-specific closure)
SexprList [sinterClosureCon , sinName , sinArity , sinNArgs , sinArgs]
-- application of a closure to another argument; potentially to the last arg
liftedToSexpr (LApp fc _ closure arg) =
let
sinClosure = liftedToSexpr closure
sinArg = liftedToSexpr arg
in
SexprList [sinterClosureAdd, sinClosure, sinArg]
-- let expressions
liftedToSexpr (LLet fc n existing in_expr) =
lletToSexpr fc n existing in_expr
-- constructor calls
liftedToSexpr (LCon fc n tag xs) =
let
(MkSinterID mn) = mangle n
name = MkSinterID $ mn ++ show tag
fArgs = map (liftedToSexpr {scope=scope} {args=args}) xs
in
SexprList $ (SexprID name) :: fArgs
-- primitive operators
liftedToSexpr (LOp fc _ x xs) =
primFnToSexpr x xs
liftedToSexpr (LExtPrim fc _ p xs) = ?liftedToSexpr_rhs_8
liftedToSexpr (LConCase fc x xs y) = ?liftedToSexpr_rhs_9
liftedToSexpr (LConstCase fc lvars lConAlts m_default) =
let
comparator = liftedToSexpr lvars
in
case m_default of
Nothing => sinterCrash
(Just x) => ifHelper comparator lConAlts (liftedToSexpr x)
liftedToSexpr (LPrimVal fc x) = constantToSexpr x
liftedToSexpr (LErased fc) =
SexprLit $ SinInt 0 0
liftedToSexpr (LCrash fc x) =
sinterCrash
ifHelper : {scope : _} -> {args : _}
-> Sexpr -> List (LiftedConstAlt (scope ++ args)) -> Sexpr -> Sexpr
ifHelper x [] def = def
ifHelper x ((MkLConstAlt c body) :: alts) def =
let
sinC = constantToSexpr c
sinBody = liftedToSexpr body
ifCond = SexprList [ (sinterStdlib "eq") , x , sinC ]
elseBody = ifHelper x alts def
in
SexprList [ SexprID $ MkSinterID "if" , ifCond, sinBody , elseBody ,
SexprID $ MkSinterID "32" ]
||| Compile a constructor's definition
liftedConToSinter : (tag : Maybe Int)
-> (arity : Nat)
-> (nt : Maybe Nat)
-> Core SinterGlobal
liftedConToSinter tag arity nt = ?liftedConToSinter_rhs
||| Compile a pair of Name and its associated definition into a SinterGlobal,
||| i.e.:
||| - mangle the Name into a valid sinter name
||| - compile the definition into a sexpr
liftedToSinter : (Name, LiftedDef) -> Core SinterGlobal
-- FUNCTIONS
liftedToSinter (name, (MkLFun args scope body)) =
let
sinName = mangle name
superArgs = args ++ reverse scope
sinArgs = superArgsToSinter superArgs
sinDefn = liftedToSexpr body
in
pure $ SinDef sinName sinArgs sinDefn
-- CONSTRUCTORS
liftedToSinter (name, (MkLCon tag arity nt)) =
let
sinName = mangle name
sinDefn = liftedConToSinter tag arity nt
in
pure $ SinType sinName ?liftedConBody
-- FFI CALLS
liftedToSinter (name, (MkLForeign ccs fargs x)) = ?liftedFFICalltoSinter
-- GLOBAL ERRORS
liftedToSinter (name, (MkLError x)) = ?liftedErrorToSinter
----------------
-- TOP OF API --
----------------
compile : Ref Ctxt Defs -> (tmpDir : String) -> (outputDir : String) ->
ClosedTerm -> (outfile : String) -> Core (Maybe String)
compile context tmpDir outputDir term outfile =
do compData <- getCompileData False Lifted term
let defs = lambdaLifted compData
sinterGlobs <- traverse liftedToSinter defs
-- readyForCG <- traverse bubbleLets sinterGlobs
?compile_rhs
execute : Ref Ctxt Defs -> (tmpDir : String) -> ClosedTerm -> Core ()
execute context tmpDir term =
throw $ InternalError "Sinter backend can only compile, sorry."
sinterCodegen : Codegen
sinterCodegen = MkCG compile execute
main : IO ()
main = mainWithCodegens [("sinter", sinterCodegen)]
|
# Clothing Recognition Task
If this is your first time running a notebook - welcome!! Notebooks are awesome because they let us play around and experiment
with code with near-instant feedback. Some pointers:
1. To execute a cell, click on it and hit SHIFT-Enter
2. Once something is executed, the variables are in memory - inspect them!
## Getting Started
This first cell imports the necessary libraries so we can get started:
```python
import torch
import numpy as np
from PIL import Image
import torch.nn as nn
import torch.onnx as onnx
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
```
## Data!
Without data we really can't do anything with machine learning. At this point we have our sharp question: **can we predict a clothing item given a 28x28 vector of numbers?**
Once that is all squared away we need to take a look at our data. A couple of cells below there is a helper function that visualizes the clothing items (a sanity check).
The next cell downloads the standard clothing dataset (called FashionMNIST). The `transform` and `target_transform` parts of this call add some conversion steps to make the data more suitable for the models we will try.
```python
clothing = datasets.FashionMNIST('data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.reshape(28*28))
]),
target_transform=transforms.Compose([
transforms.Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
])
)
```
Now for the actual clothing class names:
```python
classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
```
A little helper to draw the clothing
```python
def draw_clothes(clothing):
fig, axes = plt.subplots(7, 10, figsize=(18, 7),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=2.5))
for i, ax in enumerate(axes.flat):
X, y = clothing[i]
ax.imshow(255 - X.reshape(28,28) * 255, cmap='gray')
ax.set_title('{0}'.format(classes[torch.argmax(y).item()]))
```
Here is our sanity check!
```python
draw_clothes(clothing)
```
Feel free to take a look at the data by printing out `x` and/or `y`. They really are just numbers! A couple of things that might seem strange when you print them out:
1. "I thought you said the images were 784 sized vectors of 0-255???" - They were! We just normalized the vectors by dividing by 255 so the new range is 0-1 (it makes the numbers more tidy)
2. "I thought the `y` was a numerical answer?? Instead it's a 10 sized vector!" - Yes - this is called a one-hot encoding of the answer. Now there's a `1` in the index of the right answer. Again, this makes the math work a lot better for the models we will be creating.
```python
x, y = clothing[0]
print(x)
print(y)
```
# Choosing Models
Now that we have some data it's time to start picking models we think might work. This is where the science part of data-science comes in: we guess and then check if our assumptions were right. Imagine models like water pipes that have to distribute water to 10 different hoses depending on 784 knobs. These 784 knobs represent the individual pixels in the image and the 10 hoses at the end represent the clothing classes (the predicted class is the index of the one with the most water coming out of it). Our job now is to pick the plumbing in between.
The next three cells represent three different constructions in an increasingly more complex order:
1. The first is a simple linear model,
2. The second is a 3 layer Neural Network,
3. and the last is a full convolutional neural network
While it is out of the scope of this tutorial to fully explain how they work, just imagine they are basically plumbing with internal knobs that have to be tuned to produce the right water pressure at the end to push the most water out of the right
index. As you go down each cell the plumbing and corresponding internal knobs just get more complicated.
```python
class SimpleLinear(nn.Module):
def __init__(self):
super(SimpleLinear, self).__init__()
self.layer1 = nn.Linear(28*28, 10)
def forward(self, x):
x = self.layer1(x)
return F.softmax(x, dim=1)
```
```python
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
self.layer1 = nn.Linear(28*28, 512)
self.layer2 = nn.Linear(512, 512)
self.output = nn.Linear(512, 10)
def forward(self, x):
x = F.relu(self.layer1(x))
x = F.relu(self.layer2(x))
x = self.output(x)
return F.softmax(x, dim=1)
```
```python
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = x.view(-1, 1, 28, 28)
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.softmax(x, dim=1)
```
# Optimizing Model Parameters
Now that we have some models it's time to optimize the internal parameters to see if they can do a good job at recognizing clothing! It turns out there are some parameters that we can give the optimization algorithm to tune how it trains - these are called hyper-parameters. That's what the three variables represent below:
```python
learning_rate = 1e-3
batch_size = 64
epochs = 5
```
The `learning_rate` basically specifies how fast the algorithm will learn the model parameters. Right now you're probably thinking "let's set it to fifty million #amirite?" The best analogy for why this is a bad idea is golf. I'm a terrible golfist (is that right?) so I don't really know anything - but pretend you are trying to sink a shot (again sorry) but can only hit the ball the same distance every time. Easy right? Hit it the exact length from where you are to the hole! Done! Now pretend you don't know where the hole is but just know the general direction. Now the distance you choose actually matters. If it is too long a distance you'll miss the hole, and then when you hit it back you'll overshoot again. If the distance is too small then it will take forever to get there but for sure you'll eventually get it in. Basically you have to guess what the right distance per shot should be and then try it out. That is basically what the learning rate does for finding the "hole in one" for the right parameters (ok, I'm done with the golf stuff).
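If the golf story feels too abstract, here is a tiny toy sketch (completely separate from our clothing model - the function and numbers are made up purely for illustration) showing what a sensible versus an overly ambitious learning rate does when minimizing a simple function:
```python
# toy example: minimize f(w) = (w - 3)**2 starting from w = 0
for lr in (0.1, 1.1):            # a sensible shot distance vs. an overly long one
    w = 0.0
    for _ in range(10):
        grad = 2 * (w - 3)       # derivative of (w - 3)**2
        w = w - lr * grad
    print(lr, w)                 # lr=0.1 creeps toward the minimum at 3; lr=1.1 overshoots further every step
```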
Below there are three things that make this all work:
1. **The Model** - this is the function we're making that takes in the image vector and should return the right clothing class
2. **The Cost Function** (sometimes called the loss function). I know I promised I was done with golf but I lied. Remember how I said in our screwy golf game you knew the general direction of the hole? The cost function tells us the distance to the hole - when it's zero we're there! In actual scientific terms, the cost function tells us how bad the model is at getting the right answer. As we take shots you should see the cost function decreasing. If this does not happen then something is wrong. At this point I would change the shot distance (or `learning_rate`) to something smaller and try again. If that doesn't work maybe change the model!
3. **The Optimizer** - this part is the bit that actually changes the model parameters. It has a sense for the direction we should be shooting and updates all of the internal numbers inside the model to find the best internal knobs to predict the right digits. In this case I am using the Binary Cross Entropy cost function because, well, I know it works. There are a ton of different cost functions you can choose from that fit a variety of different scenarios.
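To make the cost function part a bit more concrete, here is a small, made-up example (the numbers are invented for illustration, not taken from the model) showing that `BCELoss` reports a small "distance to the hole" for a mostly correct prediction and a much larger one for a confidently wrong prediction:
```python
bce = torch.nn.BCELoss()
# one-hot target: the right answer is class 2 ("Pullover")
target = torch.zeros(1, 10)
target[0, 2] = 1.0
# a mostly correct prediction (probabilities per class)
good_pred = torch.full((1, 10), 0.05)
good_pred[0, 2] = 0.9
print(bce(good_pred, target))    # small loss
# a confidently wrong prediction
bad_pred = torch.full((1, 10), 0.05)
bad_pred[0, 0] = 0.9
print(bce(bad_pred, target))     # noticeably larger loss
```
With that intuition in place, here is the actual setup: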
```python
# where to run
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
# selected model - you can uncomment
# the other models for future runs
model = SimpleLinear().to(device)
#model = NeuralNetwork().to(device)
#model = CNN().to(device)
print(model)
# cost function used to determine best parameters
cost = torch.nn.BCELoss()
# used to create optimal parameters
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
I forgot to mention the whole `cuda` thing. GPUs speed this whole process up because this is basically all a big matrix multiplication problem in a `for` loop. PyTorch is great because you basically just need to tell the model where to run (either on the CPU or using CUDA - which is a platform for moving computations to the GPU).
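As a quick, purely illustrative aside, "telling it where to run" really is just a matter of moving tensors (and models) to the chosen device:
```python
# tiny illustration of device placement
t = torch.ones(3, 3)
print(t.device)        # cpu by default
t = t.to(device)       # moves to 'cuda' if available, otherwise stays on cpu
print(t.device)
```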
Now for the learning part! The `dataloader`'s job is to iterate through the entire dataset (in this case 60,000 examples of clothing images and their corresponding labels) but to take chunks of size `batch_size` of the data to process. This is another hyperparameter that needs to be chosen. `epochs` is the number of times we want to loop through the dataset in its entirety (again something we choose based upon how our experiment goes).
A word on how long this takes - it takes a while.
```python
# loader
dataloader = DataLoader(clothing, batch_size=batch_size, num_workers=0, pin_memory=True)
# golfing time!
for t in range(epochs):
print('Epoch {}'.format(t))
print('-------------------------------')
# go through the entire dataset once
for batch, (X, Y) in enumerate(dataloader):
X, Y = X.to(device), Y.to(device)
# zero out gradient
optimizer.zero_grad()
# make a prediction on this batch!
pred = model(X)
# how bad is it?
loss = cost(pred, Y)
# compute gradients
loss.backward()
# update parameters
optimizer.step()
if batch % 100 == 0:
print('loss: {:>10f} [{:>5d}/{:>5d}]'.format(loss.item(), batch * len(X), len(dataloader.dataset)))
print('loss: {:>10f} [{:>5d}/{:>5d}]\n'.format(loss.item(), len(dataloader.dataset), len(dataloader.dataset)))
print('Done!')
```
So what the heck did this actually do? Great question. I will explain the `SimpleLinear` model since it's the one we ran first. We are basically learning one matrix and one vector. That's it (it's really anti-climactic if you ask me). Let me show you why that actually does anything:
\begin{align}
prediction = W \cdot x + b
\end{align}
\begin{align}
\begin{bmatrix}
\hat{y}_1 \\ \vdots \\ \hat{y}_{10}
\end{bmatrix}_{10 \times 1} =
\begin{bmatrix}
w_{1,1} & \ldots & w_{1,784} \\
\vdots & \ddots & \\
w_{10,1} & \ldots & w_{10, 784} \\
\end{bmatrix}_{10 \times 784}
\cdot
\begin{bmatrix}x_1 \\ x_2 \\ \vdots \\ x_{783} \\ x_{784}\end{bmatrix}_{784 \times 1}
+
\begin{bmatrix}
b_1 \\ \vdots \\ b_{10}
\end{bmatrix}_{10 \times 1}
\end{align}
The matrix `W` and the vector `b` are what the `SimpleLinear` model learns. The output is a 10 by 1 matrix, and the index of its largest value is the clothing class we want to predict. Take a look at what the algorithm learned for the two variables (as well as their sizes):
```python
for p in model.parameters():
print(p.shape)
print(p)
```
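If you want to convince yourself that the model really is just that matrix and vector, here is a small sanity-check sketch. It assumes the model you selected above is still `SimpleLinear` (the other models have more layers, so this exact check would not apply to them):
```python
# manual prediction = softmax(W·x + b), compared against the model's own output
x0, _ = clothing[0]
x0 = x0.to(device).view(1, 28*28)
W = model.layer1.weight          # shape (10, 784)
b = model.layer1.bias            # shape (10,)
with torch.no_grad():
    manual = F.softmax(x0 @ W.T + b, dim=1)
    from_model = model(x0)
print(torch.allclose(manual, from_model))   # should print True
```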
# Is it working???
The best way to figure out if it is working is to test the model on data the learning process hasn't used. Luckily we have such a dataset (it is a held-out section of FashionMNIST that we never trained on). I'm loading it up the same way as before and printing the items out to show you that they're different.
```python
test_clothing = datasets.FashionMNIST('data', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.reshape(28*28))
]),
target_transform=transforms.Compose([
transforms.Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
])
)
draw_clothes(test_clothing)
```
Let's test it on the first trousers above (index 2).
```python
x, y = test_clothing[2]
x = x.to(device).view(1, 28*28)
```
We'll tell the model we are using it for evaluation (sometimes called inference) and pass in our previously unseen image.
```python
model.eval()
with torch.no_grad():
pred = model(x)
pred = pred.to('cpu').detach()[0]
print(pred)
```
Now let's see if the predicted clothing item matches the actual clothing item:
```python
classes[pred.argmax(0)], classes[y.argmax(0)]
```
Let's see how well we do over *all* of the test data!
```python
# data loader for the test clothing
test_dataloader = DataLoader(test_clothing, batch_size=64, num_workers=0, pin_memory=True)
# set model to evaluation mode
model.eval()
test_loss = 0
correct = 0
# loop over the *test* data
with torch.no_grad():
    for batch, (X, Y) in enumerate(test_dataloader):
        X, Y = X.to(device), Y.to(device)
        pred = model(X)
        test_loss += cost(pred, Y).item()
        correct += (pred.argmax(1) == Y.argmax(1)).type(torch.float).sum().item()
test_loss /= len(test_dataloader.dataset)
correct /= len(test_dataloader.dataset)
print('\nTest Error:')
print('acc: {:>0.1f}%, avg loss: {:>8f}'.format(100*correct, test_loss))
```
# Saving the Model
Every framework is different - in this case PyTorch lets us save the model (which you remember is just a big matrix `W` and a vector `b`) to an internal format as well as to the ONNX format. These can then be loaded up as an asset to a program that is executed every time you need to recognize a clothing item!
```python
# create dummy variable to traverse graph
x = torch.randint(255, (1, 28*28), dtype=torch.float).to(device) / 255
onnx.export(model, x, 'model.onnx')
print('Saved onnx model to model.onnx')
# saving PyTorch Model Dictionary
torch.save(model.state_dict(), 'model.pth')
print('Saved PyTorch Model to model.pth')
```
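As a final sanity check, here is a sketch of loading the saved PyTorch weights back into a fresh model. It assumes you kept the `SimpleLinear` choice above - swap in whichever class you actually trained:
```python
# reload the saved weights into a brand new model instance
reloaded = SimpleLinear().to(device)
reloaded.load_state_dict(torch.load('model.pth'))
reloaded.eval()
# it should make the same prediction as the trained model on a test image
x_check, y_check = test_clothing[2]
x_check = x_check.to(device).view(1, 28*28)
with torch.no_grad():
    pred_check = reloaded(x_check)
print(classes[pred_check.argmax(1).item()], classes[y_check.argmax(0).item()])
```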
# Play!
Now that you've gone through the whole process, please go back up to play around! Try changing:
1. The actual model! The other models are almost identical with the exception that they learn additional Matrices (W's and b's) that the images pass through to get the final answer.
2. The hyperparameters like `learning_rate`, `batch_size`, and `epochs`. Does it make things better or worse? 92% is ok - does any other combination of models and hyperparameters fare better?
## Final Thoughts
Would love your feedback! Was this helpful? Any parts confusing? Drop me a line!
```python
```
|
[STATEMENT]
lemma
assumes "P dvd x"
shows "[x = 0] (mod P)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [x = 0::'a] (mod P)
[PROOF STEP]
using assms cong_def
[PROOF STATE]
proof (prove)
using this:
P dvd x
[?b = ?c] (mod ?a) = (?b mod ?a = ?c mod ?a)
goal (1 subgoal):
1. [x = 0::'a] (mod P)
[PROOF STEP]
by force |
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!
!!!! MIT License
!!!!
!!!! ParaMonte: plain powerful parallel Monte Carlo library.
!!!!
!!!! Copyright (C) 2012-present, The Computational Data Science Lab
!!!!
!!!! This file is part of the ParaMonte library.
!!!!
!!!! Permission is hereby granted, free of charge, to any person obtaining a
!!!! copy of this software and associated documentation files (the "Software"),
!!!! to deal in the Software without restriction, including without limitation
!!!! the rights to use, copy, modify, merge, publish, distribute, sublicense,
!!!! and/or sell copies of the Software, and to permit persons to whom the
!!!! Software is furnished to do so, subject to the following conditions:
!!!!
!!!! The above copyright notice and this permission notice shall be
!!!! included in all copies or substantial portions of the Software.
!!!!
!!!! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
!!!! EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
!!!! MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
!!!! IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
!!!! DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
!!!! OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
!!!! OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
!!!!
!!!! ACKNOWLEDGMENT
!!!!
!!!! ParaMonte is an honor-ware and its currency is acknowledgment and citations.
!!!! As per the ParaMonte library license agreement terms, if you use any parts of
!!!! this library for any purposes, kindly acknowledge the use of ParaMonte in your
!!!! work (education/research/industry/development/...) by citing the ParaMonte
!!!! library as described on this page:
!!!!
!!!! https://github.com/cdslaborg/paramonte/blob/main/ACKNOWLEDGMENT.md
!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!> \brief
!> This module contains the classes and procedures for setting up the `restartFileFormat` attribute of ParaMonte samplers.
!> For more information, see the description of this attribute in the body of the module.
!> \author Amir Shahmoradi
module SpecBase_RestartFileFormat_mod
use Constants_mod, only: IK
implicit none
character(*), parameter :: MODULE_NAME = "@SpecBase_RestartFileFormat_mod"
integer(IK), parameter :: MAX_LEN_RESTART_FILE_FORMAT = 63
character(MAX_LEN_RESTART_FILE_FORMAT) :: restartFileFormat
type :: RestartFileFormat_type
logical :: isBinary
logical :: isAscii
character(6) :: binary
character(5) :: ascii
character(:), allocatable :: def
character(:), allocatable :: val
character(:), allocatable :: null
character(:), allocatable :: desc
contains
procedure, pass :: set => setRestartFileFormat, checkForSanity, nullifyNameListVar
end type RestartFileFormat_type
interface RestartFileFormat_type
module procedure :: constructRestartFileFormat
end interface RestartFileFormat_type
private :: constructRestartFileFormat, setRestartFileFormat, checkForSanity, nullifyNameListVar
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
contains
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function constructRestartFileFormat(methodName) result(RestartFileFormatObj)
#if INTEL_COMPILER_ENABLED && defined DLL_ENABLED && (OS_IS_WINDOWS || defined OS_IS_DARWIN)
!DEC$ ATTRIBUTES DLLEXPORT :: constructRestartFileFormat
#endif
use Constants_mod, only: NULL_SK, FILE_EXT, FILE_TYPE
use String_mod, only: num2str
implicit none
character(*), intent(in) :: methodName
type(RestartFileFormat_type) :: RestartFileFormatObj
RestartFileFormatObj%isBinary = .false.
RestartFileFormatObj%isAscii = .false.
RestartFileFormatObj%binary = FILE_TYPE%binary
RestartFileFormatObj%ascii = FILE_TYPE%ascii
RestartFileFormatObj%def = RestartFileFormatObj%binary
RestartFileFormatObj%null = repeat(NULL_SK, MAX_LEN_RESTART_FILE_FORMAT)
RestartFileFormatObj%desc = &
"restartFileFormat is a string variable that represents the format of the output restart file(s) which are used to restart &
&an interrupted "// methodName //" simulation. The string value must be enclosed by either single or double quotation &
&marks when provided as input. Two values are possible:\n\n&
& restartFileFormat = '" // RestartFileFormatObj%binary // "'\n\n&
& This is the binary file format which is not human-readable, but preserves the exact values of the &
&specification variables required for the simulation restart. This full accuracy representation is required &
&to exactly reproduce an interrupted simulation. The binary format is also normally the fastest mode of restart file &
&generation. Binary restart files will have the " // FILE_EXT%binary // " file extensions.\n\n&
& restartFileFormat = '" // RestartFileFormatObj%ascii // "'\n\n&
& This is the ASCII (text) file format which is human-readable but does not preserve the full accuracy of &
&the specification variables required for the simulation restart. It is also a significantly slower mode of &
&restart file generation, compared to the binary format. Therefore, its usage should be limited to situations where &
&the user wants to track the dynamics of simulation specifications throughout the simulation time. &
&ASCII restart file(s) will have the " // FILE_EXT%ascii //" file extensions.\n\n&
&The default value is restartFileFormat = '" // RestartFileFormatObj%def // "'. Note that the input values are case-insensitive."
end function constructRestartFileFormat
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
subroutine nullifyNameListVar(RestartFileFormatObj)
#if INTEL_COMPILER_ENABLED && defined DLL_ENABLED && (OS_IS_WINDOWS || defined OS_IS_DARWIN)
!DEC$ ATTRIBUTES DLLEXPORT :: nullifyNameListVar
#endif
implicit none
class(RestartFileFormat_type), intent(in) :: RestartFileFormatObj
restartFileFormat = RestartFileFormatObj%null
end subroutine nullifyNameListVar
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
subroutine setRestartFileFormat(RestartFileFormatObj,restartFileFormat)
#if INTEL_COMPILER_ENABLED && defined DLL_ENABLED && (OS_IS_WINDOWS || defined OS_IS_DARWIN)
!DEC$ ATTRIBUTES DLLEXPORT :: setRestartFileFormat
#endif
use String_mod, only: getLowerCase
implicit none
class(RestartFileFormat_type), intent(inout) :: RestartFileFormatObj
character(*), intent(in) :: restartFileFormat
character(:), allocatable :: restartFileFormatLowerCase
RestartFileFormatObj%val = trim(adjustl(restartFileFormat))
if ( RestartFileFormatObj%val==trim(adjustl(RestartFileFormatObj%null)) ) then
RestartFileFormatObj%val = trim(adjustl(RestartFileFormatObj%def))
end if
restartFileFormatLowerCase = getLowerCase(RestartFileFormatObj%val)
RestartFileFormatObj%isBinary = restartFileFormatLowerCase == getLowerCase(RestartFileFormatObj%binary)
RestartFileFormatObj%isAscii = restartFileFormatLowerCase == getLowerCase(RestartFileFormatObj%ascii)
end subroutine setRestartFileFormat
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
subroutine checkForSanity(RestartFileFormat,Err,methodName)
#if INTEL_COMPILER_ENABLED && defined DLL_ENABLED && (OS_IS_WINDOWS || defined OS_IS_DARWIN)
!DEC$ ATTRIBUTES DLLEXPORT :: checkForSanity
#endif
use Err_mod, only: Err_type
use String_mod, only: num2str
implicit none
class(RestartFileFormat_type), intent(in) :: RestartFileFormat
character(*), intent(in) :: methodName
type(Err_type), intent(inout) :: Err
character(*), parameter :: PROCEDURE_NAME = "@checkForSanity()"
if ( .not.(RestartFileFormat%isBinary .or. RestartFileFormat%isAscii) ) then
Err%occurred = .true.
Err%msg = Err%msg // &
MODULE_NAME // PROCEDURE_NAME // ": Error occurred. &
&The input requested restart file format ('" // RestartFileFormat%val // &
"') represented by the variable restartFileFormat cannot be anything other than '" // &
RestartFileFormat%binary // "' or '" // RestartFileFormat%ascii // "'. If you don't know an appropriate &
&value for RestartFileFormat, drop it from the input list. " // methodName // &
" will automatically assign an appropriate value to it.\n\n"
end if
end subroutine checkForSanity
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
end module SpecBase_RestartFileFormat_mod ! LCOV_EXCL_LINE |
------------------------------------------------------------------------
-- A virtual machine
------------------------------------------------------------------------
{-# OPTIONS --sized-types #-}
module Lambda.Simplified.Delay-monad.Virtual-machine where
open import Equality.Propositional
open import Prelude
open import Monad equality-with-J
open import Delay-monad
open import Delay-monad.Monad
open import Lambda.Simplified.Delay-monad.Interpreter
open import Lambda.Simplified.Syntax
open import Lambda.Simplified.Virtual-machine
open Closure Code
-- A functional semantics for the VM.
exec : ∀ {i} → State → Delay (Maybe Value) i
exec s with step s
... | continue s′ = later λ { .force → exec s′ }
... | done v = return (just v)
... | crash = return nothing
|
Formal statement is: lemmas continuous_on_of_real [continuous_intros] = bounded_linear.continuous_on [OF bounded_linear_of_real] Informal statement is: The function $x \mapsto \mathrm{of\_real}(x)$, which embeds the real numbers into a real normed vector space, is continuous on any set. |
[STATEMENT]
lemma complex_cnj_i [simp]: "cnj \<i> = - \<i>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cnj \<i> = - \<i>
[PROOF STEP]
by (simp add: complex_eq_iff) |
Finkelstein has written of his Jewish parents ' experiences during World War II . His mother , <unk> <unk> , grew up in Warsaw , survived the Warsaw Ghetto , the Majdanek concentration camp , and two slave labor camps . Her first husband died in the war . She considered the day of her liberation as the most horrible day of her life , as she realized that she was alone , her parents and siblings gone . Norman 's father , Zacharias Finkelstein , active in Hashomer <unk> , was a survivor of both the Warsaw Ghetto and the Auschwitz concentration camp .
|
section \<open>Union-Find Data-Structure\<close>
theory Union_Find_Fun
imports
Collections.Partial_Equivalence_Relation
(* "../Sep_Main"
"HOL-Library.Code_Target_Numeral" *)
begin
text \<open>
We implement a simple union-find data-structure based on an array.
It uses path compression and a size-based union heuristics.
\<close>
subsection \<open>Abstract Union-Find on Lists\<close>
text \<open>
We first formulate union-find structures on lists, and later implement
them using Imperative/HOL. This is a separation of proof concerns
between proving the algorithmic idea correct and generating the verification
conditions.
\<close>
subsubsection \<open>Representatives\<close>
text \<open>
We define a function that searches for the representative of an element.
This function is only partially defined, as it does not terminate on all
lists. We use the domain of this function to characterize valid union-find
lists.
\<close>
function (domintros) rep_of
where "rep_of l i = (if l!i = i then i else rep_of l (l!i))"
by pat_completeness auto
text \<open>A valid union-find structure only contains valid indexes, and
the \<open>rep_of\<close> function terminates for all indexes.\<close>
definition
"ufa_invar l \<equiv> \<forall>i<length l. rep_of_dom (l,i) \<and> l!i<length l"
lemma ufa_invarD:
"\<lbrakk>ufa_invar l; i<length l\<rbrakk> \<Longrightarrow> rep_of_dom (l,i)"
"\<lbrakk>ufa_invar l; i<length l\<rbrakk> \<Longrightarrow> l!i<length l"
unfolding ufa_invar_def by auto
text \<open>We derive the following equations for the \<open>rep-of\<close> function.\<close>
lemma rep_of_refl: "l!i=i \<Longrightarrow> rep_of l i = i"
apply (subst rep_of.psimps)
apply (rule rep_of.domintros)
apply (auto)
done
lemma rep_of_step:
"\<lbrakk>ufa_invar l; i<length l; l!i\<noteq>i\<rbrakk> \<Longrightarrow> rep_of l i = rep_of l (l!i)"
apply (subst rep_of.psimps)
apply (auto dest: ufa_invarD)
done
lemmas rep_of_simps = rep_of_refl rep_of_step
lemma rep_of_iff: "\<lbrakk>ufa_invar l; i<length l\<rbrakk>
\<Longrightarrow> rep_of l i = (if l!i=i then i else rep_of l (l!i))"
by (simp add: rep_of_simps)
text \<open>We derive a custom induction rule, that is more suited to
our purposes.\<close>
lemma rep_of_induct[case_names base step, consumes 2]:
assumes I: "ufa_invar l"
assumes L: "i<length l"
assumes BASE: "\<And>i. \<lbrakk> ufa_invar l; i<length l; l!i=i \<rbrakk> \<Longrightarrow> P l i"
assumes STEP: "\<And>i. \<lbrakk> ufa_invar l; i<length l; l!i\<noteq>i; P l (l!i) \<rbrakk>
\<Longrightarrow> P l i"
shows "P l i"
proof -
from ufa_invarD[OF I L] have "ufa_invar l \<and> i<length l \<longrightarrow> P l i"
apply (induct l\<equiv>l i rule: rep_of.pinduct)
apply (auto intro: STEP BASE dest: ufa_invarD)
done
thus ?thesis using I L by simp
qed
text \<open>In the following, we define various properties of \<open>rep_of\<close>.\<close>
lemma rep_of_min:
"\<lbrakk> ufa_invar l; i<length l \<rbrakk> \<Longrightarrow> l!(rep_of l i) = rep_of l i"
proof -
have "\<lbrakk>rep_of_dom (l,i) \<rbrakk> \<Longrightarrow> l!(rep_of l i) = rep_of l i"
apply (induct arbitrary: rule: rep_of.pinduct)
apply (subst rep_of.psimps, assumption)
apply (subst (2) rep_of.psimps, assumption)
apply auto
done
thus "\<lbrakk> ufa_invar l; i<length l \<rbrakk> \<Longrightarrow> l!(rep_of l i) = rep_of l i"
by (metis ufa_invarD(1))
qed
lemma rep_of_bound:
"\<lbrakk> ufa_invar l; i<length l \<rbrakk> \<Longrightarrow> rep_of l i < length l"
apply (induct rule: rep_of_induct)
apply (auto simp: rep_of_iff)
done
lemma rep_of_idem:
"\<lbrakk> ufa_invar l; i<length l \<rbrakk> \<Longrightarrow> rep_of l (rep_of l i) = rep_of l i"
by (auto simp: rep_of_min rep_of_refl)
lemma rep_of_min_upd: "\<lbrakk> ufa_invar l; x<length l; i<length l \<rbrakk> \<Longrightarrow>
rep_of (l[rep_of l x := rep_of l x]) i = rep_of l i"
by (metis list_update_id rep_of_min)
lemma rep_of_idx:
"\<lbrakk>ufa_invar l; i<length l\<rbrakk> \<Longrightarrow> rep_of l (l!i) = rep_of l i"
by (metis rep_of_step)
subsubsection \<open>Abstraction to Partial Equivalence Relation\<close>
definition ufa_\<alpha> :: "nat list \<Rightarrow> (nat\<times>nat) set"
where "ufa_\<alpha> l
\<equiv> {(x,y). x<length l \<and> y<length l \<and> rep_of l x = rep_of l y}"
lemma ufa_\<alpha>_equiv[simp, intro!]: "part_equiv (ufa_\<alpha> l)"
by rule (auto simp: ufa_\<alpha>_def intro: symI transI)
lemma ufa_\<alpha>_lenD:
"(x,y)\<in>ufa_\<alpha> l \<Longrightarrow> x<length l"
"(x,y)\<in>ufa_\<alpha> l \<Longrightarrow> y<length l"
unfolding ufa_\<alpha>_def by auto
lemma ufa_\<alpha>_dom[simp]: "Domain (ufa_\<alpha> l) = {0..<length l}"
unfolding ufa_\<alpha>_def by auto
lemma ufa_\<alpha>_refl[simp]: "(i,i)\<in>ufa_\<alpha> l \<longleftrightarrow> i<length l"
unfolding ufa_\<alpha>_def
by simp
lemma ufa_\<alpha>_len_eq:
assumes "ufa_\<alpha> l = ufa_\<alpha> l'"
shows "length l = length l'"
by (metis assms le_antisym less_not_refl linorder_le_less_linear ufa_\<alpha>_refl)
subsubsection \<open>Operations\<close>
lemma ufa_init_invar: "ufa_invar [0..<n]"
unfolding ufa_invar_def
by (auto intro: rep_of.domintros)
lemma ufa_init_correct: "ufa_\<alpha> [0..<n] = {(x,x) | x. x<n}"
unfolding ufa_\<alpha>_def
using ufa_init_invar[of n]
apply (auto simp: rep_of_refl)
done
lemma ufa_find_correct: "\<lbrakk>ufa_invar l; x<length l; y<length l\<rbrakk>
\<Longrightarrow> rep_of l x = rep_of l y \<longleftrightarrow> (x,y)\<in>ufa_\<alpha> l"
unfolding ufa_\<alpha>_def
by auto
abbreviation "ufa_union l x y \<equiv> l[rep_of l x := rep_of l y]"
lemma ufa_union_invar:
assumes I: "ufa_invar l"
assumes L: "x<length l" "y<length l"
shows "ufa_invar (ufa_union l x y)"
unfolding ufa_invar_def
proof (intro allI impI, simp only: length_list_update)
fix i
assume A: "i<length l"
with I have "rep_of_dom (l,i)" by (auto dest: ufa_invarD)
have "ufa_union l x y ! i < length l" using I L A
apply (cases "i=rep_of l x")
apply (auto simp: rep_of_bound dest: ufa_invarD)
done
moreover have "rep_of_dom (ufa_union l x y, i)" using I A L
proof (induct rule: rep_of_induct)
case (base i)
thus ?case
apply -
apply (rule rep_of.domintros)
apply (cases "i=rep_of l x")
apply auto
apply (rule rep_of.domintros)
apply (auto simp: rep_of_min)
done
next
case (step i)
from step.prems \<open>ufa_invar l\<close> \<open>i<length l\<close> \<open>l!i\<noteq>i\<close>
have [simp]: "ufa_union l x y ! i = l!i"
apply (auto simp: rep_of_min rep_of_bound nth_list_update)
done
from step show ?case
apply -
apply (rule rep_of.domintros)
apply simp
done
qed
ultimately show
"rep_of_dom (ufa_union l x y, i) \<and> ufa_union l x y ! i < length l"
by blast
qed
lemma ufa_union_aux:
assumes I: "ufa_invar l"
assumes L: "x<length l" "y<length l"
assumes IL: "i<length l"
shows "rep_of (ufa_union l x y) i =
(if rep_of l i = rep_of l x then rep_of l y else rep_of l i)"
using I IL
proof (induct rule: rep_of_induct)
case (base i)
have [simp]: "rep_of l i = i" using \<open>l!i=i\<close> by (simp add: rep_of_refl)
note [simp] = \<open>ufa_invar l\<close> \<open>i<length l\<close>
show ?case proof (cases)
assume A[simp]: "rep_of l x = i"
have [simp]: "l[i := rep_of l y] ! i = rep_of l y"
by (auto simp: rep_of_bound)
show ?thesis proof (cases)
assume [simp]: "rep_of l y = i"
show ?thesis by (simp add: rep_of_refl)
next
assume A: "rep_of l y \<noteq> i"
have [simp]: "rep_of (l[i := rep_of l y]) i = rep_of l y"
apply (subst rep_of_step[OF ufa_union_invar[OF I L], simplified])
using A apply simp_all
apply (subst rep_of_refl[where i="rep_of l y"])
using I L
apply (simp_all add: rep_of_min)
done
show ?thesis by (simp add: rep_of_refl)
qed
next
assume A: "rep_of l x \<noteq> i"
hence "ufa_union l x y ! i = l!i" by (auto)
also note \<open>l!i=i\<close>
finally have "rep_of (ufa_union l x y) i = i" by (simp add: rep_of_refl)
thus ?thesis using A by auto
qed
next
case (step i)
note [simp] = I L \<open>i<length l\<close>
have "rep_of l x \<noteq> i" by (metis I L(1) rep_of_min \<open>l!i\<noteq>i\<close>)
hence [simp]: "ufa_union l x y ! i = l!i"
by (auto simp add: nth_list_update rep_of_bound \<open>l!i\<noteq>i\<close>) []
have "rep_of (ufa_union l x y) i = rep_of (ufa_union l x y) (l!i)"
by (auto simp add: rep_of_iff[OF ufa_union_invar[OF I L]])
also note step.hyps(4)
finally show ?case
by (auto simp: rep_of_idx)
qed
lemma ufa_union_correct: "\<lbrakk> ufa_invar l; x<length l; y<length l \<rbrakk>
\<Longrightarrow> ufa_\<alpha> (ufa_union l x y) = per_union (ufa_\<alpha> l) x y"
unfolding ufa_\<alpha>_def per_union_def
by (auto simp: ufa_union_aux
split: if_split_asm
)
lemma ufa_compress_aux:
assumes I: "ufa_invar l"
assumes L[simp]: "x<length l"
shows "ufa_invar (l[x := rep_of l x])"
and "\<forall>i<length l. rep_of (l[x := rep_of l x]) i = rep_of l i"
proof -
{
fix i
assume "i<length (l[x := rep_of l x])"
hence IL: "i<length l" by simp
have G1: "l[x := rep_of l x] ! i < length (l[x := rep_of l x])"
using I IL
by (auto dest: ufa_invarD[OF I] simp: nth_list_update rep_of_bound)
from I IL have G2: "rep_of (l[x := rep_of l x]) i = rep_of l i
\<and> rep_of_dom (l[x := rep_of l x], i)"
proof (induct rule: rep_of_induct)
case (base i)
thus ?case
apply (cases "x=i")
apply (auto intro: rep_of.domintros simp: rep_of_refl)
done
next
case (step i)
hence D: "rep_of_dom (l[x := rep_of l x], i)"
apply -
apply (rule rep_of.domintros)
apply (cases "x=i")
apply (auto intro: rep_of.domintros simp: rep_of_min)
done
thus ?case apply simp using step
apply -
apply (subst rep_of.psimps[OF D])
apply (cases "x=i")
apply (auto simp: rep_of_min rep_of_idx)
apply (subst rep_of.psimps[where i="rep_of l i"])
apply (auto intro: rep_of.domintros simp: rep_of_min)
done
qed
note G1 G2
} note G=this
thus "\<forall>i<length l. rep_of (l[x := rep_of l x]) i = rep_of l i"
by auto
from G show "ufa_invar (l[x := rep_of l x])"
by (auto simp: ufa_invar_def)
qed
lemma ufa_compress_invar:
assumes I: "ufa_invar l"
assumes L[simp]: "x<length l"
shows "ufa_invar (l[x := rep_of l x])"
using assms by (rule ufa_compress_aux)
lemma ufa_compress_correct:
assumes I: "ufa_invar l"
assumes L[simp]: "x<length l"
shows "ufa_\<alpha> (l[x := rep_of l x]) = ufa_\<alpha> l"
by (auto simp: ufa_\<alpha>_def ufa_compress_aux[OF I])
end
|
State Before: G : Type u_1
G' : Type ?u.308406
inst✝⁴ : Group G
inst✝³ : Group G'
A : Type ?u.308415
inst✝² : AddGroup A
H✝ K✝ : Subgroup G
k : Set G
N : Type u_2
inst✝¹ : Group N
P : Type ?u.308442
inst✝ : Group P
H : Subgroup G
K : Subgroup N
⊢ prod H K = ⊥ ↔ H = ⊥ ∧ K = ⊥ State After: no goals Tactic: simpa only [← Subgroup.toSubmonoid_eq] using Submonoid.prod_eq_bot_iff |
using Printf       # needed for @printf / @sprintf in Julia 1.x
using Statistics   # needed for mean

function time_sec()
    time_ns() * 1e-9
end
function timed_result(action::Function, seed::Int64)
start = time_sec()
result = action(seed)
delta = time_sec() - start
return (delta, result)
end
function time_clock(niter::Integer)
start = time_sec()
for _ in 1:niter
time_sec()
end
time_sec() - start
end
function timed_noresult(action::Function, seed::Int64)
start = time_sec()
for _ in 1:seed
action()
end
time_sec() - start
end
function run_for_atleast(howlong::Float64,
seed::Int64,
action::Function)
#ensure that time_sec is compiled
time_sec()
init_time = time_sec()
iters = 0
while true
now = time_sec()
if ((now - init_time) > 10.0 * howlong)
throw(error("took too long to run: seed $seed, iters $iters"))
end
elapsed, result = timed_result(action, seed)
if elapsed < howlong
seed *= 2
iters += 1
else
return elapsed, seed, result
end
end
end
function timed_func(func::Function, n::Integer)
start = time_ns()
local ret
for _ in 1:n
ret = func()
end
    (Int(time_ns() - start), 1.0)
end
function collect_samples(sample_count::Integer,
exec_count::Integer,
func::Function,
gc_before::Bool)
@assert sample_count > 0
results = zeros(sample_count)
times = zeros(sample_count)
for i in 1:sample_count
if gc_before
            GC.gc()  # force garbage collection before timing (Julia 1.x API)
end
t, r = timed_func(func, exec_count)
times[i] = t;
results[i] = r
end
(times, results)
end
function estimate_execution_count(period, func,
gc_before_sample,
est_run_time)
print("Estimating execution count...")
    n = round(Int, max(period / est_run_time / 5, 1))
while true
times = collect_samples(1, n, func, gc_before_sample)[1]
t = max(1.0, mean(times)) # prevent zero times
print("....")
if t >= period
print("\n")
return n
end
        n = ceil(Int, min(2 * n, n * (period / t) + 1))
end
@printf("%d\n", n)
return n
end
function time_str(k::Float64)
function fmt(time::Float64, unit::String)
if time >= 1e9 return @sprintf("%.4f %s", time, unit)
elseif time >= 1e6 return @sprintf("%.0f %s", time, unit)
elseif time >= 1e5 return @sprintf("%.1f %s", time, unit)
elseif time >= 1e4 return @sprintf("%.2f %s", time, unit)
elseif time >= 1e3 return @sprintf("%.3f %s", time, unit)
elseif time >= 1e2 return @sprintf("%.4f %s", time, unit)
elseif time >= 1e1 return @sprintf("%.5f %s", time, unit)
else return @sprintf("%.6f %s", time, unit) end
end
    if k < 0 return "-" * time_str(-k)
elseif k >= 60 return fmt(k / 60.0, "min")
elseif k >= 1 return fmt(k, "s")
elseif k >= 1e-03 return fmt(k * 1e03, "ms")
elseif k >= 1e-06 return fmt(k * 1e06, "us")
elseif k >= 1e-09 return fmt(k * 1e09, "ns")
elseif k >= 1e-12 return fmt(k * 1e12, "ps")
else return @sprintf("%.6f %s", k * 1e12, "ps")
end
end
|
[STATEMENT]
lemma multiset_remove_induct [case_names empty remove]:
assumes "P {#}" "\<And>A. A \<noteq> {#} \<Longrightarrow> (\<And>x. x \<in># A \<Longrightarrow> P (A - {#x#})) \<Longrightarrow> P A"
shows "P A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P A
[PROOF STEP]
proof (induction A rule: full_multiset_induct)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>B. \<forall>A. A \<subset># B \<longrightarrow> P A \<Longrightarrow> P B
[PROOF STEP]
case (less A)
[PROOF STATE]
proof (state)
this:
\<forall>A. A \<subset># A \<longrightarrow> P A
goal (1 subgoal):
1. \<And>B. \<forall>A. A \<subset># B \<longrightarrow> P A \<Longrightarrow> P B
[PROOF STEP]
hence IH: "P B" if "B \<subset># A" for B
[PROOF STATE]
proof (prove)
using this:
\<forall>A. A \<subset># A \<longrightarrow> P A
goal (1 subgoal):
1. P B
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
\<forall>A. A \<subset># A \<longrightarrow> P A
B \<subset># A
goal (1 subgoal):
1. P B
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
?B \<subset># A \<Longrightarrow> P ?B
goal (1 subgoal):
1. \<And>B. \<forall>A. A \<subset># B \<longrightarrow> P A \<Longrightarrow> P B
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P A
[PROOF STEP]
proof (cases "A = {#}")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. A = {#} \<Longrightarrow> P A
2. A \<noteq> {#} \<Longrightarrow> P A
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
A = {#}
goal (2 subgoals):
1. A = {#} \<Longrightarrow> P A
2. A \<noteq> {#} \<Longrightarrow> P A
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
A = {#}
goal (1 subgoal):
1. P A
[PROOF STEP]
by (simp add: assms)
[PROOF STATE]
proof (state)
this:
P A
goal (1 subgoal):
1. A \<noteq> {#} \<Longrightarrow> P A
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. A \<noteq> {#} \<Longrightarrow> P A
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
A \<noteq> {#}
goal (1 subgoal):
1. A \<noteq> {#} \<Longrightarrow> P A
[PROOF STEP]
hence "P (A - {#x#})" if "x \<in># A" for x
[PROOF STATE]
proof (prove)
using this:
A \<noteq> {#}
goal (1 subgoal):
1. P (A - {#x#})
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
A \<noteq> {#}
x \<in># A
goal (1 subgoal):
1. P (A - {#x#})
[PROOF STEP]
by (intro IH) (simp add: mset_subset_diff_self)
[PROOF STATE]
proof (state)
this:
?x \<in># A \<Longrightarrow> P (A - {#?x#})
goal (1 subgoal):
1. A \<noteq> {#} \<Longrightarrow> P A
[PROOF STEP]
from False and this
[PROOF STATE]
proof (chain)
picking this:
A \<noteq> {#}
?x \<in># A \<Longrightarrow> P (A - {#?x#})
[PROOF STEP]
show "P A"
[PROOF STATE]
proof (prove)
using this:
A \<noteq> {#}
?x \<in># A \<Longrightarrow> P (A - {#?x#})
goal (1 subgoal):
1. P A
[PROOF STEP]
by (rule assms)
[PROOF STATE]
proof (state)
this:
P A
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
P A
goal:
No subgoals!
[PROOF STEP]
qed |
\documentclass[11pt,twoside]{article}
%\documentclass[10pt,twoside,twocolumn]{article}
\usepackage[english]{babel}
\usepackage{times,subeqnarray}
\usepackage{url}
% following is for pdflatex vs. old(dvi) latex
\newif\myifpdf
\ifx\pdfoutput\undefined
% \pdffalse % we are not running PDFLaTeX
\usepackage[dvips]{graphicx}
\else
\pdfoutput=1 % we are running PDFLaTeX
% \pdftrue
\usepackage[pdftex]{graphicx}
\fi
\usepackage{apatitlepages}
% if you want to be more fully apa-style for submission, then use this
%\usepackage{setspace,psypub,ulem}
%\usepackage{setspace} % must come before psypub
%\usepackage{psypub}
\usepackage{psydraft}
%\usepackage{one-in-margins} % use instead of psydraft for one-in-margs
%\usepackage{apa} % apa must come last
\usepackage[natbibapa]{apacite} % natbib
% \usepackage{csquotes} % biblatex
% \usepackage[style=apa]{biblatex}
\usepackage{placeins}
% \input netsym
\usepackage{diagbox} % table
% tell pdflatex to prefer .pdf files over .png files!!
\myifpdf
\DeclareGraphicsExtensions{.pdf,.eps,.png,.jpg,.mps,.tif}
\fi
% use 0 for psypub format
\parskip 2pt
% for double-spacing, determines spacing
%\doublespacing
%\setstretch{1.7}
\columnsep .25in % 3/8 in column separation
\def\myheading{ Correcting the Hebbian Mistake }
% no twoside for pure apa style, use \markright with heading only
\pagestyle{myheadings}
\markboth{\hspace{.5in} \myheading \hfill}{\hfill Zheng et al \hspace{.5in}}
\begin{document}
\bibliographystyle{apacite}
% sloppy is the way to go!
\sloppy
\raggedbottom
\def\mytitle{ Correcting the Hebbian Mistake: Toward a Fully Error-Driven Hippocampus }
\def\myauthor{Yicong Zheng$^{1,2}$, Xiaonan L. Liu$^{1,2}$, Satoru Nishiyama$^{3,4}$, Charan Ranganath$^{1,2}$, and Randall C. O'Reilly$^{1,2,5}$\\
$^1$Department of Psychology, University of California, Davis\\
$^2$Center for Neuroscience, University of California, Davis\\
$^3$Graduate School of Education, Kyoto University\\
$^4$Japan Society for the Promotion of Science\\
$^5$Department of Computer Science, University of California, Davis\\
{\small [email protected]}\\}
\def\mynote{R. C. O'Reilly is Director of Science at Obelisk Lab in the Astera Institute, and Chief Scientist at eCortex, Inc., which may derive indirect benefit from the work presented here.
Supported by: ONR grants N00014-20-1-2578, N00014-19-1-2684/ N00014-18-1-2116, N00014-18-C-2067, N00014-17-1-2961, N00014-15-1-0033
}
% ROR: the abstract was too long! Also You generally can't add actual references in an abstract.
\def\myabstract{
The hippocampus plays a critical role in the rapid learning of new episodic memories. Many computational models propose that the hippocampus is an autoassociator that relies on Hebbian learning (i.e., “cells that fire together, wire together”). However, Hebbian learning is computationally suboptimal as it modifies weights unnecessarily beyond what is actually needed to achieve effective retrieval, causing more interference and resulting in a lower learning capacity. Our previous computational models have utilized a powerful, biologically plausible form of error-driven learning in hippocampal CA1 and entorhinal cortex (EC) (functioning as a sparse autoencoder) by contrasting local activity states at different phases in the theta cycle. Based on specific neural data and a recent abstract computational model, we propose a new model called Theremin (Total Hippocampal ERror MINimization) that extends error-driven learning to area CA3 --- the mnemonic heart of the hippocampal system. In the model, CA3 responds to the EC monosynaptic input prior to the EC disynaptic input through dentate gyrus (DG), giving rise to a temporal difference between these two activation states, which drives error-driven learning in the EC$\rightarrow$CA3 and CA3$\leftrightarrow$CA3 projections. In effect, DG serves as a teacher to CA3, correcting its patterns into more pattern-separated ones, thereby reducing interference. Results showed that Theremin, compared with our original model, has significantly increased capacity and learning speed. The model makes several novel predictions that can be tested in future studies.
}
% \titlesepage{\mytitle}{\myauthor}{\mynote}{\myabstract}
% \twocolumn
%\titlesamepage{\mytitle}{\myauthor}{\mynote}{\myabstract}
\titlesamepageoc{\mytitle}{\myauthor}{\mynote}{\myabstract}
% single-spaced table of contents, delete if unwanted
% \newpage
% \begingroup
% \parskip 0em
% \tableofcontents
% \endgroup
% \newpage
% \twocolumn
\pagestyle{myheadings}
\section{Introduction}
It is well-established that the hippocampus plays a critical role in the rapid learning of new episodic memories \citep{EichenbaumYonelinasRanganath07}. Most computational and conceptual models of this hippocampal function are based on principles first articulated by Donald O. Hebb and David Marr \citep{Hebb49,Marr71,McNaughtonNadel90,McClellandMcNaughtonOReilly95}. At the core of this framework is the notion that recurrent connections among CA3 neurons are strengthened when they are co-activated (``cells that fire together, wire together''), essentially creating a cell assembly of interconnected neurons that bind the different elements of an event. As a result of this Hebbian learning, subsequent partial cues can drive pattern completion to recall the entire original memory, by reactivating the entire cell assembly via the strengthened interconnections.
In addition, Marr's fundamental insight was that sparse levels of neural activity in area CA3, and especially the dentate gyrus (DG) granule cells, will drive the creation of cell assemblies that involve a distinct combination of neurons for each event, otherwise known as \emph{pattern separation} \citep{Marr71,OReillyMcClelland94,YassaStark11}. As a consequence, the DG to CA3 pathway has the capability to minimize interference from learning across even closely overlapping episodes (e.g., where you parked your car today vs. where you parked it yesterday). Note that it is the patterns of activity over area CA3 that constitute the principal hippocampal representation of an episodic memory, and learning in these CA3 synapses is thus essential for cementing the storage of these memories. Overall, the basic tenets established by Hebb and Marr account for a vast amount of behavioral and neural data on hippocampal function, and represent one of the most widely accepted theories in neuroscience \citep{MilnerSquireKandel98,OReillyBhattacharyyaHowardEtAl14,Eichenbaum16,YonelinasRanganathEkstromEtAl19}.
Although almost every biologically-based computational model of hippocampal function incorporates Hebbian plasticity, it is notable that Hebbian learning is computationally suboptimal in various respects, especially in terms of overall learning capacity \citep{Abu-MostafaSt.Jacques85,TrevesRolls91}. In models that rely solely on Hebbian learning, whenever two neurons are active together, the synaptic weight between them is increased, regardless of how necessary that change might be to achieve better memory recall. As a result, such models do not know when to stop learning, and continue to drive synaptic changes beyond what is actually necessary to achieve effective pattern completion. The consequence of this ``learning overkill'' is that all those unnecessary synaptic weight changes end up driving more interference with the weights needed to recall other memories, significantly reducing overall memory capacity. Although it is difficult to quantitatively assess the capacity of the hippocampus in various species, there is reason to believe that even the high degree of pattern separation in the DG and CA3 pathways might not be sufficient to make up for the interference caused by reliance on Hebbian learning.
One logical alternative to the simple Hebbian approach is to introduce a self-limiting learning mechanism that drives only synaptic changes that are absolutely necessary to support learning. However, determining this minimal amount of learning can be challenging: how can local synaptic changes ``know'' what is functionally necessary in terms of the overall memory system function? One well-established class of such learning mechanisms are error-driven learning rules: by driving synaptic changes directly in proportion to a functionally-defined error signal, learning automatically stops when that error signal goes to zero. For example, the well-known Rescorla-Wagner learning rule for classical conditioning \citep{RescorlaWagner72} is an instance of the delta-rule error-driven learning rule \citep{WidrowHoff60}:
\begin{equation}
dW = x (r - y),
\end{equation}
where $dW$ is the amount of synaptic weight change, $x$ is the sending neuron activity level (e.g., average firing rate of sensory inputs representing conditioned stimuli), $r$ is the actual amount of reward received, and $y$ is the expected amount of reward, computed according to the existing synaptic weights:
\begin{equation}
y = \sum x W.
\end{equation}
This learning rule drives learning (changes in weights, $dW$) up to the point where the expected prediction of reward ($y$) matches the actual reward received ($r$), at which point learning stops, because the difference term in the parentheses goes to 0. The dependency on $x$ is critical for \emph{credit assignment}, which ensures that the most active sending neurons change their weights the most, as such weight changes will be the most effective in reducing the error. The widely-used error backpropagation learning algorithm is a mathematical extension of this simpler delta-rule form of learning \citep{RumelhartHintonWilliams86}, and demonstrates the general-purpose power of these error-driven learning principles, underlying the current success in large-scale deep learning models \citep{LeCunBengioHinton15}.
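To make the self-limiting character of this rule concrete, the following is a minimal numerical sketch (in Python, with purely illustrative values that are not taken from any simulation reported here) of Equations 1 and 2:

\begin{verbatim}
import numpy as np

# Illustrative toy values (hypothetical; not from the model):
x = np.array([1.0, 0.5, 0.0])  # sending activities (stimulus features)
W = np.zeros(3)                # synaptic weights
r = 1.0                        # actual reward received
lrate = 0.3                    # learning rate

for trial in range(8):
    y = x @ W                  # expected reward (Equation 2)
    dW = lrate * x * (r - y)   # delta rule (Equation 1)
    W += dW
    print(f"trial {trial}: y = {y:.3f}, total |dW| = {abs(dW).sum():.3f}")

# As y approaches r, dW shrinks toward zero and learning stops by itself;
# a purely Hebbian update (dW = lrate * x * y) has no such stopping point.
\end{verbatim}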
We have previously shown that these error-driven learning principles can be applied to the CA1 region of the hippocampus \citep{KetzMorkondaOReilly13}, building on theta-phase dynamics discovered by \citet{HasselmoBodelonWyble02}. The critical \emph{target} value driving this error-driven learning is the full pattern of activity over the entorhinal cortex (EC) representing the current state of the rest of the cortex. Learning stops when the hippocampal encoding of this EC pattern projecting from CA3 through CA1 matches the target version driven by the excitatory projections into the EC. These error-driven learning dynamics have been supported empirically by studies of CA1 learning in various tasks \citep{SchapiroTurk-BrowneNormanEtAl16,SchapiroTurk-BrowneBotvinickEtAl17}. However, this prior model retained the standard Hebbian learning for all of the connections within CA3 and DG, because the error signal that drives CA1 learning does not have any way of propagating back to these earlier areas within the overall circuit: the connectivity is only feedforward from CA3 to CA1.
To be able to apply a similar type of self-limiting error-driven learning to the core area CA3 of the hippocampus, we need a suitable target signal available to neurons within the CA3 that determines when the learning has accomplished what it needs to do. Recently, \citet{KowadloAhmedRawlinson20} proposed in an abstract, backpropagation-based model that the DG can serve as a kind of teacher to the CA3, driving learning just to the point where CA3 on its own can replicate the same highly pattern-separated representations that the DG imparts on the CA3. We build on this idea here, by showing how error-driven learning based on this DG-driven target signal can emerge naturally within the activation dynamics of the hippocampal circuitry, driving learning in the feedforward and recurrent synapses of area CA3. Thus, we are able to extend the application of error-driven learning to the ``heart'' of the hippocampus.
We show that this more fully error-driven hippocampal learning system has significantly improved memory capacity and resistance to interference compared to one with Hebbian learning in CA3. Furthermore, we show how these error-driven learning dynamics fit with detailed features of the neuroanatomy and physiology of the hippocampal circuits, and can have broad implications for understanding important learning phenomena such as the \emph{testing effect} \citep{LiuOReillyRanganath21}. Overall, this new framework provides a consistent computational and biological account of hippocampal episodic learning, which departs from the tradition of Hebbian learning at a computational level, while retaining the overall conceptual understanding of the essential role of the hippocampus in episodic memory. Thus, we do not throw the baby out with the bathwater here, and our model remains consistent with the vast majority of behavioral and neural data consistent with the classic Hebb-Marr model. Nevertheless, it also does make novel predictions, and at a broad, behavioral level, the improved performance of our model is more compatible with the remarkable capacity of the relatively small hippocampal system for encoding so many distinct memories over the course of our lives.
In the remainder of the paper, we first introduce the computational and biological framework for error-driven learning in the hippocampal circuits, and then present the details of an implemented computational model, followed by results of this model as compared to our previous Hebbian-CA3 version \citep{KetzMorkondaOReilly13}, as well as representational analyses that capture the subregional dynamics in the model. We conclude with a general discussion, including testable predictions from this framework and implications for some salient existing behavioral and neural data on hippocampal learning.
\section{Sources of Error Driven Learning in the Hippocampal Circuit}
\begin{figure}
\centering\includegraphics[width=4in]{fig_hip_edl_model}
\caption{\footnotesize Architecture of the Theremin model. Visual depiction of one full theta-cycle training trial, separated into four different phases within the cycle (i.e., four \emph{quarters}). The CA1 learns to properly decode the CA3 pattern into the corresponding EC representation, while CA3 learns to encode the EC input in a more pattern-separated manner reflecting DG input. Arrows depict pathways of particular relevance for that quarter. First quarter: Blue arrows show initial activation of CA3 and DG via monosynaptic pathways from ECin (superficial layers of EC). Green shows CA1 likewise being monosynaptically driven from ECin, and in turn driving ECout (deep layers). Second quarter: Red arrow indicates DG driving CA3, providing a target activity state over CA3 relative to the first quarter state. Also, CA3 starts to drive CA1, resulting in full ``attempted recall'' state over ECout by the end of the Third quarter. In the Fourth quarter, the ECin drives ECout (Orange arrow), which in turn drives any resulting changes in CA1. The activation state at the end of this quarter represents the full target state for all error-driven learning throughout the hippocampus.}
\label{fig.theremin}
\end{figure}
We begin by briefly reviewing our earlier work showing how the monosynaptic pathway interconnecting the entorhinal cortex (EC) and area CA1 can support error-driven learning, via systematic changes in pathway strengths across the theta cycle \citep{KetzMorkondaOReilly13,HasselmoBodelonWyble02} (Figure~\ref{fig.theremin}). Although nominally a central part of the hippocampus, from a computational perspective it makes more sense to think of this monosynaptic CA1 $\leftrightarrow$ EC pathway (sometimes known as the temporo-ammonic pathway) as an extension of the neocortex, where the principles of error-driven learning have been well-developed \citep{OReilly96,WhittingtonBogacz19,LillicrapSantoroMarrisEtAl20}. Specifically, this pathway can be thought of as learning to encode the EC information in CA1 in a way that can then support reactivation of the corresponding EC activity patterns when memories are later retrieved via CA3 pattern completion. Computationally, this is known as an \emph{auto-encoder}, and error-driven learning in this case amounts to adjusting the synapses in this monosynaptic pathway to ensure that the EC pattern is accurately reconstructed from the CA1 activity pattern.
Using the delta rule equation shown above, this objective can be formulated as:
\begin{equation}
dW = \mbox{CA1} (\mbox{ECin} - \mbox{ECout}),
\end{equation}
where \emph{ECin} is the EC input pattern driven by cortical inputs to the EC, and \emph{ECout} is the output produced by the CA1 encoding of this ECin pattern (Figure~\ref{fig.theremin}). If these are identical, then the error is 0, and no learning occurs, and any learning that does occur is directly in proportion to the extent of error correction required. The detailed biological story for how the system could implement this error-driven learning mechanism depends on the relative out-of-phase strengths of the two major projections into the CA1 \citep{HasselmoBodelonWyble02}: in one phase the CA1 is driven more strongly by direct ECin layer 3 inputs, and in another it is driven more by the CA3 projections. Furthermore, the ECout (deep layers) is initially driven by CA1, but then is driven more by ECin (layer 3).
The net effect is that, by learning based on temporal differences in activation state over time, the delta rule is realized. Specifically, the difference in ECout activation between their CA1-driven vs. ECin-driven states produces the (ECin -- ECout) difference in the above equation. Likewise, CA1 neurons directly experience a ``reflection'' of this temporal difference, conveyed via the projections from ECout $\rightarrow$ CA1, and they also can learn using their own local delta-rule-like equation based on this temporal difference. Mathematically, this is a form of error backpropagation \citep{OReilly96,WhittingtonBogacz19,LillicrapSantoroMarrisEtAl20}. See \citet{KetzMorkondaOReilly13} for more details. Consistent with the idea that this monosynaptic pathway is more cortex-like in nature, \citet{SchapiroTurk-BrowneBotvinickEtAl17} have shown that this pathway can learn to integrate across multiple learning experiences to encode sequential structure, in a way that depends critically on the error-driven nature of this pathway, and is compatible with multiple sources of data \citep{SchapiroTurk-BrowneNormanEtAl16}.
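As a rough sketch of how this temporal difference implements Equation 3, the following hypothetical Python fragment (not the actual Leabra implementation, which uses settling dynamics over rate-coded point neurons) contrasts the CA1-driven state of ECout (minus phase) with the ECin-driven state (plus phase):

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n_ca1, n_ec = 40, 20                        # hypothetical layer sizes
W_ca1_ecout = rng.normal(0, 0.1, (n_ca1, n_ec))
lrate = 0.2

ca1 = rng.random(n_ca1)                     # CA1 activity on this trial
ec_in = (rng.random(n_ec) < 0.25).astype(float)   # target ECin pattern

# Minus phase: ECout driven by CA1 through the current weights.
ec_out_minus = 1.0 / (1.0 + np.exp(-(ca1 @ W_ca1_ecout)))
# Plus phase: ECout driven (clamped) by the ECin superficial-layer pattern.
ec_out_plus = ec_in

# Delta-rule update (Equation 3): once the CA1 -> ECout mapping reproduces
# ECin, the difference term vanishes and no further weight change occurs.
W_ca1_ecout += lrate * np.outer(ca1, ec_out_plus - ec_out_minus)
\end{verbatim}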
To extend this error-driven learning mechanism to area CA3, it is essential to have two different activation states, one that represents a better, target representation (e.g., the actual reward, or the actual ECin input in the examples considered previously), and the other that represents what the current synaptic weights produce on their own. The key idea in our new Theremin model is that the highly pattern-separated activity pattern in the DG drives a target representation as a pattern of activity over CA3, which can be compared to what the CA3 can produce on its own (i.e., prior to the arrival of DG $\rightarrow$ CA3 inputs). Thus, in effect, the DG, which is the sparsest and most pattern-separated hippocampal layer, is serving as a teacher to the CA3, driving error-driven learning signals there just to the point where CA3 on its own can replicate the DG-driven sparse, pattern-separated representations \citep{KowadloAhmedRawlinson20}.
The delta-rule formulation for this new error-driven learning component is:
\begin{equation}
dW = \mbox{ECin} (\mbox{CA3}_{dg} - \mbox{CA3}_{nodg}),
\end{equation}
where \emph{ECin} is the sending activity into CA3 (via the perforant pathway (PP) projections), $\mbox{CA3}_{dg}$ is the activity of the CA3 neurons when being driven by the strong mossy fiber (MF) inputs from the DG, and $\mbox{CA3}_{nodg}$ is the CA3 activity when not being driven by those DG inputs, i.e., from only the PP and CA3 recurrent inputs prior to the arrival of DG $\rightarrow$ CA3 inputs. Critically, to the extent that CA3 prior to DG input is already matching the DG-driven pattern, no additional learning needs to occur, thus producing the interference minimization benefits of error-driven learning. Note that the same error-driven signal in CA3 trains the lateral recurrent pathway within CA3 in addition to the ECin $\rightarrow$ CA3 PP projections, so that these recurrent connections also adapt to fit the DG-driven pattern, but no further.
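A correspondingly simplified sketch of Equation 4 (again hypothetical Python rather than the Leabra implementation) treats the DG-driven CA3 pattern as the plus-phase target for both the perforant-path and recurrent weights:

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
n_ec, n_ca3 = 20, 30                        # hypothetical layer sizes
W_ec_ca3 = rng.normal(0, 0.1, (n_ec, n_ca3))
W_ca3_ca3 = rng.normal(0, 0.1, (n_ca3, n_ca3))
lrate = 0.2

ec_in = (rng.random(n_ec) < 0.25).astype(float)    # ECin (PP) input pattern

# Minus phase: CA3 settles on PP + recurrent input alone (before DG arrives).
ca3_nodg = 1.0 / (1.0 + np.exp(-(ec_in @ W_ec_ca3)))
ca3_nodg = 1.0 / (1.0 + np.exp(-(ec_in @ W_ec_ca3 + ca3_nodg @ W_ca3_ca3)))

# Plus phase: the strong, sparse DG mossy-fiber input imposes a highly
# pattern-separated target (here a random 2-unit pattern, for illustration).
ca3_dg = np.zeros(n_ca3)
ca3_dg[rng.choice(n_ca3, size=2, replace=False)] = 1.0

err = ca3_dg - ca3_nodg                       # per-neuron temporal difference
W_ec_ca3 += lrate * np.outer(ec_in, err)      # Equation 4 (perforant path)
W_ca3_ca3 += lrate * np.outer(ca3_nodg, err)  # recurrent collaterals
# When CA3 already anticipates the DG-driven pattern, err ~ 0 and learning stops.
\end{verbatim}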
Although this form of error-driven learning might make sense computationally, how could something like this delta error signal emerge naturally from the hippocampal biology? First, as in our prior model of learning in the monosynaptic pathway \citep{KetzMorkondaOReilly13}, we adopt the idea that the delta arises as a \emph{temporal difference} between two states of activity over the CA3, which is also consistent with a broader understanding of how error-driven learning works in the neocortex; \citep{OReilly96,OReillyMunakata00,OReillyRussinZolfagharEtAl21}. The appropriate temporal difference over CA3 should actually emerge naturally from the additional delay associated with the propagation of the MF signal through the DG to the CA3, compared to the more direct PP signal from ECin to CA3. Thus, as in our prior models, the minus phase term in the delta rule occurs first (i.e., $\mbox{CA3}_{nodg}$), followed by the plus phase (this terminology goes back to the Boltzmann machine, which also used a temporal-difference error-driven learning mechanism; \citealp{AckleyHintonSejnowski85}).
Neurophysiologically, there are a number of lines of empirical evidence consistent with this temporal difference error-driven learning dynamic in the CA3:
\begin{itemize}
\item CA3 pyramidal cells respond to PP stimulation prior to the granule cells (GCs) in the DG, in vivo \citep{YeckelBerger90,DoMartinezMartinezEtAl02}, such that the indirect input through the DG will be that much more delayed due to the slower DG response (by roughly 5 msec at least).
\item MF inputs from the DG GCs to the CA3 can induce heterosynaptic plasticity at PP and CA3 recurrent connections \citep{McMahonBarrionuevo02,TsukamotoYasuiYamadaEtAl03,KobayashiPoo04,RebolaCartaMulle17}. This is consistent with the ability of the later-arriving DG inputs to drive the CA3 synaptic changes toward the pattern imposed by this stronger, target-like input, compared to the earlier pattern initially evoked by PP and CA3 recurrent inputs.
\item Although several studies have found that contextual fear learning is intact without MF input to CA3 \citep{McHughJonesQuinnEtAl07,NakashibaCushmanPelkeyEtAl12,KitamuraSunMartinEtAl15}, incomplete patterns from DG during encoding impair the function of EC $\rightarrow$ CA3 pathway in contextual fear conditioning tasks \citep{BernierLacagninaAyoubEtAl17}, suggesting that DG still plays an important role in heterosynaptic plasticity at CA3.
\end{itemize}
In addition to this DG-driven error learning in CA3, we explored a few other important principles that also help improve overall learning performance. First, reducing the strength of the MF inputs to the CA3 during memory recall helped shift the dynamics toward pattern completion instead of pattern separation, as was hypothesized in \citet{OReillyMcClelland94}. This is consistent with evidence and models showing that MF projections are not necessary in naturally recalling a memory \citep{NakashibaCushmanPelkeyEtAl12,BernierLacagninaAyoubEtAl17,Rolls13}. However, other data suggests that it still plays an important role in increasing recall precision \citep{RuedigerVittoriBednarekEtAl11,NakashibaCushmanPelkeyEtAl12,BernierLacagninaAyoubEtAl17,PignatelliRyanRoyEtAl19}. Thus, consistent with these data, we found that reducing, but not entirely eliminating MF input to the CA3 during recall was beneficial, most likely because it enabled the other pathways to exert a somewhat stronger influence in favor of pattern completion, while still preserving the informative inputs from the DG.
Second, we experimented with the parameters on the one remaining Hebbian form of learning in the network, which is in the ECin $\rightarrow$ DG pathway (i.e., the perforant path). This pathway does not have an obvious source of error-driven contrast, given that there is only one set of projections into the DG granule cells. Thus, we sought to determine if there were particular parameterizations of Hebbian learning that would optimize learning in this pathway, and found that shifting the balance of weight decreases over weight increases helped learning overall, working to increase pattern separation in this pathway still further.
Finally, we tested a range of different learning rates for all of the pathways in the model, along with relative strengths of the projections, across a wide range of network sizes and numbers of training items, to determine the overall best parameterization under these new mechanisms.
Next, we describe our computational implementation within the existing \citet{KetzMorkondaOReilly13} framework, and then present the results of a systematic large-scale parameter search of all relevant parameters in the model, to determine the overall best-performing configuration of the new model.
\section{Methods}
\subsection{Hippocampal Architecture}
The current model, which we refer to as the Theremin (i.e., Total Hippocampal ERror MINimization) (Figure~\ref{fig.theremin}), is based on our previous theta-phase hippocampus model \citep{KetzMorkondaOReilly13}, which was developed within the earlier Complementary Learning System (CLS) model of the hippocampus \citep{NormanOReilly03,OReillyRudy01}. The broader implementation framework is the Leabra model (Local, Error-driven, and Associative, Biologically Realistic Algorithm), which provides standard point-neuron rate-coded neurons, inhibitory interneuron-mediated competition and sparse, distributed representations, full bidirectional connectivity, and temporal-difference based error-driven learning dynamics \citep{OReillyMunakata00,OReillyMunakataFrankEtAl12}. See \url{https://github.com/emer/leabra} for fully-documented equations, code, and several example simulations, including the exact model presented here.
Figure~\ref{fig.theremin} shows the standard hippocampal architecture captured in our models. The EC superficial layer (ECin) is the source of input to the hippocampus, integrated from all over the cortex. Based on anatomical and physiological data, we organize the EC into different pools (also called slots) that reflect the inputs from different cortical areas, and thus have different types of representations reflecting the specializations of these different areas \citep{WitterDoanJacobsenEtAl17}. In the present model, we assume some pools reflect item-specific information, while others reflect the various aspects of information that together constitute context, which is important for distinguishing different memory lists in our tests.
The ECin projects to the DG and CA3 via broad, diffuse PP projections, which have a uniform 25\% random chance of connection. This connectivity is essential for driving conjunctive encoding in the DG and CA3, such that each receiving neuron receives a random sample of information across the full spectrum present in the ECin. Further, the DG and CA3 have high levels of inhibition, driving extreme competition, such that only those neurons that have a particularly favorable conjunction of input features are able to get active in the face of the strong inhibition. This is the core principle behind Marr's pattern separation mechanism, captured by his simple R-theta codon model \citep{Marr71}. Using the standard FFFB (feedforward \& feedback) inhibition mechanism in Leabra, DG has an inhibitory conductance multiplier of 3.8 and CA3 has 2.8, compared to the standard cortical value of 1.8, which produces activity levels of around 15\%. These settings resulted in DG activity of around 1\% and CA3 activity of around 2\%. The number of units in DG is roughly 5 times that in CA3, consistent with the theta-phase hippocampus model.
The CA3 receives strong MF projections from the DG, which have a strength multiplier of 4 (during encoding), giving the DG a much stronger influence on CA3 activity compared to the direct PP inputs from ECin. CA3 also receives recurrent collateral projections which have a strength multiplier of 2, which are the critical Hebbian cell-assembly autoassociation projections in the standard Hebb-Marr model, as captured in \cite{KetzMorkondaOReilly13} using a Hebbian learning mechanism. That model also uses Hebbian learning in the PP pathways from ECin to DG and CA3, which also facilitate pattern completion during recall as analyzed in \citet{OReillyMcClelland94}.
In the monosynaptic pathway, ECin (superficial) layers project to CA1, which then projects back into the deep layers of EC, called ECout in the model, such that CA1 encodes the information in ECin and can drive ECout during recall to drive the hippocampal memory back out into the cortex. This is the auto-encoder function of CA1, which is essential for translating the highly pattern-separated representations in CA3 back into the ``language'' of the cortex. Thus, a critical locus of memory encoding is in the CA3 $\rightarrow$ CA1 connections that associate the CA3 conjunctive memory with the CA1 decoding thereof --- without this, the randomized CA3 patterns would be effectively unintelligible to the cortex.
Unlike the broad and diffuse PP projections, the EC $\leftrightarrow$ CA1 connections obey the pool-wise organization of EC, consistent with the focal, point-to-point nature of the EC $\leftrightarrow$ CA1 projections \citep{WitterDoanJacobsenEtAl17}. Thus, each pool is separately auto-encoding the specific, specialized information associated with a given cortical area, which enables these connections to slowly learn the systematic ``language'' of that area. The entire episodic memory is thus the distributed pattern across all of these pools, but the monosynaptic pathway only sees a systematic subset, which it can efficiently and systematically auto-encode.
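The difference between the diffuse perforant-path connectivity and the pool-wise EC $\leftrightarrow$ CA1 connectivity can be illustrated with a simple connectivity-mask sketch (hypothetical Python; the layer sizes and pool counts are arbitrary, and only the 25\% perforant-path density is taken from the description above):

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
n_pools, pool_size = 4, 10            # arbitrary illustrative numbers
n_ec = n_pools * pool_size            # ECin units, organized into pools
n_ca3 = 200                           # receiving layer (e.g., DG or CA3)

# Perforant path: uniform 25% random connectivity, ignoring pool structure,
# so each receiving neuron samples a random conjunction of features across EC.
pp_mask = rng.random((n_ec, n_ca3)) < 0.25

# EC <-> CA1: pool-wise (block-diagonal) connectivity, so each CA1 pool only
# auto-encodes the EC pool corresponding to a single cortical area.
n_ca1 = n_pools * pool_size
ca1_mask = np.zeros((n_ec, n_ca1), dtype=bool)
for p in range(n_pools):
    sl = slice(p * pool_size, (p + 1) * pool_size)
    ca1_mask[sl, sl] = True

print(pp_mask.mean(), ca1_mask.mean())   # ~0.25 vs. 1/n_pools of all pairs
\end{verbatim}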
The purpose of the theta-phase error-driven learning in the \citet{KetzMorkondaOReilly13} model is to shape these synaptic weights to support this systematic auto-encoding of information within the monosynaptic pathway. Specifically, CA1 patterns at peaks and troughs of theta cycles come from CA3-retrieved memory and ECin inputs, respectively. As shown in equation 3 above, the target plus-phase activation comes from the ECout being strongly driven by the ECin superficial patterns, in contrast to the minus phase where ECout is being driven directly by CA1. Thus, over iterations of learning, this error-driven mechanism shapes synapses so that the CA1 projection to ECout will replicate the corresponding pattern on ECin.
Relative to this theta-phase model, the new Theremin model introduces error-driven learning in the CA3, using equation 4 as shown above, which was achieved by delaying the output of DG $\rightarrow$ CA3 until the 2nd theta phase cycle (Figure~\ref{fig.theremin}). Thus, the plus phase represents the CA3 activity in the presence of these strong DG inputs, while the minus phase is the activity prior to the activation of these inputs. In addition, as noted earlier, we tested the effects of reducing the strength of MF inputs to CA3 during recall testing, along with testing all other relevant parameters in a massive grid search.
\subsection{Model Testing}
\begin{figure}
\centering\includegraphics[width=5in]{fig_hip_edl_abac}
\caption{\footnotesize AB--AC list learning paradigm diagram and human data reproduced from an empirical experiment \citep{BarnesUnderwood59}. A) The first AB list is trained until memory accuracy reaches 100\% or 15 epochs have elapsed, whichever comes first; the second AC list is then trained to the same criterion, while continuing to test AB and AC items. The detailed procedure is described in Methods. B) Human participants show moderate interference with memory for the AB list after learning the AC list.}
\label{fig.abac}
\end{figure}
The task used in the current study is a standard AB-AC paired-associates list-learning paradigm, widely used to stress interference effects \citep{BarnesUnderwood59,McCloskeyCohen89} (Figure~\ref{fig.abac}). In these paradigms, typically, a participant learns a list of word pairs, with each pair referred to as \emph{A-B}. Once the pairs are learned to a criterion or a fixed number of repetitions, participants learn a new list of \emph{A-C} word pairs, in which the first word in each \emph{A-B} pair is now associated with a new word. Learning of A-C pairs is typically slowed due to competition with previously learned A-B pairs (\emph{proactive interference}), and once the A-C pairs are learned, retention of A-B pairs is reduced (\emph{retroactive interference}).
To simulate the AB--AC paradigm, each pair of A and B items (unique random bit patterns in the model) was trained, and then tested by probing with the A item and testing for recall of the associated B item. A list context representation was also present during training and testing, to distinguish the AB vs. AC list. Once recall accuracy for all AB pairs reached 100\%, or 15 epochs of the whole AB list had been trained, the model switched to learn the AC list, where previously learned A items were paired with novel C items and AC list context. Similarly, if memory for all AC pairs reached 100\%, or 30 epochs had been trained in total, that run was considered complete. We ran 30 different simulated subjects (i.e., runs) on each configuration and set of parameters, with each such subject having a different set of random initial synaptic weights.
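For concreteness, the training and testing protocol just described can be summarized in schematic form (a Python-style sketch of the procedure; \texttt{train\_epoch} and \texttt{test\_recall} are hypothetical hooks into the actual network simulation, not functions from our codebase):

\begin{verbatim}
def run_ab_ac(train_epoch, test_recall, ab_list, ac_list,
              max_ab_epochs=15, max_total_epochs=30):
    """Schematic AB-AC protocol: train AB to criterion (or 15 epochs), then
    train AC (testing both lists) until criterion or 30 total epochs."""
    epochs, ab_mem = 0, 0.0
    while epochs < max_ab_epochs:              # Phase 1: AB pairs + AB context
        train_epoch(ab_list); epochs += 1
        if test_recall(ab_list) >= 1.0:        # 100% cued recall of B given A
            break
    while epochs < max_total_epochs:           # Phase 2: AC pairs + AC context
        train_epoch(ac_list); epochs += 1
        ab_mem = test_recall(ab_list)          # retroactive interference probe
        if test_recall(ac_list) >= 1.0:
            break
    return epochs, ab_mem                      # NEpochs and final ABmem
\end{verbatim}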
There are several central questions that we address in turn. First, we compared the earlier theta-phase hippocampus model with the new Theremin model to determine the overall improvement resulting from the new error-driven learning mechanism and other optimized parameters. This provides an overall sense of the importance of these mechanisms for episodic memory performance, and an indication of what kinds of problems can now be solved using these models, at a practical level. In short, the Theremin model can be expected to perform quite well learning challenging, overlapping patterns, opening up significant new practical applications of the model.
Next, we tested different parameterizations of the Theremin model, to determine the specific contributions of: 1) error-driven learning specifically in the CA3, compared to Hebbian learning in this pathway, with everything else the same; 2) reduced MF strength during testing (cued recall); 3) the balance of weight decreases vs. increases in the ECin $\rightarrow$ DG projections; 4) the effect of pretraining on the monosynaptic pathway between EC and CA1, which simulates the accumulated learning in CA1 about the semantics of EC representations, reflecting in turn the slower learning of cortical representations. In other words, human participants have extensive real life experience of knowing the A/B/C list items, enabling the CA1 to already be able to invertably reconstruct the corresponding EC patterns for them, and pretraining captures this prior learning. Pretraining has relatively moderate benefits for the Theremin model, and was used by default outside of this specific test. The pretraining process involved turning DG and CA3 off, while training the model with items and context separately only in the monosynaptic EC $\leftrightarrow$ CA1 pathway for 5 epochs.
The learning capacity of a model is proportional to its size, so we tested a set of three network sizes (small, medium, large, see Appendix for detailed parameters) to determine the relationship between size and capacity in each case. The list sizes ranged from 20 to 100 pairs of AB--AC associations (for comparison, \citet{BarnesUnderwood59} used 8 pairs of nonsense syllables). For the basic performance tests, the two dependent variables were NEpochs and ABmem. NEpochs measures the total number of epochs used to finish one full run through AB and AC lists, which measures the overall speed of learning (capped at 30 if the network failed to learn). ABmem is the memory for AB pairs after learning the AC list, thus representing the models' ability to resist interference.
In addition to these performance tests, we ran representational analyses on different network layers (i.e., hippocampal subregions) over the course of learning. This enabled us to directly measure the temporal difference error signals that drove learning in Theremin, and how representations evolved through learning. Furthermore, by comparing across differences in learning algorithm and other parameters, we can directly understand the overall performance differences. The main analytic tool here is to compute cycle-by-cycle correlations between the activity patterns present at that cycle and the patterns present at the end of a trial of processing (100 cycles), which provides a simple 1-dimensional summary of the high-dimensional layer activation patterns as they evolve over time.
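A minimal sketch of this analysis (hypothetical Python; \texttt{acts} is assumed to be a cycles $\times$ units activity matrix recorded from one layer over a 100-cycle trial, and the data below are fabricated purely for illustration) is:

\begin{verbatim}
import numpy as np

def cycle_correlations(acts):
    """Correlate the activity pattern at each cycle with the pattern present
    at the final cycle of the trial (cycle 100)."""
    final = acts[-1]
    return np.array([np.corrcoef(a, final)[0, 1] for a in acts])

# Fabricated example: 100 cycles x 30 units of noisy settling toward a target.
rng = np.random.default_rng(3)
target = rng.random(30)
acts = np.array([target * (t / 99) + rng.normal(0, 0.05, 30) for t in range(100)])
print(cycle_correlations(acts)[[0, 49, 99]])  # rises toward 1.0 as the layer settles
\end{verbatim}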
\section{Results}
\subsection{Overall memory performance}
\begin{figure}
\centering\includegraphics[width=4in]{fig_hip_edl_thetaphase}
\caption{\footnotesize Theremin vs. ThetaPhase on AB memory and learning time for all three network sizes. The Theremin model was better at counteracting interference across all list sizes and network sizes, and had significantly faster training time across all list sizes and network sizes.}
\label{fig.thetaphase}
\end{figure}
First, we examined the broadest measure of overall learning performance improvements in Theremin compared to the earlier theta-phase model from \citet{KetzMorkondaOReilly13}. Figure~\ref{fig.thetaphase} shows the results across all three network sizes and numbers of list items. For all three network sizes, the ABmem results show that Theremin was better at counteracting interference and retained more memory for AB pairs than the theta-phase hippocampus model across all list sizes and network sizes (\emph{ps} \textless \ .01 except SmallHip List100 (\emph{p} = 0.736)). Moreover, the full Theremin model completed learning significantly faster (i.e., the NEpochs measure) than the theta-phase hippocampus model across all list sizes and network sizes (\emph{ps} \textless \ .01 except SmallHip List100 (all NEpochs = 30)).
\begin{figure}
\centering\includegraphics[width=4in]{fig_hip_edl_mods}
\caption{\footnotesize Theremin vs. other models on AB memory and learning time. NoEDL is the Theremin without the new error-driven learning mechanism. NoDynMF is the Theremin with the same mossy fiber strength during training and testing. NoDGLearn is the Theremin with ECin $\rightarrow$ DG learning off. NoPretrain is the Theremin without pretraining of CA1. Each of these factors makes a significant contribution, as seen in decrements relative to the full Theremin in interference resistance (AB memory) and learning time.}
\label{fig.mods}
\end{figure}
To more specifically test the effects of the new error-driven CA3 mechanism in the Theremin model, we directly compared the Theremin model with a variant lacking the error-driven CA3 component (labeled NoEDL), but with everything else the same. For this and subsequent comparisons, we focus on the medium and large network sizes, as the small case often failed to learn at all for larger list sizes. Figure~\ref{fig.mods} shows that, except for the smallest list size (20 items), Theremin retained significantly more AB memory (\emph{ps} \textless \ .01, except large network with list size of 20 (\emph{p} = 0.321)) and learned faster (\emph{ps} \textless \ .01, except large network with list size of 20 (\emph{p} = 0.343)) than NoEDL. Thus, it is clear that this error-driven learning mechanism is responsible for a significant amount of the improved performance of the Theremin model relative to the earlier theta-phase model.
To determine the contributions of the other new mechanisms included in the Theremin model, we compared the full Theremin to versions without each of these mechanisms (Figure~\ref{fig.mods}). The NoDynMF version eliminated the mechanism of dynamically decreasing the strength of MF inputs from DG to the CA3 during recall, and the results show a significant effect on performance for all but the smallest list size (20 items) (NEpoch \emph{ps} \textless \ .01, except large network with list size of 20 (\emph{p} = .013), ABMem \emph{ps} \textless \ .01, except large network with list size of 20 and 80 (\emph{p} = 0.127)).
To determine the importance of learning in the ECin $\rightarrow$ DG pathway overall, we tested a NoDGLearn variant with no learning at all in this pathway. In principle, the DG could support its pattern separation function without any learning at all, relying only on its sparse activity and random PP connectivity. However, we found that learning in this pathway is indeed important, with an overall decrease in performance for larger list sizes (above 40 items) (NEpoch \emph{ps} \textless \ .01, BigHip List40 \emph{p} = .011; ABMem \emph{ps} \textless \ .01, BigHip List40 \emph{p} = .025). Interestingly, as the list size scaled up, the NoDGLearn model learned increasingly more slowly, such that it was even slower than the theta-phase model at a list size of 100. This effect is attributable to the strong role of the DG in training the CA3: when the DG's ability to drive strong pattern separation is compromised, CA3 learning suffers, and thus overall memory performance declines.
The higher rate of weight decrease (LTD = long-term depression in biological terms) relative to weight increases in the ECin $\rightarrow$ DG pathway was also important: eliminating this asymmetry significantly decreased performance for larger list sizes (above 60 items) (NEpoch \emph{ps} \textless \ .01, SmallHip List60 \emph{p} = .051; ABMem \emph{ps} \textless \ .05, BigHip List80 \emph{p} = .129). We also found that a lower learning rate in the ECin $\rightarrow$ DG pathway improved the ABMem score (reducing interference), but resulted in slower learning, and vice-versa for higher learning rates, consistent with the fundamental tradeoff between learning rate and interference that underlies the complementary learning systems framework \citep{McClellandMcNaughtonOReilly95}. Likewise, because the default Theremin parameters are already optimized, comparing Theremin against a lower or higher learning rate variant yields a significant improvement in ABMem or NEpoch, respectively, but never in both. To illustrate this tradeoff directly, we compared two Theremin variants with substantially different learning rates: for list sizes over 40, the higher learning rate produced faster learning (\emph{ps} \textless \ .01) but worse ABMem (\emph{ps} \textless \ .01) than the lower learning rate, and vice versa.
The final mechanism we tested was the pretraining of the EC $\leftrightarrow$ CA1 encoder pathway, to reflect long-term semantic learning in this pathway. The NoPretrain variant showed significantly worse performance at all but the smallest list sizes (NEpoch \emph{ps} \textless \ .01; ABMem \emph{ps} \textless \ .05 except BigHip List20 (\emph{p} = .155)).
\subsection{Representational dynamics}
\begin{figure}
\centering\includegraphics[width=4in]{fig_hip_edl_test_stats}
\caption{\footnotesize Statistics for area CA3 over the course of testing (List 100, medium sized network). Representational similarity analyses (RSA) for area CA3 for Theremin vs. NoEDL show how the error-driven learning in Theremin reduces the representational overlap (top left) whereas the Hebbian learning in NoEDL increases the representational overlap (top right). This explains the differential interference as shown in the AB Memory plot for each case (bottom row). The number of epochs used in Theremin training was set to a fixed number (i.e., 10) that enabled complete learning of the AB and AC lists, while in NoEDL it was set to the maximum used in the current paper (i.e., 30).}
\label{fig.test_stats}
\end{figure}
Having established the basic memory performance effects of the error-driven CA3 and other mechanisms in the Theremin model, we now turn to some analyses of the network representations and dynamics to attempt to understand in greater detail how the error-driven learning shapes representations during learning, how the activation dynamics unfold over the course of the theta cycle within a single trial of learning, and how these dynamics change over multiple iterations of learning. For these analyses, we focus on the 100-item list size, and the medium sized network, comparing the full Theremin model vs. the NoEDL model, to focus specifically on the effects of error-driven learning in the CA3 pathways.
Figure~\ref{fig.test_stats} shows a representational similarity analysis (RSA) of the different hippocampal layers over the course of learning, comparing the average correlation of representations in CA3 within each list (all AB items and all AC items, e.g., A1B1 vs. A2B2) and between lists (AB vs. AC, e.g., A1B1 vs. A1C1). These plots also show the proportion of items correctly recalled from each list, with the switch over from the AB to AC list happening half-way through the run (we fixed this crossover point to enable consistent averaging across 30 simulated subjects, using a number of epochs that allowed successful learning for each condition). These results show that the error-driven learning in the full Theremin model immediately learns to decrease the similarity of representations within the list (e.g., WithinAB when learning AB) and between lists over training, while the Hebbian learning in the NoEDL model fails to separate these representations and results in increases in similarity over time. This explains the reduced interference and improved learning times for the error-driven learning mechanism, and is consistent with the idea that the continuous weight changes associated with Hebbian learning are deleterious.
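The within-list and between-list similarity measures shown in Figure~\ref{fig.test_stats} can be expressed concretely as in the following minimal Python sketch (illustrative only; the pattern arrays and names are simplifying assumptions, not part of the released model code):
\begin{verbatim}
import numpy as np
from itertools import combinations

def rsa_within_between(ab_pats, ac_pats):
    # ab_pats, ac_pats: (n_items, n_units) CA3 patterns for each list
    corr = lambda a, b: np.corrcoef(a, b)[0, 1]
    within_ab = np.mean([corr(ab_pats[i], ab_pats[j])
                         for i, j in combinations(range(len(ab_pats)), 2)])
    between = np.mean([corr(a, c) for a in ab_pats for c in ac_pats])
    return within_ab, between
\end{verbatim}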
\begin{figure}
\centering\includegraphics[width=4in]{fig_hip_edl_pat_sim}
\caption{\footnotesize Changes in hippocampal subregions' pattern similarities over the course of the first 4 epochs of learning, within a full trial for an example AB pair (model timing equivalent to 100 ms), for the Theremin (top row) and NoEDL models (bottom row). Each line reflects the correlation of the current-time activity pattern relative to the activity pattern at the end of the trial. Two major effects are evident. First, the CA1 pattern learns over epochs to quickly converge on the final plus-phase activation state, based on learning in the CA3 $\rightarrow$ CA1 pathway. Second, the Theremin model shows how the CA3 pattern learns over epochs to converge on the DG-driven activation state that arises after cycle 25, reflecting CA3 error-driven learning. Additionally, big-loop signals from ECout back to ECin can be observed from cycle 25 to 75 in the first epoch for both models, shifting the CA3 pattern slightly away from its final state. Dips within the first few cycles are due to the initial settling of temporally distinct patterns and are not of interest to the current paper.}
\label{fig.pat_sim}
\end{figure}
Figure~\ref{fig.pat_sim} shows an example AB pair plot, with each layer's correlation with the final activation state at the end of the trial across 4 training epochs. As illustrated in the plot, the DG, CA3 and CA1 layers follow different learning dynamics across the four quarters of a trial. In the CA3, error-driven learning in the Theremin model causes its activation to converge over the course of learning on the target DG input that arrives starting after cycle 25. This learning progression is not evident in the NoEDL model, where Hebbian learning in the CA3 establishes a relatively stable representation early on. The CA1 shows increasing convergence to the final plus-phase pattern starting in the second and third quarters (cycles 26--75), when CA3 starts to drive CA1. Interestingly, there is evidence for a ``big-loop'' error signal \citep{KumaranMcClelland12} reflecting activation circulating through the trisynaptic pathway, out through the EC, and back into the CA3, pushing its pattern away from the stabilized one, as depicted by the slightly curved green line in the first epoch.
To elaborate on the error-driven learning dynamics in the Theremin model: as learning progresses, the CA3 pattern in the first quarter becomes increasingly similar to its final pattern (Figure~\ref{fig.pat_sim}). In effect, this similarity signal reflects how close the CA3 pattern is to its final DG-dominated pattern, before the DG starts to have an effect on CA3. In the first epoch, CA3 is driven only by the ECin $\rightarrow$ CA3 and CA3 $\rightarrow$ CA3 inputs, resulting in a large temporal difference (error signal), which in turn modifies these connections (i.e., heterosynaptic plasticity). This error shrinks rapidly, and learning stops once there is no remaining error. The NoEDL model, by contrast, continually increases the synaptic weights between CA3 and other regions whenever two neurons are active together, in accordance with the Hebbian learning principle.
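The essential form of this error-driven update can be sketched as a contrastive, Hebbian-difference rule, in which the first-quarter (minus) and DG-dominated (plus) activation states play the roles of prediction and target. The following is a minimal Python sketch under our simplifying assumptions; the actual model uses more detailed Leabra-style learning equations, and the learning rate shown is an arbitrary illustrative value:
\begin{verbatim}
import numpy as np

def edl_dw(x_minus, y_minus, x_plus, y_plus, lr=0.02):
    # x: presynaptic activations (e.g., ECin or CA3 recurrent inputs)
    # y: postsynaptic CA3 activations
    # minus = first-quarter pattern (before the DG input arrives)
    # plus  = later DG-dominated pattern; their difference is the error
    return lr * (np.outer(y_plus, x_plus) - np.outer(y_minus, x_minus))
\end{verbatim}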
\section{Discussion}
By incorporating biologically plausible error-driven learning mechanisms at the core CA3 synapses in our computational model of the hippocampus, along with a few other important optimizations, we have been able to significantly improve learning speed and memory capacity (resistance to interference) compared to our previous model that used Hebbian learning in these synapses. These results demonstrate the critical ability of error-driven learning to automatically limit further learning once it has achieved sufficient changes to support effective memory recall, which then significantly reduces the amount of interference that would otherwise occur from continued synaptic changes. Furthermore, representational similarity analysis (RSA) was used to characterize the temporal dynamics within the hippocampal formation that explain the effects of the error-driven learning mechanism, enabling the model to make specific subregional predictions that could be tested in experiments.
Another contribution of the current model is to test several computationally-motivated mechanisms that improve overall performance, and could plausibly be implemented in the hippocampal biology. First, decreasing the DG $\rightarrow$ CA3 strength during recall improves performance, because the DG otherwise biases the system more strongly toward pattern separation rather than the pattern completion needed for recall. Second, as opposed to the intuitive idea that the DG naturally forms highly separated patterns, learning in the ECin $\rightarrow$ DG pathway is important for overall performance. Furthermore, favoring LTD over LTP in this pathway is beneficial because it forces the DG to form sparse representations, suggesting that learning overall supports the DG's pattern separation dynamics. Although our previous model without all of these improvements was sufficient for simulating smaller-scale one-off experiments, the significantly improved capacity of the present model opens up the potential to examine longer time-scale learning contributions of the hippocampus, and other larger-scale datasets as emphasized in \citet{KowadloAhmedRawlinson20}.
The Theremin model retains the major tenets of the Hebb-Marr paradigm based on rapid episodic learning, while incorporating error-driven learning to optimize the learning capacity of the system relative to the predominant use of Hebbian learning in other models. In the following subsections, we consider other theoretical models of the hippocampus that can usefully be compared with the present model, including a number of widely-cited theories that postulate some form of error-signaling or error-driven learning. At the heart of many of these models is the idea that the hippocampus can generate predictions in order to then compute a novelty or error signal relative to such predictions, or to learn and predict sequences of future states. After briefly summarizing these models, we discuss what roles the hippocampus and the neocortex play in generating predictions according to the complementary learning systems (CLS) framework in which the current model is based \citep{McClellandMcNaughtonOReilly95,OReillyBhattacharyyaHowardEtAl14,OReillyRanganathRussin21}.
\subsection{Prediction-based Models of the Hippocampus}
One longstanding and influential set of theories suggests that the hippocampus acts as a \emph{comparator}, generating predictions in order to detect and signal \emph{novelty} or \emph{surprise} \citep{Gray82,Vinogradova01,LismanGrace05}. Specifically, the hippocampus in these models generates a global \emph{scalar} signal as a function of the relative mismatch between a predicted state and the actual next state. For example \citet{Gray82} proposed that combining previous sensory information and the motor plan creates predictions about the current state, which are then compared with the actual current sensory information. The motor plan is maintained if the two states match, but it is interrupted in the case of incorrect predictions (i.e., surprise or novelty), so that the animal can attempt to solve the problem in a different way. In the \citet{LismanGrace05} model, the hippocampal novelty or surprise signal is hypothesized to drive phasic dopamine firing via its subcortical projection pathway through the subiculum. These different models vary in terms of the exact mechanisms and subfields proposed to compute the mismatch signal (CA3, CA1 or subiculum), but they assume a similar overall functional role for the hippocampus in terms of synthesizing predictions.
In the present model, error signals are not summed, but rather used to optimize learning of specific associations. However, it is possible that the temporal-difference error signals present in our hippocampal model could play a role in generating a global novelty signal. For example, at different points in the theta phase cycle (Figure~\ref{fig.theremin}), area CA1 and ECout are representing the current information as encoded in CA3 and its projections into CA1, versus the bottom-up cortical state present in ECin. The difference between these two activation states could be converted into a global mismatch signal that would reflect the relative novelty of the current state compared to prior episodic memory learning in the CA3 of the hippocampus. Likewise, it is possible that a similar global error signal could be computed from the temporal differences over CA3 in our model, reflecting the extent to which CA3 has learned to encode the more pattern-separated DG-driven pattern, which is likely to also reflect the relative novelty of the current input state. We will investigate these possibilities in future research.
Prediction-based learning in the hippocampus is also central to another early computational model, which is based on error-driven backpropagation learning in the context of a predictive autoencoder \citep{MyersGluck95}. In this model, the hippocampal network learns by simultaneously attempting to recreate the current input patterns, and also predict future reinforcement outcomes. The cortical network representations are then shaped by hippocampal training signals, similar in spirit to the scalar novelty / surprise signals. Simulations with this model and its hippocampus-lesioned variant have been shown to replicate a wide range of conditioned behaviors in rats and rabbits \citep{GluckMyers94}, although it is notable that many of these same phenomena can also be accounted for using an earlier version of the episodic memory model presented here \citep{OReillyRudy01}.
Another class of models hypothesizes that the hippocampus learns \emph{sequences} of events over time, such that, when a past state is encountered, the hippocampus can enable the prediction of potential outcomes of actions taken in novel situations, based on what has happened previously \citep{Levy96,WallensteinHasselmo97,JensenLisman96,TsodyksSkaggsSejnowskiEtAl96,Rolls13,SchapiroTurk-BrowneBotvinickEtAl17,StachenfeldBotvinickGershman17}. In some of these models, the recurrent connections in area CA3 learn to associate prior time step representations with subsequent time step patterns, thus learning to predict the next state based on the current state. Other models suggest that the hippocampus learns systematic predictive representations (e.g., a successor map of subsequent states following from the current state in the case of \citealp{StachenfeldBotvinickGershman17}). Most of the models suggest that the hippocampus itself is capable of synthesizing novel predictions based on these learned sequences.
\subsection{Neural Mechanisms of Prediction in Hippocampus and Cortex}
The models discussed above emphasize the idea that memory retrieval in the hippocampus is a form of prediction, and at a broader level, many researchers have embraced the idea that the hippocampus might be specialized for generating predictions in the service of navigation, reasoning, and imagination \citep{DavachiDuBrow15,KokTurk-Browne18,Mizumori13,LismanRedish09,ZeithamovaSchlichtingPreston12,MackLovePreston18}. These theories, however, tend to describe prediction in broad strokes, and as such, we argue that they do not respect the computational limitations of the hippocampus.
In contrast to the above models, we do not believe that the hippocampus itself is well-suited for generating predictions in novel situations; instead, we think the relevant data can be better captured in terms of the simple episodic memory framework that the Hebb-Marr model embodies (as updated in the present paper). Here, the hippocampus is specialized for rapidly encoding memories of distinct events or episodes using highly pattern-separated representations, which can later be recalled through the process of pattern completion. Given the overwhelming empirical support for the idea that the hippocampus is specialized for rapidly learning new episodic memories, we believe that it cannot also support a semantic prediction system capable of generating systematic predictions in novel situations.
Specifically, generating a novel prediction typically requires a cognitive process to synthesize prior experience and general principles (e.g., a scientific theory, or implicitly-learned regularities of the world, such as intuitive physics) to specify what will happen in the future. This kind of systematic generalization from prior experience to novel situations is precisely what the neocortex is thought to be optimized for according to the CLS theory \citep{McClellandMcNaughtonOReilly95,OReillyBhattacharyyaHowardEtAl14,OReillyRanganathRussin21}. This is because the overlapping representations of cortical networks are optimized to slowly integrate statistical regularities across many different experiences to learn \emph{semantic} representations capable of supporting systematic generalization in novel situations. Indeed there are various models of error-driven predictive learning in the neocortex capable of learning such systematic predictive abilities, including a biologically-detailed proposal based on thalamocortical loops \citep{OReillyRussinZolfagharEtAl21}.
Although the computational architecture of the hippocampus is not well-suited for generating predictions on its own, it can certainly provide relevant episodic memories as input to the cortical prediction generation process. For example, strategic recall of particular memories, followed by appropriate updating of the details to better match the current circumstances, could produce a more generative predictive system that can synthesize novel predictions for new situations. These kinds of complex interactions, however, go well beyond the capabilities of the hippocampal circuit by itself, as captured in any implemented computational model.
\subsection{Nonmonotonic Plasticity vs. Error-driven Learning}
The temporal difference error signals that drive learning in our model can be related to the neural activation signals that drive nonmonotonic plasticity (NMP) learning dynamics as explored by Norman and colleagues \citep{RitvoTurk-BrowneNorman19}. Specifically, the nonmonotonic plasticity function drives LTD when activations are at a middling, above-zero level, while LTP occurs for more strongly activated neurons. This is the same underlying learning function that we use in our error-driven learning model \citep{OReillyMunakataFrankEtAl12}, and thus it can be difficult to strongly distinguish the predictions of these two models. In particular, the conditions under which errors drive LTD in our model can be construed as being within the LTD range of the nonmonotonic plasticity function, under various additional assumptions. However, the NMP models have not been implemented within the context of a full hippocampal circuit, and it is unclear how those models might actually perform in specific conditions. Thus, the differences are currently more at the level of abstract principles than of detailed model comparisons.
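Schematically, the nonmonotonic plasticity function described above has the following piecewise form (a minimal Python sketch; the thresholds and slopes are arbitrary illustrative choices, not parameters taken from either model):
\begin{verbatim}
def nmp_dw(act, theta_d=0.2, theta_mid=0.5, lr=0.1):
    # schematic U-shaped plasticity: no change for very low activation,
    # LTD (negative dW) that deepens toward a middling activation level,
    # then rises back through zero into LTP (positive dW) for strongly
    # activated neurons
    if act < theta_d:
        return 0.0
    elif act < theta_mid:
        return -lr * (act - theta_d)
    else:
        return lr * (act - theta_mid) - lr * (theta_mid - theta_d)
\end{verbatim}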
\subsection{Novel Predictions}
There are several novel, testable predictions from our model that can distinguish it from a more standard Hebbian-based model:
\begin{itemize}
\item As shown in Figure~\ref{fig.test_stats}, the error-driven learning in area CA3 serves to drive pattern separation over time among otherwise similar representations, whereas the Hebbian version of the model showed increasing pattern similarity over learning. Thus, experiments that track the progression of representational similarity over the course of learning could distinguish these two patterns.
\item Our model depends critically on the modulation of different pathways of connectivity within the hippocampus, organized according to the theta cycle in rodents following \citet{HasselmoBodelonWyble02}, creating the temporal differences that drive learning. Thus, neural manipulations that selectively disrupt the theta cycle and / or these pathway-specific modulations should affect error-driven learning, but may not affect recall of previously-learned information to the same extent. By contrast, it is not clear from a purely Hebbian learning framework why disrupting the theta cycle should impair that form of learning. Intriguingly, a recent report appears to be consistent with the predictions of our model: \citet{QuirkZutshiSrikanthEtAl21} found that a highly selective disruption of the timing of the theta cycle produced selective deficits in learning. Even more selective tests of the specific temporal dynamics associated with the CA3 and CA1 could more specifically test our model.
\item Our model also generates novel predictions about the functional characteristics of human memory. For instance, there is a large body of evidence about the \emph{testing effect}, in which items that are tested with partial information (as compared to restudy of the complete original information) are better retained than items that are re-studied \citep{LiuOReillyRanganath21}. The superiority of testing over restudy presents a challenge to models depending on Hebbian learning because learning a precise input pattern should be as good or better than learning from a partial cue. Theremin, however, provides a natural explanation for the testing effect, as the difference between an initial guess and subsequent correct answer provides an opportunity for error-driven learning, whereas restudy provides no opportunity to make the initial guess needed in order to optimize weights.
\end{itemize}
\subsection{Conclusions}
In summary, results from our simulations show that error-driven learning mechanisms can dramatically improve both memory capacity and learning speed by reducing competition between learned representations. Furthermore, these mechanisms can potentially explain a wide range of learning and memory phenomena. Error-driven learning in CA3 can emerge naturally out of the neurophysiological properties of the hippocampal circuits, building on the basic framework for error-driven learning in the monosynaptic EC $\leftrightarrow$ CA1 pathway \citep{KetzMorkondaOReilly13}. There are many further implications and applications of this work, and many important empirical tests needed to more fully establish its validity. Hopefully, the results presented here provide sufficient motivation to undertake this important future research.
\section{Appendix}
In this appendix we provide some of the key parameters for understanding how the Theremin model is structured and organized to perform the AB--AC memory task. See the table captions for detailed descriptions of the parameters and diagrams. For those interested in all the details, the best documentation is the fully-functioning Theremin model itself, along with further detailed documentation, available at: \url{https://github.com/ccnlab/hip-edl}.
\FloatBarrier
\subsection{Network Size Parameters}
\begin{table}[hbt!]
\begin{tabular}{|l|c|c|c|}
\hline
\diagbox{Parameter}{Network Size} & Small & Medium & Large \\
\hline
Input Pool Size & 7x7 & 7x7 & 7x7 \\
\hline
Input Number of Pools & 2x3 & 2x3 & 2x3 \\
\hline
ECin Pool Size & 7x7 & 7x7 & 7x7 \\
\hline
ECin Number of Pools & 2x3 & 2x3 & 2x3 \\
\hline
ECout Pool Size & 7x7 & 7x7 & 7x7 \\
\hline
ECout Number of Pools & 2x3 & 2x3 & 2x3 \\
\hline
DG Size & 44x44 & 67x67 & 89x89 \\
\hline
CA3 Size & 20x20 & 30x30 & 40x40 \\
\hline
CA1 Pool Size & 10x10 & 15x15 & 20x20 \\
\hline
CA1 Number of Pools & 2x3 & 2x3 & 2x3 \\
\hline
\end{tabular}
\caption{Parameters for network sizes. In neural networks, larger network size usually leads to higher capacity, when controlling for other settings. In the current study, we tested different variations of the hippocampus model at three different network sizes to show the benefit of error-driven learning for the hippocampus regardless of size, meaning the mechanism is generalizable. For pool sizes, the numbers in the table refer to the number of neurons in that specific pool. Note: the DG size is around five times the CA3 size, as specified in our previous model \citep{KetzMorkondaOReilly13}.}
\end{table}
\FloatBarrier
\subsection{Training Input Diagram}
\begin{table}[hbt!]
\begin{tabular}{|c|c|}
\hline
Context B3/C3 & Context B4/C4 \\
\hline
Context B1/C1 & Context B2/C2 \\
\hline
A & B/C \\
\hline
\end{tabular}
\caption{Training phase patterns for network input. Each unit here represents a pool in the Input layer. Memories of AB and AC pairs are categorized into different experiences, with four different context pools for each experience.}
\end{table}
\FloatBarrier
\subsection{Testing Input Diagram}
\begin{table}[hbt!]
\begin{tabular}{|c|c|}
\hline
Context B3/C3 & Context B4/C4 \\
\hline
Context B1/C1 & Context B2/C2 \\
\hline
A & empty \\
\hline
\end{tabular}
\caption{Testing phase patterns for network input. Each unit here represents a pool in the Input layer. Memories of AB and AC pairs are categorized into different experiences, with four different context pools for each experience. During testing, no input is given for the B/C pool, while the A and context pools are used as partial cues for the hippocampus to perform pattern completion (i.e., to complete the pattern for the B/C pool).}
\end{table}
% \bibliography{ccnlab}
% use: bibexport -o hip_edl.bib hip_edl_2021.aux to regenerate:
\bibliography{hip_edl}
\end{document}
|
{-
HOW TO:
1) install cabal and idris
2) run:
$ idris hello.idr -o hello
$ ./hello
-}
module Main
main : IO ()
main = putStrLn "Hello world"
|
State Before: α✝ : Type ?u.19256
β✝ : Type ?u.19259
σ : Type ?u.19262
inst✝⁴ : Primcodable α✝
inst✝³ : Primcodable β✝
inst✝² : Primcodable σ
α : Type u_1
β : Type u_2
inst✝¹ : Denumerable α
inst✝ : Primcodable β
f : α → β
h : Primrec f
n : ℕ
⊢ Nat.pred (encode (Option.map f (decode n))) = encode (f (ofNat α n))
State After: no goals
Tactic: simp

State Before: α✝ : Type ?u.19256
β✝ : Type ?u.19259
σ : Type ?u.19262
inst✝⁴ : Primcodable α✝
inst✝³ : Primcodable β✝
inst✝² : Primcodable σ
α : Type u_1
β : Type u_2
inst✝¹ : Denumerable α
inst✝ : Primcodable β
f : α → β
h : Nat.Primrec fun n => encode (f (ofNat α n))
n : ℕ
⊢ Nat.succ (encode (f (ofNat α n))) = encode (Option.map f (decode n))
State After: no goals
Tactic: simp
|
=begin
# sample-groebner01.rb
require "algebra"
P = MPolynomial(Rational, "xyz")
x, y, z = P.vars("xyz")
f1 = x**2 + y**2 + z**2 -1
f2 = x**2 + z**2 - y
f3 = x - z
p Groebner.basis([f1, f2, f3])
#=> [x - z, y - 2z^2, z^4 + 1/2z^2 - 1/4]
((<_|CONTENTS>))
=end
|
function [sUnitVector, OUnitVector, vInfMag] = computeHyperSVectOVect(hSMA, hEcc, hInc, hRAAN, hArg, hTA, gmu)
%computeHyperSVectOVect Computes the incoming (S) and outgoing (O) asymptote
% unit vectors and the hyperbolic excess speed (v-infinity magnitude) for a
% hyperbolic orbit defined by its Keplerian elements (hSMA < 0) and the
% central body's gravitational parameter gmu.
[hRVect,hVVect]=getStatefromKepler(hSMA, hEcc, hInc, hRAAN, hArg, hTA, gmu);
hHat = normVector(cross(hRVect, hVVect));
flyByAngle=2*asin(1/hEcc);
SigmaAngle=pi/2 - flyByAngle/2;
hUnitVector=hHat;
eVector=(norm(hVVect)^2/gmu - 1/norm(hRVect))*hRVect - (dot(hRVect,hVVect)/gmu)*hVVect;
eUnitVect=eVector/norm(eVector);
sUnitVector=cos(SigmaAngle)*eUnitVect + sin(SigmaAngle)*cross(hUnitVector,eUnitVect);
BUnitVector=cross(sUnitVector,hUnitVector)/norm(cross(sUnitVector,hUnitVector));
OUnitVector=cos(flyByAngle)*sUnitVector - sin(flyByAngle)*BUnitVector;
sUnitVector = real(sUnitVector);
OUnitVector = real(OUnitVector);
vInfMag = sqrt(-gmu/hSMA);
end
|
myseries := series(sin(x), x = 0, 10);
poly := convert(myseries, polynom);
plotsetup(gif,plotoutput="plot.gif"):
plot(poly, x = -2*Pi .. 2*Pi, y = -3 .. 3);
|
{-# OPTIONS_GHC -fplugin GHC.TypeLits.KnownNat.Solver #-}
{-# OPTIONS_GHC -fno-warn-missing-signatures #-}
{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeOperators #-}
module Test.Grenade.Layers.BatchNorm where
import Control.Monad
import Data.List (zipWith5)
import Data.Proxy
import GHC.TypeLits
import Numeric.LinearAlgebra.Static (L, R)
import qualified Numeric.LinearAlgebra.Static as H
import Hedgehog
import Test.Hedgehog.Compat
import Test.Hedgehog.Hmatrix
import Grenade.Core
import Grenade.Layers.BatchNormalisation
import Grenade.Utils.LinearAlgebra
import Grenade.Utils.ListStore
batchnorm :: forall channels rows columns momentum.
(KnownNat channels, KnownNat rows, KnownNat columns, KnownNat momentum)
=> Bool -> R channels -> R channels -> R channels -> R channels -> BatchNorm channels rows columns momentum
batchnorm training gamma beta mean var =
let ε = 0.00001
in BatchNorm training (BatchNormParams gamma beta) mean var ε mkListStore
prop_batchnorm_train_behaves_as_reference :: Property
prop_batchnorm_train_behaves_as_reference = property $ do
height :: Int <- forAll $ choose 2 100
width :: Int <- forAll $ choose 2 100
channels :: Int <- forAll $ choose 1 100
case (someNatVal (fromIntegral height), someNatVal (fromIntegral width), someNatVal (fromIntegral channels), channels) of
(Just (SomeNat (Proxy :: Proxy h)), Just (SomeNat (Proxy :: Proxy w)), _, 1) -> do
inp :: S ('D2 h w) <- forAll genOfShape
guard . not $ elementsEqual inp
g :: R 1 <- forAll randomVector
b :: R 1 <- forAll randomVector
m :: R 1 <- forAll randomVector
v :: R 1 <- forAll randomPositiveVector
let layer = batchnorm False g b m v :: BatchNorm 1 h w 90
S2D out = snd $ runForwards layer inp :: S ('D2 h w)
S2D ref = run2DBatchNorm layer inp :: S ('D2 h w)
H.extract out === H.extract ref
(Just (SomeNat (Proxy :: Proxy h)), Just (SomeNat (Proxy :: Proxy w)), Just (SomeNat (Proxy :: Proxy c)), _) -> do
inp :: S ('D3 h w c) <- forAll genOfShape
g :: R c <- forAll randomVector
b :: R c <- forAll randomVector
m :: R c <- forAll randomVector
v :: R c <- forAll randomPositiveVector
let layer = batchnorm False g b m v :: BatchNorm c h w 90
S3D out = snd $ runForwards layer inp :: S ('D3 h w c)
S3D ref = run3DBatchNorm layer inp :: S ('D3 h w c)
H.extract out === H.extract ref
prop_batchnorm_1D_forward_same_as_torch :: Property
prop_batchnorm_1D_forward_same_as_torch = withTests 1 $ property $ do
let g = H.fromList weight :: R 10
b = H.fromList bias :: R 10
m = H.fromList mean :: R 10
v = H.fromList var :: R 10
bn = batchnorm False g b m v :: BatchNorm 10 4 4 90
mat = H.fromList . concat . concat $ input :: L 40 4
x = S3D mat :: S ('D3 4 4 10)
y = snd $ runForwards bn x :: S ('D3 4 4 10)
mat' = H.fromList . concat . concat $ ref_out :: L 40 4
assert $ allClose y (S3D mat')
where
weight = [ -0.9323, 1.0161, 0.1728, 0.3656, -0.6816, 0.0334, 0.8494, -0.6669, -0.1527, -0.7004 ]
bias = [ 0.7582, 1.0068, -0.2532, -1.5240, -1.0370, 0.8442, 0.5867, -1.2567, 0.4283, -0.0001 ]
mean = [ -0.4507, 0.9090, -1.4717, 0.5009, 0.8931, -0.4792, 0.0432, 0.4649, -0.6547, -1.3197 ]
var = [ 1.2303, 1.4775, 0.8372, 0.1644, 0.9392, 0.2103, 0.4951, 0.2482, 0.7559, 0.3686 ]
input = [ [ [ -0.70681204, -0.20616523, -0.33806887, -1.52378976 ]
, [ 0.056113367, -0.51263034, -0.28884589, -2.64030218 ]
, [ -1.19894597, -1.16714501, -0.19816216, -1.13361239 ]
, [ -0.81997509, -1.05715847, 0.59198695, 0.51939314 ] ]
, [ [ -0.18338945, -1.08975303, 0.30558434, 0.85780441 ]
, [ -0.47586514, 0.16499641, 2.18205571, -0.11155529 ]
, [ 1.090167402, 0.92460924, 0.42982020, 1.30098605 ]
, [ 0.286766794, -1.90825951, -0.91737461, -1.11035680 ] ]
, [ [ 1.042808533, 0.08287286, -0.92343962, -0.49747768 ]
, [ -0.21943949, 0.61554014, -2.25771808, -0.04292159 ]
, [ 1.290057424, -1.07323992, -1.00024509, 1.30155622 ]
, [ 0.472014425, -0.96431374, 0.77593171, -1.19090688 ] ]
, [ [ 0.993361895, 0.82586401, -1.64278686, 1.25544464 ]
, [ 0.239656539, -0.81472164, 1.32814168, 0.78350490 ]
, [ -0.16597847, 0.74175131, -1.29834091, -1.28858852 ]
, [ 1.307537318, 0.55525642, -0.04312540, 0.24699424 ] ]
, [ [ 0.391699581, -0.09803850, -0.41061267, 0.34999904 ]
, [ -2.22257169, 0.43748092, -1.21343314, 0.39576068 ]
, [ 0.003147978, -1.00396716, 1.27623140, 1.17001295 ]
, [ -0.58247902, -0.15453417, -0.37016496, 0.04613848 ] ]
, [ [ 0.521356827, 0.94643139, 1.11394095, 0.60162323 ]
, [ -0.90214585, -0.75316292, 2.20823979, -1.63446676 ]
, [ 0.668517357, 0.62832462, 0.31174039, -0.04457542 ]
, [ -0.24607617, 0.12855675, -1.62831199, -0.23100854 ] ]
, [ [ -0.43619379, -0.41219231, 0.07910434, -0.20312546 ]
, [ 1.670419093, -0.26496240, -1.53759109, 1.00907373 ]
, [ -1.04028647, -1.37309467, -0.79040497, -0.15661381 ]
, [ 0.009049783, -0.05525103, 1.44492578, 0.44786781 ] ]
, [ [ 1.431640263, -0.12869687, 1.25025844, 0.07864278 ]
, [ -1.69032764, -0.07707843, 0.11284181, -0.00826502 ]
, [ -0.92387816, -0.83121442, 0.42292186, -0.49128937 ]
, [ -1.62631051, 0.98236626, -1.69256067, -0.66552013 ] ]
, [ [ 0.154654814, 0.59295737, 0.48604089, 0.46829459 ]
, [ 0.624001921, 2.11190581, -1.80008912, 0.26847255 ]
, [ -0.36086676, 0.94211035, 0.19112136, -0.04113261 ]
, [ -0.94438538, -0.38932472, -0.29867526, 0.34307864 ] ]
, [ [ 1.016388653, -0.41974341, -0.94618958, 0.22629515 ]
, [ -2.04437517, -1.14956784, 0.38054388, 0.82105201 ]
, [ 0.054255251, 1.03682625, 0.29021424, -0.42736151 ]
, [ -0.00021907, 0.98816186, 0.23878140, -0.17728853 ] ] ]
ref_out = [ [ [ 0.9734674096107483, 0.5526634454727173, 0.6635311841964722, 1.660154104232788]
, [0.3322128653526306, 0.8102537393569946, 0.6221582293510437, 2.5986058712005615]
, [1.3871161937713623, 1.360386848449707, 0.5459367036819458, 1.3322019577026367]
, [1.068583369255066, 1.267940878868103, -0.11819997429847717, -0.05718337371945381] ]
, [ [0.0936361625790596, -0.66402268409729, 0.5023852586746216, 0.9640040397644043]
, [-0.15085376799106598, 0.3848632574081421, 2.070988893508911, 0.15368467569351196]
, [1.1582437753677368, 1.019848346710205, 0.606238067150116, 1.334473967552185]
, [0.4866550862789154, -1.3482388257980347, -0.5199258923530579, -0.6812460422515869] ]
, [ [0.22167539596557617, 0.04038754478096962, -0.14965875446796417, -0.06921406835317612]
, [-0.016705399379134178, 0.14098398387432098, -0.4016427993774414, 0.016630740836262703]
, [0.2683693766593933, -0.17794916033744812, -0.16416378319263458, 0.2705409526824951]
, [0.11387854814529419, -0.15737800300121307, 0.1712745875120163, -0.20017105340957642] ]
, [ [-1.0799674987792969, -1.230993390083313, -3.456873655319214, -0.8436583876609802]
, [-1.7595523595809937, -2.7102415561676025, -0.7781105041503906, -1.2691868543624878]
, [-2.1252965927124023, -1.30683434009552, -3.146301031112671, -3.137507677078247]
, [-0.7966886162757874, -1.4749890565872192, -2.0145251750946045, -1.7529363632202148] ]
, [ [-0.6843588948249817, -0.3399200439453125, -0.1200828030705452, -0.655030369758606]
, [1.1542901992797852, -0.7165574431419373, 0.44455069303512573, -0.6872150897979736]
, [-0.41108575463294983, 0.29723069071769714, -1.306460976600647, -1.2317562103271484]
, [0.0007928922423161566, -0.3001859486103058, -0.14853018522262573, -0.4413214921951294] ]
, [ [0.9170715808868408, 0.9480301737785339, 0.9602301120758057, 0.9229174852371216]
, [0.8133963942527771, 0.8242470026016235, 1.0399290323257446, 0.760060727596283]
, [0.9277894496917725, 0.9248621463775635, 0.9018049836158752, 0.8758541345596313]
, [0.8611786365509033, 0.88846355676651, 0.7605089545249939, 0.862276017665863] ]
, [ [0.007999604567885399, 0.036973003298044205, 0.6300419569015503, 0.28934815526008606]
, [2.5509982109069824, 0.21470165252685547, -1.3215526342391968, 1.7526549100875854]
, [-0.7212311029434204, -1.1229807138442993, -0.4195865988731384, 0.34549471735954285]
, [0.5454756021499634, 0.4678548276424408, 2.278794050216675, 1.0751949548721313] ]
, [ [-2.550779104232788, -0.4621109068393707, -2.307981491088867, -0.7396559119224548]
, [1.628288984298706, -0.5312073826789856, -0.7854347229003906, -0.6233210563659668]
, [0.6023192405700684, 0.4782795310020447, -1.2005081176757812, 0.023255640640854836]
, [1.542595624923706, -1.9493807554244995, 1.631278157234192, 0.2564810514450073] ]
, [ [0.28615128993988037, 0.20917125046253204, 0.22794923186302185, 0.2310660481452942]
, [0.20371884107589722, -0.05760492384433746, 0.6294671297073364, 0.2661612331867218]
, [0.3766934275627136, 0.14784877002239227, 0.2797465920448303, 0.32053783535957336]
, [0.4791780710220337, 0.381691575050354, 0.3657706081867218, 0.25305798649787903] ]
, [ [-2.6950571537017822, -1.0383073091506958, -0.4309888482093811, -1.7835899591445923]
, [0.8358993530273438, -0.19636774063110352, -1.9615343809127808, -2.469712972640991]
, [-1.5851213932037354, -2.7186343669891357, -1.8573282957077026, -1.029518961906433]
, [-1.5222787857055664, -2.66249418258667, -1.7979943752288818, -1.3180080652236938] ] ]
tests :: IO Bool
tests = checkParallel $$(discover)
-- REFERENCE FUNCTIONS
run2DBatchNorm :: forall h w m.
(KnownNat h, KnownNat w, KnownNat m)
=> BatchNorm 1 h w m -> S ('D2 h w) -> S ('D2 h w)
run2DBatchNorm (BatchNorm False (BatchNormParams gamma beta) runningMean runningVar ε _) (S2D x)
= let [m] = vectorToList runningMean
[v] = vectorToList runningVar
[g] = vectorToList gamma
[b] = vectorToList beta
std = sqrt $ v + ε
x_norm = H.dmmap (\a -> (a - m) / std) x
out = H.dmmap (\a -> g * a + b) x_norm
in S2D out
run3DBatchNorm :: forall h w m c.
(KnownNat h, KnownNat w, KnownNat m, KnownNat c)
=> BatchNorm c h w m -> S ('D3 h w c) -> S ('D3 h w c)
run3DBatchNorm (BatchNorm False (BatchNormParams gamma beta) runningMean runningVar ε _) inp
= let ms = vectorToList runningMean
vs = vectorToList runningVar
gs = vectorToList gamma
bs = vectorToList beta
cs = splitChannels inp :: [S ('D2 h w)]
f c g b m v = let gs' = listToVector [g] :: R 1
bs' = listToVector [b] :: R 1
ms' = listToVector [m] :: R 1
vs' = listToVector [v] :: R 1
bn' = BatchNorm False (BatchNormParams gs' bs') ms' vs' ε undefined :: BatchNorm 1 h w m
in run2DBatchNorm bn' c
in combineChannels $ zipWith5 f cs gs bs ms vs
|
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta, Jakob von Raumer
-/
import data.list.chain
import category_theory.punit
import category_theory.groupoid
import category_theory.category.ulift
/-!
# Connected category
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
Define a connected category as a _nonempty_ category for which every functor
to a discrete category is isomorphic to the constant functor.
NB. Some authors include the empty category as connected, we do not.
We instead are interested in categories with exactly one 'connected
component'.
We give some equivalent definitions:
- A nonempty category for which every functor to a discrete category is
constant on objects.
See `any_functor_const_on_obj` and `connected.of_any_functor_const_on_obj`.
- A nonempty category for which every function `F` for which the presence of a
morphism `f : j₁ ⟶ j₂` implies `F j₁ = F j₂` must be constant everywhere.
See `constant_of_preserves_morphisms` and `connected.of_constant_of_preserves_morphisms`.
- A nonempty category for which any subset of its elements containing the
default and closed under morphisms is everything.
See `induct_on_objects` and `connected.of_induct`.
- A nonempty category for which every object is related under the reflexive
transitive closure of the relation "there is a morphism in some direction
from `j₁` to `j₂`".
See `connected_zigzag` and `zigzag_connected`.
- A nonempty category for which for any two objects there is a sequence of
morphisms (some reversed) from one to the other.
See `exists_zigzag'` and `connected_of_zigzag`.
We also prove the result that the functor given by `(X × -)` preserves any
connected limit. That is, any limit of shape `J` where `J` is a connected
category is preserved by the functor `(X × -)`. This appears in `category_theory.limits.connected`.
-/
universes v₁ v₂ u₁ u₂
noncomputable theory
open category_theory.category
open opposite
namespace category_theory
/--
A possibly empty category for which every functor to a discrete category is constant.
-/
class is_preconnected (J : Type u₁) [category.{v₁} J] : Prop :=
(iso_constant : Π {α : Type u₁} (F : J ⥤ discrete α) (j : J),
nonempty (F ≅ (functor.const J).obj (F.obj j)))
/--
We define a connected category as a _nonempty_ category for which every
functor to a discrete category is constant.
NB. Some authors include the empty category as connected, we do not.
We instead are interested in categories with exactly one 'connected
component'.
This allows us to show that the functor X ⨯ - preserves connected limits.
See <https://stacks.math.columbia.edu/tag/002S>
-/
class is_connected (J : Type u₁) [category.{v₁} J] extends is_preconnected J : Prop :=
[is_nonempty : nonempty J]
attribute [instance, priority 100] is_connected.is_nonempty
variables {J : Type u₁} [category.{v₁} J]
variables {K : Type u₂} [category.{v₂} K]
/--
If `J` is connected, any functor `F : J ⥤ discrete α` is isomorphic to
the constant functor with value `F.obj j` (for any choice of `j`).
-/
def iso_constant [is_preconnected J] {α : Type u₁} (F : J ⥤ discrete α) (j : J) :
F ≅ (functor.const J).obj (F.obj j) :=
(is_preconnected.iso_constant F j).some
/--
If J is connected, any functor to a discrete category is constant on objects.
The converse is given in `is_connected.of_any_functor_const_on_obj`.
-/
lemma any_functor_const_on_obj [is_preconnected J]
{α : Type u₁} (F : J ⥤ discrete α) (j j' : J) :
F.obj j = F.obj j' :=
by { ext, exact ((iso_constant F j').hom.app j).down.1 }
/--
If any functor to a discrete category is constant on objects, J is connected.
The converse of `any_functor_const_on_obj`.
-/
lemma is_connected.of_any_functor_const_on_obj [nonempty J]
(h : ∀ {α : Type u₁} (F : J ⥤ discrete α), ∀ (j j' : J), F.obj j = F.obj j') :
is_connected J :=
{ iso_constant := λ α F j',
⟨nat_iso.of_components (λ j, eq_to_iso (h F j j')) (λ _ _ _, subsingleton.elim _ _)⟩ }
/--
If `J` is connected, then given any function `F` such that the presence of a
morphism `j₁ ⟶ j₂` implies `F j₁ = F j₂`, we have that `F` is constant.
This can be thought of as a local-to-global property.
The converse is shown in `is_connected.of_constant_of_preserves_morphisms`
-/
lemma constant_of_preserves_morphisms [is_preconnected J] {α : Type u₁} (F : J → α)
(h : ∀ (j₁ j₂ : J) (f : j₁ ⟶ j₂), F j₁ = F j₂) (j j' : J) :
F j = F j' :=
by simpa using any_functor_const_on_obj
{ obj := discrete.mk ∘ F,
map := λ _ _ f, eq_to_hom (by { ext, exact (h _ _ f), }) } j j'
/--
`J` is connected if: given any function `F : J → α` which is constant for any
`j₁, j₂` for which there is a morphism `j₁ ⟶ j₂`, then `F` is constant.
This can be thought of as a local-to-global property.
The converse of `constant_of_preserves_morphisms`.
-/
lemma is_connected.of_constant_of_preserves_morphisms [nonempty J]
(h : ∀ {α : Type u₁} (F : J → α), (∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), F j₁ = F j₂) →
(∀ j j' : J, F j = F j')) :
is_connected J :=
is_connected.of_any_functor_const_on_obj
(λ _ F, h F.obj (λ _ _ f, by { ext, exact discrete.eq_of_hom (F.map f) }))
/--
An inductive-like property for the objects of a connected category.
If the set `p` is nonempty, and `p` is closed under morphisms of `J`,
then `p` contains all of `J`.
The converse is given in `is_connected.of_induct`.
-/
lemma induct_on_objects [is_preconnected J] (p : set J) {j₀ : J} (h0 : j₀ ∈ p)
(h1 : ∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), j₁ ∈ p ↔ j₂ ∈ p) (j : J) :
j ∈ p :=
begin
injection (constant_of_preserves_morphisms (λ k, ulift.up (k ∈ p)) (λ j₁ j₂ f, _) j j₀) with i,
rwa i,
dsimp,
exact congr_arg ulift.up (propext (h1 f)),
end
/--
If any maximal connected component containing some element j₀ of J is all of J, then J is connected.
The converse of `induct_on_objects`.
-/
lemma is_connected.of_induct [nonempty J] {j₀ : J}
(h : ∀ (p : set J), j₀ ∈ p → (∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), j₁ ∈ p ↔ j₂ ∈ p) → ∀ (j : J), j ∈ p) :
is_connected J :=
is_connected.of_constant_of_preserves_morphisms (λ α F a,
begin
have w := h {j | F j = F j₀} rfl (λ _ _ f, by simp [a f]),
dsimp at w,
intros j j',
rw [w j, w j'],
end)
/-- Lifting the universe level of morphisms and objects preserves connectedness. -/
instance [hc : is_connected J] : is_connected (ulift_hom.{v₂} (ulift.{u₂} J)) :=
begin
haveI : nonempty (ulift_hom.{v₂} (ulift.{u₂} J)), { simp [ulift_hom, hc.is_nonempty] },
apply is_connected.of_induct,
rintros p hj₀ h ⟨j⟩,
let p' : set J := ((λ (j : J), p {down := j}) : set J),
have hj₀' : (classical.choice hc.is_nonempty) ∈ p', { simp only [p'], exact hj₀ },
apply induct_on_objects (λ (j : J), p {down := j}) hj₀'
(λ _ _ f, h ((ulift_hom_ulift_category.equiv J).functor.map f))
end
/--
Another induction principle for `is_preconnected J`:
given a type family `Z : J → Sort*` and
a rule for transporting in *both* directions along a morphism in `J`,
we can transport an `x : Z j₀` to a point in `Z j` for any `j`.
-/
lemma is_preconnected_induction [is_preconnected J] (Z : J → Sort*)
(h₁ : Π {j₁ j₂ : J} (f : j₁ ⟶ j₂), Z j₁ → Z j₂)
(h₂ : Π {j₁ j₂ : J} (f : j₁ ⟶ j₂), Z j₂ → Z j₁)
{j₀ : J} (x : Z j₀) (j : J) : nonempty (Z j) :=
(induct_on_objects {j | nonempty (Z j)} ⟨x⟩
(λ j₁ j₂ f, ⟨by { rintro ⟨y⟩, exact ⟨h₁ f y⟩, }, by { rintro ⟨y⟩, exact ⟨h₂ f y⟩, }⟩) j : _)
/-- If `J` and `K` are equivalent, then if `J` is preconnected then `K` is as well. -/
lemma is_preconnected_of_equivalent {K : Type u₁} [category.{v₂} K] [is_preconnected J]
(e : J ≌ K) :
is_preconnected K :=
{ iso_constant := λ α F k, ⟨
calc F ≅ e.inverse ⋙ e.functor ⋙ F : (e.inv_fun_id_assoc F).symm
... ≅ e.inverse ⋙ (functor.const J).obj ((e.functor ⋙ F).obj (e.inverse.obj k)) :
iso_whisker_left e.inverse (iso_constant (e.functor ⋙ F) (e.inverse.obj k))
... ≅ e.inverse ⋙ (functor.const J).obj (F.obj k) :
iso_whisker_left _ ((F ⋙ functor.const J).map_iso (e.counit_iso.app k))
... ≅ (functor.const K).obj (F.obj k) : nat_iso.of_components (λ X, iso.refl _) (by simp),
⟩ }
/-- If `J` and `K` are equivalent, then if `J` is connected then `K` is as well. -/
lemma is_connected_of_equivalent {K : Type u₁} [category.{v₂} K]
(e : J ≌ K) [is_connected J] :
is_connected K :=
{ is_nonempty := nonempty.map e.functor.obj (by apply_instance),
to_is_preconnected := is_preconnected_of_equivalent e }
/-- If `J` is preconnected, then `Jᵒᵖ` is preconnected as well. -/
instance is_preconnected_op [is_preconnected J] : is_preconnected Jᵒᵖ :=
{ iso_constant := λ α F X, ⟨nat_iso.of_components
(λ Y, eq_to_iso (discrete.ext _ _ (discrete.eq_of_hom ((nonempty.some
(is_preconnected.iso_constant (F.right_op ⋙ (discrete.opposite α).functor) (unop X))).app
(unop Y)).hom)))
(λ Y Z f, subsingleton.elim _ _)⟩ }
/-- If `J` is connected, then `Jᵒᵖ` is connected as well. -/
instance is_connected_op [is_connected J] : is_connected Jᵒᵖ :=
{ is_nonempty := nonempty.intro (op (classical.arbitrary J)) }
lemma is_preconnected_of_is_preconnected_op [is_preconnected Jᵒᵖ] : is_preconnected J :=
is_preconnected_of_equivalent (op_op_equivalence J)
lemma is_connected_of_is_connected_op [is_connected Jᵒᵖ] : is_connected J :=
is_connected_of_equivalent (op_op_equivalence J)
/-- j₁ and j₂ are related by `zag` if there is a morphism between them. -/
@[reducible]
def zag (j₁ j₂ : J) : Prop := nonempty (j₁ ⟶ j₂) ∨ nonempty (j₂ ⟶ j₁)
lemma zag_symmetric : symmetric (@zag J _) :=
λ j₂ j₁ h, h.swap
/--
`j₁` and `j₂` are related by `zigzag` if there is a chain of
morphisms from `j₁` to `j₂`, with backward morphisms allowed.
-/
@[reducible]
def zigzag : J → J → Prop := relation.refl_trans_gen zag
lemma zigzag_symmetric : symmetric (@zigzag J _) :=
relation.refl_trans_gen.symmetric zag_symmetric
lemma zigzag_equivalence : _root_.equivalence (@zigzag J _) :=
mk_equivalence _
relation.reflexive_refl_trans_gen
zigzag_symmetric
relation.transitive_refl_trans_gen
/--
The setoid given by the equivalence relation `zigzag`. A quotient for this
setoid is a connected component of the category.
-/
def zigzag.setoid (J : Type u₂) [category.{v₁} J] : setoid J :=
{ r := zigzag,
iseqv := zigzag_equivalence }
/--
If there is a zigzag from `j₁` to `j₂`, then there is a zigzag from `F j₁` to
`F j₂` as long as `F` is a functor.
-/
lemma zigzag_obj_of_zigzag (F : J ⥤ K) {j₁ j₂ : J} (h : zigzag j₁ j₂) :
zigzag (F.obj j₁) (F.obj j₂) :=
h.lift _ $ λ j k, or.imp (nonempty.map (λ f, F.map f)) (nonempty.map (λ f, F.map f))
-- TODO: figure out the right way to generalise this to `zigzag`.
lemma zag_of_zag_obj (F : J ⥤ K) [full F] {j₁ j₂ : J} (h : zag (F.obj j₁) (F.obj j₂)) :
zag j₁ j₂ :=
or.imp (nonempty.map F.preimage) (nonempty.map F.preimage) h
/-- Any equivalence relation containing (⟶) holds for all pairs of a connected category. -/
lemma equiv_relation [is_connected J] (r : J → J → Prop) (hr : _root_.equivalence r)
(h : ∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), r j₁ j₂) :
∀ (j₁ j₂ : J), r j₁ j₂ :=
begin
have z : ∀ (j : J), r (classical.arbitrary J) j :=
induct_on_objects (λ k, r (classical.arbitrary J) k)
(hr.1 (classical.arbitrary J)) (λ _ _ f, ⟨λ t, hr.2.2 t (h f), λ t, hr.2.2 t (hr.2.1 (h f))⟩),
intros, apply hr.2.2 (hr.2.1 (z _)) (z _)
end
/-- In a connected category, any two objects are related by `zigzag`. -/
lemma is_connected_zigzag [is_connected J] (j₁ j₂ : J) : zigzag j₁ j₂ :=
equiv_relation _ zigzag_equivalence
(λ _ _ f, relation.refl_trans_gen.single (or.inl (nonempty.intro f))) _ _
/--
If any two objects in an nonempty category are related by `zigzag`, the category is connected.
-/
lemma zigzag_is_connected [nonempty J] (h : ∀ (j₁ j₂ : J), zigzag j₁ j₂) : is_connected J :=
begin
apply is_connected.of_induct,
intros p hp hjp j,
have: ∀ (j₁ j₂ : J), zigzag j₁ j₂ → (j₁ ∈ p ↔ j₂ ∈ p),
{ introv k,
induction k with _ _ rt_zag zag,
{ refl },
{ rw k_ih,
rcases zag with ⟨⟨_⟩⟩ | ⟨⟨_⟩⟩,
apply hjp zag,
apply (hjp zag).symm } },
rwa this j (classical.arbitrary J) (h _ _)
end
lemma exists_zigzag' [is_connected J] (j₁ j₂ : J) :
∃ l, list.chain zag j₁ l ∧ list.last (j₁ :: l) (list.cons_ne_nil _ _) = j₂ :=
list.exists_chain_of_relation_refl_trans_gen (is_connected_zigzag _ _)
/--
If any two objects in an nonempty category are linked by a sequence of (potentially reversed)
morphisms, then J is connected.
The converse of `exists_zigzag'`.
-/
lemma is_connected_of_zigzag [nonempty J]
(h : ∀ (j₁ j₂ : J), ∃ l, list.chain zag j₁ l ∧ list.last (j₁ :: l) (list.cons_ne_nil _ _) = j₂) :
is_connected J :=
begin
apply zigzag_is_connected,
intros j₁ j₂,
rcases h j₁ j₂ with ⟨l, hl₁, hl₂⟩,
apply list.relation_refl_trans_gen_of_exists_chain l hl₁ hl₂,
end
/-- If `discrete α` is connected, then `α` is (type-)equivalent to `punit`. -/
def discrete_is_connected_equiv_punit {α : Type u₁} [is_connected (discrete α)] : α ≃ punit :=
discrete.equiv_of_equivalence.{u₁ u₁}
{ functor := functor.star (discrete α),
inverse := discrete.functor (λ _, classical.arbitrary _),
unit_iso := by { exact (iso_constant _ (classical.arbitrary _)), },
counit_iso := functor.punit_ext _ _ }
variables {C : Type u₂} [category.{u₁} C]
/--
For objects `X Y : C`, any natural transformation `α : const X ⟶ const Y` from a connected
category must be constant.
This is the key property of connected categories which we use to establish properties about limits.
-/
instance [is_connected J] : full (functor.const J : C ⥤ J ⥤ C) :=
{ preimage := λ X Y f, f.app (classical.arbitrary J),
witness' := λ X Y f,
begin
ext j,
apply nat_trans_from_is_connected f (classical.arbitrary J) j,
end }
instance nonempty_hom_of_connected_groupoid {G} [groupoid G] [is_connected G] :
∀ (x y : G), nonempty (x ⟶ y) :=
begin
refine equiv_relation _ _ (λ j₁ j₂, nonempty.intro),
exact ⟨λ j, ⟨𝟙 _⟩, λ j₁ j₂, nonempty.map (λ f, inv f), λ _ _ _, nonempty.map2 (≫)⟩,
end
end category_theory
|
function re=rev(x);
% REV
% REV(x) reverses the elements of x
d=size(x); li=d(1,1);
ind=li:-1:1;
re=x(ind,:);
|
Formal statement is: lemma Borsuk_map_into_sphere: "(\<lambda>x. inverse(norm (x - a)) *\<^sub>R (x - a)) ` s \<subseteq> sphere 0 1 \<longleftrightarrow> (a \<notin> s)" Informal statement is: The image of a set $s$ under the map $x \mapsto \frac{x - a}{\|x - a\|}$ is contained in the unit sphere if and only if $a \notin s$. |
from tensorflow import convert_to_tensor


def Xy(df, target, clss, tensor=False):
    # Split df into features X (all columns except target) and target y.
    # clss: if True, binarize the target (y > 0) for classification.
    # tensor: if True, convert both outputs to TensorFlow tensors.
    if clss:
        temp = df.drop([target], axis=1), df[target] > 0
    else:
        temp = df.drop([target], axis=1), df[target]
    if tensor:
        return [convert_to_tensor(i) for i in temp]
    return list(temp)
|
The ideal way to create masterpieces without the mess at home. Sticky, glue, paint, get messy and have fun. Summer term starts week commencing Monday, June 8, classes available at Ickleford Village Hall on Thursday 1.30pm (other classes available on Mondays and Fridays), for further details contact Helen or Lisa on 01462 451522/07769 952853. |
setwd("~/slurm/slurmer")
suppressPackageStartupMessages(require(dplyr))
i_am <- Sys.info()[6]
my_bla <- read.table(".file1")
rownames(my_bla) <- NULL
coln <- unlist(unname(my_bla[1,]))
colnames(my_bla) <- coln
my_bla <- my_bla[-1,]
my_bla <- my_bla[,-9]
bla <- my_bla
my_blub <- unique(my_bla[,c(2,5)])
my_blub <- cbind(my_blub,matrix(nrow = dim(my_blub)[1], ncol = 3))
colnames(my_blub)[3:5] <- c("JOBS", "MEM", "CPUS")
for(i in 1:dim(my_blub)[1]){
my_blub[i,3] <- (filter(my_bla, my_bla[,2] == my_blub[i,1] & my_bla[,5]== my_blub[i,2]) %>% dim())[1]
my_blub[i,4] <- strsplit(as.character(filter(my_bla, my_bla$USER == my_blub[i,1] & my_bla[,5] == my_blub[i,2])$MIN_MEMORY),"M") %>% unlist() %>% as.numeric() %>% sum(na.rm = TRUE)
my_blub[i,5] <- filter(my_bla, my_bla[,2] == my_blub[i,1] & my_bla[,5] == my_blub[i,2])$CPUS %>% as.numeric() %>% sum(na.rm = TRUE)
}
my_blub$MEM <- trunc((my_blub$MEM)*0.001)
my_blub
my_jobs <- filter(my_bla, my_bla$USER == i_am)
cat(" ","\n", "My Jobs\n" , "\n")
my_jobs
|
# Regression Analysis
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import warnings
import mysql.connector
from sqlalchemy import create_engine, exc
from time import gmtime, strftime
```
```python
# Settings
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# Seaborn
sns.set()
sns.set_style("darkgrid")
plt.matplotlib.style.use('default')
my_colors = ["windows blue", "saffron", "hot pink", "algae green", "dusty purple", "greyish", "petrol", "denim blue", "lime"]
sns.set_palette(sns.xkcd_palette(my_colors))
colors = sns.xkcd_palette(my_colors)
# Warnings
warnings.filterwarnings("ignore")
```
```python
def my_df_summary(data):
'''function for the summary'''
try:
dat = data.copy()
df = pd.DataFrame([dat.min(), dat.max(), dat.mean(), dat.std(), dat.isna().sum(), dat.nunique(), dat.dtypes],
index=['Minimum', 'Maximum', 'Mean', 'Stand. Dev.','#NA', '#Uniques', 'dtypes'])
print(f'Dataset has {len(data)} rows.')
return df
except:
print('No summary!!')
return data
```
```python
df = pd.read_csv('data/Werbungseffekte.csv')
df.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ID</th>
<th>TV</th>
<th>Radio</th>
<th>Newspaper</th>
<th>Sales</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1</td>
<td>230.1</td>
<td>37.8</td>
<td>69.2</td>
<td>22.1</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>44.5</td>
<td>39.3</td>
<td>45.1</td>
<td>10.4</td>
</tr>
<tr>
<th>2</th>
<td>3</td>
<td>17.2</td>
<td>45.9</td>
<td>69.3</td>
<td>9.3</td>
</tr>
<tr>
<th>3</th>
<td>4</td>
<td>151.5</td>
<td>41.3</td>
<td>58.5</td>
<td>18.5</td>
</tr>
<tr>
<th>4</th>
<td>5</td>
<td>180.8</td>
<td>10.8</td>
<td>58.4</td>
<td>12.9</td>
</tr>
</tbody>
</table>
</div>
```python
my_df_summary(df)
```
Dataset has 200 rows.
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ID</th>
<th>TV</th>
<th>Radio</th>
<th>Newspaper</th>
<th>Sales</th>
</tr>
</thead>
<tbody>
<tr>
<th>Minimum</th>
<td>1</td>
<td>0.7</td>
<td>0</td>
<td>0.3</td>
<td>1.6</td>
</tr>
<tr>
<th>Maximum</th>
<td>200</td>
<td>296.4</td>
<td>49.6</td>
<td>114</td>
<td>27</td>
</tr>
<tr>
<th>Mean</th>
<td>100.5</td>
<td>147.042</td>
<td>23.264</td>
<td>30.554</td>
<td>14.0225</td>
</tr>
<tr>
<th>Stand. Dev.</th>
<td>57.8792</td>
<td>85.8542</td>
<td>14.8468</td>
<td>21.7786</td>
<td>5.21746</td>
</tr>
<tr>
<th>#NA</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>#Uniques</th>
<td>200</td>
<td>190</td>
<td>167</td>
<td>172</td>
<td>121</td>
</tr>
<tr>
<th>dtypes</th>
<td>int64</td>
<td>float64</td>
<td>float64</td>
<td>float64</td>
<td>float64</td>
</tr>
</tbody>
</table>
</div>
```python
df.info()
```
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 200 entries, 0 to 199
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ID 200 non-null int64
1 TV 200 non-null float64
2 Radio 200 non-null float64
3 Newspaper 200 non-null float64
4 Sales 200 non-null float64
dtypes: float64(4), int64(1)
memory usage: 7.9 KB
```python
df.describe()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ID</th>
<th>TV</th>
<th>Radio</th>
<th>Newspaper</th>
<th>Sales</th>
</tr>
</thead>
<tbody>
<tr>
<th>count</th>
<td>200.000000</td>
<td>200.000000</td>
<td>200.000000</td>
<td>200.000000</td>
<td>200.000000</td>
</tr>
<tr>
<th>mean</th>
<td>100.500000</td>
<td>147.042500</td>
<td>23.264000</td>
<td>30.554000</td>
<td>14.022500</td>
</tr>
<tr>
<th>std</th>
<td>57.879185</td>
<td>85.854236</td>
<td>14.846809</td>
<td>21.778621</td>
<td>5.217457</td>
</tr>
<tr>
<th>min</th>
<td>1.000000</td>
<td>0.700000</td>
<td>0.000000</td>
<td>0.300000</td>
<td>1.600000</td>
</tr>
<tr>
<th>25%</th>
<td>50.750000</td>
<td>74.375000</td>
<td>9.975000</td>
<td>12.750000</td>
<td>10.375000</td>
</tr>
<tr>
<th>50%</th>
<td>100.500000</td>
<td>149.750000</td>
<td>22.900000</td>
<td>25.750000</td>
<td>12.900000</td>
</tr>
<tr>
<th>75%</th>
<td>150.250000</td>
<td>218.825000</td>
<td>36.525000</td>
<td>45.100000</td>
<td>17.400000</td>
</tr>
<tr>
<th>max</th>
<td>200.000000</td>
<td>296.400000</td>
<td>49.600000</td>
<td>114.000000</td>
<td>27.000000</td>
</tr>
</tbody>
</table>
</div>
### Understanding Data
```python
%matplotlib inline
plt.rcParams['figure.figsize'] = [16, 16]
fig, axs = plt.subplots(4, 1)
sns.distplot(df.iloc[:,1], color=colors[0], ax=axs[0], bins=20)
sns.distplot(df.iloc[:,2], color=colors[1], ax=axs[1], bins=20)
sns.distplot(df.iloc[:,3], color=colors[2], ax=axs[2], bins=20)
sns.distplot(df.iloc[:,4], color=colors[3], ax=axs[3], bins=20)
plt.show()
```
```python
%matplotlib inline
plt.rcParams['figure.figsize'] = [16, 16]
fig, axs = plt.subplots(4, 1)
sns.boxplot(df.iloc[:,1], color=colors[0], ax=axs[0])
sns.boxplot(df.iloc[:,2], color=colors[1], ax=axs[1])
sns.boxplot(df.iloc[:,3], color=colors[2], ax=axs[2])
sns.boxplot(df.iloc[:,4], color=colors[3], ax=axs[3])
plt.show()
```
## Exploring Sales data
### 1. Linear Correlation
```python
df.iloc[:,1:].corr()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>TV</th>
<th>Radio</th>
<th>Newspaper</th>
<th>Sales</th>
</tr>
</thead>
<tbody>
<tr>
<th>TV</th>
<td>1.000000</td>
<td>0.054809</td>
<td>0.056648</td>
<td>0.782224</td>
</tr>
<tr>
<th>Radio</th>
<td>0.054809</td>
<td>1.000000</td>
<td>0.354104</td>
<td>0.576223</td>
</tr>
<tr>
<th>Newspaper</th>
<td>0.056648</td>
<td>0.354104</td>
<td>1.000000</td>
<td>0.228299</td>
</tr>
<tr>
<th>Sales</th>
<td>0.782224</td>
<td>0.576223</td>
<td>0.228299</td>
<td>1.000000</td>
</tr>
</tbody>
</table>
</div>
```python
%matplotlib inline
plt.rcParams['figure.figsize'] = [8, 8]
plt.matshow(df.iloc[:,1:].corr())
plt.colorbar()
plt.show()
```
## 2. Regression Analysis
```python
%matplotlib inline
plt.rcParams['figure.figsize'] = [16, 8]
fig, axs = plt.subplots(1, 3)
df.plot(kind='scatter', x='TV', y='Sales', ax=axs[0])
df.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
df.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
plt.show()
```
## Simple linear regression (often just called OLS regression)
<font size = 3><br>
A simple linear regression measures the direct, linear relationship between two quantities (how the response reacts to a single feature).
Equation: $y = \beta_0 + \beta_1x$
- $y$ is the quantity to be explained (the response)
- $x$ is the explanatory quantity (often called the feature)
- $\beta_0$ is the estimated intercept
- $\beta_1$ is the estimated linear effect (the slope of the line)
Together, $\beta_0$ and $\beta_1$ are called the **coefficients**.
</font>
## Computing the coefficients
<font size = 3>
<br> Estimated model: $y = \hat{\beta}_0 + \hat{\beta}_1 x + \epsilon$ <br>
For an OLS regression, the optimal values of $\beta_0$ and $\beta_1$ are computed as follows: <br>
### OLS minimizes the root mean squared error (RMSE)
<font size="4">
Tip: https://de.wikipedia.org/wiki/Lineare_Regression<br>
\begin{align}
\hat{\beta}_1 \; &= \frac{\sum_{i=1}^n (X_i - \bar{X})(Y_i - \bar{Y})}{\sum_{i=1}^n (X_i - \bar{X})^2}
\end{align} <br>
\begin{align*}
\hat{\beta}_0 \; &= \bar{Y} - \hat{\beta}_1 \bar{X}
\end{align*} <br>
That is, the slope coefficient ($\beta_1$) can be computed directly as Cov(X,Y)/Var(X).
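As a quick cross-check (a minimal sketch, not part of the original pipeline; it only assumes the `df` loaded above), the two estimators can be computed by hand and compared with the statsmodels fit further below:
```python
# Hand-computed OLS coefficients for Sales ~ TV (illustrative sketch)
import numpy as np

x = df['TV'].to_numpy()
y = df['Sales'].to_numpy()

beta_1 = np.cov(x, y, ddof=1)[0, 1] / np.var(x, ddof=1)  # Cov(X, Y) / Var(X)
beta_0 = y.mean() - beta_1 * x.mean()                     # intercept

print(f'beta_0 = {beta_0:.4f}, beta_1 = {beta_1:.4f}')
# Should match the Intercept (~7.03) and TV (~0.0475) coefficients reported by statsmodels below.
```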
```python
%matplotlib inline
plt.rcParams['figure.figsize'] = [16, 8]
g = sns.lmplot(x="TV", y="Sales", data=df)
plt.show()
```
```python
df.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ID</th>
<th>TV</th>
<th>Radio</th>
<th>Newspaper</th>
<th>Sales</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1</td>
<td>230.1</td>
<td>37.8</td>
<td>69.2</td>
<td>22.1</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>44.5</td>
<td>39.3</td>
<td>45.1</td>
<td>10.4</td>
</tr>
<tr>
<th>2</th>
<td>3</td>
<td>17.2</td>
<td>45.9</td>
<td>69.3</td>
<td>9.3</td>
</tr>
<tr>
<th>3</th>
<td>4</td>
<td>151.5</td>
<td>41.3</td>
<td>58.5</td>
<td>18.5</td>
</tr>
<tr>
<th>4</th>
<td>5</td>
<td>180.8</td>
<td>10.8</td>
<td>58.4</td>
<td>12.9</td>
</tr>
</tbody>
</table>
</div>
```python
# Coefficients on Statsmodel
import statsmodels.formula.api as smf
# create a fitted model in one line
lm = smf.ols(formula='Sales ~ TV', data=df).fit()
```
```python
dir(lm)
```
['HC0_se',
'HC1_se',
'HC2_se',
'HC3_se',
'_HCCM',
'__class__',
'__delattr__',
'__dict__',
'__dir__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__gt__',
'__hash__',
'__init__',
'__init_subclass__',
'__le__',
'__lt__',
'__module__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__setattr__',
'__sizeof__',
'__str__',
'__subclasshook__',
'__weakref__',
'_cache',
'_data_attr',
'_get_robustcov_results',
'_is_nested',
'_use_t',
'_wexog_singular_values',
'aic',
'bic',
'bse',
'centered_tss',
'compare_f_test',
'compare_lm_test',
'compare_lr_test',
'condition_number',
'conf_int',
'conf_int_el',
'cov_HC0',
'cov_HC1',
'cov_HC2',
'cov_HC3',
'cov_kwds',
'cov_params',
'cov_type',
'df_model',
'df_resid',
'eigenvals',
'el_test',
'ess',
'f_pvalue',
'f_test',
'fittedvalues',
'fvalue',
'get_influence',
'get_prediction',
'get_robustcov_results',
'initialize',
'k_constant',
'llf',
'load',
'model',
'mse_model',
'mse_resid',
'mse_total',
'nobs',
'normalized_cov_params',
'outlier_test',
'params',
'predict',
'pvalues',
'remove_data',
'resid',
'resid_pearson',
'rsquared',
'rsquared_adj',
'save',
'scale',
'ssr',
'summary',
'summary2',
't_test',
't_test_pairwise',
'tvalues',
'uncentered_tss',
'use_t',
'wald_test',
'wald_test_terms',
'wresid']
```python
# summary function
lm.summary()
```
<table class="simpletable">
<caption>OLS Regression Results</caption>
<tr>
<th>Dep. Variable:</th> <td>Sales</td> <th> R-squared: </th> <td> 0.612</td>
</tr>
<tr>
<th>Model:</th> <td>OLS</td> <th> Adj. R-squared: </th> <td> 0.610</td>
</tr>
<tr>
<th>Method:</th> <td>Least Squares</td> <th> F-statistic: </th> <td> 312.1</td>
</tr>
<tr>
<th>Date:</th> <td>Mon, 27 Jul 2020</td> <th> Prob (F-statistic):</th> <td>1.47e-42</td>
</tr>
<tr>
<th>Time:</th> <td>11:43:17</td> <th> Log-Likelihood: </th> <td> -519.05</td>
</tr>
<tr>
<th>No. Observations:</th> <td> 200</td> <th> AIC: </th> <td> 1042.</td>
</tr>
<tr>
<th>Df Residuals:</th> <td> 198</td> <th> BIC: </th> <td> 1049.</td>
</tr>
<tr>
<th>Df Model:</th> <td> 1</td> <th> </th> <td> </td>
</tr>
<tr>
<th>Covariance Type:</th> <td>nonrobust</td> <th> </th> <td> </td>
</tr>
</table>
<table class="simpletable">
<tr>
<td></td> <th>coef</th> <th>std err</th> <th>t</th> <th>P>|t|</th> <th>[0.025</th> <th>0.975]</th>
</tr>
<tr>
<th>Intercept</th> <td> 7.0326</td> <td> 0.458</td> <td> 15.360</td> <td> 0.000</td> <td> 6.130</td> <td> 7.935</td>
</tr>
<tr>
<th>TV</th> <td> 0.0475</td> <td> 0.003</td> <td> 17.668</td> <td> 0.000</td> <td> 0.042</td> <td> 0.053</td>
</tr>
</table>
<table class="simpletable">
<tr>
<th>Omnibus:</th> <td> 0.531</td> <th> Durbin-Watson: </th> <td> 1.935</td>
</tr>
<tr>
<th>Prob(Omnibus):</th> <td> 0.767</td> <th> Jarque-Bera (JB): </th> <td> 0.669</td>
</tr>
<tr>
<th>Skew:</th> <td>-0.089</td> <th> Prob(JB): </th> <td> 0.716</td>
</tr>
<tr>
<th>Kurtosis:</th> <td> 2.779</td> <th> Cond. No. </th> <td> 338.</td>
</tr>
</table><br/><br/>Warnings:<br/>[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
## How do I read the results of an OLS regression?
<font size = 3><br>
To check whether a regression has good explanatory power at all, we should look at the following key figures:<br>
1. The coefficient of determination (adj. R-squared)<br>
2. The estimation statistics of all coefficients<br>
3. The Jarque-Bera test<br>
4. The Durbin-Watson test<br>
Only when these values have the desired properties should we move on to the actual coefficients; the sketch below shows how to read them off the fitted model.
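A minimal sketch for pulling these diagnostics out of the fitted results object (it assumes the `lm` fitted above is still in scope; `jarque_bera` and `durbin_watson` live in `statsmodels.stats.stattools`):
```python
# Key diagnostics of the fitted OLS model
from statsmodels.stats.stattools import durbin_watson, jarque_bera

print('adj. R-squared      :', lm.rsquared_adj)         # explanatory power
print('coefficient p-values:')
print(lm.pvalues)                                        # estimation statistics of the coefficients
jb_stat, jb_pvalue, skew, kurtosis = jarque_bera(lm.resid)
print('Jarque-Bera p-value :', jb_pvalue)                # normality of the residuals
print('Durbin-Watson       :', durbin_watson(lm.resid))  # autocorrelation of the residuals
```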
### Analyse the Rest
```python
# The residuals are also available directly from the statsmodels results object
%matplotlib inline
plt.rcParams['figure.figsize'] = [16, 8]
fig, axs = plt.subplots(2, 1)
lm.resid.plot(ax=axs[0])
lm.resid.plot(kind='hist', bins=25, ax=axs[1])
plt.show()
```
### Once we have estimated the model, we can also use it to predict Y for new x values.
```python
# Important: predict expects the new values to have the same column labels as the data used for fitting.
X_neu = pd.DataFrame({'TV': [100, 200, 300]})
pred = lm.predict(X_neu)
pred
```
0 11.786258
1 16.539922
2 21.293586
dtype: float64
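These values can be reproduced by hand from the fitted coefficients (a quick sanity check using `lm.params`, nothing more):
```python
# Sanity check: Sales ≈ Intercept + coef_TV * TV
b0, b1 = lm.params['Intercept'], lm.params['TV']
for tv in [100, 200, 300]:
    print(tv, round(b0 + b1 * tv, 6))
# e.g. 7.0326 + 0.0475 * 100 ≈ 11.79, matching the predict() output above
```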
### The quality of an estimate can also be assessed with training and test data. With an OLS model, however, this is only rarely necessary thanks to the large number of estimation statistics.
```python
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, test_size=0.2)
```
```python
lm = smf.ols(formula='Sales ~ TV', data=train).fit()
```
```python
lm.summary()
```
<table class="simpletable">
<caption>OLS Regression Results</caption>
<tr>
<th>Dep. Variable:</th> <td>Sales</td> <th> R-squared: </th> <td> 0.585</td>
</tr>
<tr>
<th>Model:</th> <td>OLS</td> <th> Adj. R-squared: </th> <td> 0.583</td>
</tr>
<tr>
<th>Method:</th> <td>Least Squares</td> <th> F-statistic: </th> <td> 222.9</td>
</tr>
<tr>
<th>Date:</th> <td>Mon, 27 Jul 2020</td> <th> Prob (F-statistic):</th> <td>5.33e-32</td>
</tr>
<tr>
<th>Time:</th> <td>11:56:55</td> <th> Log-Likelihood: </th> <td> -417.26</td>
</tr>
<tr>
<th>No. Observations:</th> <td> 160</td> <th> AIC: </th> <td> 838.5</td>
</tr>
<tr>
<th>Df Residuals:</th> <td> 158</td> <th> BIC: </th> <td> 844.7</td>
</tr>
<tr>
<th>Df Model:</th> <td> 1</td> <th> </th> <td> </td>
</tr>
<tr>
<th>Covariance Type:</th> <td>nonrobust</td> <th> </th> <td> </td>
</tr>
</table>
<table class="simpletable">
<tr>
<td></td> <th>coef</th> <th>std err</th> <th>t</th> <th>P>|t|</th> <th>[0.025</th> <th>0.975]</th>
</tr>
<tr>
<th>Intercept</th> <td> 7.3339</td> <td> 0.535</td> <td> 13.702</td> <td> 0.000</td> <td> 6.277</td> <td> 8.391</td>
</tr>
<tr>
<th>TV</th> <td> 0.0461</td> <td> 0.003</td> <td> 14.930</td> <td> 0.000</td> <td> 0.040</td> <td> 0.052</td>
</tr>
</table>
<table class="simpletable">
<tr>
<th>Omnibus:</th> <td> 0.476</td> <th> Durbin-Watson: </th> <td> 1.920</td>
</tr>
<tr>
<th>Prob(Omnibus):</th> <td> 0.788</td> <th> Jarque-Bera (JB): </th> <td> 0.620</td>
</tr>
<tr>
<th>Skew:</th> <td>-0.097</td> <th> Prob(JB): </th> <td> 0.733</td>
</tr>
<tr>
<th>Kurtosis:</th> <td> 2.765</td> <th> Cond. No. </th> <td> 355.</td>
</tr>
</table><br/><br/>Warnings:<br/>[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
```python
y_hat = lm.predict(test)
y_hat.head()
```
192 8.126934
108 7.937909
80 10.856274
23 17.859427
139 15.858526
dtype: float64
```python
# Error estimation via sklearn
from sklearn.metrics import mean_squared_error
from math import sqrt
```
```python
rmse = sqrt(mean_squared_error(test.Sales, y_hat))
me = (test.Sales - y_hat).sum() / len(test)
print('ME: {0:.4f}, RMSE: {1:.4f}.'.format(me,rmse))
```
ME: -0.4533, RMSE: 3.0903.
## Multivariate linear regression
<font size = 3><br>
The estimation procedure can (in principle) be extended arbitrarily. As soon as we want to explain y with more than one variable, we estimate a **multivariate regression**:
$y = \beta_0 + \beta_1x_1 + ... + \beta_nx_n$ <br>
<br> Each $x$ is a separate "feature" and accordingly gets its own coefficient:
$y = \beta_0 + \beta_1 \times TV + \beta_2 \times Radio + \beta_3 \times Newspaper$
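In matrix form the OLS solution generalizes to $\hat{\beta} = (X^TX)^{-1}X^Ty$. The following minimal sketch (illustrative only; it uses the column names shown above) reproduces the coefficients that statsmodels reports further down:
```python
# Multivariate OLS via the normal equations (illustrative sketch)
import numpy as np

X = df[['TV', 'Radio', 'Newspaper']].to_numpy()
X = np.column_stack([np.ones(len(X)), X])   # prepend a column of ones for the intercept
y = df['Sales'].to_numpy()

beta = np.linalg.solve(X.T @ X, X.T @ y)    # solves (X'X) beta = X'y without forming the inverse
print(dict(zip(['Intercept', 'TV', 'Radio', 'Newspaper'], beta.round(6))))
# Should agree with lm.params below (Intercept ≈ 2.9389, TV ≈ 0.0458, Radio ≈ 0.1885, Newspaper ≈ -0.0010)
```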
```python
df.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ID</th>
<th>TV</th>
<th>Radio</th>
<th>Newspaper</th>
<th>Sales</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1</td>
<td>230.1</td>
<td>37.8</td>
<td>69.2</td>
<td>22.1</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>44.5</td>
<td>39.3</td>
<td>45.1</td>
<td>10.4</td>
</tr>
<tr>
<th>2</th>
<td>3</td>
<td>17.2</td>
<td>45.9</td>
<td>69.3</td>
<td>9.3</td>
</tr>
<tr>
<th>3</th>
<td>4</td>
<td>151.5</td>
<td>41.3</td>
<td>58.5</td>
<td>18.5</td>
</tr>
<tr>
<th>4</th>
<td>5</td>
<td>180.8</td>
<td>10.8</td>
<td>58.4</td>
<td>12.9</td>
</tr>
</tbody>
</table>
</div>
```python
# Estimation
lm = smf.ols(formula='Sales ~ TV + Radio + Newspaper', data=df).fit()
# Coefficients
lm.params
```
Intercept 2.938889
TV 0.045765
Radio 0.188530
Newspaper -0.001037
dtype: float64
```python
lm.summary()
```
<table class="simpletable">
<caption>OLS Regression Results</caption>
<tr>
<th>Dep. Variable:</th> <td>Sales</td> <th> R-squared: </th> <td> 0.897</td>
</tr>
<tr>
<th>Model:</th> <td>OLS</td> <th> Adj. R-squared: </th> <td> 0.896</td>
</tr>
<tr>
<th>Method:</th> <td>Least Squares</td> <th> F-statistic: </th> <td> 570.3</td>
</tr>
<tr>
<th>Date:</th> <td>Mon, 27 Jul 2020</td> <th> Prob (F-statistic):</th> <td>1.58e-96</td>
</tr>
<tr>
<th>Time:</th> <td>11:59:21</td> <th> Log-Likelihood: </th> <td> -386.18</td>
</tr>
<tr>
<th>No. Observations:</th> <td> 200</td> <th> AIC: </th> <td> 780.4</td>
</tr>
<tr>
<th>Df Residuals:</th> <td> 196</td> <th> BIC: </th> <td> 793.6</td>
</tr>
<tr>
<th>Df Model:</th> <td> 3</td> <th> </th> <td> </td>
</tr>
<tr>
<th>Covariance Type:</th> <td>nonrobust</td> <th> </th> <td> </td>
</tr>
</table>
<table class="simpletable">
<tr>
<td></td> <th>coef</th> <th>std err</th> <th>t</th> <th>P>|t|</th> <th>[0.025</th> <th>0.975]</th>
</tr>
<tr>
<th>Intercept</th> <td> 2.9389</td> <td> 0.312</td> <td> 9.422</td> <td> 0.000</td> <td> 2.324</td> <td> 3.554</td>
</tr>
<tr>
<th>TV</th> <td> 0.0458</td> <td> 0.001</td> <td> 32.809</td> <td> 0.000</td> <td> 0.043</td> <td> 0.049</td>
</tr>
<tr>
<th>Radio</th> <td> 0.1885</td> <td> 0.009</td> <td> 21.893</td> <td> 0.000</td> <td> 0.172</td> <td> 0.206</td>
</tr>
<tr>
<th>Newspaper</th> <td> -0.0010</td> <td> 0.006</td> <td> -0.177</td> <td> 0.860</td> <td> -0.013</td> <td> 0.011</td>
</tr>
</table>
<table class="simpletable">
<tr>
<th>Omnibus:</th> <td>60.414</td> <th> Durbin-Watson: </th> <td> 2.084</td>
</tr>
<tr>
<th>Prob(Omnibus):</th> <td> 0.000</td> <th> Jarque-Bera (JB): </th> <td> 151.241</td>
</tr>
<tr>
<th>Skew:</th> <td>-1.327</td> <th> Prob(JB): </th> <td>1.44e-33</td>
</tr>
<tr>
<th>Kurtosis:</th> <td> 6.332</td> <th> Cond. No. </th> <td> 454.</td>
</tr>
</table><br/><br/>Warnings:<br/>[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
```python
%matplotlib inline
plt.rcParams['figure.figsize'] = [16, 8]
fig, axs = plt.subplots(2, 1)
lm.resid.plot(ax=axs[0])
lm.resid.plot(kind='hist', bins=25, ax=axs[1])
plt.show()
```
### The estimate is already better (has a higher adj. R-squared), but it also has significantly more noticeable (not normally distributed) residuals, so we should remove the insignificant feature.
```python
lm = smf.ols(formula='Sales ~ TV + Radio', data=df).fit()
lm.summary()
```
<table class="simpletable">
<caption>OLS Regression Results</caption>
<tr>
<th>Dep. Variable:</th> <td>Sales</td> <th> R-squared: </th> <td> 0.897</td>
</tr>
<tr>
<th>Model:</th> <td>OLS</td> <th> Adj. R-squared: </th> <td> 0.896</td>
</tr>
<tr>
<th>Method:</th> <td>Least Squares</td> <th> F-statistic: </th> <td> 859.6</td>
</tr>
<tr>
<th>Date:</th> <td>Mon, 27 Jul 2020</td> <th> Prob (F-statistic):</th> <td>4.83e-98</td>
</tr>
<tr>
<th>Time:</th> <td>12:03:07</td> <th> Log-Likelihood: </th> <td> -386.20</td>
</tr>
<tr>
<th>No. Observations:</th> <td> 200</td> <th> AIC: </th> <td> 778.4</td>
</tr>
<tr>
<th>Df Residuals:</th> <td> 197</td> <th> BIC: </th> <td> 788.3</td>
</tr>
<tr>
<th>Df Model:</th> <td> 2</td> <th> </th> <td> </td>
</tr>
<tr>
<th>Covariance Type:</th> <td>nonrobust</td> <th> </th> <td> </td>
</tr>
</table>
<table class="simpletable">
<tr>
<td></td> <th>coef</th> <th>std err</th> <th>t</th> <th>P>|t|</th> <th>[0.025</th> <th>0.975]</th>
</tr>
<tr>
<th>Intercept</th> <td> 2.9211</td> <td> 0.294</td> <td> 9.919</td> <td> 0.000</td> <td> 2.340</td> <td> 3.502</td>
</tr>
<tr>
<th>TV</th> <td> 0.0458</td> <td> 0.001</td> <td> 32.909</td> <td> 0.000</td> <td> 0.043</td> <td> 0.048</td>
</tr>
<tr>
<th>Radio</th> <td> 0.1880</td> <td> 0.008</td> <td> 23.382</td> <td> 0.000</td> <td> 0.172</td> <td> 0.204</td>
</tr>
</table>
<table class="simpletable">
<tr>
<th>Omnibus:</th> <td>60.022</td> <th> Durbin-Watson: </th> <td> 2.081</td>
</tr>
<tr>
<th>Prob(Omnibus):</th> <td> 0.000</td> <th> Jarque-Bera (JB): </th> <td> 148.679</td>
</tr>
<tr>
<th>Skew:</th> <td>-1.323</td> <th> Prob(JB): </th> <td>5.19e-33</td>
</tr>
<tr>
<th>Kurtosis:</th> <td> 6.292</td> <th> Cond. No. </th> <td> 425.</td>
</tr>
</table><br/><br/>Warnings:<br/>[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
```python
%matplotlib inline
plt.rcParams['figure.figsize'] = [16, 8]
fig, axs = plt.subplots(2, 1)
lm.resid.plot(ax=axs[0])
lm.resid.plot(kind='hist', bins=25, ax=axs[1])
plt.show()
```
### Logarithmic relationships can also be measured
```python
lm = smf.ols(formula='np.log(Sales) ~ np.log(TV) + np.log(Radio + 0.00000001)', data=df).fit()
lm.summary()
```
<table class="simpletable">
<caption>OLS Regression Results</caption>
<tr>
<th>Dep. Variable:</th> <td>np.log(Sales)</td> <th> R-squared: </th> <td> 0.823</td>
</tr>
<tr>
<th>Model:</th> <td>OLS</td> <th> Adj. R-squared: </th> <td> 0.821</td>
</tr>
<tr>
<th>Method:</th> <td>Least Squares</td> <th> F-statistic: </th> <td> 458.4</td>
</tr>
<tr>
<th>Date:</th> <td>Mon, 27 Jul 2020</td> <th> Prob (F-statistic):</th> <td>7.84e-75</td>
</tr>
<tr>
<th>Time:</th> <td>12:04:07</td> <th> Log-Likelihood: </th> <td> 66.155</td>
</tr>
<tr>
<th>No. Observations:</th> <td> 200</td> <th> AIC: </th> <td> -126.3</td>
</tr>
<tr>
<th>Df Residuals:</th> <td> 197</td> <th> BIC: </th> <td> -116.4</td>
</tr>
<tr>
<th>Df Model:</th> <td> 2</td> <th> </th> <td> </td>
</tr>
<tr>
<th>Covariance Type:</th> <td>nonrobust</td> <th> </th> <td> </td>
</tr>
</table>
<table class="simpletable">
<tr>
<td></td> <th>coef</th> <th>std err</th> <th>t</th> <th>P>|t|</th> <th>[0.025</th> <th>0.975]</th>
</tr>
<tr>
<th>Intercept</th> <td> 0.7467</td> <td> 0.061</td> <td> 12.177</td> <td> 0.000</td> <td> 0.626</td> <td> 0.868</td>
</tr>
<tr>
<th>np.log(TV)</th> <td> 0.3516</td> <td> 0.012</td> <td> 28.456</td> <td> 0.000</td> <td> 0.327</td> <td> 0.376</td>
</tr>
<tr>
<th>np.log(Radio + 0.00000001)</th> <td> 0.0649</td> <td> 0.007</td> <td> 9.498</td> <td> 0.000</td> <td> 0.051</td> <td> 0.078</td>
</tr>
</table>
<table class="simpletable">
<tr>
<th>Omnibus:</th> <td>65.501</td> <th> Durbin-Watson: </th> <td> 1.803</td>
</tr>
<tr>
<th>Prob(Omnibus):</th> <td> 0.000</td> <th> Jarque-Bera (JB): </th> <td> 338.943</td>
</tr>
<tr>
<th>Skew:</th> <td> 1.138</td> <th> Prob(JB): </th> <td>2.51e-74</td>
</tr>
<tr>
<th>Kurtosis:</th> <td> 8.958</td> <th> Cond. No. </th> <td> 28.4</td>
</tr>
</table><br/><br/>Warnings:<br/>[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
```python
lm.resid
```
0 0.201211
1 0.022522
2 0.234834
3 0.164581
4 -0.171073
5 0.214404
6 0.070447
7 -0.043245
8 0.017286
9 -0.310161
10 -0.182454
11 0.015936
12 0.127256
13 -0.216273
14 0.101235
15 0.257064
16 0.063035
17 0.226443
18 -0.007461
19 -0.026788
20 0.034526
21 -0.249626
22 -0.110562
23 -0.098525
24 -0.091624
25 -0.301884
26 -0.002318
27 -0.089989
28 0.038922
29 -0.071846
30 0.102973
31 -0.117171
32 -0.120246
33 -0.047025
34 -0.120773
35 -0.282951
36 0.278665
37 0.171619
38 0.029575
39 0.177081
40 -0.005798
41 0.044977
42 0.070267
43 -0.202168
44 0.049652
45 -0.063309
46 -0.115390
47 0.229063
48 -0.138672
49 -0.111859
50 -0.248816
51 -0.143620
52 0.238791
53 0.227896
54 0.082383
55 0.304965
56 0.042704
57 -0.085839
58 0.288485
59 0.065037
60 -0.098907
61 0.239374
62 -0.096635
63 0.044157
64 0.185710
65 -0.149937
66 0.083872
67 -0.060493
68 0.054460
69 0.221442
70 0.077119
71 -0.053491
72 0.045073
73 -0.171343
74 -0.006788
75 0.177515
76 -0.010813
77 0.004608
78 0.107617
79 -0.152430
80 -0.016110
81 -0.255128
82 -0.036524
83 0.131595
84 0.200904
85 -0.064902
86 -0.000758
87 0.130811
88 0.025113
89 0.165906
90 -0.156576
91 0.035940
92 0.098218
93 0.177573
94 -0.119695
95 0.065129
96 -0.226824
97 -0.038498
98 0.252110
99 0.131111
100 -0.281748
101 0.188935
102 -0.183343
103 -0.084194
104 0.129910
105 0.227230
106 -0.059860
107 -0.088743
108 0.076039
109 0.076719
110 -0.193307
111 0.169910
112 -0.095093
113 -0.055850
114 0.152205
115 0.037955
116 -0.153158
117 -0.015856
118 0.086070
119 -0.082033
120 0.040218
121 -0.031923
122 -0.255008
123 0.052571
124 0.097374
125 -0.116846
126 0.180629
127 1.082072
128 0.310896
129 -0.072882
130 -0.390074
131 -0.236047
132 0.031191
133 0.105100
134 0.127266
135 0.091303
136 0.126877
137 0.097008
138 -0.018406
139 0.202942
140 -0.052066
141 0.125339
142 0.129756
143 -0.152635
144 -0.093319
145 -0.194230
146 -0.222388
147 0.304076
148 0.123359
149 0.018986
150 -0.120503
151 -0.119802
152 -0.000033
153 0.150632
154 -0.037846
155 -0.238670
156 0.139461
157 -0.212244
158 0.142317
159 -0.094255
160 -0.078061
161 0.044139
162 -0.074925
163 0.117871
164 -0.119368
165 -0.268183
166 0.083181
167 -0.226667
168 -0.001510
169 -0.178163
170 -0.152833
171 -0.063781
172 0.040635
173 -0.216513
174 -0.283749
175 0.319674
176 0.098980
177 -0.226353
178 -0.309430
179 -0.158737
180 -0.233994
181 -0.248464
182 -0.112724
183 0.284612
184 -0.023707
185 0.252730
186 -0.198715
187 0.039502
188 -0.139579
189 -0.035945
190 0.099257
191 -0.128770
192 -0.063473
193 0.187426
194 0.111356
195 -0.084128
196 -0.175685
197 -0.161683
198 0.263982
199 -0.206073
dtype: float64
## What can we do if we have text or category information?
```python
np.random.seed(12345)
nums = np.random.rand(len(df))
mask_large = nums > 0.5
df['Size'] = 'small'
df.loc[mask_large, 'Size'] = 'large'
df.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ID</th>
<th>TV</th>
<th>Radio</th>
<th>Newspaper</th>
<th>Sales</th>
<th>Size</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1</td>
<td>230.1</td>
<td>37.8</td>
<td>69.2</td>
<td>22.1</td>
<td>large</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>44.5</td>
<td>39.3</td>
<td>45.1</td>
<td>10.4</td>
<td>small</td>
</tr>
<tr>
<th>2</th>
<td>3</td>
<td>17.2</td>
<td>45.9</td>
<td>69.3</td>
<td>9.3</td>
<td>small</td>
</tr>
<tr>
<th>3</th>
<td>4</td>
<td>151.5</td>
<td>41.3</td>
<td>58.5</td>
<td>18.5</td>
<td>small</td>
</tr>
<tr>
<th>4</th>
<td>5</td>
<td>180.8</td>
<td>10.8</td>
<td>58.4</td>
<td>12.9</td>
<td>large</td>
</tr>
</tbody>
</table>
</div>
### The easiest way is to simply convert the categories into dummy variables (variables that can take the values 0 and 1).
```python
df['IsLarge'] = df.Size.map({'small':0, 'large':1})
df.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>ID</th>
<th>TV</th>
<th>Radio</th>
<th>Newspaper</th>
<th>Sales</th>
<th>Size</th>
<th>IsLarge</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1</td>
<td>230.1</td>
<td>37.8</td>
<td>69.2</td>
<td>22.1</td>
<td>large</td>
<td>1</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>44.5</td>
<td>39.3</td>
<td>45.1</td>
<td>10.4</td>
<td>small</td>
<td>0</td>
</tr>
<tr>
<th>2</th>
<td>3</td>
<td>17.2</td>
<td>45.9</td>
<td>69.3</td>
<td>9.3</td>
<td>small</td>
<td>0</td>
</tr>
<tr>
<th>3</th>
<td>4</td>
<td>151.5</td>
<td>41.3</td>
<td>58.5</td>
<td>18.5</td>
<td>small</td>
<td>0</td>
</tr>
<tr>
<th>4</th>
<td>5</td>
<td>180.8</td>
<td>10.8</td>
<td>58.4</td>
<td>12.9</td>
<td>large</td>
<td>1</td>
</tr>
</tbody>
</table>
</div>
### Dummy variables can easily be included in the prediction.
#### ATTENTION: In this case, the dummy variable was created randomly and has no meaning. This is for demonstration purposes only.
```python
lm = smf.ols(formula='Sales ~ TV + Radio + IsLarge', data=df).fit()
lm.summary()
```
<table class="simpletable">
<caption>OLS Regression Results</caption>
<tr>
<th>Dep. Variable:</th> <td>Sales</td> <th> R-squared: </th> <td> 0.897</td>
</tr>
<tr>
<th>Model:</th> <td>OLS</td> <th> Adj. R-squared: </th> <td> 0.896</td>
</tr>
<tr>
<th>Method:</th> <td>Least Squares</td> <th> F-statistic: </th> <td> 570.3</td>
</tr>
<tr>
<th>Date:</th> <td>Mon, 27 Jul 2020</td> <th> Prob (F-statistic):</th> <td>1.56e-96</td>
</tr>
<tr>
<th>Time:</th> <td>12:07:46</td> <th> Log-Likelihood: </th> <td> -386.17</td>
</tr>
<tr>
<th>No. Observations:</th> <td> 200</td> <th> AIC: </th> <td> 780.3</td>
</tr>
<tr>
<th>Df Residuals:</th> <td> 196</td> <th> BIC: </th> <td> 793.5</td>
</tr>
<tr>
<th>Df Model:</th> <td> 3</td> <th> </th> <td> </td>
</tr>
<tr>
<th>Covariance Type:</th> <td>nonrobust</td> <th> </th> <td> </td>
</tr>
</table>
<table class="simpletable">
<tr>
<td></td> <th>coef</th> <th>std err</th> <th>t</th> <th>P>|t|</th> <th>[0.025</th> <th>0.975]</th>
</tr>
<tr>
<th>Intercept</th> <td> 2.8938</td> <td> 0.318</td> <td> 9.092</td> <td> 0.000</td> <td> 2.266</td> <td> 3.522</td>
</tr>
<tr>
<th>TV</th> <td> 0.0457</td> <td> 0.001</td> <td> 32.493</td> <td> 0.000</td> <td> 0.043</td> <td> 0.048</td>
</tr>
<tr>
<th>Radio</th> <td> 0.1882</td> <td> 0.008</td> <td> 23.258</td> <td> 0.000</td> <td> 0.172</td> <td> 0.204</td>
</tr>
<tr>
<th>IsLarge</th> <td> 0.0555</td> <td> 0.242</td> <td> 0.229</td> <td> 0.819</td> <td> -0.422</td> <td> 0.533</td>
</tr>
</table>
<table class="simpletable">
<tr>
<th>Omnibus:</th> <td>59.724</td> <th> Durbin-Watson: </th> <td> 2.082</td>
</tr>
<tr>
<th>Prob(Omnibus):</th> <td> 0.000</td> <th> Jarque-Bera (JB): </th> <td> 147.220</td>
</tr>
<tr>
<th>Skew:</th> <td>-1.319</td> <th> Prob(JB): </th> <td>1.08e-32</td>
</tr>
<tr>
<th>Kurtosis:</th> <td> 6.272</td> <th> Cond. No. </th> <td> 490.</td>
</tr>
</table><br/><br/>Warnings:<br/>[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
### If there are many different text values in a column, we can easily create dummy variables with the "OneHotEncoder" from the sklearn module.
```python
from sklearn import preprocessing
```
```python
df_titanic = sns.load_dataset('titanic')
df_titanic.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>survived</th>
<th>pclass</th>
<th>sex</th>
<th>age</th>
<th>sibsp</th>
<th>parch</th>
<th>fare</th>
<th>embarked</th>
<th>class</th>
<th>who</th>
<th>adult_male</th>
<th>deck</th>
<th>embark_town</th>
<th>alive</th>
<th>alone</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>3</td>
<td>male</td>
<td>22.0</td>
<td>1</td>
<td>0</td>
<td>7.2500</td>
<td>S</td>
<td>Third</td>
<td>man</td>
<td>True</td>
<td>NaN</td>
<td>Southampton</td>
<td>no</td>
<td>False</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>1</td>
<td>female</td>
<td>38.0</td>
<td>1</td>
<td>0</td>
<td>71.2833</td>
<td>C</td>
<td>First</td>
<td>woman</td>
<td>False</td>
<td>C</td>
<td>Cherbourg</td>
<td>yes</td>
<td>False</td>
</tr>
<tr>
<th>2</th>
<td>1</td>
<td>3</td>
<td>female</td>
<td>26.0</td>
<td>0</td>
<td>0</td>
<td>7.9250</td>
<td>S</td>
<td>Third</td>
<td>woman</td>
<td>False</td>
<td>NaN</td>
<td>Southampton</td>
<td>yes</td>
<td>True</td>
</tr>
<tr>
<th>3</th>
<td>1</td>
<td>1</td>
<td>female</td>
<td>35.0</td>
<td>1</td>
<td>0</td>
<td>53.1000</td>
<td>S</td>
<td>First</td>
<td>woman</td>
<td>False</td>
<td>C</td>
<td>Southampton</td>
<td>yes</td>
<td>False</td>
</tr>
<tr>
<th>4</th>
<td>0</td>
<td>3</td>
<td>male</td>
<td>35.0</td>
<td>0</td>
<td>0</td>
<td>8.0500</td>
<td>S</td>
<td>Third</td>
<td>man</td>
<td>True</td>
<td>NaN</td>
<td>Southampton</td>
<td>no</td>
<td>True</td>
</tr>
</tbody>
</table>
</div>
```python
my_df_summary(df_titanic)
```
Dataset has 891 rows.
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>survived</th>
<th>pclass</th>
<th>sex</th>
<th>age</th>
<th>sibsp</th>
<th>parch</th>
<th>fare</th>
<th>class</th>
<th>who</th>
<th>adult_male</th>
<th>alive</th>
<th>alone</th>
<th>embarked</th>
<th>deck</th>
<th>embark_town</th>
</tr>
</thead>
<tbody>
<tr>
<th>Minimum</th>
<td>0</td>
<td>1</td>
<td>female</td>
<td>0.42</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>First</td>
<td>child</td>
<td>False</td>
<td>no</td>
<td>False</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Maximum</th>
<td>1</td>
<td>3</td>
<td>male</td>
<td>80</td>
<td>8</td>
<td>6</td>
<td>512.329</td>
<td>Third</td>
<td>woman</td>
<td>True</td>
<td>yes</td>
<td>True</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Mean</th>
<td>0.383838</td>
<td>2.30864</td>
<td>NaN</td>
<td>29.6991</td>
<td>0.523008</td>
<td>0.381594</td>
<td>32.2042</td>
<td>NaN</td>
<td>NaN</td>
<td>0.602694</td>
<td>NaN</td>
<td>0.602694</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Stand. Dev.</th>
<td>0.486592</td>
<td>0.836071</td>
<td>NaN</td>
<td>14.5265</td>
<td>1.10274</td>
<td>0.806057</td>
<td>49.6934</td>
<td>NaN</td>
<td>NaN</td>
<td>0.489615</td>
<td>NaN</td>
<td>0.489615</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>#NA</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>177</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>2</td>
<td>688</td>
<td>2</td>
</tr>
<tr>
<th>#Uniques</th>
<td>2</td>
<td>3</td>
<td>2</td>
<td>88</td>
<td>7</td>
<td>7</td>
<td>248</td>
<td>3</td>
<td>3</td>
<td>2</td>
<td>2</td>
<td>2</td>
<td>3</td>
<td>7</td>
<td>3</td>
</tr>
<tr>
<th>dtypes</th>
<td>int64</td>
<td>int64</td>
<td>object</td>
<td>float64</td>
<td>int64</td>
<td>int64</td>
<td>float64</td>
<td>category</td>
<td>object</td>
<td>bool</td>
<td>object</td>
<td>bool</td>
<td>object</td>
<td>category</td>
<td>object</td>
</tr>
</tbody>
</table>
</div>
```python
df_text = df_titanic[['class']].dropna()
my_df_summary(df_text)
```
Dataset has 891 rows.
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>class</th>
</tr>
</thead>
<tbody>
<tr>
<th>Minimum</th>
<td>First</td>
</tr>
<tr>
<th>Maximum</th>
<td>Third</td>
</tr>
<tr>
<th>Mean</th>
<td>NaN</td>
</tr>
<tr>
<th>Stand. Dev.</th>
<td>NaN</td>
</tr>
<tr>
<th>#NA</th>
<td>0</td>
</tr>
<tr>
<th>#Uniques</th>
<td>3</td>
</tr>
<tr>
<th>dtypes</th>
<td>category</td>
</tr>
</tbody>
</table>
</div>
### Text-Encoder
```python
enc = preprocessing.OneHotEncoder()
```
```python
enc.fit(df_text)
```
OneHotEncoder(categories='auto', drop=None, dtype=<class 'numpy.float64'>,
handle_unknown='error', sparse=True)
```python
onehotlabels = enc.transform(df_text).toarray()
df_dummys = pd.DataFrame(onehotlabels)
df_dummys.sum(axis=1).describe()
```
count 891.0
mean 1.0
std 0.0
min 1.0
25% 1.0
50% 1.0
75% 1.0
max 1.0
dtype: float64
```python
dir(enc)
```
['__class__',
'__delattr__',
'__dict__',
'__dir__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__getstate__',
'__gt__',
'__hash__',
'__init__',
'__init_subclass__',
'__le__',
'__lt__',
'__module__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__setattr__',
'__setstate__',
'__sizeof__',
'__str__',
'__subclasshook__',
'__weakref__',
'_check_X',
'_compute_drop_idx',
'_fit',
'_get_feature',
'_get_param_names',
'_get_tags',
'_more_tags',
'_transform',
'_validate_keywords',
'categories',
'categories_',
'drop',
'drop_idx_',
'dtype',
'fit',
'fit_transform',
'get_feature_names',
'get_params',
'handle_unknown',
'inverse_transform',
'set_params',
'sparse',
'transform']
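Among these attributes, `categories_` holds the category labels learned during `fit`; a minimal sketch (assuming `enc` and `df_dummys` from above) uses them to name the dummy columns:
```python
# Name the dummy columns after the learned categories (illustrative sketch)
df_dummys.columns = enc.categories_[0]
df_dummys.head()
```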
### Sometimes (though NOT with linear regression) it can also be helpful to encode categories not as dummies but simply as numerical labels. This is very easy with the LabelEncoder.
```python
le = preprocessing.LabelEncoder()
```
```python
df_labels = df_text.apply(le.fit_transform)
df_labels.head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>class</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>2</td>
</tr>
<tr>
<th>1</th>
<td>0</td>
</tr>
<tr>
<th>2</th>
<td>2</td>
</tr>
<tr>
<th>3</th>
<td>0</td>
</tr>
<tr>
<th>4</th>
<td>2</td>
</tr>
</tbody>
</table>
</div>
|
# "Performance Mid to higher severity"
# Setting
```{r}
# reset
rm(list=ls(all=TRUE))
# library
library(data.table)
library(dplyr)
library(psych)
library(caret)
library(gam)
library(RSNNS)
library(e1071)
library(ranger)
library(randomForest)
library(xgboost)
# Directory
setwd(".")
# Get data
Data <- fread("Data_demo.csv", encoding = "UTF-8" )
Data$V1 <- NULL
# Train Test
Data_train <- Data[TrainTest == "Train", , ] #*eligible
Data_train <- data.frame(Data_train)
Data_test <- Data[TrainTest == "Test", , ] #*eligible
Data_test <- data.frame(Data_test)
```
# define functions
```{r}
RMSE <- function(Obs, Pred){
Dif <- Pred - Obs
RMSE <- round(sqrt(mean(Dif**2)), 2)
return(RMSE)
}
MAE <- function(Obs, Pred){
Dif <- Pred - Obs
MAE <- Dif %>% abs() %>% mean() %>% round(., 2)
return(MAE)
}
MAPE <- function(Obs, Pred){
Dif <- Pred - Obs
MAPE <- mean(abs(Dif/Obs)*100) %>% round(., 2)
return(MAPE)
}
```
# Performance All SeverityHigherMid in Train and Test
```{r}
Models <- c(
"glm_uni.rds"
, "GLM_temp.rds"
, "gamSpline_temp.rds"
, "RF_temp.rds"
, "xgbTree_temp.rds"
)
# get model list
Model_list <- Models
# Save performance
Hoge <- c()
# loop
for(iii in Model_list){
# load models
Model <- readRDS(iii)
# get predicted values
Data_train$Pred <- if(iii == "glm_uni.rds"){Data_train$Pred <- predict(Model, Data_train, type = "response")}else{Data_train$Pred <- predict(Model, Data_train)}
Data_train$Pred <- ifelse(Data_train$Pred < 0, 0, Data_train$Pred)
# get performance
RMSE_train <- RMSE(Obs = Data_train$SeverityHigherMid, Pred = Data_train$Pred)
MAE_train <- MAE(Obs = Data_train$SeverityHigherMid, Pred = Data_train$Pred)
Cor_train <- paste(
cor.test(Data_train$SeverityHigherMid, Data_train$Pred)$estimate %>% round(., 2),
" (",
cor.test(Data_train$SeverityHigherMid, Data_train$Pred)$conf.int[1] %>% round(., 2),
" to ",
cor.test(Data_train$SeverityHigherMid, Data_train$Pred)$conf.int[2] %>% round(., 2),
")",
sep = ""
)
# get predicted values
Data_test$Pred <- if(iii == "glm_uni.rds"){Data_test$Pred <- predict(Model, Data_test, type = "response")}else{Data_test$Pred <- predict(Model, Data_test)}
Data_test$Pred <- ifelse(Data_test$Pred < 0, 0, Data_test$Pred)
# get performance
RMSE_test <- RMSE(Obs = Data_test$SeverityHigherMid, Pred = Data_test$Pred)
MAE_test <- MAE(Obs = Data_test$SeverityHigherMid, Pred = Data_test$Pred)
Cor_test <- paste(
cor.test(Data_test$SeverityHigherMid, Data_test$Pred)$estimate %>% round(., 2),
" (",
cor.test(Data_test$SeverityHigherMid, Data_test$Pred)$conf.int[1] %>% round(., 2),
" to ",
cor.test(Data_test$SeverityHigherMid, Data_test$Pred)$conf.int[2] %>% round(., 2),
")",
sep = ""
)
# Summarize
Performance <- c(
iii,
RMSE_train
, RMSE_test
, Cor_train
, Cor_test
)
Hoge <- cbind(Hoge, Performance)
}
Hoge <- data.table(Hoge)
Hoge
fwrite(Hoge, "Out_RMSE.csv")
```
# MAPE by 24
```{r}
# get model list
Model_list <- Models
# Save performance
Hoge <- c()
# loop
for(iii in Model_list){
# load models
Model <- readRDS(iii)
# get predicted values
Data_train$Pred <- if(iii == "glm_uni.rds"){Data_train$Pred <- predict(Model, Data_train, type = "response")}else{Data_train$Pred <- predict(Model, Data_train)}
Data_train$Pred <- ifelse(Data_train$Pred < 0, 0, Data_train$Pred)
# Organized dataset
Data_train_MAPE <- Data_train %>%
group_by(Date) %>%
summarise(
Obs = sum(SeverityHigherMid),
Pred = sum(Pred)
) %>% data.table()
Data_train_MAPE[, Year := year(Date), ]
Data_train_MAPE[, Spike :=
ifelse(Year == 2015 & Obs >= Data_train_MAPE[Year == 2015, quantile(Obs, 0.8), ], 1,
ifelse(Year == 2016 & Obs >= Data_train_MAPE[Year == 2016, quantile(Obs, 0.8), ], 1,
ifelse(Year == 2017 & Obs >= Data_train_MAPE[Year == 2017, quantile(Obs, 0.8), ], 1, 0)))
, ]
Data_train_MAPE[, table(Spike), ]
Data_train_MAPE[, table(Spike), ] / nrow(Data_train_MAPE)
# get performance
MAPE_train <- MAPE(
Obs = Data_train_MAPE[Spike == 1, Obs, ],
Pred = Data_train_MAPE[Spike == 1, Pred, ]
)
PE_train <- Data_train_MAPE[Spike == 1, round( (abs(sum(Pred) - sum(Obs)) / sum(Obs) ) * 100, 2), ]
# get predicted values
Data_test$Pred <- if(iii == "glm_uni.rds"){Data_test$Pred <- predict(Model, Data_test, type = "response")}else{Data_test$Pred <- predict(Model, Data_test)}
Data_test$Pred <- ifelse(Data_test$Pred < 0, 0, Data_test$Pred)
# Organized dataset
Data_test_MAPE <- Data_test %>%
group_by(Date) %>%
summarise(
Obs = sum(SeverityHigherMid),
Pred = sum(Pred)
) %>% data.table()
Data_test_MAPE[, Year := year(Date), ]
Data_test_MAPE[, Spike :=
ifelse(Year == 2018 & Obs >= Data_test_MAPE[Year == 2018, quantile(Obs, 0.8), ], 1, 0)
, ]
Data_test_MAPE[, table(Spike), ]
Data_test_MAPE[, table(Spike), ] / nrow(Data_test_MAPE)
# get performance
MAPE_test <- MAPE(
Obs = Data_test_MAPE[Spike == 1, Obs, ],
Pred = Data_test_MAPE[Spike == 1, Pred, ]
)
  PE_test <- Data_test_MAPE[Spike == 1, round( (abs(sum(Pred) - sum(Obs)) / sum(Obs) ) * 100, 2), ]
# Summarize
Performance <- c(
iii,
MAPE_train
, MAPE_test
, PE_train
    , PE_test
)
Hoge <- cbind(Hoge, Performance)
}
Hoge <- data.table(Hoge)
Hoge
fwrite(Hoge, "Out_MAPE_24.csv")
```
# Figure time series by 24h
```{r}
# get model list
Model_list <- Models
# Save performance
Hoge <- c()
# loop
for(iii in Model_list){
# load models
Model <- readRDS(iii)
# get predicted values
Data_train$Pred <- if(iii == "glm_uni.rds"){Data_train$Pred <- predict(Model, Data_train, type = "response")}else{Data_train$Pred <- predict(Model, Data_train)}
Data_train$Predicted <- ifelse(Data_train$Pred < 0, 0, Data_train$Pred)
Data_train$Obserbved <- Data_train$SeverityHigherMid
Data_train$Dif <- Data_train$Predicted - Data_train$Obserbved
# get predicted values
Data_test$Pred <- if(iii == "glm_uni.rds"){Data_test$Pred <- predict(Model, Data_test, type = "response")}else{Data_test$Pred <- predict(Model, Data_test)}
Data_test$Predicted <- ifelse(Data_test$Pred < 0, 0, Data_test$Pred)
Data_test$Obserbved <- Data_test$SeverityHigherMid
Data_test$Dif <- Data_test$Predicted - Data_test$Obserbved
### Make figures
ggplot(data = Data_train, aes(x = Predicted, y = Obserbved)) +
geom_point() +
geom_smooth(method = lm) +
scale_x_continuous(limits = c(0, 40), breaks = seq(0, 40, 5)) +
scale_y_continuous(limits = c(0, 40), breaks = seq(0, 40, 5)) +
xlab("") +
ylab("") +
theme_classic()
ggsave(paste("Out_plot_", iii, "_train.png", sep = ""), width = 4.2, height = 3.2) #*action
  ### Make figures test
ggplot(data = Data_test, aes(x = Predicted, y = Obserbved)) +
geom_point() +
geom_smooth(method = lm) +
scale_x_continuous(limits = c(0, 40), breaks = seq(0, 40, 5)) +
scale_y_continuous(limits = c(0, 40), breaks = seq(0, 40, 5)) +
xlab("") +
ylab("") +
theme_classic()
ggsave(paste("Out_plot_", iii, "_test.png", sep = ""), width = 4.2, height = 3.2) #*action
### Make Fig train
DataSum_train <- Data_train %>%
group_by(Date) %>%
summarise(
Obserbved = sum(Obserbved),
Predicted = sum(Predicted)
) %>%
data.table() %>%
print()
DataSum_test <- Data_test %>%
group_by(Date) %>%
summarise(
Obserbved = sum(Obserbved),
Predicted = sum(Predicted)
) %>%
data.table() %>%
print()
DataSum <- rbind(DataSum_train, DataSum_test)
DataSum[, Date:=as.Date(Date), ]
DataSum[, YearUse:=year(Date), ]
DataSum <- DataSum[order(Date), , ]
DataSum[, Day:=1:length(Obserbved), by = YearUse]
DataSum
DataSum[Date == as.Date("2015-06-01"), , ]
DataSum[Date == as.Date("2015-07-01"), , ]
DataSum[Date == as.Date("2015-08-01"), , ]
DataSum[Date == as.Date("2015-09-01"), , ]
ggplot(data = DataSum, aes(x = Day), group=factor(YearUse)) +
geom_line(aes(y = Obserbved, x=Day), colour = "Black", size = 0.4) +
geom_line(aes(y = Predicted, x=Day), colour = "Red", size = 0.4) +
xlab("") +
ylab("") +
scale_y_continuous(
limits = c(0, 150),
breaks = seq(0, 150, 30)
) +
scale_x_continuous(
label = c("Jun.", "Jul.", "Aug.", "Sep."),
breaks = c(1, 31, 62, 93)
) +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
facet_grid(~YearUse) +
theme_classic()
ggsave(paste("Out_time_24_", iii, ".png", sep = ""), width = 6.7 * 1.5, height = 3.5 * 0.66) #*action
fwrite(DataSum, paste("Out_data_figure4_", iii, ".csv", sep = ""))
}
Hoge <- data.table(Hoge)
Hoge
```
|
#include <stdio.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_deriv.h>
double f (double x, void * params)
{
return pow (x, 1.5);
}
int
main (void)
{
gsl_function F;
double result, abserr;
F.function = &f;
F.params = 0;
printf ("f(x) = x^(3/2)\n");
gsl_deriv_central (&F, 2.0, 1e-8, &result, &abserr);
printf ("x = 2.0\n");
printf ("f'(x) = %.10f +/- %.10f\n", result, abserr);
printf ("exact = %.10f\n\n", 1.5 * sqrt(2.0));
gsl_deriv_forward (&F, 0.0, 1e-8, &result, &abserr);
printf ("x = 0.0\n");
printf ("f'(x) = %.10f +/- %.10f\n", result, abserr);
printf ("exact = %.10f\n", 0.0);
return 0;
}
|
using .Pluto
@info "You can use the notebooks in `TropicalTensors` now by typing, e.g.
∘ `TropicalTensors.notebook(\"spinglass\")`, solving square lattice spinglass using a quantum simulator.
∘ `TropicalTensors.notebook(\"ising_and_2sat\")`, solving Ising spinglass and 2-SAT counting using tensor network contraction.
"
"""
notebook(which; dev=false, kwargs...)
Open a notebook. The first argument can be:
* "spinglass": solving spinglass model with Yao.
* "randomgraph": solving randomgraph model with tensor network contraction.
"""
function notebook(which; dev=false, kwargs...)
src = project_relative_path("notebooks", "$which.jl")
if dev
dest = src
else
dest = tempname()
cp(src, dest)
chmod(dest, 0o664)
end
Pluto.run(; notebook=dest, project=project_relative_path(), kwargs...)
end
|
import Relation.Binary.Reasoning.Setoid as SetoidR
open import MultiSorted.AlgebraicTheory
import MultiSorted.Interpretation as Interpretation
import MultiSorted.Model as Model
import MultiSorted.UniversalInterpretation as UniversalInterpretation
import MultiSorted.Substitution as Substitution
import MultiSorted.SyntacticCategory as SyntacticCategory
module MultiSorted.UniversalModel
{ℓt}
{𝓈 ℴ}
{Σ : Signature {𝓈} {ℴ}}
(T : Theory ℓt Σ) where
open Theory T
open Substitution T
open UniversalInterpretation T
open Interpretation.Interpretation ℐ
open SyntacticCategory T
𝒰 : Model.Is-Model T ℐ
𝒰 =
record
{ model-eq = λ ε var-var →
let open SetoidR (eq-setoid (ax-ctx ε) (sort-of (ctx-slot (ax-sort ε)) var-var)) in
begin
interp-term (ax-lhs ε) var-var ≈⟨ interp-term-self (ax-lhs ε) var-var ⟩
ax-lhs ε ≈⟨ id-action ⟩
ax-lhs ε [ id-s ]s ≈⟨ eq-axiom ε id-s ⟩
ax-rhs ε [ id-s ]s ≈˘⟨ id-action ⟩
ax-rhs ε ≈˘⟨ interp-term-self (ax-rhs ε) var-var ⟩
interp-term (ax-rhs ε) var-var ∎
}
-- The universal model is universal
universality : ∀ (ε : Equation Σ) → ⊨ ε → ⊢ ε
universality ε p =
let open Equation in
let open SetoidR (eq-setoid (eq-ctx ε) (eq-sort ε)) in
(begin
eq-lhs ε ≈˘⟨ interp-term-self (eq-lhs ε) var-var ⟩
interp-term (eq-lhs ε) var-var ≈⟨ p var-var ⟩
interp-term (eq-rhs ε) var-var ≈⟨ interp-term-self (eq-rhs ε) var-var ⟩
eq-rhs ε ∎)
|
% !TeX root = ../main.tex
% Add the above to each chapter to make compiling the PDF easier in some editors.
\chapter{Case Studies}\label{chapter:case_studies}
This chapter examines the performance of the previously described models and algorithms using real server traces. We focus on two aspects: first, how well the discussed algorithms compare, both in absolute terms and relative to each other; second, the general promise of dynamically right-sizing data centers, which we study by conservatively estimating cost savings and relating them to previous research.
\section{Method}
First, we describe our experimental setup. We begin with a detailed discussion of the characteristics of the server traces, which we use as a basis for our analysis. Then, we examine the underlying assumptions of our analysis. This is followed by a discussion of alternative approaches to right-sizing data centers, which we use as a foundation for estimating the cost savings resulting from dynamic right-sizing of data centers. Next, we describe the general model parameters we use in our analysis and relate them to previous research. Lastly, we introduce the precise performance metrics used in the subsequent sections.
Throughout our experiments, we seek to determine conservative approximations for the resulting performance and cost savings. Our experimental results were obtained on a machine with 16 GB memory and an Intel Core i7-8550U CPU with a base clock rate of 1.80GHz.
\subsection{Traces}\label{section:case_studies:method:traces}
We use several traces with varying characteristics for our experiments. Some traces are from clusters rather than individual data centers. However, to simplify our analysis, we assume traces apply to a single data center without restricting the considered server architectures.
\citeauthor{Amvrosiadis2018}~\cite{Amvrosiadis2018} showed that the characteristics of traces vary drastically even within a single trace when different subsets are considered individually. Their observation shows that it is crucial to examine long server traces and various server traces from different sources to gain a proper perspective of real-world performance.
\subsubsection{Characteristics}
To understand the varying effectiveness of dynamic right-sizing for the considered traces, we first analyze the properties of the given traces.
The most immediate and fundamental properties of a trace are its duration, the number of appearing jobs, the number of job types, and the underlying server infrastructure -- especially whether this infrastructure is homogeneous or heterogeneous.
Then, we also consider several more specific characteristics. The \emph{interarrival time}\index{interarrival time} (or submission rate) of jobs is the distribution of times between job arrivals. This distribution indicates the average system load as well as load uniformity. The \emph{peak-to-mean ratio (PMR)}\index{peak-to-mean ratio} is defined as the ratio of the maximum load and the mean load. It is a good indicator of the uniformity of loads. We refer to time slots as \emph{peaks}\index{peak load} when their load is greater than the 0.9-quantile of loads. We call the ratio of the 0.9-quantile of loads and the mean load \emph{true peak-to-mean-ratio (TPMR)}\index{true peak-to-mean ratio} as it is less sensitive to outliers than the PMR. We refer to periods between peaks as \emph{valleys}\index{valley}. More concretely, we refer to the time between two consecutive peaks as \emph{peak distance}\index{peak distance} and the number of consecutive time slots up to a time slot with a smaller load as \emph{valley length}\index{valley length}. Further, we say that a trace follows a \emph{diurnal pattern}\index{diurnal pattern} if during every 24 hours, excluding the final day, there is at least one valley spanning 12 hours or more. We exclude the final day as the final valley might be shortened by the end of the trace.
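Using $\lambda_t$ to denote the load in time slot $t$ over a horizon of length $T$ (the symbol is introduced here only for compactness), the two ratios can be written as
\[
	\text{PMR} = \frac{\max_{t} \lambda_t}{\frac{1}{T}\sum_{t=1}^{T} \lambda_t}, \qquad
	\text{TPMR} = \frac{Q_{0.9}(\lambda_1, \dots, \lambda_T)}{\frac{1}{T}\sum_{t=1}^{T} \lambda_t},
\]
where $Q_{0.9}$ denotes the empirical $0.9$-quantile of the loads.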
We also consider some additional information included in some traces, such as the measured scheduling rate (or queuing delay), an indicator for utilization.
% and, if provided, the distribution of the measured utilization of servers which may be an indicator for resource over-commitment.
\subsubsection{Overview}
We now give an overview of all used traces. For our initial analysis, we use a time slot length of 10 minutes.
\paragraph{MapReduce\footnote{MapReduce is a programming model for processing and generating large data sets in a functional style~\cite{Dean2004}} Workload from a Hadoop\footnote{Apache Hadoop is open-source software for managing clusters} Cluster at Facebook~\cite{SWIM2013}} This trace encompasses three day-long traces from 2009 and 2010, extracted from a 6-month and a 1.5-month-long trace containing 1 million homogeneous jobs each. The traces are visualized in \cref{fig:facebook:histogram} and summarized in \cref{tab:facebook}. The cluster consists of 600 machines, which we assume to be homogeneous. For the trace from 2010, we adjust the maximum number of servers to 1000 as otherwise the trace is infeasible under our models. \Cref{fig:facebook:schedule} visualizes the corresponding dynamic and static offline optimal schedules under our second model (which is described in \cref{section:case_studies:traces:model-parameters}). The trace was published by \citeauthor{SWIM2013}~\cite{SWIM2013} as part of the SWIM project at UC Berkeley.
\begin{figure}
\begin{subfigure}[b]{.3425\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/facebook_2009_0_histogram.tex}}
\caption{2009-0}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/facebook_2009_1_histogram.tex}}
\caption{2009-1}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/facebook_2010_histogram.tex}}
\caption{2010}
\end{subfigure}
\caption{Facebook MapReduce workloads.}
\label{fig:facebook:histogram}
\end{figure}
\begin{figure}
\begin{subfigure}[b]{.33\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/facebook_2009_0_schedule.tex}}
\caption{2009-0}
\end{subfigure}
\begin{subfigure}[b]{.3075\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/facebook_2009_1_schedule}}
\caption{2009-1}
\end{subfigure}
\begin{subfigure}[b]{.3475\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/facebook_2010_schedule}}
\caption{2010}
\end{subfigure}
\caption{Optimal dynamic and static offline schedules for the last day of the Facebook workloads. The left y axis shows the number of servers of the static and dynamic offline optima at a given time (black). The right y axis shows the number of jobs (i.e., the load) at a given time (red).}
\label{fig:facebook:schedule}
\end{figure}
\begin{table}
\centering
\begin{tabularx}{\textwidth}{>{\bfseries}l|X|X|X}
characteristic & 2009-0 & 2009-1 & 2010 \\\hline
duration & 1 day & 1 day & 1 day \\
number of jobs & 6 thousand & 7 thousand & 24 thousand \\
median interarrival time & 7 seconds & 7 seconds & 2 seconds \\
PMR & 3.91 & 2.97 & 2.2 \\
TPMR & 2.04 & 1.93 & 1.69 \\
mean peak distance & 95 minutes & 106 minutes & 87 minutes \\
mean valley length & 44 minutes & 36 minutes & 35 minutes \\
% diurnal pattern & \emph{trace too short} & \emph{trace too short} & \emph{trace too short} \\
    \end{tabularx}
    \caption{Characteristics of Facebook's MapReduce workloads.}
    \label{tab:facebook}
\end{table}
\paragraph{Los Alamos National Lab HPC Traces~\cite{Amvrosiadis2018_3, Amvrosiadis2018, Amvrosiadis2018_2}} This trace comprises two separate traces from high-performance computing clusters from Los Alamos National Lab (LANL). The traces were published by \citeauthor{Amvrosiadis2018}~\cite{Amvrosiadis2018} as part of the Atlas project at Carnegie Mellon University.
The first trace is from the Mustang cluster, a general-purpose cluster consisting of 1600 homogeneous servers. Jobs were assigned to entire servers. The dataset covers 61 months from October 2011 to November 2016 and is shown in \cref{fig:los_alamos:histogram}. Note that the PMR is large at $622$ due to some outliers in the data. The median job duration is roughly 7 minutes, although the trace includes some extremely long-running outliers, resulting in a mean job duration of over 2.5 hours. In the trace, jobs were assigned to one or multiple servers. To normalize the trace, we consider each job once for each server it was processed on. \Cref{fig:los_alamos:schedule} shows the dynamic and static offline optimal schedules under our second model.
The second trace is from the Trinity supercomputer. This trace is very similar to the Mustang trace but includes an even larger number of long-running jobs. We therefore do not consider this trace in our analysis.
\paragraph{Microsoft Fiddle Trace~\cite{Jeon2019}} This trace consists of deep neural network training workloads on internal servers from Microsoft. The trace was published as part of the Fiddle project from Microsoft Research. The jobs are run on a heterogeneous set of servers which we group based on the number of GPUs of each server. There are 321 servers with two GPUs and 231 servers with eight GPUs. The median job duration is just below 15 minutes. The load profiles are visualized in \cref{fig:microsoft:histogram}.
The CPU utilization of the trace is extremely low, with more than 80\% of servers running at a utilization of 30\% or less~\cite{Santhanam2019}. However, memory utilization is high, with an average of more than 80\%, indicating that overall server utilization is already very high~\cite{Santhanam2019}. Again, the PMR is rather large at 89.43 due to outliers.
In our model, we adjust the runtime of jobs relative to the number of available GPUs in the respective server, i.e., the average runtime of jobs on a 2-GPU-server is four times as long as the average runtime of jobs on an 8-GPU-server. We account for the increased energy consumption of a server with eight GPUs by increasing its energy consumption relative to servers with two GPUs by a factor of 4.2. We also associate a fifteen times higher switching cost with servers with eight GPUs.
The dynamic and static offline optimal schedules under our second model are shown in \cref{fig:microsoft:schedule}. Note that under the given load, servers with two GPUs are preferred to servers with eight GPUs when they are only needed for a short period, due to their lower switching costs. This might seem counterintuitive at first: 2-GPU-servers appear to be strictly better than 8-GPU-servers, since the operating and switching costs of 8-GPU-servers are worse by a factor greater than four compared to the respective costs of 2-GPU-servers. However, we assume an average job runtime of 7.5 minutes on 8-GPU-servers as opposed to an average job runtime of 30 minutes on 2-GPU-servers (a factor of four), implying that 8-GPU-servers can process more than four jobs in an hour without a significant increase in delay, whereas 2-GPU-servers are limited to one job per time slot.
\paragraph{Alibaba Trace~\cite{Alibaba2018}} This trace consists of a mixture of long-running applications and batch jobs. We are using their trace from 2018, covering eight days. The trace is visualized in \cref{fig:alibaba:histogram}; the dynamic and static offline optimal schedules under our second model are shown in \cref{fig:alibaba:schedule}. The jobs are processed on 4000 homogeneous servers. In our models, we assume a total of 10,000 servers to ensure that the number of servers is not a bottleneck. Jobs themselves are grouped into 11 types, which we further simplify to 4 types based on their average runtime. We consider \emph{short}, \emph{medium}, \emph{long}, and \emph{very long} jobs. Their average runtime in the trace is shown in \cref{tab:alibaba:job_types}. The median job duration is 8 seconds, and the mean job duration is just over 1.5 minutes.
\begin{table}
\centering
\begin{tabularx}{\textwidth}{>{\bfseries}l|c}
job type & mean runtime \\\hline
short & 68 seconds \\
medium & 196 seconds \\
long & 534 seconds \\
very long & 1180 seconds \\
\end{tabularx}
\caption{Characterization of the job types of the Alibaba trace.}
\label{tab:alibaba:job_types}
\end{table}
Data from a previous trace indicates that mean CPU utilization varies between 10\% and 40\% while mean memory utilization varies between 40\% and 65\%~\cite{Lu2017}. This indicates that the overall server utilization is not optimal.
In our model, we scale job runtimes by a factor of 2.5 from short to very long jobs, roughly matching the runtimes of jobs from the trace.
\begin{figure}
\begin{subfigure}[b]{.3425\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/los_alamos_mustang_histogram.tex}}
\caption{LANL Mustang}\label{fig:los_alamos:histogram}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/microsoft_histogram}}
\caption{Microsoft Fiddle}\label{fig:microsoft:histogram}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/alibaba_histogram}}
\caption{Alibaba}\label{fig:alibaba:histogram}
\end{subfigure}
\caption{LANL Mustang, Microsoft Fiddle, and Alibaba traces. The figures display the average number of job arrivals throughout a day. The interquartile range is shown as the shaded region.}
\end{figure}
\begin{figure}
\begin{subfigure}[b]{.345\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/los_alamos_mustang_schedule.tex}}
\caption{LANL Mustang}\label{fig:los_alamos:schedule}
\end{subfigure}
\begin{subfigure}[b]{.305\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/microsoft_schedule}}
\caption{Microsoft Fiddle}\label{fig:microsoft:schedule}
\end{subfigure}
\begin{subfigure}[b]{.335\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/alibaba_schedule}}
\caption{Alibaba}\label{fig:alibaba:schedule}
\end{subfigure}
\caption{Optimal dynamic and static offline schedules for the last day of the LANL Mustang, Microsoft Fiddle, and the second to last day of the Alibaba trace. The left y axis shows the number of servers of the static and dynamic offline optima at a given time (black). The right y axis shows the number of jobs (i.e., the load) at a given time (red).}
\end{figure}
\paragraph{} We have seen traces from very different real-world use cases. The Microsoft Fiddle trace is based on a heterogeneous server architecture, and the Alibaba trace receives heterogeneous loads. The PMR and valley lengths of the days used in our analysis are shown in \cref{tab:pmr_vl}. Interestingly, as shown in \cref{tab:traces}, their TPMR, peak distances, and valley lengths are mostly similar.
\subsection{Assumptions}
We impose a couple of assumptions to simplify our analysis. First, as already mentioned, our analysis is inherently limited by the traces we use as the basis for our experiments. While we examine a wide variety of traces, their high variability indicates that the choice of traces is a fundamental limitation of any estimate of real-world performance.
Another common limitation of models is that the interarrival times of jobs are on the order of seconds or less~\cite{Amvrosiadis2018}. However, this is not a limitation of our analysis, as our delay model uses a general Poisson process with an appropriate mean arrival rate.
In the context of high-performance computing, jobs typically have a \emph{gang scheduling}\index{gang scheduling} requirement, i.e., a requirement that related jobs are processed simultaneously even though they run on different hardware~\cite{Amvrosiadis2018}. For simplicity, we assume this requirement is always satisfied. However, this is not a substantial limitation, as the scheduling of jobs within a time slot is not determined by the discussed algorithms and is instead left to the server operator. Nevertheless, in principle, the gang scheduling requirement may render some schedules infeasible if, once gang scheduling constraints are taken into account, the processing time on servers exceeds the length of a time slot.
There are also some limitations resulting from the design of our model. As mentioned previously, we assume that jobs arrive at the beginning of a new time slot rather than at random times throughout the time slot. Moreover, we assume that for every job, a server type exists that can process this job within one time slot. In other words, no job runs longer than $\delta$. We have seen in \cref{section:case_studies:method:traces} that this assumption is violated in most practical scenarios. In \cref{section:application:dynamic_duration}, we described how this assumption can be removed. The same approach can also be used to remove the assumption that jobs must arrive at the beginning of a time slot.
\subsection{Alternatives to Right-Sizing Data Centers}\label{section:case_studies:method:alternatives}
To determine the benefit of dynamically right-sizing data centers, we must first describe the alternative strategies for managing a data center. We will then use these approaches as a point of reference in our analysis.
Most data centers are statically provisioned; that is, the configuration of active servers is changed only rarely (often manually) and remains constant during most periods~\cite{Whitney2014}. To support the highest loads, data centers are peak-provisioned, i.e., the number of servers is chosen such that it suffices to process all jobs even during the times when most jobs arrive. Moreover, as a safety measure, data centers are typically provisioned to handle much higher loads than those encountered in practice~\cite{Whitney2014}.
\begin{table}
\centering
\begin{tabularx}{\textwidth}{>{\bfseries}l|X|X|X}
characteristic & LANL Mustang & Microsoft Fiddle & Alibaba \\\hline
duration & 5 years & 30 days & 8 days \\
number of jobs & 20 million & 120 thousand & 14 million \\
median interarrival time & 0 seconds & 8 seconds & 0 seconds \\
PMR & 621.94 & 89.43 & 3.93 \\
TPMR & 2.5 & 1.68 & 1.77 \\
mean peak distance & 100 minutes & 105 minutes & 89 minutes \\
mean valley length & 120 minutes & 115 minutes & 74 minutes \\
diurnal pattern & yes & - & yes \\
\end{tabularx}
\caption{Characteristics of the LANL Mustang, Microsoft Fiddle, and Alibaba traces.}
\label{tab:traces}
\end{table}
Naturally, traces with a high PMR or long valleys are more likely to benefit from alternatives to static provisioning. Therefore, another widely used alternative is \emph{valley filling}\index{valley filling}, which aims to schedule lower-priority jobs (i.e., some batch jobs) during valleys. In an ideal scenario, this approach can achieve $\text{PMR} \approx 1$, which would allow for efficient static provisioning. Crucially, this approach requires a large number of low-priority jobs that may be processed with a significant delay (requiring a considerable minimum perceptible delay $\delta_i$ for a large number of jobs of type $i$), and thus, in most cases, valleys cannot be eliminated entirely. \citeauthor{Lin2011}~\cite{Lin2011} showed that dynamic right-sizing can be combined with valley filling to achieve a significant cost reduction. The optimal balancing of dynamic right-sizing and valley filling is mainly determined by the change to the PMR. \citeauthor{Lin2011}~\cite{Lin2011} showed that cost savings of 20\% are possible with a PMR of 2, and that a PMR of approximately 1.3 can still achieve cost savings of more than 5\%. Generally, the cost reduction vanishes once the PMR approaches $1$, which may happen between 30\% and 70\% mean background load~\cite{Lin2011}. The performance of dynamic right-sizing combined with valley filling can thus be estimated from these previous results.
\subsection{Performance Metrics}
Let $OPT$ denote the dynamic offline optimum and $OPT_s$ denote the static offline optimum. In our analysis, the \emph{normalized cost}\index{normalized cost} of an online algorithm is the ratio of the obtained cost to the optimal dynamic offline cost, i.e., $NC(ALG) = c(ALG) / c(OPT)$. Further, we base our estimated \emph{cost reduction}\index{cost reduction} on an optimal offline static provisioning: \begin{align*}
CR(ALG) = \frac{c(OPT_s) - c(ALG)}{c(OPT_s)}.
\end{align*} Note that this definition is similar to the definition of regret, but expressed relative to the overall cost. We refer to $SDR = c(OPT_s) / c(OPT)$ as the \emph{static/dynamic ratio}\index{static/dynamic ratio}, which is closely related to the \emph{potential cost reduction}\index{potential cost reduction} $PCR = CR(OPT)$.
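Note that the potential cost reduction can be expressed directly in terms of the static/dynamic ratio: \begin{align*}
PCR = CR(OPT) = \frac{c(OPT_s) - c(OPT)}{c(OPT_s)} = 1 - \frac{1}{SDR}.
\end{align*} For example, an SDR of $1.5$ corresponds to a potential cost reduction of $1/3$.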
\subsection{Previous Results}
\citeauthor{Lin2011}~\cite{Lin2011} showed that the cost reduction is directly proportional to the PMR and inversely proportional to the normalized switching cost. Additionally, \citeauthor{Lin2011}~\cite{Lin2011} showed that, as one would expect, the possible cost reduction decreases as the delay cost makes up a larger fraction of the overall hitting costs. In practice, this can be understood as the effect of making the model more conservative.
\begin{table}
\centering
\begin{tabularx}{\textwidth}{>{\bfseries}l|c|c}
trace & PMR & mean valley length (hours) \\\hline
Facebook 2009-0 & 2.115 & 2.565 \\
Facebook 2009-1 & 1.913 & 1.522 \\
Facebook 2010 & 1.549 & 1.435 \\
LANL Mustang & 6.575 & 1.167 \\
Microsoft Fiddle & 3.822 & 2.125 \\
Alibaba & 1.339 & 2.792 \\
\end{tabularx}
\caption{PMR and mean valley length of the traces used in our analysis. Note that the valley lengths are typically shorter than the normalized switching cost of our model.}
\label{tab:pmr_vl}
\end{table}
\subsection{Model Parameters}\label{section:case_studies:traces:model-parameters}
We now describe how we parametrize our model in our case studies. In our models, we strive to choose conservative estimates so as to underestimate the cost savings from dynamically right-sizing data centers. This approach is similar to the study by \citeauthor{Lin2011}~\cite{Lin2011}. \Cref{tab:model} gives an overview of the parameters used to produce the results of the subsequent sections.
\paragraph{Energy} We use the linear energy consumption model from \autoref{eq:energy_model:1} in our experiments. In their analysis, \citeauthor{Lin2011}~\cite{Lin2011} choose energy cost and energy consumption such that the fixed energy cost (i.e., the energy cost of a server when idling) is $1$ and the dynamic energy cost is $0$, as on most servers the fixed costs dominate the dynamic costs~\cite{Clark2005}. We investigate this model and an alternative model. In the alternative model, we estimate the power consumption of a server at 1 kW during peak loads and at 500 W when idling to yield a conservative estimate (as cooling costs are included). According to the U.S. Energy Information Administration (EIA), the average cost of energy in the industrial sector in the United States during April 2021 was 6.77 cents per kilowatt-hour~\cite{EIA2021}. We use this as a conservative estimate, as data centers typically use a more expensive portfolio of energy sources. If the actual carbon cost of the used energy were considered, which is the case in some data centers as discussed in \cref{section:application:operating_cost:energy}, energy costs would likely be substantially higher.
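To make these estimates concrete (assuming a one-hour time slot and the linear scaling of consumption between idle and peak), an idling server consumes roughly $0.5$ kWh and a fully utilized server roughly $1$ kWh per time slot, so the energy cost per server and time slot ranges from about $0.5 \cdot 0.0677 \approx 0.034$ to $1 \cdot 0.0677 \approx 0.068$ dollars; this corresponds to the parameters $c = 0.0677$, $\Phi_{\text{min}} = 0.5$, and $\Phi_{\text{max}} = 1$ of the second model in \cref{tab:model}.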
\paragraph{Revenue Loss} According to measurements, a 500 ms increase in delay results in a revenue loss of 20\%, i.e., 0.04\%/ms~\cite{Lin2012, Hamilton2009}. Thus, scaling the delay measured in ms by 0.1 can be used as a slight over-approximation of the revenue loss. \citeauthor{Lin2011}~\cite{Lin2011} choose the minimal perceptible delay as 1.5 times the time to run a job, which is a very conservative estimate if valley filling is assumed to be a viable alternative. In our model, we choose the minimal perceptible delay as 2.5 times the time to run a job, which is equivalent since we also add the processing time of a job to the delay. In the case of valley filling, jobs are typically processed with a much more significant delay. Similar to \citeauthor{Lin2012}~\cite{Lin2012}, we also estimate a constant network delay of 10 ms.
\paragraph{Switching Cost} We mentioned in \cref{section:application:switching_cost} that in practice, the switching cost should be on the order of operating a server between an hour to several hours. To obtain a conservative estimate, we choose $\beta$ such that the normalized switching cost times the length of a time slot equals 4 hours.
\paragraph{Time Slot Length} We choose a time slot length of 1 hour. We further assume that the average processing time of jobs is $\delta / 2$ unless noted otherwise.
\begin{table}
\centering
\begin{tabularx}{\textwidth}{>{\bfseries}l|X|X}
parameter & model 1 & model 2 \\\hline
time slot length & 1 hour & 1 hour \\
energy cost & $c=1$ & $c=0.0677$ \\
energy consumption & $\Phi_{\text{min}}=1, \Phi_{\text{max}}=1$ & $\Phi_{\text{min}}=0.5, \Phi_{\text{max}}=1$ \\
revenue loss & $\gamma = 0.1, \delta_i = 2.5 \eta_i$ & $\gamma = 0.1, \delta_i = 2.5 \eta_i$ \\
normalized switching cost & 4 hours & 4 hours \\
\end{tabularx}
\caption{Models used in our case studies. $\eta_i$ is the processing time of jobs of type $i$.}
\label{tab:model}
\end{table}
\section{Uni-Dimensional Algorithms}
The results of this section are based on the final day of the LANL Mustang and Facebook traces and the second to last day of the Alibaba trace. We begin by discussing the general features of the traces. Then, we compare the uni-dimensional online algorithms with respect to their achieved normalized cost, cost reduction, and runtime.
\paragraph{Fractional vs. Integral Cost} For all traces, the ratio of the fractional and the integral costs is 1 for a precision of at least $10^{-3}$. This is not surprising due to the large number of servers used in each model.
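A rough argument for why this is expected: ceiling a fractional schedule increases the number of active servers of each type by less than one per time slot, so the relative cost increase is at most on the order of the cost of a single server divided by the cost of all active servers, which is negligible when hundreds of servers are active.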
\begin{figure}
\begin{subfigure}[b]{.5\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/opt_vs_opts}}
\caption{Ratio of static and dynamic optima}
\end{subfigure}
\begin{subfigure}[b]{.5\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/opts_opt_vs_pmr}}
\caption{Correlation of the PMR and the static/dynamic ratio}\label{fig:case_studies:ud:opt_vs_opts:pmr}
\end{subfigure}
\caption{Ratio of static and dynamic offline optima for each trace. The LANL Mustang and Microsoft Fiddle traces have a significantly higher PMR than the remaining traces. Generally, we observe a strong correlation of PMR and the static/dynamic ratio.}\label{fig:case_studies:ud:opt_vs_opts}
\end{figure}
\begin{figure}
\begin{subfigure}[b]{.49\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/opt_vs_opts_against_normalized_cost}}
\caption{Normalized cost}\label{fig:case_studies:ud:opt_vs_opts_against_normalized_cost}
\end{subfigure}
\begin{subfigure}[b]{.51\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/opt_vs_opts_against_mean_cost_reduction}}
\caption{Cost reduction}\label{fig:case_studies:ud:opt_vs_opts_against_mean_cost_reduction}
\end{subfigure}
\caption{Effect of the ratio of static and dynamic optima on the cost reduction and normalized cost achieved by the memoryless algorithm.}
\end{figure}
\paragraph{Dynamic vs. Static Cost} The dynamic and static costs differ significantly depending on the trace. The ratio of dynamic and static optimal costs for each trace is shown in \cref{fig:case_studies:ud:opt_vs_opts}.
\Cref{fig:case_studies:ud:opt_vs_opts_against_mean_cost_reduction} shows a strong positive correlation between the average cost reduction achieved by the memoryless algorithm and the ratio of the static and dynamic optima. As $OPT_s / OPT$ is directly linked to the PMR, this also indicates a strong correlation between cost reduction and the PMR. Even under our very conservative estimates of parameters, we achieve a significant cost reduction when the ratio of the static and dynamic offline optimum exceeds 1.5. Similar to \citeauthor{Lin2011}~\cite{Lin2011}, we observe that cost savings increase rapidly as the PMR increases.
We also observe in \cref{fig:case_studies:ud:opt_vs_opts_against_normalized_cost} that as the static/dynamic ratio increases, the normalized cost achieved by the memoryless algorithm increases too, but not as much as the potential energy savings, resulting in the observed significant cost reduction.
\begin{figure}
\begin{subfigure}[b]{.3425\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/fb90_normalized_cost}}
\caption{Facebook 2009-0}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/fb91_normalized_cost}}
\caption{Facebook 2009-1}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/fb10_normalized_cost}}
\caption{Facebook 2010}
\end{subfigure}
\par\bigskip
\begin{subfigure}[b]{.50\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/lanl_normalized_cost}}
\caption{LANL Mustang}
\end{subfigure}
\begin{subfigure}[b]{.48\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/alibaba_normalized_cost}}
\caption{Alibaba}
\end{subfigure}
\caption{Normalized costs of uni-dimensional online algorithms. For the Facebook 2010 trace, the second model results in an optimal schedule constantly using all servers, explaining the disparate performance compared to the first model. Further, Probabilistic and Rand-Probabilistic perform very poorly in this setting and are therefore not shown for model 2. Generally, Probabilistic and Rand-Probabilistic achieve similar results. Interestingly, we observe that LCP outperforms Int-LCP when the potential cost reduction is small. In contrast, Int-LCP and the probabilistic algorithms outperform Memoryless and LCP significantly when the potential cost reduction is large. \Cref{fig:case_studies:ud:lcp_vs_int_lcp} compares the schedules obtained by LCP and Int-LCP in greater detail.}\label{fig:case_studies:ud:normalized_cost}
\end{figure}
\begin{figure}
\begin{subfigure}[b]{.3425\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/fb90_cost_reduction}}
\caption{Facebook 2009-0}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/fb91_cost_reduction}}
\caption{Facebook 2009-1}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/fb10_cost_reduction}}
\caption{Facebook 2010}
\end{subfigure}
\par\bigskip
\begin{subfigure}[b]{.50\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/lanl_cost_reduction}}
\caption{LANL Mustang}
\end{subfigure}
\begin{subfigure}[b]{.48\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/alibaba_cost_reduction}}
\caption{Alibaba}
\end{subfigure}
\caption{Cost reduction of uni-dimensional online algorithms. For the Facebook 2010 trace, the second model results in an optimal schedule constantly using all servers, explaining the disparate performance compared to the first model. Further, Probabilistic and Rand-Probabilistic perform very poorly in this setting and are therefore not shown for model 2. Results are mainly determined by the normalized cost and the potential cost reduction (or static/dynamic ratio). We observe that the achieved cost reduction is dominated by the potential cost reduction (see \cref{fig:case_studies:ud:cost_reduction_vs_normalized_cost}).}\label{fig:case_studies:ud:cost_reduction}
\end{figure}
\begin{figure}
\centering
\input{thesis/figures/cr_vs_nc}
\caption{Weak positive correlation of achieved normalized cost and cost reduction. Intuitively, one would expect a strong negative correlation, i.e., the achieved cost reduction increases as the normalized cost approaches 1. Here, we find a positive correlation as the achieved cost reduction is dominated by the potential cost reduction (see \cref{fig:case_studies:ud:opt_vs_opts_against_mean_cost_reduction}).}\label{fig:case_studies:ud:cost_reduction_vs_normalized_cost}
\end{figure}
\paragraph{Normalized Cost} In the application of right-sizing data centers, we are interested in the cost associated with integral schedules. \Cref{fig:case_studies:ud:normalized_cost} shows the normalized cost of each algorithm. For fractional algorithms, we consider the cost of the associated integral schedule obtained by ceiling each configuration. Notably, LCP and Int-LCP perform differently depending on the trace and used model. We explore this behavior in \cref{fig:case_studies:ud:lcp_vs_int_lcp}.
\paragraph{Cost Reduction} \Cref{fig:case_studies:ud:cost_reduction} shows the achieved cost reduction. In general, we observe in \cref{fig:case_studies:ud:opt_vs_opts_against_normalized_cost}, \cref{fig:case_studies:ud:opt_vs_opts_against_mean_cost_reduction}, and \cref{fig:case_studies:ud:cost_reduction_vs_normalized_cost} that the achieved cost reduction is dominated by the potential cost reduction (which is mainly influenced by the PMR, see \cref{fig:case_studies:ud:opt_vs_opts:pmr}). When the potential cost reduction is small, algorithms with a smaller normalized cost in a particular setting achieve a significantly higher cost reduction.
\begin{figure}
\begin{subfigure}[b]{.5175\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/lcp_vs_ilcp_1}}
\caption{Facebook 2009-0}
\end{subfigure}
\begin{subfigure}[b]{.4825\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/lcp_vs_ilcp_2}}
\caption{LANL Mustang}
\end{subfigure}
\caption{Comparison of the schedules obtained by LCP and Int-LCP under our first model. We observe that LCP is much ``stickier'' than Int-LCP, which is beneficial when the potential cost reduction is small (i.e., for the Facebook 2009-0 trace) but detrimental when the potential cost reduction is large (i.e., for the LANL Mustang trace). In our experiments, we observe that the memoryless algorithm tends to behave similarly to LCP (i.e., is more ``sticky''), whereas the probabilistic algorithms tend to behave similarly to Int-LCP (i.e., are less ``sticky'').}\label{fig:case_studies:ud:lcp_vs_int_lcp}
\end{figure}
\begin{figure}
\begin{subfigure}[b]{.5175\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/ud_runtimes}}
\end{subfigure}
\begin{subfigure}[b]{.4825\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/ud_runtimes_slow}}
\end{subfigure}
\caption{Runtimes of uni-dimensional online algorithms.}\label{fig:case_studies:ud:runtimes}
\end{figure}
\paragraph{Runtime} \Cref{fig:case_studies:ud:runtimes} shows the distribution of runtimes (per iteration) of the online algorithms using the Facebook 2009-1 trace. The memoryless algorithm, LCP, and Int-LCP are very fast, even as the number of time slots increases. The runtime of Probabilistic and Rand-Probabilistic is slightly dependent on the used trace and model but generally good. However, when the resulting schedules are tight around the upper bound of the decision space, as is the case for the Facebook 2010 trace under our second model, the probabilistic algorithms take multiple minutes per iteration. Rand-Probabilistic is significantly slower than Probabilistic because, due to the relaxation, the integrals need to be computed in a piecewise fashion. The runtime of RBG grows linearly with time and is shown in \cref{fig:case_studies:ud:runtimes} for the first four time slots.
\begin{figure}
\centering
\input{thesis/figures/costs}
\caption{Cost profiles of uni-dimensional online algorithms. Note that integral algorithms seem to prefer revenue loss and switching costs over energy costs, whereas fractional algorithms prefer energy costs. However, this is likely because integral algorithms balance energy costs and revenue loss more accurately than the ceiled schedules of fractional algorithms.}\label{fig:case_studies:ud:costs}
\end{figure}
\paragraph{Cost Makeup} To understand whether an algorithm systematically prefers some type of cost over another, we examine the makeup of the costs associated with the (integral) schedules obtained by the online algorithms. We measure this preference of an algorithm as the normalized deviation, i.e., the cost of the algorithm minus the mean cost among all algorithms, divided by the standard deviation of costs among all algorithms. We then average the results over all traces. \Cref{fig:case_studies:ud:costs} shows the cost profiles for each algorithm. We observe that fractional algorithms prefer energy cost over revenue loss and switching cost, while integral algorithms prefer revenue loss and switching cost over energy cost. This is likely because fractional algorithms cannot balance energy cost and revenue loss optimally. When fractional schedules are ceiled, this results in an additional energy cost while reducing revenue loss. In absolute terms, the deviations make up less than 1\% of the overall costs of each type.
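For reference, writing $c_j(ALG)$ for the cost of type $j$ (energy cost, revenue loss, or switching cost) incurred by algorithm $ALG$, and $\overline{c_j}$ and $\sigma_j$ for the mean and standard deviation of this cost over all considered algorithms, the normalized deviation used above is \begin{align*}
\frac{c_j(ALG) - \overline{c_j}}{\sigma_j}.
\end{align*}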
\paragraph{} We have seen that even in our conservative model, significant cost savings with respect to the optimal static provisioning in hindsight can be achieved in practical settings when the PMR is large enough or the normalized switching cost is less than the typical valley length. Due to the conservative estimates of our model, it is likely that much more drastic cost savings are possible in practice, for example, when energy costs are higher or when the switching cost is on the order of operating a server in an idle state for one hour rather than four hours.
\section{Multi-Dimensional Algorithms}
Now, we turn to the discussed multi-dimensional algorithms. We begin by analyzing the simplified settings, SLO and SBLO, from \cref{section:online_algorithms:md:lazy_budgeting}. Then, we analyze the gradient-based methods from \cref{section:online_algorithms:md:descent_methods}.
\subsection{Smoothed Load Optimization}
Recall that for SLO, a server can process at most one job during a single time slot. Hence, we cannot use dynamic job durations to model the different runtimes of jobs on servers with two GPUs and servers with eight GPUs. Instead, we use a simplified model based on our second model, which is described in \cref{tab:simp_model}. Note that we disregard revenue loss and assume that servers operate at full utilization (if they are active). Overall, we obtain the operating costs $c = (243.720, 219.348)$ and switching costs $\beta = (487.440, 663.672)$.
\begin{table}
\centering
\begin{tabularx}{\textwidth}{>{\bfseries}l|X}
cost & simplified model \\\hline
operating cost & servers with eight GPUs have $0.9$ times the energy consumption (per processed job) of servers with two GPUs due to improved cooling efficiency \\
switching cost & servers with eight GPUs have $1.3$ times the switching cost of servers with two GPUs due to an increased associated risk \\
\end{tabularx}
\caption{Simplified model used in our case studies of SLO and SBLO. The model parameters are based on our second model described in \cref{tab:model}.}
\label{tab:simp_model}
\end{table}
The achieved normalized cost and cost reduction of lazy budgeting are shown in \cref{fig:case_studies:md:slo:normalized_cost} and \cref{fig:case_studies:md:slo:cost_reduction}, respectively. The dynamic offline optimal schedule primarily uses 8-GPU-servers and only uses 2-GPU-servers for short periods. The static offline optimal schedule uses 122 8-GPU-servers and no 2-GPU-servers, as they have a larger operating cost, which would have to be paid throughout the entire day. The lazy budgeting algorithms primarily use 2-GPU-servers due to their lower switching cost and stick with 8-GPU-servers once they have been powered up. \Cref{fig:case_studies:md:slo:det:schedule} and \cref{fig:case_studies:md:slo:rand:schedule} show the schedules obtained by the online algorithms in comparison to the offline optimal schedule. The runtime of the deterministic and randomized variants is shown in \cref{fig:case_studies:md:slo:runtimes}.
\begin{figure}
\begin{subfigure}[b]{.5\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/slo_normalized_cost}}
\caption{Normalized cost}\label{fig:case_studies:md:slo:normalized_cost}
\end{subfigure}
\begin{subfigure}[b]{.5\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/slo_cost_reduction}}
\caption{Cost reduction}\label{fig:case_studies:md:slo:cost_reduction}
\end{subfigure}
\caption{Performance of lazy budgeting (SLO) for the Microsoft Fiddle trace when compared against the offline optimum. The results of the randomized algorithm are based on five individual runs.}
\end{figure}
\begin{figure}
\begin{subfigure}[b]{.3425\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/slo_det_schedule}}
\caption{SLO (deterministic)}\label{fig:case_studies:md:slo:det:schedule}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/slo_rand_schedule}}
\caption{SLO (randomized)}\label{fig:case_studies:md:slo:rand:schedule}
\end{subfigure}
\begin{subfigure}[b]{.32\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/sblo_schedule}}
\caption{SBLO ($\epsilon = 1/4$)}\label{fig:case_studies:md:sblo:schedule}
\end{subfigure}
\caption{Comparison of the schedules obtained by lazy budgeting for the Microsoft Fiddle trace and the offline optimum. For SLO, the deterministic algorithm is shown in blue and one result of the randomized algorithm is shown in red. The lazy budgeting algorithm for SBLO is shown in green. Note that the lazy budgeting algorithms prefer the 2-GPU-servers initially due to their low switching costs. For SLO, the randomized algorithm appears to be less ``sticky'' than the deterministic algorithm, resulting in a better normalized cost.}
\end{figure}
\begin{figure}
\begin{subfigure}[b]{.5175\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/slo_runtimes}}
\caption{SLO}\label{fig:case_studies:md:slo:runtimes}
\end{subfigure}
\begin{subfigure}[b]{.4825\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/sblo_runtimes}}
\caption{SBLO}\label{fig:case_studies:md:sblo:runtimes}
\end{subfigure}
\caption{Runtime of lazy budgeting algorithms.}
\end{figure}
\subsection{Smoothed Balanced-Load Optimization}
For our analysis of SBLO, we use the same simplified model that we used in our analysis of SLO (see \cref{tab:simp_model}). In particular, we still assume that a server can process at most a single job during a time slot. The dynamic and static offline optima are similar to those in our analysis of SLO. In particular, the static offline optimum still only uses 8-GPU-servers. \Cref{fig:case_studies:md:sblo:schedule} shows the schedule obtained by lazy budgeting ($\epsilon = 1/4$) in comparison with the offline optimum. Lazy budgeting achieves a normalized cost of 1.284 and a cost reduction of 11\%. The runtime of the algorithm is shown in \cref{fig:case_studies:md:sblo:runtimes}.
\subsection{Descent Methods}
We also evaluated the performance of P-OBD and D-OBD on the Microsoft Fiddle trace under our original models (see \cref{tab:model}). In our analysis, we use the squared $\ell_2$ norm as the distance-generating function, i.e., $h(x) = \frac{1}{2} \norm{x}_2^2$, which is strongly convex and Lipschitz smooth in the $\ell_2$ norm. In our data center model, we use the $\ell_1$ norm to calculate switching costs; however, we observe that this approximation still achieves good performance when compared against the dynamic offline optimum. The negative entropy $h(x) = \sum_{k=1}^d x_k \log_2 x_k$, which is commonly used as a distance-generating function for the $\ell_1$ norm, cannot be used in the right-sizing data center setting as $\mathbf{0} \in \mathcal{X}$: its gradient, $\nabla h(x)_k = \log_2 x_k + 1/\ln 2$, diverges as $x_k \to 0$, so the induced Bregman divergence is undefined for configurations that power down all servers of some type. \Cref{fig:case_studies:md:obd:normalized_cost} and \cref{fig:case_studies:md:obd:cost_reduction} show the achieved normalized cost and cost reduction. The resulting schedules under the first model are compared with the offline optimum in \cref{fig:case_studies:md:obd:schedule}. Remarkably, P-OBD and D-OBD obtain the exact same schedule under our second model. \Cref{fig:case_studies:md:obd:runtimes} visualizes the runtime of P-OBD and D-OBD under our first model.
\begin{figure}
\begin{subfigure}[b]{.5\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/obd_normalized_cost}}
\caption{Normalized cost}\label{fig:case_studies:md:obd:normalized_cost}
\end{subfigure}
\begin{subfigure}[b]{.5\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/obd_cost_reduction}}
\caption{Cost reduction}\label{fig:case_studies:md:obd:cost_reduction}
\end{subfigure}
\caption{Performance of P-OBD ($\beta = 1/2$) and D-OBD ($\eta = 1$) for the Microsoft Fiddle trace when compared against the offline optimum. $h(x) = \frac{1}{2} \norm{x}_2^2$ is used as the distance-generating function.}
\end{figure}
\begin{figure}
\begin{subfigure}[b]{.5175\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/pobd_schedule}}
\caption{P-OBD}
\end{subfigure}
\begin{subfigure}[b]{.4825\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/dobd_schedule}}
\caption{D-OBD}
\end{subfigure}
\caption{Comparison of the schedules obtained by OBD for the Microsoft Fiddle trace under our first model and the offline optimum. P-OBD ($\beta = 1/2$) is shown in blue and D-OBD ($\eta = 1$) is shown in red. The two time slots during which P-OBD and D-OBD differ are marked in yellow ($t \in \{6, 20\}$). In both time slots, D-OBD is slightly less ``sticky'', resulting in a slightly better normalized cost. Also observe that under our first model, the dynamic offline optimum strictly prefers 2-GPU-servers over 8-GPU-servers. In contrast, under our second model, 8-GPU-servers are slightly preferred by the dynamic offline optimum (see \cref{fig:microsoft:schedule}). $h(x) = \frac{1}{2} \norm{x}_2^2$ is used as the distance-generating function.}\label{fig:case_studies:md:obd:schedule}
\end{figure}
Although OBD incurs an increased cost compared to the static offline optimum (the optimal static choice in hindsight), albeit by a factor of less than one, our very conservative model indicates that in practice, significant cost savings are possible.
\begin{figure}
\centering
\input{thesis/figures/obd_runtimes}
\caption{Runtime of OBD algorithms.}\label{fig:case_studies:md:obd:runtimes}
\end{figure}
\section{Predictions}
In our analysis, we use the Alibaba trace under our second model to evaluate the effects of using predictions. We consider two different types of predictions: perfect predictions, and actual predictions that are obtained using Prophet as described in \cref{section:online_algorithms:md:predictions:making_predictions}. The obtained predictions are based on the four preceding days of the Alibaba trace up until the second to last day. \Cref{fig:case_studies:predictions:prediction} visualizes the used prediction for the most common job type (i.e., short jobs). Note that in our analysis, we use the mean predicted load.
\begin{figure}
\centering
\input{thesis/figures/prediction}
\caption{Prediction of the load of short jobs for the second to last day of the Alibaba trace. The mean prediction is shown as the black line. The interquartile range of the predicted distribution is shown as the shaded region. The marks represent actual loads.}\label{fig:case_studies:predictions:prediction}
\end{figure}
\Cref{fig:case_studies:predictions:lcp} shows the effect of the prediction window $w$ when used with LCP for perfect and actual predictions. We observe that in practice, the prediction window can significantly improve the algorithm performance. Additionally, we find that this effect is also achieved with imperfect (i.e., actual) predictions. Previously, \citeauthor{Lin2011}~\cite{Lin2011} only showed this effect for perfect predictions with additive white Gaussian noise.
Interestingly, RHC and AFHC achieve equivalent results for perfect and imperfect predictions. \Cref{fig:case_studies:predictions:mpc} shows how the achieved normalized cost changes with the prediction window. Crucially, note that the MPC-style algorithms do not necessarily perform better for a growing prediction window. \citeauthor{Lin2012}~\cite{Lin2012} showed this effect previously for an adversarially chosen example; however, we observe this behavior with AFHC in a practical setting. In fact, for the Alibaba trace, RHC and AFHC achieve their best result when used without a prediction window, i.e., $w = 0$.
\begin{figure}
\begin{subfigure}[b]{.5175\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/pred_lcp_cost}}
\caption{LCP and Int-LCP}\label{fig:case_studies:predictions:lcp}
\end{subfigure}
\begin{subfigure}[b]{.4825\linewidth}
\resizebox{\textwidth}{!}{\input{thesis/figures/pred_mpc_cost}}
\caption{RHC and AFHC}\label{fig:case_studies:predictions:mpc}
\end{subfigure}
\caption{Performance of online algorithms with a prediction window for the Alibaba trace. The left figure shows the performance of LCP and Int-LCP. The right figure shows the performance of RHC and AFHC. Note that LCP does not continuously approach the normalized cost $1$ as $w \to 24$ because of numerical inaccuracies in solving the convex optimizations and because the obtained schedule is compared to the integral offline optimum rather than the fractional offline optimum. For RHC and AFHC, the achieved normalized cost is independent of whether perfect predictions or the predictions from \cref{fig:case_studies:predictions:prediction} are used.}
\end{figure} |
module Control.Effect.Lift
import Control.EffectAlgebra
namespace Algebra
public export
[Lift] Monad m => Algebra (Lift m) m where
alg ctxx hdl (MkLift x) = x >>= hdl . ( <$ ctxx)
%hint public export
HintLift : Monad m => Algebra (Lift m) m
HintLift = Lift
||| Lift a computation to the underlying effect stack.
public export
lift : Monad n => Inj (Lift n) sig => Algebra sig m => n a -> m a
lift x = send {eff = Lift n} $ MkLift (map pure x)
|
import HTTP
import JSON
include("./request.jl")
function delete_project(server::Bool=true) :: Nothing
cd("..")
rm("demo", recursive=true)
if server
Dance.CoreEngine.close_server()
end
Dance.Router.delete_routes!()
nothing
end
function dirs_add_multiple() :: Nothing
mkdir("code")
mkdir("node_modules") # test ignores node dir
cd("code") # test dir with file & sub-dir
touch("file1.jl")
mkdir("sub-dir1") # test dir with only sub-dir
cd("sub-dir1")
mkdir("sub-dir2") # test dir with file
cd("sub-dir2")
touch("file2.jl")
cd("../../..")
nothing
end
function dirs_add_single() :: Nothing
mkdir("code")
mkdir("node_modules") # test ignores node dir
cd("code") # test dir with file & sub-dir
touch("file1.jl")
cd("..")
nothing
end
function make_and_test_request_get(path::String, status::Int64, headers::Dict{String, String}, content_length::Int64, is_json_body::Bool, body::Any) :: Nothing
r = HTTP.request("GET", "http://127.0.0.1:8000$path")
parse_and_test_request(r, status, headers, content_length, is_json_body, body)
end
function make_and_test_request_options(path::String) :: Nothing
r = HTTP.request("OPTIONS", "http://127.0.0.1:8000$path")
compare_http_header(r.headers, "Allow", "POST")
compare_http_header(r.headers, "Access-Control-Allow-Methods", "POST")
compare_http_header(r.headers, "Access-Control-Allow-Headers", "X-PINGOTHER, Content-Type")
end
function make_and_test_request_post(path::String, payload::Union{Array, Dict}, status::Int64, headers::Dict{String, String}, content_length::Int64, is_json_body::Bool, body::Any) :: Nothing
r = HTTP.request("POST", "http://127.0.0.1:8000$path", [], JSON.json(payload))
parse_and_test_request(r, status, headers, content_length, is_json_body, body)
end
function project_settings() :: Nothing
## Add `dev.jl` file with 1 overwrite & 1 new entry ##
cd("demo/settings")
touch("dev.jl")
open("dev.jl", "w") do io
write(io, ":dev = true\n")
write(io, ":foo = \"bar\"")
end
open("Global.jl", "a+") do io
write(io, "include(\"dev.jl\")")
end
cd("..")
end
function routes(file_suffix::String) :: Nothing
open("routes.jl", "w") do io_routes
open("../sample/routes/" * file_suffix * ".jl") do io_file
write(io_routes, io_file)
end
end
nothing
end
|
"""
Projector(ϕs, orbitals)
Represents the projector on the subspace spanned by the radial
orbitals `ϕs` (corresponding to `orbitals`).
"""
struct Projector{T,B<:AbstractQuasiMatrix,RO<:RadialOrbital{T,B},O} <: NBodyOperator{1}
ϕs::Vector{RO}
orbitals::Vector{O}
end
Base.iszero(me::OrbitalMatrixElement{1,A,<:Projector,B}) where {A<:SpinOrbital,B<:SpinOrbital} =
me.a[1] ∉ me.o.orbitals || me.b[1] ∉ me.o.orbitals
function Base.show(io::IO, projector::Projector)
write(io, "P(")
write(io, join(string.(projector.orbitals), " "))
write(io, ")")
end
projectout!(y::RO, ::Nothing) where RO = y
"""
projectout!(y, projector)
Project out all components of `y` parallel to the radial orbitals
`projector.ϕs`.
"""
function projectout!(y::RO, projector::Proj) where {RO,Proj<:Projector}
yc = y.args[2]
for ϕ in projector.ϕs
c = materialize(applied(*, ϕ', y))
yc .-= c*ϕ.args[2]
# y -= c*ϕ
end
y
end
|
Require Import VST.progs.io.
Require Import VST.progs.io_specs.
Require Import VST.floyd.proofauto.
Require Import ITree.ITree.
(*Import ITreeNotations.*)
Notation "t1 >>= k2" := (ITree.bind t1 k2)
(at level 50, left associativity) : itree_scope.
Notation "x <- t1 ;; t2" := (ITree.bind t1 (fun x => t2))
(at level 100, t1 at next level, right associativity) : itree_scope.
Notation "t1 ;; t2" := (ITree.bind t1 (fun _ => t2))
(at level 100, right associativity) : itree_scope.
Notation "' p <- t1 ;; t2" :=
(ITree.bind t1 (fun x_ => match x_ with p => t2 end))
(at level 100, t1 at next level, p pattern, right associativity) : itree_scope.
Instance CompSpecs : compspecs. make_compspecs prog. Defined.
Definition Vprog : varspecs. mk_varspecs prog. Defined.
Definition putchar_spec := DECLARE _putchar putchar_spec.
Definition getchar_spec := DECLARE _getchar getchar_spec.
Lemma div_10_dec : forall n, 0 < n ->
(Z.to_nat (n / 10) < Z.to_nat n)%nat.
Proof.
intros.
change 10 with (Z.of_nat 10).
rewrite <- (Z2Nat.id n) by omega.
rewrite <- div_Zdiv by discriminate.
rewrite !Nat2Z.id.
apply Nat2Z.inj_lt.
rewrite div_Zdiv, Z2Nat.id by omega; simpl.
apply Z.div_lt; auto; omega.
Qed.
Program Fixpoint chars_of_Z (n : Z) { measure (Z.to_nat n) } : list int :=
let n' := n / 10 in
match n' <=? 0 with true => [Int.repr (n + char0)] | false => chars_of_Z n' ++ [Int.repr (n mod 10 + char0)] end.
Next Obligation.
Proof.
apply div_10_dec.
symmetry in Heq_anonymous; apply Z.leb_nle in Heq_anonymous.
eapply Z.lt_le_trans, Z_mult_div_ge with (b := 10); omega.
Defined.
(* The function computed by print_intr *)
Program Fixpoint intr n { measure (Z.to_nat n) } : list int :=
match n <=? 0 with
| true => []
| false => intr (n / 10) ++ [Int.repr (n mod 10 + char0)]
end.
Next Obligation.
Proof.
apply div_10_dec.
symmetry in Heq_anonymous; apply Z.leb_nle in Heq_anonymous; omega.
Defined.
Definition print_intr_spec :=
DECLARE _print_intr
WITH i : Z, tr : IO_itree
PRE [ _i OF tuint ]
PROP (0 <= i <= Int.max_unsigned)
LOCAL (temp _i (Vint (Int.repr i)))
SEP (ITREE (write_list (intr i) ;; tr))
POST [ tvoid ]
PROP ()
LOCAL ()
SEP (ITREE tr).
Definition print_int_spec :=
DECLARE _print_int
WITH i : Z, tr : IO_itree
PRE [ _i OF tuint ]
PROP (0 <= i <= Int.max_unsigned)
LOCAL (temp _i (Vint (Int.repr i)))
SEP (ITREE (write_list (chars_of_Z i) ;; tr))
POST [ tvoid ]
PROP ()
LOCAL ()
SEP (ITREE tr).
Definition read_sum n d : IO_itree :=
ITree.aloop (fun '(n, d) =>
if zlt n 1000 then if zlt d 10 then
inl (write_list (chars_of_Z (n + d));; write (Int.repr newline);;
c <- read;;
Ret (n + d, Int.unsigned c - char0)) (* loop again with these parameters *)
else inr tt else inr tt) (* inr to end the loop *)
(n, d).
Definition main_itree := c <- read;; read_sum 0 (Int.unsigned c - char0).
Definition main_spec :=
DECLARE _main
WITH gv : globals
PRE [] main_pre_ext prog main_itree nil gv
POST [ tint ] main_post prog nil gv.
Definition Gprog : funspecs := ltac:(with_library prog [putchar_spec; getchar_spec;
print_intr_spec; print_int_spec; main_spec]).
Lemma divu_repr : forall x y,
0 <= x <= Int.max_unsigned -> 0 <= y <= Int.max_unsigned ->
Int.divu (Int.repr x) (Int.repr y) = Int.repr (x / y).
Proof.
intros; unfold Int.divu.
rewrite !Int.unsigned_repr; auto.
Qed.
Opaque Nat.div Nat.modulo.
Lemma intr_eq : forall n, intr n =
match n <=? 0 with
| true => []
| false => intr (n / 10) ++ [Int.repr (n mod 10 + char0)]
end.
Proof.
intros.
unfold intr at 1.
rewrite Wf.WfExtensionality.fix_sub_eq_ext; simpl; fold intr.
destruct n; reflexivity.
Qed.
Lemma bind_ret' : forall E (s : itree E unit), eutt eq (s;; Ret tt) s.
Proof.
intros.
etransitivity; [|apply subrelation_eq_eutt, bind_ret2].
apply eutt_bind; [intros []|]; reflexivity.
Qed.
Lemma body_print_intr: semax_body Vprog Gprog f_print_intr print_intr_spec.
Proof.
start_function.
forward_if (PROP () LOCAL () SEP (ITREE tr)).
- forward.
forward.
rewrite modu_repr, divu_repr by (omega || computable).
rewrite intr_eq.
destruct (Z.leb_spec i 0); try omega.
erewrite ITREE_ext by (rewrite write_list_app, bind_bind; reflexivity).
forward_call (i / 10, write_list [Int.repr (i mod 10 + char0)];; tr).
{ split; [apply Z.div_pos; omega | apply Z.div_le_upper_bound; omega]. }
simpl write_list.
forward_call (Int.repr (i mod 10 + char0), tr).
{ rewrite <- sepcon_emp at 1; apply sepcon_derives; [|cancel].
apply ITREE_impl; rewrite bind_ret'; reflexivity. }
entailer!.
- forward.
subst; entailer!.
erewrite ITREE_ext; [apply derives_refl|].
simpl.
rewrite Shallow.bind_ret; reflexivity.
- forward.
Qed.
Lemma chars_of_Z_eq : forall n, chars_of_Z n =
let n' := n / 10 in
match n' <=? 0 with true => [Int.repr (n + char0)] | false => chars_of_Z n' ++ [Int.repr (n mod 10 + char0)] end.
Proof.
intros.
unfold chars_of_Z at 1.
rewrite Wf.WfExtensionality.fix_sub_eq_ext; simpl; fold chars_of_Z.
destruct (_ <=? _); reflexivity.
Qed.
Lemma chars_of_Z_intr : forall n, 0 < n ->
chars_of_Z n = intr n.
Proof.
induction n using (well_founded_induction (Zwf.Zwf_well_founded 0)); intro.
rewrite chars_of_Z_eq, intr_eq.
destruct (n <=? 0) eqn: Hn; [apply Zle_bool_imp_le in Hn; omega|].
simpl.
destruct (n / 10 <=? 0) eqn: Hdiv.
- apply Zle_bool_imp_le in Hdiv.
assert (0 <= n / 10).
{ apply Z.div_pos; omega. }
assert (n / 10 = 0) as Hz by omega.
rewrite Hz; simpl.
apply Z.div_small_iff in Hz as [|]; try omega.
rewrite Zmod_small; auto.
- apply Z.leb_nle in Hdiv.
rewrite H; auto; try omega.
split; try omega.
apply Z.div_lt; auto; omega.
Qed.
Lemma body_print_int: semax_body Vprog Gprog f_print_int print_int_spec.
Proof.
start_function.
forward_if (PROP () LOCAL () SEP (ITREE tr)).
- subst.
forward_call (Int.repr char0, tr).
{ rewrite chars_of_Z_eq; simpl.
erewrite <- sepcon_emp at 1; apply sepcon_derives; [|cancel].
erewrite ITREE_ext; [apply derives_refl|].
rewrite bind_ret'; reflexivity. }
entailer!.
- forward_call (i, tr).
{ rewrite chars_of_Z_intr by omega; cancel. }
entailer!.
- forward.
Qed.
Lemma read_sum_eq : forall n d, read_sum n d ≈
(if zlt n 1000 then if zlt d 10 then
write_list (chars_of_Z (n + d));; write (Int.repr newline);;
c <- read;; read_sum (n + d) (Int.unsigned c - char0)
else Ret tt else Ret tt).
Proof.
intros.
unfold read_sum; rewrite unfold_aloop.
unfold ITree._aloop.
if_tac; [|reflexivity].
if_tac; [|reflexivity].
unfold id.
repeat setoid_rewrite bind_bind.
setoid_rewrite Shallow.bind_ret.
reflexivity.
Qed.
Lemma body_main: semax_body Vprog Gprog f_main main_spec.
Proof.
start_function.
unfold main_pre_ext.
replace_SEP 0 (ITREE main_itree).
{ go_lower.
apply has_ext_ITREE. }
forward.
unfold main_itree.
rewrite <- !seq_assoc. (* Without this, forward_call gives a type error! *)
forward_call (fun c => read_sum 0 (Int.unsigned c - char0)).
Intros c.
forward.
rewrite sign_ext_inrange by auto.
set (Inv := EX n : Z, EX c : int,
PROP (0 <= n < 1009)
LOCAL (temp _c (Vint c); temp _n (Vint (Int.repr n)))
SEP (ITREE (read_sum n (Int.unsigned c - char0)))).
unfold Swhile; forward_loop Inv break: Inv.
{ Exists 0 c; entailer!. }
subst Inv.
clear dependent c; Intros n c.
forward_if.
forward.
forward_if.
{ forward.
Exists n c; entailer!. }
forward.
rewrite <- (Int.repr_unsigned c) in H1.
rewrite sub_repr in H1.
pose proof (Int.unsigned_range c).
destruct (zlt (Int.unsigned c) char0).
{ rewrite Int.unsigned_repr_eq in H1.
rewrite <- Z_mod_plus_full with (b := 1), Zmod_small in H1; unfold char0 in *; rep_omega. }
rewrite Int.unsigned_repr in H1 by (unfold char0 in *; rep_omega).
erewrite ITREE_ext by apply read_sum_eq.
rewrite if_true by auto.
destruct (zlt _ _); [|unfold char0 in *; omega].
forward_call (n + (Int.unsigned c - char0),
write (Int.repr newline);; c' <- read;; read_sum (n + (Int.unsigned c - char0)) (Int.unsigned c' - char0)).
{ entailer!.
rewrite <- (Int.repr_unsigned c) at 1.
rewrite sub_repr, add_repr; auto. }
{ unfold char0 in *; rep_omega. }
forward_call (Int.repr newline, c' <- read;; read_sum (n + (Int.unsigned c - char0)) (Int.unsigned c' - char0)).
forward_call (fun c' => read_sum (n + (Int.unsigned c - char0)) (Int.unsigned c' - char0)).
Intros c'.
forward.
rewrite sign_ext_inrange by auto.
Exists (n + (Int.unsigned c - char0)) c'; entailer!.
rewrite <- (Int.repr_unsigned c) at 2; rewrite sub_repr, add_repr; auto.
{ forward.
Exists n c; entailer!. }
subst Inv.
Intros n c'.
forward.
Qed.
Definition ext_link := ext_link_prog prog.
Instance Espec : OracleKind := IO_Espec ext_link.
Lemma prog_correct:
semax_prog_ext prog main_itree Vprog Gprog.
Proof.
prove_semax_prog.
semax_func_cons_ext.
{ simpl; Intro i.
apply typecheck_return_value; auto. }
semax_func_cons_ext.
semax_func_cons body_print_intr.
semax_func_cons body_print_int.
semax_func_cons body_main.
Qed.
Require Import VST.veric.SequentialClight.
Require Import VST.progs.io_dry.
Definition init_mem_exists : { m | Genv.init_mem prog = Some m }.
Proof.
unfold Genv.init_mem; simpl.
Admitted. (* seems true, but hard to prove -- can we compute it? *)
Definition init_mem := proj1_sig init_mem_exists.
Definition main_block_exists : {b | Genv.find_symbol (Genv.globalenv prog) (prog_main prog) = Some b}.
Proof.
eexists; simpl.
unfold Genv.find_symbol; simpl; reflexivity.
Qed.
Definition main_block := proj1_sig main_block_exists.
Theorem prog_toplevel : exists q : Clight_new.corestate,
semantics.initial_core (Clight_new.cl_core_sem (globalenv prog)) 0 init_mem q init_mem (Vptr main_block Ptrofs.zero) [] /\
forall n, @step_lemmas.dry_safeN _ _ _ _ Clight_sim.genv_symb_injective (Clight_sim.coresem_extract_cenv (Clight_new.cl_core_sem (globalenv prog)) (prog_comp_env prog))
(io_dry_spec ext_link) {| Clight_sim.CC.genv_genv := Genv.globalenv prog; Clight_sim.CC.genv_cenv := prog_comp_env prog |} n
main_itree q init_mem.
Proof.
edestruct whole_program_sequential_safety_ext with (V := Vprog) as (b & q & m' & Hb & Hq & Hsafe).
- apply juicy_dry_specs.
- apply dry_spec_mem.
- apply CSHL_Sound.semax_prog_ext_sound, prog_correct.
- apply (proj2_sig init_mem_exists).
- exists q.
rewrite (proj2_sig main_block_exists) in Hb; inv Hb.
assert (m' = init_mem); [|subst; auto].
destruct Hq; tauto.
Qed.
|
!
! Copyright 2016 ARTED developers
!
! Licensed under the Apache License, Version 2.0 (the "License");
! you may not use this file except in compliance with the License.
! You may obtain a copy of the License at
!
! http://www.apache.org/licenses/LICENSE-2.0
!
! Unless required by applicable law or agreed to in writing, software
! distributed under the License is distributed on an "AS IS" BASIS,
! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
! See the License for the specific language governing permissions and
! limitations under the License.
!
Subroutine init_wf
use Global_Variables
implicit none
integer :: iseed,ib,ik,i
real(8) :: r2,x1,y1,z1,rnd
real(8) :: x0,y0,z0
real(8) :: r22,x2,y2,z2
zu_GS=0.d0
iseed=123
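! Initialize each orbital as a Gaussian exp(-0.5*r2) centered at a random point (x0,y0,z0);
! the displacement is transformed by A_matrix before the squared radius r2 is computed.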
do ik=1,NK
do ib=1,NB
call quickrnd(iseed,rnd)
x0=rnd
call quickrnd(iseed,rnd)
y0=rnd
call quickrnd(iseed,rnd)
z0=rnd
if(ik >= NK_s .and. ik<=NK_e)then
do i=1,NL
x1=Lx(1,i)-x0
y1=Lx(2,i)-y0
z1=Lx(3,i)-z0
x2=A_matrix(1,1)*x1+A_matrix(1,2)*y1+A_matrix(1,3)*z1
y2=A_matrix(2,1)*x1+A_matrix(2,2)*y1+A_matrix(2,3)*z1
z2=A_matrix(3,1)*x1+A_matrix(3,2)*y1+A_matrix(3,3)*z1
r2=x2**2+y2**2+z2**2
zu_GS(i,ib,ik)=exp(-0.5d0*r2)
enddo
end if
enddo
enddo
if(Myrank == 0)write(*,'(A)')'wave-functions initialization is completed'
return
End Subroutine init_wf
|
#' memuse Arithmetic
#'
#' Binary arithmetic operations for \code{memuse} objects.
#'
#' Simple arithmetic reductions.
#'
#' @param x
#' A \code{memuse} object.
#' @param ...
#' Additional arguments
#' @param na.rm
#' Whether \code{NA}'s should be ignored.
#'
#' @return
#' Returns a \code{memuse} class object.
#'
#' @examples
#' \dontrun{
#' x = mu(2000)
#' y = mu(5000)
#'
#' sum(x, y)
#'
#' ### Mixing numeric and memuse objects will work, but the first one must be a
#' ### memuse object.
#' sum(mu(10), 10) # This will work
#' sum(10, mu(10)) # This will not
#' }
#'
#' @seealso \code{ \link{Constructor} \link{memuse-class} }
#' @keywords Methods
#' @export
setMethod("sum", signature(x="memuse"),
function(x, ..., na.rm=FALSE)
{
ret <- mu.size(x, as.is=FALSE)
other <- list(...)
# checking type of inputs
typecheck <- sapply(other, sumargcheck)
if (length(other) > 0)
{
other <- sum(sapply(other, vectorizedsum))
ret <- ret + other
}
internal.mu(ret, unit.prefix=mu.prefix(x), unit.names=mu.names(x))
}
)
sumargcheck = function(i)
{
if ( !is.memuse(i) && !(is.numeric(i) && is.vector(i)) )
stop("method 'sum' can only work with combinations of memuse and vector objects")
}
vectorizedsum = function(i)
{
if (is.memuse(i))
mu.size(i, as.is=FALSE)
else
sum(i)
}
|
The corn crake is mainly a lowland species, but breeds up to 1,400 m (4,600 ft) altitude in the Alps, 2,700 m (8,900 ft) in China and 3,000 m (9,800 ft) in Russia. When breeding in Eurasia, the corn crake's habitats would originally have included river meadows with tall grass and meadow plants including sedges and irises. It is now mainly found in cool moist grassland used for the production of hay, particularly moist traditional farmland with limited cutting or fertiliser use. It also utilises other treeless grasslands in mountains or taiga, on coasts, or where created by fire. <unk> areas like wetland edges may be used, but very wet habitats are avoided, as are open areas and those with vegetation more than 50 cm (20 in) tall, or too dense to walk through. The odd bush or hedge may be used as a calling post. Grassland which is not mown or grazed becomes too matted to be suitable for nesting, but locally crops such as cereals, peas, rape, clover or potatoes may be used. After breeding, adults move to taller vegetation such as common reed, iris, or nettles to moult, returning to the hay and silage meadows for the second brood. In China, flax is also used for nest sites. Although males often sing in intensively managed grass or cereal crops, successful breeding is uncommon, and nests in the field margins or nearby fallow ground are more likely to succeed.
|
World Heritage Sites
|
#include <stdio.h>
#include <stdlib.h>
#include <gsl/gsl_statistics.h>
#include <assert.h>
extern "C" {
#include <quadmath.h>
}
#ifndef N
#define N 64
#endif
#define K N/2
#ifndef FB
#define FB 64
#endif
#if(FB==64)
#define FT double
#elif(FB==80)
#define FT long double
#endif
int
main(int argc, char *argv[])
{
assert(argc == 3);
int i;
char *inname = argv[1];
char *outname = argv[2];
FILE *infile = fopen(inname, "r");
FILE *outfile = fopen(outname, "w");
FT data[K], w[K];
FT wsd;
for (i = 0 ; i < K ; i++) {
__float128 in_data;
fread(&in_data, sizeof(__float128), 1, infile);
    data[i] = (FT) in_data;
}
for (i = 0 ; i < K ; i++) {
__float128 in_data;
fread(&in_data, sizeof(__float128), 1, infile);
    w[i] = (FT) in_data;
}
fclose(infile);
// library call
double wmean;
#if(FB==64)
wmean = gsl_stats_wmean(w, 1, data, 1, K);
wsd = gsl_stats_wsd_with_fixed_mean(w, 1, data, 1, K, wmean);
#elif(FB==80)
wmean = gsl_stats_long_double_wmean(w, 1, data, 1, K);
wsd = gsl_stats_long_double_wsd_with_fixed_mean(w, 1, data, 1, K, wmean);
#else
exit(-1);
#endif
//printf ("The sample wsd is %g\n", wsd);
__float128 out_data;
out_data = (__float128) wsd;
fwrite(&out_data, sizeof(__float128), 1, outfile);
fclose(outfile);
return 0;
}
|
From mathcomp
Require Import ssreflect ssrbool ssrnat eqtype seq ssrfun.
From fcsl
Require Import prelude pred pcm unionmap heap.
From HTT
Require Import stmod stsep stlog stlogR.
From SSL
Require Import core.
From Hammer Require Import Hammer.
(* Configure Hammer *)
Set Hammer ATPLimit 60.
Unset Hammer Eprover.
Unset Hammer Vampire.
Add Search Blacklist "fcsl.".
Add Search Blacklist "HTT.".
Add Search Blacklist "Coq.ssr.ssrfun".
Add Search Blacklist "mathcomp.ssreflect.ssrfun".
Add Search Blacklist "mathcomp.ssreflect.bigop".
Add Search Blacklist "mathcomp.ssreflect.choice".
Add Search Blacklist "mathcomp.ssreflect.div".
Add Search Blacklist "mathcomp.ssreflect.finfun".
Add Search Blacklist "mathcomp.ssreflect.fintype".
Add Search Blacklist "mathcomp.ssreflect.path".
Add Search Blacklist "mathcomp.ssreflect.tuple".
Inductive sll (x : ptr) (s : seq nat) (h : heap) : Prop :=
| sll_1 of (x) == (null) of
@perm_eq nat_eqType (s) (@nil nat) /\ h = empty
| sll_2 of ~~ ((x) == (null)) of
exists (v : nat) (s1 : seq nat) (nxt : ptr),
exists h_sll_nxts1_1,
@perm_eq nat_eqType (s) (([:: v]) ++ (s1)) /\ h = x :-> (v) \+ x .+ 1 :-> (nxt) \+ h_sll_nxts1_1 /\ sll nxt s1 h_sll_nxts1_1.
Inductive dll (x : ptr) (z : ptr) (s : seq nat) (h : heap) : Prop :=
| dll_1 of (x) == (null) of
@perm_eq nat_eqType (s) (@nil nat) /\ h = empty
| dll_2 of ~~ ((x) == (null)) of
exists (v : nat) (s1 : seq nat) (w : ptr),
exists h_dll_wxs1_0,
@perm_eq nat_eqType (s) (([:: v]) ++ (s1)) /\ h = x :-> (v) \+ x .+ 1 :-> (w) \+ x .+ 2 :-> (z) \+ h_dll_wxs1_0 /\ dll w x s1 h_dll_wxs1_0.
|
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.io.wavfile
import subprocess
import librosa
import librosa.display
import IPython.display as ipd
from pathlib import Path, PurePath
from tqdm.notebook import tqdm
import os
import random
import pickle
import pandas as pd
from collections import defaultdict
import utility
import functions
```
## Task 1
#### Settings
```python
N_TRACKS = 1413
HOP_SIZE = 512
OFFSET = 1.0
DURATION = 30
```
do not run again below this line
```python
data_folder = Path("data/mp3s-32k/")
mp3_tracks = data_folder.glob("*/*/*.mp3")
tracks = data_folder.glob("*/*/*.wav")
tracks_list = list(enumerate(tracks))
# save tracks_list
open_file = open("tracks_list.pkl", "wb")
pickle.dump(tracks_list, open_file)
open_file.close()
```
```python
tracks_list[0]
```
(0, WindowsPath('data/mp3s-32k/aerosmith/Aerosmith/01-Make_It.wav'))
```python
data_folder = Path("query/")
mp3_tracks = data_folder.glob("*.mp3")
query = data_folder.glob("*.wav")
query_list = list(enumerate(query))
# save query_list
open_file = open("query_list.pkl", "wb")
pickle.dump(query_list, open_file)
open_file.close()
```
#### Preprocessing
```python
# convert mp3 to wav
for track in tqdm(mp3_tracks, total=N_TRACKS):
utility.convert_mp3_to_wav(str(track))
```
do not run again above this line
#### Audio signals
```python
open_file = open("tracks_list.pkl", "rb")
tracks_list = pickle.load(open_file)
open_file.close()
```
```python
# list of track title and author
tracks_title = []
for i in range(len(tracks_list)):
aux = tracks_list[i][1].__str__().replace('_', ' ').replace('.wav', '').split("\\", 4)
tracks_title.append([aux[-1], aux[2]])
tracks_titles = list(enumerate(tracks_title))
```
```python
tracks_titles[0]
```
(0, ['01-Make It', 'aerosmith'])
```python
for idx, audio in tracks_list:
if idx >= 2:
break
track, sr, onset_env, peaks = utility.load_audio_peaks(audio, OFFSET, DURATION, HOP_SIZE)
utility.plot_spectrogram_and_peaks(track, sr, peaks, onset_env)
```
### 1 Minhashing
By using the function <code>functions.zeros_ones_matrix</code> we compute a zeros-ones matrix representative of the dataset.
The number of rows corresponds to the maximum possible peak index and each column represents a track.
For each track we set to one the entries at its peak indexes (the bins where peaks occur).
```python
m = round(sr*DURATION/HOP_SIZE) # maximum peak index
m
```
1292
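The actual implementation lives in <code>functions.py</code> (not shown here); a minimal sketch of what <code>functions.zeros_ones_matrix</code> is assumed to do, reusing <code>utility.load_audio_peaks</code> and the settings above (the name argument used to pickle the result is omitted), could look like this:

```python
import numpy as np

def zeros_ones_matrix_sketch(m, tracks_list):
    # one row per possible peak index, one column per track
    matrix = np.zeros((m, len(tracks_list)), dtype=np.int8)
    for col, (idx, audio) in enumerate(tracks_list):
        # peaks holds the onset-frame indexes detected in the analysed window
        _, _, _, peaks = utility.load_audio_peaks(audio, OFFSET, DURATION, HOP_SIZE)
        peaks = np.asarray(peaks)
        matrix[peaks[peaks < m], col] = 1
    return matrix
```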
do not run again below this line
```python
# generate and store tracks matrix
tracks_matrix = functions.zeros_ones_matrix(m, 'tracks', tracks_list)
```
0%| | 0/1413 [00:00<?, ?it/s]
do not run again above this line
```python
open_file = open("tracks_matrix.pkl", "rb")
tracks_matrix = pickle.load(open_file)
open_file.close()
tracks_matrix.shape, tracks_matrix.nbytes
```
((1292, 1413), 1825596)
```python
# pandas visualization of tracks matrix
new_col = [item[1][0].split("-", 1)[1] for item in tracks_titles]
df = pd.DataFrame(tracks_matrix, columns=new_col)
df
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Make It</th>
<th>Somebody</th>
<th>Dream On</th>
<th>One Way Street</th>
<th>Mama Kin</th>
<th>Write Me a Letter</th>
<th>Movin Out</th>
<th>Walking the Dog</th>
<th>Draw the Line</th>
<th>I Wanna Know Why</th>
<th>...</th>
<th>Zooropa</th>
<th>Babyface</th>
<th>Numb</th>
<th>Lemon</th>
<th>Stay Faraway So Close</th>
<th>Daddy s Gonna Pay For Your Crashed Car</th>
<th>Some Days Are Better Than Others</th>
<th>The First Time</th>
<th>Dirty Day</th>
<th>The Wanderer</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>1</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>2</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>3</th>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>4</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>1287</th>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
</tr>
<tr>
<th>1288</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>1289</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>1290</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>...</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>1291</th>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>...</td>
<td>1</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
</tbody>
</table>
<p>1292 rows × 1413 columns</p>
</div>
We want to associate each track with a *signature* based on the previously defined representative matrix of the dataset.
In order to do this, first we generate $n_{hash}$ random hash functions from a *universal family of hash functions over a prime field*:
\begin{equation}
H = \{h_{c_0, c_1} | c_0 \in \{1,2,..., p - 1\}, c_1 \in \{1,2,..., p - 1\}\}
\end{equation}
\begin{equation}
h_{c_0, c_1}(x) = ((c_1 x + c_0)\mod p)\mod n; \quad x \in U
\end{equation}
- $U : \{0,1,..., m-1\}$
- number $n \le m$
- prime number $p \ge m$
The probability that distinct elements $x_1, x_2 \in U$ collide in $n$ is $P(h(x_1) = h(x_2)) \le \frac{1}{n}$
```python
# hash settings
p = 1789 # prime number
n = 1000
nh = 24 # number of random hash functions
```
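As a concrete illustration, a member of this family can be evaluated directly with the settings above (a minimal sketch; the notebook's actual hashing is assumed to live in <code>functions.py</code>):

```python
# h_{c0,c1}(x) = ((c1*x + c0) mod p) mod n, as defined above
def universal_hash(x, c0, c1, p, n):
    return ((c1 * x + c0) % p) % n

# example: hash the row index 42 with one (arbitrary) coefficient pair
universal_hash(42, 17, 821, p, n)
```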
do not run again below this line
```python
# generate random coefficients
c_0 = random.sample(range(0, p - 1), nh)
c_1 = random.sample(range(0, p - 1), nh)
# save coefficients
with open('coefficient.npy', 'wb') as f:
np.save(f, c_0)
np.save(f, c_1)
```
do not run again above this line
```python
with open('coefficient.npy', 'rb') as f:
c_0 = np.load(f)
c_1 = np.load(f)
c_0, c_1, [len(set(c_0)), len(set(c_1))]
```
(array([1576, 17, 372, 516, 208, 297, 307, 1447, 1147, 684, 1078,
706, 785, 523, 363, 774, 1589, 525, 1212, 539, 1584, 268,
62, 318]),
array([ 821, 1457, 1596, 1441, 1051, 889, 611, 204, 1724, 392, 361,
1412, 560, 194, 394, 869, 1110, 605, 1693, 1389, 590, 1136,
261, 1190]),
[24, 24])
For each $k^{th}$ track (column of tracks matrix) we apply the $i^{th}$ *hash function* to each *row-index* with non-zero entry.
The $i^{th}$ component of the signature is equal to the *minimum* among these values.
- $k \in \{1, 2, 3,..., n_{tracks}\}$ where $n_{tracks}$ is the number of tracks in the dataset (variable <code>N_TRACKS</code>)
- $i \in \{1, 2, 3,..., n_{hash}\}$ where $n_{hash}$ is the number of random hash functions generated from the previously defined family (variable <code>nh</code>)
The function <code>functions.minhash</code> performs these operations.
The *signature matrix*, which we compute through the function <code>functions.generate_signature</code>, has shape $(n_{hash}, n_{tracks})$.
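A minimal sketch of this computation (an assumption about how <code>functions.minhash</code> and <code>functions.generate_signature</code> behave; the real implementations are in <code>functions.py</code>):

```python
import numpy as np

def generate_signature_sketch(tracks_matrix, c_0, c_1, p, n):
    nh = len(c_0)                                  # number of hash functions
    n_tracks = tracks_matrix.shape[1]
    signature = np.empty((nh, n_tracks), dtype=np.int64)
    for k in range(n_tracks):
        # row indexes with a non-zero entry (assumes every track has at least one peak)
        rows = np.nonzero(tracks_matrix[:, k])[0]
        for i in range(nh):
            hashed = ((c_1[i] * rows + c_0[i]) % p) % n
            signature[i, k] = hashed.min()         # minhash value for hash i, track k
    return signature
```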
do not run again below this line
```python
# generate and store signature matrix
signature_matrix = functions.generate_signature(tracks_list, tracks_matrix, 'tracks', nh, c_0, c_1, p, n)
```
0%| | 0/1413 [00:00<?, ?it/s]
do not run again above this line
```python
open_file = open("signature_matrix_tracks.pkl", "rb")
signature_matrix = pickle.load(open_file)
open_file.close()
signature_matrix.shape, signature_matrix.nbytes
```
((24, 1413), 33912)
```python
# pandas visualization of signature matrix
new_col = [item[1][0].split("-", 1)[1] for item in tracks_titles]
df = pd.DataFrame(signature_matrix, columns=new_col)
df
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Make It</th>
<th>Somebody</th>
<th>Dream On</th>
<th>One Way Street</th>
<th>Mama Kin</th>
<th>Write Me a Letter</th>
<th>Movin Out</th>
<th>Walking the Dog</th>
<th>Draw the Line</th>
<th>I Wanna Know Why</th>
<th>...</th>
<th>Zooropa</th>
<th>Babyface</th>
<th>Numb</th>
<th>Lemon</th>
<th>Stay Faraway So Close</th>
<th>Daddy s Gonna Pay For Your Crashed Car</th>
<th>Some Days Are Better Than Others</th>
<th>The First Time</th>
<th>Dirty Day</th>
<th>The Wanderer</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>33</td>
<td>4</td>
<td>7</td>
<td>20</td>
<td>8</td>
<td>5</td>
<td>6</td>
<td>4</td>
<td>19</td>
<td>1</td>
<td>...</td>
<td>16</td>
<td>8</td>
<td>23</td>
<td>16</td>
<td>11</td>
<td>5</td>
<td>24</td>
<td>1</td>
<td>1</td>
<td>3</td>
</tr>
<tr>
<th>1</th>
<td>20</td>
<td>46</td>
<td>7</td>
<td>14</td>
<td>5</td>
<td>9</td>
<td>7</td>
<td>54</td>
<td>8</td>
<td>35</td>
<td>...</td>
<td>45</td>
<td>13</td>
<td>24</td>
<td>1</td>
<td>14</td>
<td>11</td>
<td>1</td>
<td>7</td>
<td>3</td>
<td>7</td>
</tr>
<tr>
<th>2</th>
<td>13</td>
<td>19</td>
<td>8</td>
<td>2</td>
<td>19</td>
<td>3</td>
<td>14</td>
<td>2</td>
<td>2</td>
<td>48</td>
<td>...</td>
<td>22</td>
<td>6</td>
<td>6</td>
<td>9</td>
<td>19</td>
<td>38</td>
<td>2</td>
<td>10</td>
<td>18</td>
<td>7</td>
</tr>
<tr>
<th>3</th>
<td>10</td>
<td>4</td>
<td>15</td>
<td>4</td>
<td>19</td>
<td>11</td>
<td>5</td>
<td>1</td>
<td>0</td>
<td>8</td>
<td>...</td>
<td>13</td>
<td>5</td>
<td>9</td>
<td>4</td>
<td>18</td>
<td>6</td>
<td>11</td>
<td>4</td>
<td>0</td>
<td>19</td>
</tr>
<tr>
<th>4</th>
<td>11</td>
<td>1</td>
<td>4</td>
<td>12</td>
<td>16</td>
<td>45</td>
<td>66</td>
<td>27</td>
<td>1</td>
<td>11</td>
<td>...</td>
<td>2</td>
<td>15</td>
<td>27</td>
<td>2</td>
<td>17</td>
<td>10</td>
<td>14</td>
<td>27</td>
<td>3</td>
<td>6</td>
</tr>
<tr>
<th>5</th>
<td>21</td>
<td>6</td>
<td>14</td>
<td>36</td>
<td>6</td>
<td>10</td>
<td>21</td>
<td>53</td>
<td>18</td>
<td>45</td>
<td>...</td>
<td>20</td>
<td>3</td>
<td>7</td>
<td>8</td>
<td>8</td>
<td>17</td>
<td>17</td>
<td>8</td>
<td>30</td>
<td>29</td>
</tr>
<tr>
<th>6</th>
<td>9</td>
<td>1</td>
<td>6</td>
<td>3</td>
<td>27</td>
<td>2</td>
<td>7</td>
<td>9</td>
<td>81</td>
<td>9</td>
<td>...</td>
<td>87</td>
<td>14</td>
<td>4</td>
<td>0</td>
<td>11</td>
<td>22</td>
<td>2</td>
<td>17</td>
<td>16</td>
<td>2</td>
</tr>
<tr>
<th>7</th>
<td>1</td>
<td>5</td>
<td>0</td>
<td>15</td>
<td>18</td>
<td>0</td>
<td>22</td>
<td>39</td>
<td>25</td>
<td>12</td>
<td>...</td>
<td>34</td>
<td>2</td>
<td>2</td>
<td>1</td>
<td>12</td>
<td>1</td>
<td>28</td>
<td>6</td>
<td>12</td>
<td>17</td>
</tr>
<tr>
<th>8</th>
<td>4</td>
<td>14</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>35</td>
<td>7</td>
<td>68</td>
<td>7</td>
<td>9</td>
<td>...</td>
<td>3</td>
<td>3</td>
<td>9</td>
<td>6</td>
<td>55</td>
<td>57</td>
<td>4</td>
<td>4</td>
<td>9</td>
<td>3</td>
</tr>
<tr>
<th>9</th>
<td>7</td>
<td>22</td>
<td>10</td>
<td>10</td>
<td>11</td>
<td>7</td>
<td>10</td>
<td>20</td>
<td>34</td>
<td>10</td>
<td>...</td>
<td>59</td>
<td>12</td>
<td>12</td>
<td>46</td>
<td>41</td>
<td>18</td>
<td>10</td>
<td>6</td>
<td>4</td>
<td>4</td>
</tr>
<tr>
<th>10</th>
<td>6</td>
<td>58</td>
<td>5</td>
<td>0</td>
<td>19</td>
<td>19</td>
<td>9</td>
<td>5</td>
<td>37</td>
<td>48</td>
<td>...</td>
<td>1</td>
<td>6</td>
<td>53</td>
<td>26</td>
<td>12</td>
<td>20</td>
<td>9</td>
<td>0</td>
<td>6</td>
<td>2</td>
</tr>
<tr>
<th>11</th>
<td>7</td>
<td>9</td>
<td>11</td>
<td>18</td>
<td>0</td>
<td>9</td>
<td>6</td>
<td>7</td>
<td>11</td>
<td>33</td>
<td>...</td>
<td>12</td>
<td>0</td>
<td>11</td>
<td>0</td>
<td>5</td>
<td>16</td>
<td>13</td>
<td>6</td>
<td>4</td>
<td>6</td>
</tr>
<tr>
<th>12</th>
<td>22</td>
<td>13</td>
<td>36</td>
<td>5</td>
<td>9</td>
<td>3</td>
<td>7</td>
<td>54</td>
<td>9</td>
<td>5</td>
<td>...</td>
<td>3</td>
<td>6</td>
<td>20</td>
<td>1</td>
<td>61</td>
<td>18</td>
<td>12</td>
<td>2</td>
<td>2</td>
<td>9</td>
</tr>
<tr>
<th>13</th>
<td>19</td>
<td>39</td>
<td>10</td>
<td>12</td>
<td>13</td>
<td>9</td>
<td>19</td>
<td>12</td>
<td>6</td>
<td>19</td>
<td>...</td>
<td>17</td>
<td>17</td>
<td>4</td>
<td>9</td>
<td>6</td>
<td>9</td>
<td>6</td>
<td>18</td>
<td>3</td>
<td>11</td>
</tr>
<tr>
<th>14</th>
<td>4</td>
<td>17</td>
<td>1</td>
<td>1</td>
<td>16</td>
<td>7</td>
<td>7</td>
<td>6</td>
<td>11</td>
<td>1</td>
<td>...</td>
<td>40</td>
<td>8</td>
<td>5</td>
<td>9</td>
<td>5</td>
<td>17</td>
<td>9</td>
<td>8</td>
<td>37</td>
<td>7</td>
</tr>
<tr>
<th>15</th>
<td>2</td>
<td>13</td>
<td>16</td>
<td>15</td>
<td>16</td>
<td>14</td>
<td>4</td>
<td>18</td>
<td>14</td>
<td>0</td>
<td>...</td>
<td>20</td>
<td>6</td>
<td>16</td>
<td>2</td>
<td>14</td>
<td>18</td>
<td>0</td>
<td>0</td>
<td>6</td>
<td>1</td>
</tr>
<tr>
<th>16</th>
<td>19</td>
<td>10</td>
<td>3</td>
<td>2</td>
<td>29</td>
<td>19</td>
<td>3</td>
<td>1</td>
<td>16</td>
<td>0</td>
<td>...</td>
<td>14</td>
<td>0</td>
<td>0</td>
<td>30</td>
<td>58</td>
<td>32</td>
<td>0</td>
<td>12</td>
<td>27</td>
<td>2</td>
</tr>
<tr>
<th>17</th>
<td>4</td>
<td>30</td>
<td>19</td>
<td>11</td>
<td>24</td>
<td>17</td>
<td>10</td>
<td>4</td>
<td>4</td>
<td>8</td>
<td>...</td>
<td>44</td>
<td>3</td>
<td>10</td>
<td>3</td>
<td>6</td>
<td>16</td>
<td>3</td>
<td>25</td>
<td>39</td>
<td>9</td>
</tr>
<tr>
<th>18</th>
<td>25</td>
<td>14</td>
<td>5</td>
<td>1</td>
<td>28</td>
<td>11</td>
<td>40</td>
<td>19</td>
<td>4</td>
<td>7</td>
<td>...</td>
<td>42</td>
<td>9</td>
<td>13</td>
<td>4</td>
<td>14</td>
<td>23</td>
<td>7</td>
<td>10</td>
<td>7</td>
<td>2</td>
</tr>
<tr>
<th>19</th>
<td>1</td>
<td>29</td>
<td>14</td>
<td>0</td>
<td>0</td>
<td>14</td>
<td>35</td>
<td>2</td>
<td>3</td>
<td>1</td>
<td>...</td>
<td>4</td>
<td>29</td>
<td>0</td>
<td>2</td>
<td>9</td>
<td>20</td>
<td>10</td>
<td>15</td>
<td>1</td>
<td>8</td>
</tr>
<tr>
<th>20</th>
<td>15</td>
<td>10</td>
<td>12</td>
<td>14</td>
<td>1</td>
<td>24</td>
<td>37</td>
<td>6</td>
<td>22</td>
<td>5</td>
<td>...</td>
<td>31</td>
<td>11</td>
<td>1</td>
<td>1</td>
<td>2</td>
<td>15</td>
<td>3</td>
<td>22</td>
<td>25</td>
<td>6</td>
</tr>
<tr>
<th>21</th>
<td>58</td>
<td>10</td>
<td>8</td>
<td>2</td>
<td>13</td>
<td>13</td>
<td>13</td>
<td>1</td>
<td>25</td>
<td>15</td>
<td>...</td>
<td>3</td>
<td>13</td>
<td>1</td>
<td>29</td>
<td>18</td>
<td>9</td>
<td>10</td>
<td>53</td>
<td>18</td>
<td>0</td>
</tr>
<tr>
<th>22</th>
<td>2</td>
<td>9</td>
<td>3</td>
<td>3</td>
<td>6</td>
<td>3</td>
<td>2</td>
<td>1</td>
<td>2</td>
<td>7</td>
<td>...</td>
<td>29</td>
<td>21</td>
<td>19</td>
<td>2</td>
<td>9</td>
<td>15</td>
<td>8</td>
<td>19</td>
<td>1</td>
<td>3</td>
</tr>
<tr>
<th>23</th>
<td>0</td>
<td>4</td>
<td>2</td>
<td>3</td>
<td>5</td>
<td>16</td>
<td>12</td>
<td>6</td>
<td>5</td>
<td>1</td>
<td>...</td>
<td>9</td>
<td>2</td>
<td>4</td>
<td>2</td>
<td>19</td>
<td>20</td>
<td>3</td>
<td>22</td>
<td>0</td>
<td>9</td>
</tr>
</tbody>
</table>
<p>24 rows × 1413 columns</p>
</div>
Now we apply the same operations to the queries, which are stored in the query folder.
```python
open_file = open("query_list.pkl", "rb")
query_list = pickle.load(open_file)
open_file.close()
```
do not run again below this line
```python
# generate and store query matrix
query_matrix = functions.zeros_ones_matrix(m, 'query', query_list)
```
0%| | 0/10 [00:00<?, ?it/s]
do not run again below this line
```python
# generate and store signature query
signature_query = functions.generate_signature(query_list, query_matrix, "query", nh, c_0, c_1, p, n)
```
0%| | 0/10 [00:00<?, ?it/s]
```python
open_file = open("query_matrix.pkl", "rb")
query_matrix = pickle.load(open_file)
open_file.close()
query_matrix
```
array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
...,
[0, 0, 1, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 1, 0, 0]], dtype=int8)
```python
open_file = open("signature_matrix_query.pkl", "rb")
signature_query = pickle.load(open_file)
open_file.close()
signature_query
```
array([[ 7, 11, 61, 13, 27, 33, 16, 3, 41, 20],
[ 7, 8, 1, 3, 28, 1, 1, 7, 10, 30],
[ 8, 26, 29, 18, 3, 2, 3, 0, 6, 2],
[ 15, 0, 5, 4, 0, 13, 25, 1, 14, 47],
[ 4, 3, 2, 26, 42, 0, 0, 2, 33, 0],
[ 14, 10, 47, 7, 26, 4, 8, 13, 51, 0],
[ 6, 14, 7, 1, 9, 12, 9, 1, 6, 12],
[ 0, 6, 11, 13, 6, 0, 14, 11, 1, 3],
[ 0, 6, 9, 0, 26, 15, 3, 17, 9, 4],
[ 10, 4, 101, 1, 19, 1, 56, 9, 21, 6],
[ 5, 8, 20, 6, 2, 0, 16, 33, 43, 10],
[ 11, 11, 54, 9, 6, 7, 30, 9, -99, 27],
[ 36, 5, 13, 2, 1, 31, 3, 1, -71, 1],
[ 10, 6, 32, 3, 31, 3, 3, 33, 10, 2],
[ 1, 9, 82, 5, 6, 22, 6, 11, 16, 17],
[ 16, 0, 1, 2, 33, 6, 6, 0, 28, 29],
[ 3, 5, 85, 5, 18, 8, 4, 35, 39, 0],
[ 19, 10, 1, 18, 10, 3, 3, 6, 8, 34],
[ 5, 15, 84, 2, 16, 9, 65, 11, 37, 6],
[ 14, 1, 9, 23, 0, 1, 3, 5, 79, 22],
[ 12, 10, 26, 8, 2, 0, 36, 6, 37, 18],
[ 8, 10, 3, 11, 0, 1, 33, 5, 73, 29],
[ 3, 3, 3, 7, 8, 29, 6, 0, 2, 1],
[ 2, 1, 0, 12, 22, 3, 1, 27, 9, 0]], dtype=int8)
### 1 LSH matching
We look for possible matches with queries by performing Locality Sensitive Hashing.
The steps we followed (a minimal sketch is given after this list):
- split the signature matrix and the query matrix into equally sized bands;
- generate a hash function with random coefficients for each band;
- hash each band of every signature to a bucket;
- for each band, a dictionary maps buckets to track indexes;
- a dictionary (printed by the following function) maps each query to a list of sets (one per band) of candidates, i.e. track indexes that share a bucket with the query in that band;
- the final candidates are those that share at least one bucket with the query;
- evaluate the Jaccard similarity on the candidates.
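The banding idea can be sketched as follows (an assumption about what <code>functions.lsh</code> does; here each band is bucketed simply by the tuple of its signature values, whereas the actual function hashes each band with a fresh random hash function):

```python
from collections import defaultdict

def lsh_sketch(n_bands, signature_query, signature_matrix):
    nh, n_tracks = signature_matrix.shape
    rows_per_band = nh // n_bands
    candidates = defaultdict(set)
    for b in range(n_bands):
        band = slice(b * rows_per_band, (b + 1) * rows_per_band)
        buckets = defaultdict(set)
        for t in range(n_tracks):
            buckets[tuple(signature_matrix[band, t])].add(t)    # bucket every track
        for q in range(signature_query.shape[1]):
            key = tuple(signature_query[band, q])
            candidates[q] |= buckets.get(key, set())            # tracks sharing a bucket
    return candidates
```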
```python
bands = [4, 6, 8]
```
```python
# generate directories
for i in bands:
path = f'{i}_bands'
os.makedirs(path)
```
#### bands = 4
```python
candidates_4 = functions.lsh(bands[0], signature_query, signature_matrix)
```
defaultdict(None, {0: [{2}, {2, 770}, {1210, 2, 394}, {2, 42}], 1: [{976}, {976}, {976}, {976, 310}], 2: [{1367}, {1056, 779, 332, 822, 1367}, {1367}, {1367}], 3: [{125}, {125, 1151}, {125, 510, 79}, {125, 1351}], 4: [{624, 1024, 1362, 1318}, {1024, 629, 543}, {1024}, {1024}], 5: [{837, 669, 1109}, {1332, 669, 287}, {914, 669}, {752, 650, 669}], 6: [{456}, {456, 1295}, {456, 1072}, {456, 1043}], 7: [{561, 1409}, {561}, {561, 572, 61, 390}, {561}], 8: [{396, 533}, {488, 396}, {396}, {793, 396}], 9: [{1166}, {1166}, {1166, 382, 895}, {1313, 1166}]})
```python
candidates_4
```
[[2, 770, 42, 1210, 394],
[976, 310],
[1056, 822, 1367, 779, 332],
[79, 1351, 125, 510, 1151],
[624, 1024, 1362, 629, 1318, 543],
[837, 650, 752, 914, 1332, 1109, 669, 287],
[1072, 1043, 456, 1295],
[561, 1409, 390, 572, 61],
[533, 488, 793, 396],
[1313, 1166, 382, 895]]
```python
# threshold = 0.2
functions.find_matches(signature_matrix, signature_query, candidates_4, tracks_titles, 0.2)
```
The [1m1st[0m track you are looking for could be [1mDream On, sung by Aerosmith.[0m Jaccard similarity is equal to 1.0.
The [1m2nd[0m track you are looking for could be [1mPenny Lane, sung by Beatles.[0m Jaccard similarity is equal to 0.2.
The [1m2nd[0m track you are looking for could be [1mI Want To Break Free, sung by Queen.[0m Jaccard similarity is equal to 1.0.
The [1m2nd[0m track you are looking for could be [1mShout, sung by Depeche Mode.[0m Jaccard similarity is equal to 0.3.
The [1m3th[0m track you are looking for could be [1mEverybody s Trying To Be My Baby, sung by Beatles.[0m Jaccard similarity is equal to 0.2.
The [1m3th[0m track you are looking for could be [1mOctober, sung by U2.[0m Jaccard similarity is equal to 1.0.
The [1m4th[0m track you are looking for could be [1mOb-La-Di Ob-La-Da, sung by Beatles.[0m Jaccard similarity is equal to 1.0.
The [1m5th[0m track you are looking for could be [1mKarma Police, sung by Radiohead.[0m Jaccard similarity is equal to 1.0.
The [1m6th[0m track you are looking for could be [1mHeartbreaker, sung by Led Zeppelin.[0m Jaccard similarity is equal to 1.0.
The [1m6th[0m track you are looking for could be [1mSlither, sung by Metallica.[0m Jaccard similarity is equal to 0.2.
The [1m7th[0m track you are looking for could be [1mGo Your Own Way, sung by Fleetwood Mac.[0m Jaccard similarity is equal to 1.0.
The [1m7th[0m track you are looking for could be [1mSave Me, sung by Queen.[0m Jaccard similarity is equal to 0.2.
The [1m8th[0m track you are looking for could be [1mAmerican Idiot, sung by Green Day.[0m Jaccard similarity is equal to 1.0.
The [1m9th[0m track you are looking for could be [1mSomebody, sung by Depeche Mode.[0m Jaccard similarity is equal to 1.0.
The [1m10th[0m track you are looking for could be [1mBlack Friday, sung by Steely Dan.[0m Jaccard similarity is equal to 1.0.
[[[2, 1.0]],
[[105, 0.2], [976, 1.0], [413, 0.3]],
[[96, 0.2], [1367, 1.0]],
[[125, 1.0]],
[[1024, 1.0]],
[[669, 1.0], [821, 0.2]],
[[456, 1.0], [969, 0.2]],
[[561, 1.0]],
[[396, 1.0]],
[[1166, 1.0]]]
```python
# threshold = 0.3
functions.find_matches(signature_matrix, signature_query, candidates_4, tracks_titles, 0.3)
```
The [1m1st[0m track you are looking for could be [1mDream On, sung by Aerosmith.[0m Jaccard similarity is equal to 1.0.
The [1m2nd[0m track you are looking for could be [1mI Want To Break Free, sung by Queen.[0m Jaccard similarity is equal to 1.0.
The [1m3th[0m track you are looking for could be [1mOctober, sung by U2.[0m Jaccard similarity is equal to 1.0.
The [1m4th[0m track you are looking for could be [1mOb-La-Di Ob-La-Da, sung by Beatles.[0m Jaccard similarity is equal to 1.0.
The [1m5th[0m track you are looking for could be [1mKarma Police, sung by Radiohead.[0m Jaccard similarity is equal to 1.0.
The [1m6th[0m track you are looking for could be [1mHeartbreaker, sung by Led Zeppelin.[0m Jaccard similarity is equal to 1.0.
The [1m7th[0m track you are looking for could be [1mGo Your Own Way, sung by Fleetwood Mac.[0m Jaccard similarity is equal to 1.0.
The [1m8th[0m track you are looking for could be [1mAmerican Idiot, sung by Green Day.[0m Jaccard similarity is equal to 1.0.
The [1m9th[0m track you are looking for could be [1mSomebody, sung by Depeche Mode.[0m Jaccard similarity is equal to 1.0.
The [1m10th[0m track you are looking for could be [1mBlack Friday, sung by Steely Dan.[0m Jaccard similarity is equal to 1.0.
[[[2, 1.0]],
[[976, 1.0]],
[[1367, 1.0]],
[[125, 1.0]],
[[1024, 1.0]],
[[669, 1.0]],
[[456, 1.0]],
[[561, 1.0]],
[[396, 1.0]],
[[1166, 1.0]]]
#### bands = 6
```python
candidates_6 = functions.lsh(bands[1], signature_query, signature_matrix)
```
defaultdict(None, {0: [{2}, {2, 659}, {1088, 2}, {2}, {2, 282, 1340}, {2}], 1: [{72, 771, 976}, {976}, {976, 1257, 527}, {664, 556, 976}, {976, 212}, {976, 1088, 182}], 2: [{1367}, {1367}, {203, 1367}, {112, 1367}, {726, 1367}, {1367}], 3: [{9, 125}, {125}, {125, 661}, {125}, {762, 125}, {125}], 4: [{1024, 137, 425}, {16, 1024}, {1024, 430, 1240}, {224, 33, 1024, 790, 1183}, {1024, 146}, {1024, 1359}], 5: [{669, 1118}, {669}, {1083, 669}, {841, 236, 669}, {669, 182, 375}, {669}], 6: [{456, 1248}, {456}, {456}, {456, 305}, {456}, {456, 641}], 7: [{561, 170}, {561, 691}, {561}, {561}, {561}, {561}], 8: [{396, 276}, {666, 523, 396, 45}, {396}, {396}, {416, 932, 396}, {396}], 9: [{1166}, {221, 1166, 847}, {429, 1166}, {1166}, {1085, 725, 1166}, {763, 1166}]})
```python
candidates_6
```
[[1088, 2, 659, 282, 1340],
[1088, 771, 72, 1257, 556, 527, 976, 212, 182, 664],
[112, 726, 1367, 203],
[661, 9, 762, 125],
[1024, 224, 33, 137, 425, 430, 1359, 16, 146, 790, 1240, 1183],
[182, 375, 841, 1083, 236, 669, 1118],
[1248, 305, 641, 456],
[561, 170, 691],
[416, 276, 932, 666, 523, 396, 45],
[1085, 725, 763, 429, 221, 1166, 847]]
#### bands = 8
```python
candidates_8 = functions.lsh(bands[2], signature_query, signature_matrix)
```
defaultdict(None, {0: [{2}, {2}, {2, 1391, 853, 410}, {2, 199}, {2}, {2, 118}, {2}, {2}], 1: [{976, 1153}, {976}, {976, 105, 413}, {976, 357, 975}, {976, 482}, {976}, {976}, {976}], 2: [{674, 1367}, {547, 1367}, {640, 109, 1367}, {1367}, {541, 1367, 671}, {1367}, {1367}, {96, 751, 1271, 253, 1367}], 3: [{125}, {125}, {508, 125, 1078}, {41, 125}, {125}, {125}, {955, 988, 125}, {125}], 4: [{1024, 1001}, {1024}, {1024, 1256, 427, 1005, 1328, 786, 764}, {1024, 1208}, {1024}, {1024, 1073}, {1024}, {1024}], 5: [{669}, {572, 669, 534}, {669}, {669}, {724, 669}, {821, 669, 926}, {628, 669}, {669}], 6: [{456, 914}, {456, 140, 492, 1261, 633, 470, 409, 1083}, {456, 969}, {456}, {456, 292}, {456, 457}, {456}, {456}], 7: [{561}, {472, 561}, {561}, {561}, {385, 666, 561}, {561, 477}, {561}, {561, 811, 1053}], 8: [{396}, {396}, {396, 492, 422}, {396}, {33, 396}, {396}, {396}, {396}], 9: [{1166}, {1166}, {149, 1166}, {435, 1166, 1019}, {209, 1166}, {1166}, {1166}, {1166, 685, 1158}]})
```python
candidates_8
```
[[2, 853, 118, 199, 410, 1391],
[976, 1153, 482, 357, 105, 413, 975],
[640, 96, 674, 547, 109, 751, 1367, 253, 1271, 541, 671],
[1078, 988, 41, 955, 508, 125],
[1024, 1256, 1001, 427, 1005, 1328, 1073, 786, 1208, 764],
[724, 821, 534, 628, 572, 669, 926],
[292, 456, 969, 409, 457, 140, 492, 1261, 914, 470, 633, 1083],
[561, 385, 1053, 472, 666, 811, 477],
[33, 422, 396, 492],
[209, 435, 149, 1158, 1019, 685, 1166]]
#### Masking matching
We check the results by performing, for each query signature, a boolean comparison with all the signatures (not only with the candidates).
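The similarity used here is assumed to be the fraction of hash positions on which two signatures agree, which estimates the Jaccard similarity of the underlying peak sets; a minimal sketch:

```python
import numpy as np

def signature_similarity_sketch(sig_a, sig_b):
    # fraction of minhash positions where the two signatures agree
    return float(np.mean(sig_a == sig_b))

# example: compare the first query against every track signature
sims = [signature_similarity_sketch(signature_query[:, 0], signature_matrix[:, t])
        for t in range(signature_matrix.shape[1])]
```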
```python
threshold_0 = 0.5
matches_0 = functions.find_similarities(signature_matrix, signature_query, tracks_titles, threshold_0)
```
The [1m1st[0m track you are looking for could be [1mDream On, sung by Aerosmith.[0m Jaccard similarity is equal to 1.0.
The [1m2nd[0m track you are looking for could be [1mI Want To Break Free, sung by Queen.[0m Jaccard similarity is equal to 1.0.
The [1m3th[0m track you are looking for could be [1mOctober, sung by U2.[0m Jaccard similarity is equal to 1.0.
The [1m4th[0m track you are looking for could be [1mOb-La-Di Ob-La-Da, sung by Beatles.[0m Jaccard similarity is equal to 1.0.
The [1m5th[0m track you are looking for could be [1mKarma Police, sung by Radiohead.[0m Jaccard similarity is equal to 1.0.
The [1m6th[0m track you are looking for could be [1mHeartbreaker, sung by Led Zeppelin.[0m Jaccard similarity is equal to 1.0.
The [1m7th[0m track you are looking for could be [1mGo Your Own Way, sung by Fleetwood Mac.[0m Jaccard similarity is equal to 1.0.
The [1m8th[0m track you are looking for could be [1mAmerican Idiot, sung by Green Day.[0m Jaccard similarity is equal to 1.0.
The [1m9th[0m track you are looking for could be [1mSomebody, sung by Depeche Mode.[0m Jaccard similarity is equal to 1.0.
The [1m10th[0m track you are looking for could be [1mBlack Friday, sung by Steely Dan.[0m Jaccard similarity is equal to 1.0.
```python
threshold_1 = 0.25
matches_1 = functions.find_similarities(signature_matrix, signature_query, tracks_titles, threshold_1)
```
The [1m1st[0m track you are looking for could be [1mDream On, sung by Aerosmith.[0m Jaccard similarity is equal to 1.0.
The [1m1st[0m track you are looking for could be [1mKyoto Song, sung by Cure.[0m Jaccard similarity is equal to 0.4.
The [1m1st[0m track you are looking for could be [1mCry Freedom, sung by Dave Matthews Band.[0m Jaccard similarity is equal to 0.3.
The [1m2nd[0m track you are looking for could be [1mShout, sung by Depeche Mode.[0m Jaccard similarity is equal to 0.3.
The [1m2nd[0m track you are looking for could be [1mI Want To Break Free, sung by Queen.[0m Jaccard similarity is equal to 1.0.
The [1m3th[0m track you are looking for could be [1mOctober, sung by U2.[0m Jaccard similarity is equal to 1.0.
The [1m4th[0m track you are looking for could be [1mOb-La-Di Ob-La-Da, sung by Beatles.[0m Jaccard similarity is equal to 1.0.
The [1m4th[0m track you are looking for could be [1mSail Away Sweet Sister, sung by Queen.[0m Jaccard similarity is equal to 0.3.
The [1m5th[0m track you are looking for could be [1mKarma Police, sung by Radiohead.[0m Jaccard similarity is equal to 1.0.
The [1m6th[0m track you are looking for could be [1mCross-Tie Walker, sung by Creedence Clearwater Revival.[0m Jaccard similarity is equal to 0.3.
The [1m6th[0m track you are looking for could be [1mHeartbreaker, sung by Led Zeppelin.[0m Jaccard similarity is equal to 1.0.
The [1m7th[0m track you are looking for could be [1mGo Your Own Way, sung by Fleetwood Mac.[0m Jaccard similarity is equal to 1.0.
The [1m7th[0m track you are looking for could be [1mFun It, sung by Queen.[0m Jaccard similarity is equal to 0.3.
The [1m8th[0m track you are looking for could be [1mAmerican Idiot, sung by Green Day.[0m Jaccard similarity is equal to 1.0.
The [1m9th[0m track you are looking for could be [1mSomebody, sung by Depeche Mode.[0m Jaccard similarity is equal to 1.0.
The [1m10th[0m track you are looking for could be [1mHero Of The Day, sung by Metallica.[0m Jaccard similarity is equal to 0.3.
The [1m10th[0m track you are looking for could be [1mBlack Friday, sung by Steely Dan.[0m Jaccard similarity is equal to 1.0.
```python
matches_1
```
[[[2, 1.0], [224, 0.4], [302, 0.3]],
[[413, 0.3], [976, 1.0]],
[[1367, 1.0]],
[[125, 1.0], [967, 0.3]],
[[1024, 1.0]],
[[171, 0.3], [669, 1.0]],
[[456, 1.0], [928, 0.3]],
[[561, 1.0]],
[[396, 1.0]],
[[787, 0.3], [1166, 1.0]]]
## Q2.1
In this first part we collect the data and merge them into a single pandas DataFrame. The merge is performed on track_id.
```python
import os
import fun
import pandas as pd
import warnings
import numpy as np
from ex2function import kmean
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score
```
```python
# check whether the data have been downloaded; if not, download them
dir = 'data/csv/'
files = os.listdir(dir)
if (len(files) < 3) :
#download the data if there is not the csv
fun.downloadcsv()
files.remove("tr.txt")
# load the different csv files into pandas
df1 = pd.read_csv(dir+files[0])
df2 = pd.read_csv(dir+files[1])
df3 = pd.read_csv(dir+files[2])
```
```python
#merge the data
dataset = pd.merge(df1, df2, on='track_id', how='inner')
dataset = pd.merge(dataset, df3, on='track_id', how='inner')
print(dataset.shape, df1.shape, df2.shape, df3.shape)
```
(13129, 820) (13129, 250) (106574, 519) (106574, 53)
## Q2.2
In this part we apply a dimensionality reduction to the continuous variables, using Principal Component Analysis. First, however, we transform the non-numerical variables into discrete numerical variables. Note that we do not apply the dimensionality reduction to these columns.
```python
# transform the non-numerical variables
le = preprocessing.LabelEncoder()
dataset_all = dataset.copy()
for n in dataset_all.columns.values:
#columns with non-numeric values
if dataset_all[n].dtype == object :
#replace missing values by 'None'
dataset_all[n].fillna("None", inplace = True)
le = preprocessing.LabelEncoder()
dataset_all[n]= le.fit_transform(dataset_all[n])
elif dataset_all[n].dtype == float :
#replace missing values by 0.
dataset_all[n].fillna(0., inplace = True)
elif dataset_all[n].dtype == int:
#replace missing values by 0
dataset_all[n].fillna(0, inplace = True)
# keep only the numerical columns
dataset_Num = dataset[dataset.T[dataset.dtypes!=np.object].index]
```
In the following method we decide how many components to keep in order to retain > 70% of the total variance. This is done via a brute-force search.
```python
def reduc07(dataset):
for n in range(len(dataset)):
pca = PCA(n_components=n)
x = StandardScaler().fit_transform(dataset[dataset.columns.values])
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents)
if sum(pca.explained_variance_ratio_) < .7:
continue
return principalDf
```
```python
# find the components retaining 70% of the variance, using only the continuous values
principalDf = reduc07(dataset_all[dataset_Num.columns.values[1:]])
track_id = dataset_all["track_id"]
print(len(principalDf))
```
13129
```python
# merge the discrete values with the continuous values
dataset_all.drop(dataset_Num.columns.values, axis = 1, inplace = True)
x = StandardScaler().fit_transform(dataset_all[dataset_all.columns.values])
principalDf = pd.DataFrame(data = x)
dataset_all = pd.merge(dataset_all, principalDf, left_index=True, right_index=True)
dataset_all["track_id"] = track_id
# Note: every time we use k-means we remove the track_id column
```
## Q2.3
In this task we were asked to implement our own K-means; this has been done in the class kmean, imported above from <code>ex2function</code>.
Following this transformation of the data, we try to find the optimal number of clusters. For this purpose we use two different methods: first the Elbow Method, and then the Silhouette analysis.
```python
# find the number of cluster with Elbow Method
model = KMeans()
visualizer = KElbowVisualizer(model, k=(2,30), metric='distortion', timings=False)
visualizer.fit(dataset_all[dataset_all.columns.values[:-1]])
visualizer.show()
print("according to the Elbow Method the otimal number of k is",visualizer.elbow_value_)
```
```python
range_n_clusters = [i for i in range(2,30)]
silhouette_avg = []
for num_clusters in range_n_clusters:
# initialise kmeans
    kmeans = KMeans(n_clusters=num_clusters)
kmeans.fit(dataset_all[dataset_all.columns.values[:-1]])
cluster_labels = kmeans.labels_
# silhouette score
silhouette_avg.append(silhouette_score(dataset_all[dataset_all.columns.values[:-1]], cluster_labels))
plt.plot(range_n_clusters,silhouette_avg,'bx-')
plt.show()
```
It is difficult to draw a single conclusion from these two analyses: the Elbow Method seems to indicate k = 7, whereas the silhouette analysis indicates that the number of clusters does not have an important impact on the result. This would mean that, depending on our purpose, we could select the number of clusters; however, it could also mean that the k-means approach does not suit this dataset. <br /> <br />
In the next point we will run the k-means we implemented and compare it to the KMeans++ of sklearn; we will take k = 7.
```python
k = 7
# our method
km = kmean(dataset_all[dataset_all.columns.values[:-1]])
cluster1 = km.fit(k)
# kmean ++
ourmodel = KMeans(n_clusters=k)
ourmodel.fit(dataset_all[dataset_all.columns.values[:-1]])
cluster2 = ourmodel.predict(dataset_all[dataset_all.columns.values[:-1]])
```
Now that we have the clusters found by two different methods, we check whether they are similar: the following code takes each cluster of our k-means and finds the cluster of KMeans++ to which it is most similar.
```python
onehot_encoder = OneHotEncoder(sparse=False)
c1 = onehot_encoder.fit_transform(cluster1.reshape(-1,1))
c2 = onehot_encoder.fit_transform(cluster2.reshape(-1,1))
for i in range(k):
y_one = c1[:,i]
max = -1
m = [0, 0]
for y in range(k):
y_two = c2[:,y]
ac = np.round(accuracy_score(y_one, y_two),3)
if ac > max:
max = ac
m = [i, y]
print('cluster',m[0],'map to cluster',m[1],'with an accuracy of',max)
```
cluster 0 map to cluster 6 with an accuracy of 0.997
cluster 1 map to cluster 3 with an accuracy of 0.996
cluster 2 map to cluster 1 with an accuracy of 0.999
cluster 3 map to cluster 2 with an accuracy of 0.999
cluster 4 map to cluster 5 with an accuracy of 0.999
cluster 5 map to cluster 4 with an accuracy of 0.998
cluster 6 map to cluster 0 with an accuracy of 0.994
As we can observe, every cluster has found an equivalent cluster in the other k-means, even if they are never exactly identical. This is to be expected, since we work with a large number of clusters.
## Q2.4
In this last part we apply the k-means to the dataset and relate the result to some variables.<br />
First we create tables where the different clusters are shown against the variables. <br />
It is important to know that we take the same number of points for each value of a variable, with the purpose of making the comparison clear. Furthermore, we select only the 7 most frequent values (the same number as k) for each variable.
```python
# The 5 variables
v1 = "artist_location"
v2 = "track_genre_top"
v3 = "track_language_code"
v4 = "album_type"
v5 = "track_publisher"
variaibles = [v1, v2, v3, v4, v5]
```
```python
# creation of the pivot table + Calculation of the percentage
pivot = list()
for v in variaibles:
value = dataset[v].value_counts()[:k].index.values
df = pd.DataFrame()
value = np.append(value, 'total')
df[v] = value
df.set_index(v, inplace= True)
ba = min(dataset[v].value_counts()[:k][-1],100)
for i in range(k):
df[i] = 0
for v1 in value:
ids = dataset[dataset[v] == v1]['track_id'].values
va = list()
bal = 0
for id in ids:
cu = km.predic(dataset_all[dataset_all['track_id'] == id][dataset_all.columns.values[:-1]])
df.loc[v1,cu[0]] += 1
if bal > ba:
break
bal += 1
for i in range(k):
df[i] = round((df[i]/df[i].sum())*100, 3)
df.loc['total',i] += df[i].sum()
pivot.append(df)
```
## Analysis
Important: in the following charts some data are marked as NaN; this means that no value has been assigned to that cluster.
```python
pivot[0]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>artist_location</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Brooklyn, NY</th>
<td>0.0</td>
<td>15.566</td>
<td>0.0</td>
<td>0.0</td>
<td>NaN</td>
<td>16.972</td>
<td>13.502</td>
</tr>
<tr>
<th>New York, NY</th>
<td>0.0</td>
<td>14.151</td>
<td>0.0</td>
<td>0.0</td>
<td>NaN</td>
<td>15.596</td>
<td>16.034</td>
</tr>
<tr>
<th>San Francisco, CA</th>
<td>0.0</td>
<td>15.566</td>
<td>0.0</td>
<td>0.0</td>
<td>NaN</td>
<td>11.009</td>
<td>18.987</td>
</tr>
<tr>
<th>Chicago, IL</th>
<td>0.0</td>
<td>12.736</td>
<td>0.0</td>
<td>0.0</td>
<td>NaN</td>
<td>12.385</td>
<td>20.253</td>
</tr>
<tr>
<th>Italy</th>
<td>100.0</td>
<td>11.321</td>
<td>100.0</td>
<td>100.0</td>
<td>NaN</td>
<td>12.385</td>
<td>1.688</td>
</tr>
<tr>
<th>Baltimore, MD</th>
<td>0.0</td>
<td>16.509</td>
<td>0.0</td>
<td>0.0</td>
<td>NaN</td>
<td>18.349</td>
<td>11.392</td>
</tr>
<tr>
<th>Providence, RI</th>
<td>0.0</td>
<td>14.151</td>
<td>0.0</td>
<td>0.0</td>
<td>NaN</td>
<td>13.303</td>
<td>18.143</td>
</tr>
<tr>
<th>total</th>
<td>100.0</td>
<td>100.000</td>
<td>100.0</td>
<td>100.0</td>
<td>NaN</td>
<td>99.999</td>
<td>99.999</td>
</tr>
</tbody>
</table>
</div>
As we can see in this table, the K-means has perfectly identified clusters 0, 2 and 3 as belonging to Italy; for the rest of the data there is no outstanding identification.
```python
pivot[1]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>track_genre_top</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Rock</th>
<td>0.000</td>
<td>11.923</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>17.062</td>
<td>15.556</td>
</tr>
<tr>
<th>Electronic</th>
<td>0.000</td>
<td>14.231</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>14.692</td>
<td>15.111</td>
</tr>
<tr>
<th>Hip-Hop</th>
<td>0.000</td>
<td>13.077</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>17.062</td>
<td>14.222</td>
</tr>
<tr>
<th>Folk</th>
<td>0.000</td>
<td>12.692</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>13.270</td>
<td>18.222</td>
</tr>
<tr>
<th>Old-Time / Historic</th>
<td>83.333</td>
<td>13.462</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>10.900</td>
<td>12.889</td>
</tr>
<tr>
<th>Pop</th>
<td>0.000</td>
<td>14.615</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>13.744</td>
<td>15.556</td>
</tr>
<tr>
<th>Classical</th>
<td>16.667</td>
<td>20.000</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>13.270</td>
<td>8.444</td>
</tr>
<tr>
<th>total</th>
<td>100.000</td>
<td>100.000</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>100.000</td>
<td>100.000</td>
</tr>
</tbody>
</table>
</div>
As we can see in this data, the Old-Time/Historic track genre has been identified and assigned to cluster 0. An important aspect to consider is that none of the genres has been put in clusters 2, 3 and 4; this could indicate that clusters 2, 3 and 4 are far away from the parameters that have an impact on the genre. It could be interesting to investigate further which parameters have a significant impact on the genre. (Question 2.4.6)
```python
pivot[2]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>track_language_code</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>en</th>
<td>0.000</td>
<td>15.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>11.111</td>
<td>31.818</td>
</tr>
<tr>
<th>es</th>
<td>0.000</td>
<td>15.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>14.815</td>
<td>27.273</td>
</tr>
<tr>
<th>pt</th>
<td>44.444</td>
<td>0.0</td>
<td>75.0</td>
<td>0.0</td>
<td>0.0</td>
<td>14.815</td>
<td>9.091</td>
</tr>
<tr>
<th>tr</th>
<td>0.000</td>
<td>20.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>18.519</td>
<td>18.182</td>
</tr>
<tr>
<th>fr</th>
<td>22.222</td>
<td>35.0</td>
<td>25.0</td>
<td>0.0</td>
<td>20.0</td>
<td>3.704</td>
<td>4.545</td>
</tr>
<tr>
<th>it</th>
<td>33.333</td>
<td>15.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>18.519</td>
<td>9.091</td>
</tr>
<tr>
<th>ru</th>
<td>0.000</td>
<td>0.0</td>
<td>0.0</td>
<td>100.0</td>
<td>80.0</td>
<td>18.519</td>
<td>0.000</td>
</tr>
<tr>
<th>total</th>
<td>99.999</td>
<td>100.0</td>
<td>100.0</td>
<td>100.0</td>
<td>100.0</td>
<td>100.002</td>
<td>100.000</td>
</tr>
</tbody>
</table>
</div>
These results are more interesting, as more clusters have been identified. We could first say that the pt language has most likely been identified in cluster 2 (note that cluster 0 could also be an option), but the most outstanding result comes from ru, which has been identified in cluster 3 and most likely in cluster 4.
```python
pivot[3]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>album_type</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Album</th>
<td>0.0</td>
<td>35.714</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>17.647</td>
<td>24.242</td>
</tr>
<tr>
<th>Radio Program</th>
<td>0.0</td>
<td>21.429</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>29.412</td>
<td>24.242</td>
</tr>
<tr>
<th>Live Performance</th>
<td>0.0</td>
<td>21.429</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>41.176</td>
<td>18.182</td>
</tr>
<tr>
<th>Single Tracks</th>
<td>0.0</td>
<td>21.429</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>11.765</td>
<td>33.333</td>
</tr>
<tr>
<th>Contest</th>
<td>100.0</td>
<td>0.000</td>
<td>100.0</td>
<td>100.0</td>
<td>100.0</td>
<td>0.000</td>
<td>0.000</td>
</tr>
<tr>
<th>total</th>
<td>100.0</td>
<td>100.001</td>
<td>100.0</td>
<td>100.0</td>
<td>100.0</td>
<td>100.000</td>
<td>99.999</td>
</tr>
</tbody>
</table>
</div>
As we can see, the Contest album type seems to belong to clusters 0, 2, 3 and 4. On top of that, the Live Performance type seems to be identified in cluster 5.
```python
pivot[4]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>track_publisher</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Victrola Dog (ASCAP)</th>
<td>0.0</td>
<td>0.0</td>
<td>100.0</td>
<td>25.0</td>
<td>57.143</td>
<td>0.000</td>
<td>0.000</td>
</tr>
<tr>
<th>Cherry Red Music (UK)</th>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.000</td>
<td>54.545</td>
<td>0.000</td>
</tr>
<tr>
<th>Edison</th>
<td>50.0</td>
<td>40.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.000</td>
<td>9.091</td>
<td>0.000</td>
</tr>
<tr>
<th>WFMU</th>
<td>0.0</td>
<td>20.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.000</td>
<td>0.000</td>
<td>83.333</td>
</tr>
<tr>
<th>Allister Thompson</th>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
<td>75.0</td>
<td>42.857</td>
<td>0.000</td>
<td>0.000</td>
</tr>
<tr>
<th>Maximum R&amp;D (ASCAP)</th>
<td>50.0</td>
<td>20.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.000</td>
<td>18.182</td>
<td>0.000</td>
</tr>
<tr>
<th>Zen Schlubbo Music, BMI</th>
<td>0.0</td>
<td>20.0</td>
<td>0.0</td>
<td>0.0</td>
<td>0.000</td>
<td>18.182</td>
<td>16.667</td>
</tr>
<tr>
<th>total</th>
<td>100.0</td>
<td>100.0</td>
<td>100.0</td>
<td>100.0</td>
<td>100.000</td>
<td>100.000</td>
<td>100.000</td>
</tr>
</tbody>
</table>
</div>
From this chart we could say that WFMU seems to belong to cluster 6, Allister Thompson to cluster 3, and Cherry Red Music (UK) to cluster 5. <br />
<br />
During this analysis of the charts, various correlations have been identified; this does not mean that there is causation.
## Q2.4.7
First we generate all the data again.
```python
#merge the data
dataset = df2
dataset = pd.merge(dataset, df3, on='track_id', how='inner')
# transform the non-numerical variables
le = preprocessing.LabelEncoder()
dataset_all = dataset.copy()
for n in dataset_all.columns.values:
#columns with non-numeric values
if dataset_all[n].dtype == object :
#replace missing values by 'None'
dataset_all[n].fillna("None", inplace = True)
le = preprocessing.LabelEncoder()
dataset_all[n]= le.fit_transform(dataset_all[n])
elif dataset_all[n].dtype == float :
#replace missing values by 0.
dataset_all[n].fillna(0., inplace = True)
elif dataset_all[n].dtype == int:
#replace missing values by 0
dataset_all[n].fillna(0, inplace = True)
# keep only the numerical columns
dataset_Num = dataset[dataset.T[dataset.dtypes!=np.object].index]
# find .7 without the discret value
principalDf = reduc07(dataset_all[dataset_Num.columns.values[1:]])
track_id = dataset_all["track_id"]
print(len(principalDf))
#merge the discret value with the coninuous value
dataset_all.drop(dataset_Num.columns.values, axis = 1, inplace = True)
x = StandardScaler().fit_transform(dataset_all[dataset_all.columns.values])
principalDf = pd.DataFrame(data = x)
dataset_all = pd.merge(dataset_all, principalDf, left_index=True, right_index=True)
dataset_all["track_id"] = track_id
# Note that every time that we will use k mean we will remove the track_id
```
106574
```python
#KMeans++
ourmodel = KMeans(n_clusters=k)
ourmodel.fit(dataset_all[dataset_all.columns.values[:-1]])
# creation of the pivot table + Calculation of the percentage
pivot = list()
for v in variaibles:
value = dataset[v].value_counts()[:k].index.values
df = pd.DataFrame()
value = np.append(value, 'total')
df[v] = value
df.set_index(v, inplace= True)
ba = min(dataset[v].value_counts()[:k][-1],100)
for i in range(k):
df[i] = 0
for v1 in value:
ids = dataset[dataset[v] == v1]['track_id'].values
va = list()
bal = 0
for id in ids:
cu = ourmodel.predict(dataset_all[dataset_all['track_id'] == id][dataset_all.columns.values[:-1]])
df.loc[v1,cu[0]] += 1
if bal > ba:
break
bal += 1
for i in range(k):
df[i] = round((df[i]/df[i].sum())*100, 3)
df.loc['total',i] += df[i].sum()
pivot.append(df)
```
```python
pivot[0]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>artist_location</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Brooklyn, NY</th>
<td>0.0</td>
<td>18.713</td>
<td>16.477</td>
<td>15.356</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>France</th>
<td>0.0</td>
<td>12.865</td>
<td>23.864</td>
<td>14.232</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>New York, NY</th>
<td>0.0</td>
<td>14.620</td>
<td>17.045</td>
<td>17.603</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>Chicago, IL</th>
<td>0.0</td>
<td>14.035</td>
<td>14.773</td>
<td>19.476</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>Perm, Russia</th>
<td>100.0</td>
<td>1.170</td>
<td>0.000</td>
<td>0.000</td>
<td>100.0</td>
<td>100.0</td>
<td>100.0</td>
</tr>
<tr>
<th>Portland, OR</th>
<td>0.0</td>
<td>15.205</td>
<td>15.341</td>
<td>18.352</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>Italy</th>
<td>0.0</td>
<td>23.392</td>
<td>12.500</td>
<td>14.981</td>
<td>0.0</td>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<th>total</th>
<td>100.0</td>
<td>100.000</td>
<td>100.000</td>
<td>100.000</td>
<td>100.0</td>
<td>100.0</td>
<td>100.0</td>
</tr>
</tbody>
</table>
</div>
As we can observe, there is a difference in comparison with our previous result: Italy is no longer the location that is clearly identified, but Perm, Russia is.
```python
pivot[1]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>track_genre_top</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Rock</th>
<td>NaN</td>
<td>15.084</td>
<td>14.163</td>
<td>13.907</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Experimental</th>
<td>NaN</td>
<td>15.084</td>
<td>21.888</td>
<td>7.947</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Electronic</th>
<td>NaN</td>
<td>15.084</td>
<td>9.442</td>
<td>17.550</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Hip-Hop</th>
<td>NaN</td>
<td>15.084</td>
<td>14.163</td>
<td>13.907</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Folk</th>
<td>NaN</td>
<td>13.966</td>
<td>12.876</td>
<td>15.563</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Pop</th>
<td>NaN</td>
<td>11.173</td>
<td>12.876</td>
<td>17.219</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Instrumental</th>
<td>NaN</td>
<td>14.525</td>
<td>14.592</td>
<td>13.907</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>total</th>
<td>NaN</td>
<td>100.000</td>
<td>100.000</td>
<td>100.000</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
There is also a significant change in this table, as there is no longer a clear cluster.
```python
pivot[2]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>track_language_code</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>en</th>
<td>0.000</td>
<td>8.163</td>
<td>24.390</td>
<td>21.333</td>
<td>0.000</td>
<td>0.0</td>
<td>NaN</td>
</tr>
<tr>
<th>es</th>
<td>0.000</td>
<td>26.531</td>
<td>9.756</td>
<td>17.333</td>
<td>0.000</td>
<td>0.0</td>
<td>NaN</td>
</tr>
<tr>
<th>fr</th>
<td>0.000</td>
<td>8.163</td>
<td>29.268</td>
<td>18.667</td>
<td>0.000</td>
<td>0.0</td>
<td>NaN</td>
</tr>
<tr>
<th>pt</th>
<td>0.000</td>
<td>32.653</td>
<td>2.439</td>
<td>17.333</td>
<td>0.000</td>
<td>0.0</td>
<td>NaN</td>
</tr>
<tr>
<th>de</th>
<td>20.833</td>
<td>0.000</td>
<td>7.317</td>
<td>8.000</td>
<td>86.667</td>
<td>75.0</td>
<td>NaN</td>
</tr>
<tr>
<th>ru</th>
<td>50.000</td>
<td>12.245</td>
<td>9.756</td>
<td>6.667</td>
<td>13.333</td>
<td>25.0</td>
<td>NaN</td>
</tr>
<tr>
<th>it</th>
<td>29.167</td>
<td>12.245</td>
<td>17.073</td>
<td>10.667</td>
<td>0.000</td>
<td>0.0</td>
<td>NaN</td>
</tr>
<tr>
<th>total</th>
<td>100.000</td>
<td>100.000</td>
<td>99.999</td>
<td>100.000</td>
<td>100.000</td>
<td>100.0</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
The change has also happened in this chart, as the only language that is clearly identified is de.
```python
pivot[3]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>album_type</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Album</th>
<td>NaN</td>
<td>22.222</td>
<td>25.926</td>
<td>16.667</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Radio Program</th>
<td>NaN</td>
<td>44.444</td>
<td>7.407</td>
<td>23.810</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Live Performance</th>
<td>NaN</td>
<td>0.000</td>
<td>51.852</td>
<td>4.762</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Single Tracks</th>
<td>NaN</td>
<td>22.222</td>
<td>7.407</td>
<td>28.571</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>Contest</th>
<td>NaN</td>
<td>11.111</td>
<td>7.407</td>
<td>26.190</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>total</th>
<td>NaN</td>
<td>99.999</td>
<td>99.999</td>
<td>100.000</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
</div>
As in the previous charts, there are some changes. However, we are able to find again a cluster that we had previously: Live Performance seems to be identified in cluster 2.
```python
pivot[4]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
</tr>
<tr>
<th>track_publisher</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>Victrola Dog (ASCAP)</th>
<td>24.324</td>
<td>27.273</td>
<td>0.000</td>
<td>0.000</td>
<td>0.0</td>
<td>18.182</td>
<td>0.0</td>
</tr>
<tr>
<th>You've Been a Wonderful Laugh Track (ASCAP)</th>
<td>31.081</td>
<td>4.545</td>
<td>12.121</td>
<td>19.231</td>
<td>0.0</td>
<td>0.000</td>
<td>0.0</td>
</tr>
<tr>
<th>Section 27</th>
<td>8.108</td>
<td>22.727</td>
<td>30.303</td>
<td>30.769</td>
<td>0.0</td>
<td>0.000</td>
<td>0.0</td>
</tr>
<tr>
<th>www.headphonica.com</th>
<td>0.000</td>
<td>22.727</td>
<td>33.333</td>
<td>50.000</td>
<td>0.0</td>
<td>0.000</td>
<td>0.0</td>
</tr>
<tr>
<th>Studio 11</th>
<td>17.568</td>
<td>0.000</td>
<td>0.000</td>
<td>0.000</td>
<td>45.0</td>
<td>54.545</td>
<td>0.0</td>
</tr>
<tr>
<th>Mitoma Industries</th>
<td>18.919</td>
<td>22.727</td>
<td>24.242</td>
<td>0.000</td>
<td>0.0</td>
<td>0.000</td>
<td>0.0</td>
</tr>
<tr>
<th>Toucan Music</th>
<td>0.000</td>
<td>0.000</td>
<td>0.000</td>
<td>0.000</td>
<td>55.0</td>
<td>27.273</td>
<td>100.0</td>
</tr>
<tr>
<th>total</th>
<td>100.000</td>
<td>99.999</td>
<td>99.999</td>
<td>100.000</td>
<td>100.0</td>
<td>100.000</td>
<td>100.0</td>
</tr>
</tbody>
</table>
</div>
This chart is the one that has changed the most: in our previous version we had three clear clusters. Now we could say that Toucan Music seems to be in cluster 4, but with less confidence than previously.
If you could choose, would you rather collect more observations (with fewer features) or fewer observations (with more features) based on the previous analyses? <br />
I would collect more observations. Having a large dataset will be more challenging, as we will most likely have to clean the data. However, such a dataset will allow us to explore more possibilities. At the same time, this kind of dataset reduces problems such as overfitting. Overall, a small dataset is always easier to use, but a large dataset, if properly cleaned and explored, will be more general and will contain more valuable observations.
## Task 3
```python
## NAIVE SOLUTION: a naive approach to this problem would be to loop through each number and then loop again through the array
# looking for a pair that sums to S. The running time of that solution would be O(n^2),
# because in the worst case we compare every element against every other element.
## FASTER SOLUTION: we can write a faster algorithm that finds pairs summing to S in linear time.
# The algorithm keeps a dictionary mapping each needed complement (S - x) to the value x already seen.
import time
import itertools
```
```python
# Getting the inputs from the user
my_list = []
target_sum = int(input('Please enter the summation target: '))
list_len = int(input('Enter the length of the desired list of integers: '))
print('Please enter the desired list of integers: ')
for inp in range(list_len):
my_list.append(int(input('')))
unique_list = set(my_list)
# For measuring the run-time
# start_time = time.time()
# Iterating between the elements of the list and calculating the sum of each pair
for n in itertools.combinations(unique_list, 2):
if n[0] + n[1] == target_sum:
print ([n[0], n[1]])
# Uncomment to print out the run-time
# print(f"Running time is: {time.time() - start_time}")
```
```python
# Now solving the problem with O(n) time complexity
unique_list = list(unique_list)
my_dict = {}
# Uncomment for measuring the run-time
# start_time = time.time()
for i in unique_list:
if i in my_dict.keys():
print (f"({i}, {my_dict[i]})")
key = target_sum - i
my_dict[key] = i
# Uncomment to print out the run-time
# print(f"Running time is: {time.time() - start_time}")
```
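A quick way to sanity-check the dictionary idea is to wrap it in a small helper and run it on a hard-coded list instead of interactive input (the function name and the sample values below are hypothetical, purely for illustration):
```python
# Illustrative, self-contained version of the O(n) pair search above.
def pairs_with_sum(numbers, target):
    seen = {}          # maps the needed complement (target - value) -> value already seen
    found = []
    for value in set(numbers):
        if value in seen:
            found.append((value, seen[value]))
        seen[target - value] = value
    return found

print(pairs_with_sum([1, 4, 5, 7, 9, 2], 9))  # e.g. [(5, 4), (7, 2)] (pair order may vary)
```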
|
% !TEX root = main.tex
%----------------------------------------------------------------------
\section{The method of moments}
%----------------------------------------------------------------------
Perhaps the simplest estimators are based on the \emph{method of moments}.
\bigskip
Let $X$ be a random variable, let $f(x;\mathbf{\theta})$ be its PMF/PDF and consider its $k$th moment:
\[\begin{array}{lll}
\expe(X^k) & = \displaystyle\sum_{i=1}^{\infty} x_i^k f(x_i;\mathbf{\theta}) \qquad & \text{(discrete case),} \\[3ex]
\expe(X^k) & = \displaystyle\int_{-\infty}^{\infty} x^k f(x;\mathbf{\theta})\,dx & \text{(continuous case)}.
\end{array}\]
Let $X_1,X_2,\ldots,X_n$ be a random sample from the distribution of $X$. To estimate $k$ parameters $\mathbf{\theta} = (\theta_1,\theta_2,\ldots,\theta_k)$, we equate the first $k$ theoretical moments with the first $k$ empirical moments, then solve the resulting system of equations with respect to $\theta_1,\theta_2,\ldots,\theta_k$. The number of equations must equal the number of unknown parameters we wish to estimate; the resulting equations are known as the \emph{moment equations}:
\[
\expe(X) = \frac{1}{n}\sum_{i=1}^n X_i,
\quad
\expe(X^2) = \frac{1}{n}\sum_{i=1}^n X_i^2,
\quad\ldots,\quad
\expe(X^k) = \frac{1}{n}\sum_{i=1}^n X_i^k.
\]
%
%\ben
%\it % single parameter
%To estimate a scalar parameter $\theta$ using the \emph{method of moments}, we equate the first theoretical moment with the first empirical moment (i.e.\ the sample mean), then solve this with respect to $\theta$:
%\[
%\expe(X;\theta) = \frac{1}{n}\sum_{i=1}^n X_i.% \equiv \bar{X}.
%\]
%\it % two parameters
%To estimate two parameters $\mathbf{\theta} = (\theta_1,\theta_2)$, we equate the first two theoretical moments with the first two empirical moments, then solve this system of two equations with respect to $\theta_1$ and $\theta_2$:
%\[
%\expe(X;\mathbf{\theta}) = \frac{1}{n}\sum_{i=1}^n X_i
%\qquad\text{and}\qquad
%\expe(X^2;\mathbf{\theta}) = \frac{1}{n}\sum_{i=1}^n X_i^2,
%\]
%\it % k parameters
%To estimate $k$ parameters $\mathbf{\theta} = (\theta_1,\theta_2,\ldots,\theta_k)$, we equate the first $k$ theoretical moments with the first $k$ empirical moments, then solve the resulting system of equations with respect to $\theta_1,\theta_2,\ldots,\theta_k$. The number of equations must be equal to the number of unknown parameters we wish to estimate.
%%\it In fact, we can equate the theoretical and empirical expected values of \emph{any} functions of $X$:
%%\begin{align*}
%%\expe\big[g_1(X);\mathbf{\theta}\big] & = \frac{1}{n}\sum_{i=1}^n g_1(X_i), \\
%%\expe\big[g_2(X);\mathbf{\theta}\big] & = \frac{1}{n}\sum_{i=1}^n g_2(X_i), \quad\text{etc.}
%%\end{align*}
%\een
% example: uniform
\begin{example}
Let $X_1,X_2,\ldots,X_n$ be a random sample from the continuous $\text{Uniform}(0,\theta)$ distribution, where $\theta>0$ is unknown. Find an estimator of $\theta$ using the method of moments.
\begin{solution}
Let $X\sim\text{Uniform}(0,\theta)$. In this case, $\expe(X) = \theta/2$ so the first moment equation is
\[
\frac{1}{2}\theta = \frac{1}{n}\sum_{i=1}^n X_i.
\]
Solving for $\theta$ we obtain $\hat{\theta}_{\text{\scriptsize{MME}}} = \displaystyle\frac{2}{n}\sum_{i=1}^n X_i$.
\end{solution}
\end{example}
% example: normal
\begin{example}
Let $X_1,X_2,\ldots,X_n$ be a random sample from the $N(\mu,\sigma^2)$ distribution. Find estimators of $\mu$ and $\sigma^2$ using the method of moments.
\end{example}
\begin{solution}
Let $X\sim N(\mu,\sigma^2)$ and let $\theta=(\mu,\sigma^2)$ be the parameter vector. Equating the first and second moments with the empirical first and second moments,
\[
\expe(X) = \frac{1}{n}\sum_{i=1}^n X_i
\qquad\text{and}\qquad
\expe(X^2) = \frac{1}{n}\sum_{i=1}^n X_i^2.
\]
The theoretical moments are $\expe(X) = \mu$ and $\expe(X^2) = \sigma^2 + \mu^2$. Solving the resulting equations for $\mu$ and $\sigma^2$, we obtain %the following MMEs of $\mu$ and $\sigma^2$:
\[
\hat{\mu}_{\text{\scriptsize{MME}}} = \Xbar
\qquad\text{and}\qquad
\hat{\sigma}^2_{\text{\scriptsize{MME}}}
%= \frac{1}{n}\sum_{i=1}^n X_i^2 - \bar{X}^2
= \frac{1}{n}\sum_{i=1}^n (X_i-\bar{X})^2.
\]
%\begin{align*}
%\hat{\mu} & = \Xbar, \\
%\hat{\sigma}^2 & = \frac{1}{n}\sum_{i=1}^n X_i^2 - \bar{X}^2 = \frac{1}{n}\sum_{i=1}^n (X_i-\bar{X})^2.
%\end{align*}
\bit
\it The MME of $\mu$ is the \emph{sample mean}.
\it The MME of $\sigma^2$ is the \emph{empirical mean squared deviation from the sample mean}.
\eit
Note that the latter is \emph{not} the sample variance.
\end{solution}
%----------------------------------------------------------------------
\begin{exercise}
\begin{questions}
% MME of Bernoulli distribution
\question
Let $X_1,X_2,\ldots,X_n$ be a random sample from the $\text{Bernoulli}(\theta)$ distribution. Find an estimator of $\theta$ using the method of moments.
\begin{answer}
The first moment of the $\text{Bernoulli}(\theta)$ distribution is $\theta$.
Equating this to the first empirical moment (i.e.\ the sample mean),
\[
\hat{\theta}_{\text{\scriptsize{MME}}} = \frac{1}{n}\sum_{i=1}^n X_i.
\]
The MME of $\theta$ is thus the proportion of successes observed in $n$ trials.
\end{answer}
% MME of exponential distribution
\question
Let $X_1,X_2,\ldots,X_n$ be a random sample from the $\text{Exponential}(\lambda)$ distribution, where $\lambda>0$ is an unknown rate parameter. Find an estimator of $\lambda$ using the method of moments.
\begin{answer}
Let $X\sim\text{Exponential}(\lambda)$. Then $\expe(X) = 1/\lambda$, so the first moment equation is
\[
\frac{1}{\lambda} = \bar{X}.
\]
Solving for $\lambda$, we obtain $\hat{\lambda}_{\text{\scriptsize{MME}}} = \bar{X}^{-1}$.
\end{answer}
% MME of Poisson distribution
\question
Let $X_1,X_2,\ldots,X_n$ be a random sample from the $\text{Poisson}(\lambda)$ distribution, where $\lambda>0$ is unknown. Find an estimator of $\lambda$ using the method of moments.
\begin{answer}
Let $X\sim\text{Poisson}(\lambda)$. The expected value of $X$ is $\expe(X)=\lambda$, so the first moment equation is simply
\[
\lambda = \bar{X}.
\]
Solving for $\lambda$, we obtain $\hat{\lambda}_{\text{\scriptsize{MME}}} = \bar{X}$.
\end{answer}
\end{questions}
\end{exercise}
%----------------------------------------------------------------------
|
State Before: ι : Type u_1
inst✝ : Fintype ι
I J : Box ι
⊢ J ∈ splitCenter I ↔ ∃ s, Box.splitCenterBox I s = J State After: no goals Tactic: simp [splitCenter] |
module Mod_plcd_elmdir
contains
subroutine plcd_elmdir(a,e,elmat,elrhs)
!------------------------------------------------------------------------
! This routine modifies the element stiffness to impose the correct
! boundary conditions
!------------------------------------------------------------------------
use typre
use Mod_PLCD
use Mod_Element
use Mod_plcd_Stages
use Mod_plcdExacso
implicit none
class(PLCDProblem), target :: a
class(FiniteElement), intent(in) :: e
real(rp), intent(inout) :: elmat(:,:,:,:),elrhs(:,:)
real(rp) :: adiag, pressval, pressgrad(3)
integer(ip) :: inode,ipoin,idofn,iffix_aux
type(plcdExacso) :: Exacso
real(rp), pointer :: coord(:)
!Dirichlet boundary conditions
do inode=1,e%pnode
ipoin=e%lnods(inode)
do idofn=1,a%ndofbc
iffix_aux = a%cs%kfl_fixno(idofn,ipoin)
if(iffix_aux==1) then
adiag=elmat(idofn,inode,idofn,inode)
!Only if the Dirichlet columns are to be deleted
if (a%kfl_DeleteDirichletColumns) then
elrhs(:,1:e%pnode)=elrhs(:,1:e%pnode) &
- elmat(:,1:e%pnode,idofn,inode)*(a%cs%bvess(idofn,ipoin)*a%css%CurrentLoadFactor - a%Displacement(idofn,ipoin,1))
elmat(:,1:e%pnode,idofn,inode)=0.0_rp
endif
elrhs(idofn,inode)=adiag*(a%cs%bvess(idofn,ipoin)*a%css%CurrentLoadFactor - a%Displacement(idofn,ipoin,1))
elmat(idofn,inode,:,1:e%pnode)=0.0_rp
elmat(idofn,inode,idofn,inode)=adiag
end if
end do
end do
!FixPressure if required
if (a%UseUPFormulation .and. (a%kfl_confi == 1)) then
do inode = 1,e%pnode
ipoin=e%lnods(inode)
if (ipoin == a%nodpr) then
adiag=elmat(a%ndofbc+1,inode,a%ndofbc+1,inode)
if (a%kfl_DeleteDirichletColumns) then
elmat(:,1:e%pnode,a%ndofbc+1,inode)=0.0_rp
endif
elmat(a%ndofbc+1,inode,:,1:e%pnode)=0.0_rp
elrhs(a%ndofbc+1,inode) = 0.0_rp
elmat(a%ndofbc+1,inode,a%ndofbc+1,inode) = adiag
if (a%kfl_exacs > 0) then
call a%Mesh%GetPointCoord(ipoin,coord)
call Exacso%ComputeSolution(e%ndime,coord,a)
call Exacso%GetPressure(e%ndime,pressval,pressgrad)
elrhs(a%ndofbc+1,inode) = adiag*(pressval*a%css%CurrentLoadFactor - a%Pressure(ipoin,1))
endif
endif
enddo
endif
end subroutine plcd_elmdir
end module
|
open import Prelude
module Implicits.Substitutions.Lemmas.Type where
open import Implicits.Syntax.Type
open import Implicits.Substitutions
open import Data.Fin.Substitution
open import Data.Fin.Substitution.Lemmas
open import Data.Vec.Properties
open import Extensions.Substitution
open import Data.Star using (Star; ε; _◅_)
open import Data.Product hiding (map)
typeLemmas : TermLemmas Type
typeLemmas = record { termSubst = TypeSubst.typeSubst; app-var = refl ; /✶-↑✶ = Lemma./✶-↑✶ }
where
open TypeSubst
module Lemma {T₁ T₂} {lift₁ : Lift T₁ Type} {lift₂ : Lift T₂ Type} where
open Lifted lift₁ using () renaming (_↑✶_ to _↑✶₁_; _/✶_ to _/✶₁_)
open Lifted lift₂ using () renaming (_↑✶_ to _↑✶₂_; _/✶_ to _/✶₂_)
/✶-↑✶ : ∀ {m n} (σs₁ : Subs T₁ m n) (σs₂ : Subs T₂ m n) →
(∀ k x → (simpl (tvar x)) /✶₁ σs₁ ↑✶₁ k ≡ (simpl (tvar x)) /✶₂ σs₂ ↑✶₂ k) →
∀ k t → t /✶₁ σs₁ ↑✶₁ k ≡ t /✶₂ σs₂ ↑✶₂ k
/✶-↑✶ ρs₁ ρs₂ hyp k (simpl (tvar x)) = hyp k x
/✶-↑✶ ρs₁ ρs₂ hyp k (simpl (tc c)) = begin
(simpl $ tc c) /✶₁ ρs₁ ↑✶₁ k
≡⟨ TypeApp.tc-/✶-↑✶ _ k ρs₁ ⟩
(simpl $ tc c)
≡⟨ sym $ TypeApp.tc-/✶-↑✶ _ k ρs₂ ⟩
(simpl $ tc c) /✶₂ ρs₂ ↑✶₂ k ∎
/✶-↑✶ ρs₁ ρs₂ hyp k (simpl (a →' b)) = begin
(simpl $ a →' b) /✶₁ ρs₁ ↑✶₁ k
≡⟨ TypeApp.→'-/✶-↑✶ _ k ρs₁ ⟩
simpl ((a /✶₁ ρs₁ ↑✶₁ k) →' (b /✶₁ ρs₁ ↑✶₁ k))
≡⟨ cong₂ (λ a b → simpl (a →' b)) (/✶-↑✶ ρs₁ ρs₂ hyp k a) (/✶-↑✶ ρs₁ ρs₂ hyp k b) ⟩
simpl ((a /✶₂ ρs₂ ↑✶₂ k) →' (b /✶₂ ρs₂ ↑✶₂ k))
≡⟨ sym (TypeApp.→'-/✶-↑✶ _ k ρs₂) ⟩
(simpl $ a →' b) /✶₂ ρs₂ ↑✶₂ k
∎
/✶-↑✶ ρs₁ ρs₂ hyp k (a ⇒ b) = begin
(a ⇒ b) /✶₁ ρs₁ ↑✶₁ k
≡⟨ TypeApp.⇒-/✶-↑✶ _ k ρs₁ ⟩ --
(a /✶₁ ρs₁ ↑✶₁ k) ⇒ (b /✶₁ ρs₁ ↑✶₁ k)
≡⟨ cong₂ _⇒_ (/✶-↑✶ ρs₁ ρs₂ hyp k a) (/✶-↑✶ ρs₁ ρs₂ hyp k b) ⟩
(a /✶₂ ρs₂ ↑✶₂ k) ⇒ (b /✶₂ ρs₂ ↑✶₂ k)
≡⟨ sym (TypeApp.⇒-/✶-↑✶ _ k ρs₂) ⟩
(a ⇒ b) /✶₂ ρs₂ ↑✶₂ k
∎
/✶-↑✶ ρs₁ ρs₂ hyp k (∀' a) = begin
(∀' a) /✶₁ ρs₁ ↑✶₁ k
≡⟨ TypeApp.∀'-/✶-↑✶ _ k ρs₁ ⟩
∀' (a /✶₁ ρs₁ ↑✶₁ (suc k))
≡⟨ cong ∀' (/✶-↑✶ ρs₁ ρs₂ hyp (suc k) a) ⟩
∀' (a /✶₂ ρs₂ ↑✶₂ (suc k))
≡⟨ sym (TypeApp.∀'-/✶-↑✶ _ k ρs₂) ⟩
(∀' a) /✶₂ ρs₂ ↑✶₂ k
∎
open TermLemmas typeLemmas public hiding (var; id)
open AdditionalLemmas typeLemmas public
open TypeSubst using (module Lifted)
-- The above lemma /✶-↑✶ specialized to single substitutions
/-↑⋆ : ∀ {T₁ T₂} {lift₁ : Lift T₁ Type} {lift₂ : Lift T₂ Type} →
let open Lifted lift₁ using () renaming (_↑⋆_ to _↑⋆₁_; _/_ to _/₁_)
open Lifted lift₂ using () renaming (_↑⋆_ to _↑⋆₂_; _/_ to _/₂_)
in
∀ {n k} (ρ₁ : Sub T₁ n k) (ρ₂ : Sub T₂ n k) →
(∀ i x → (simpl (tvar x)) /₁ ρ₁ ↑⋆₁ i ≡ (simpl (tvar x)) /₂ ρ₂ ↑⋆₂ i) →
∀ i a → a /₁ ρ₁ ↑⋆₁ i ≡ a /₂ ρ₂ ↑⋆₂ i
/-↑⋆ ρ₁ ρ₂ hyp i a = /✶-↑✶ (ρ₁ ◅ ε) (ρ₂ ◅ ε) hyp i a
-- weakening a simple type gives a simple type
simpl-wk : ∀ {ν} k (τ : SimpleType (k + ν)) → ∃ λ τ' → (simpl τ) / wk ↑⋆ k ≡ simpl τ'
simpl-wk k (tc x) = , refl
simpl-wk k (tvar n) = , var-/-wk-↑⋆ k n
simpl-wk k (x →' x₁) = , refl
|
Benedetto <unk> (September 26, 1766) – Cardinal-Deacon of SS. Cosma e Damiano; prefect of the S.C. of Index
|
module Data.Maybe.Instance where
open import Class.Equality
open import Class.Monad
open import Data.Maybe
open import Data.Maybe.Properties
instance
Maybe-Monad : ∀ {a} -> Monad (Maybe {a})
Maybe-Monad = record { _>>=_ = λ x f → maybe f nothing x ; return = just }
Maybe-Eq : ∀ {A} {{_ : Eq A}} → Eq (Maybe A)
Maybe-Eq ⦃ record { _≟_ = _≟_ } ⦄ = record { _≟_ = ≡-dec _≟_ }
Maybe-EqB : ∀ {A} {{_ : Eq A}} → EqB (Maybe A)
Maybe-EqB = Eq→EqB
|
[STATEMENT]
lemma map_eq_replicate_imp_list_all_const:
"map f xs = replicate n x \<Longrightarrow> n = length xs \<Longrightarrow> list_all (\<lambda>y. f y = x) xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>map f xs = replicate n x; n = length xs\<rbrakk> \<Longrightarrow> list_all (\<lambda>y. f y = x) xs
[PROOF STEP]
by (induction xs arbitrary: n) simp_all |
%NOFRICTION return link object with zero friction
%
% LINK = NOFRICTION(LINK)
%
%
% Ryan Steindl based on Robotics Toolbox for MATLAB (v6 and v9)
%
% Copyright (C) 1993-2011, by Peter I. Corke
%
% This file is part of The Robotics Toolbox for MATLAB (RTB).
%
% RTB is free software: you can redistribute it and/or modify
% it under the terms of the GNU Lesser General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% RTB is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU Lesser General Public License for more details.
%
% You should have received a copy of the GNU Leser General Public License
% along with RTB. If not, see <http://www.gnu.org/licenses/>.
%
% http://www.petercorke.com
function l2 = nofriction(l, only)
%Link.nofriction Remove friction
%
% LN = L.nofriction() is a link object with the same parameters as L except
% nonlinear (Coulomb) friction parameter is zero.
%
% LN = L.nofriction('all') is a link object with the same parameters as L
% except all friction parameters are zero.
l2 = Link(l);
if (nargin == 2) && strcmpi(only(1:3), 'all')
l2.B = 0;
end
l2.Tc = [0 0];
end
|
# Defines various functions for "moving" around
# Pascal's triangle.
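# A rough usage sketch (hypothetical, assuming the Entry(n, k, val) constructor
# from this package and the mutating movers defined below):
#
#   e = Entry(4, 2, 6)   # represents C(4, 2) == 6
#   down!(e)             # now represents C(5, 2) == 10
#   right!(e)            # now represents C(5, 3) == 10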
_upval(a::Entry{<: Integer}) = a.val*(a.n - a.k)÷a.n
_upval(a::Entry{<: AbstractFloat}) = a.val*(a.n - a.k)/a.n
function up!(a::Entry)
if isatright(a) || isfirst(a)
throw(OutOfBoundsError("no entry above"))
end
a.val = _upval(a)
a.n -= 1
return a
end
up(a::Entry) = up!(Entry(a))
up!(r::Row) = prev!(r)
up(r::Row) = prev(r)
_downval(a::Entry{<: Integer}) = a.val*(a.n + 1)÷(a.n - a.k + 1)
_downval(a::Entry{<: AbstractFloat}) = a.val*(a.n + 1)/(a.n - a.k + 1)
function down!(a::Entry)
a.val = _downval(a)
a.n += 1
return a
end
down(a::Entry) = down!(Entry(a))
down!(r::Row) = next!(r)
down(r::Row) = next(r)
_leftval(a::Entry{<: Integer}) = a.val*a.k÷(a.n - a.k + 1)
_leftval(a::Entry{<: AbstractFloat}) = a.val*a.k/(a.n - a.k + 1)
function _left!(a)
a.val = _leftval(a)
a.k -= 1
return a
end
function left!(a::Entry)
isatleft(a) && throw(OutOfBoundsError("no entry to the left"))
return _left!(a)
end
left(a::Entry) = left!(Entry(a))
left!(c::Column) = prev!(c)
left(c::Column) = prev(c)
left!(c::LazyColumn) = prev!(c)
left(c::LazyColumn) = prev(c)
_rightval(a::Entry{<: Integer}) = a.val*(a.n - a.k)÷(a.k + 1)
_rightval(a::Entry{<: AbstractFloat}) = a.val*(a.n - a.k)/(a.k + 1)
function _right!(a::Entry)
a.val = _rightval(a)
a.k += 1
return a
end
function right!(a::Entry)
isatright(a) && throw(OutOfBoundsError("no entry to the right"))
return _right!(a)
end
right(a::Entry) = right!(Entry(a))
right!(c::Column) = next!(c)
right(c::Column) = next(c)
right!(c::LazyColumn) = next!(c)
right(c::LazyColumn) = next(c)
function prev!(a::Entry)
isfirst(a) && throw(OutOfBoundsError("no previous entry"))
if !isatleft(a)
return _left!(a)
else
a.n -= 1
a.k = a.n
a.val = one(a.val)
return a
end
end
prev(a::Entry) = prev!(Entry(a))
function prev!(r::Row)
isfirst(r) && throw(OutOfBoundsError("no previous row"))
r.rownum -= 1
datalength = numelements(r.rownum)
if datalength ≥ 1
r.data[1] -= r.rownum
for i ∈ 2:datalength
r.data[i] -= r.data[i-1]
end
end
return r
end
function prev(r::Row)
newrow = Row(r)
return prev!(newrow)
end
function prev!(c::Column)
isfirst(c) && throw(OutOfBoundsError("no previous column"))
c.colnum -= 1
for i ∈ length(c.data):-1:2
c.data[i] -= c.data[i-1]
end
return c
end
function prev(c::Column)
newcol = Column(c)
return prev!(newcol)
end
function prev!(c::LazyColumn)
isfirst(c) && throw(OutOfBoundsError("no previous column"))
c.colnum -= 1
for (r, val) ∈ c.data
c.data[r] = value(up!(Entry(r + c.colnum, r - 1, val)))
end
return c
end
function prev(c::LazyColumn)
newcol = LazyColumn(c)
return prev!(newcol)
end
function next!(a::Entry)
if !isatright(a)
return _right!(a)
else
a.n += 1
a.k = zero(a.k)
a.val = one(a.val)
return a
end
end
next(a::Entry) = next!(Entry(a))
function next!(r::Row)
datalength = numelements(r.rownum)
if isodd(r.rownum) && r.rownum ≥ 3
if r.rownum == 3
newdata = one(eltype(r.data))*6
else
newdata = r.data[datalength]
end
if length(r.data) > datalength
r.data[datalength+1] = newdata
else
push!(r.data, newdata)
end
datalength += 1
end
if datalength ≥ 1
for i ∈ datalength:-1:2
r.data[i] += r.data[i-1]
end
r.data[1] += r.rownum
end
r.rownum += 1
return r
end
function next(r::Row)
newrow = Row(r)
return next!(newrow)
end
function next!(c::Column)
c.colnum += 1
for i ∈ 2:length(c.data)
c.data[i] += c.data[i-1]
end
return c
end
function next(c::Column)
newcol = Column(c)
return next!(newcol)
end
function _downright!(e::Entry)
    if isatright(e)
        e.n += 1
        e.k += 1
        return e
    end
    # Pascal's rule: C(n+1, k+1) = C(n, k) + C(n, k+1).
    # Mutate e in place; `e += a` would only rebind the local variable,
    # so callers that ignore the return value would see a stale entry.
    a = right(e)
    e.val += a.val
    e.n += 1
    e.k += 1
    return e
end
function next!(c::LazyColumn)
for (r, val) ∈ c.data
e = Entry(r + c.colnum - 1, c.colnum, val)
_downright!(e)
c.data[r] = value(e)
end
c.colnum += 1
return c
end
function next(c::LazyColumn)
newcol = LazyColumn(c)
return next!(newcol)
end |
text_raw \<open>\subsection[Inverse Images]{Inverse Images\isalabel{subsec:inverse-morphism-image}}\<close>
theory InverseImageMorphismChoice
imports ParticularStructureMorphisms MorphismImage
begin
context particular_struct_morphism
begin
definition same_image (infix \<open>\<sim>\<close> 75) where
\<open>x \<sim> y \<equiv> x \<in> src.\<P> \<and> y \<in> src.\<P> \<and> \<phi> x = \<phi> y\<close>
lemma \<^marker>\<open>tag (proof) aponly\<close> same_image_I[intro!]:
assumes \<open>x \<in> src.\<P>\<close> \<open>y \<in> src.\<P>\<close> \<open>\<phi> x = \<phi> y\<close>
shows \<open>x \<sim> y\<close>
using assms by (auto simp: same_image_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> same_image_E[elim!]:
assumes \<open>x \<sim> y\<close>
obtains \<open>x \<in> src.\<P>\<close> \<open>y \<in> src.\<P>\<close> \<open>\<phi> x = \<phi> y\<close>
using assms by (auto simp: same_image_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> same_image_iff[simp]: \<open>x \<sim> y \<longleftrightarrow> x \<in> src.\<P> \<and> y \<in> src.\<P> \<and> \<phi> x = \<phi> y\<close>
by (auto simp: same_image_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> same_image_sym[sym]: \<open>x \<sim> y \<Longrightarrow> y \<sim> x\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> same_image_trans[trans]: \<open>\<lbrakk> x \<sim> y ; y \<sim> z \<rbrakk> \<Longrightarrow> x \<sim> z\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> same_image_refl[intro!]: \<open>x \<in> src.\<P> \<Longrightarrow> x \<sim> x\<close>
by auto
definition \<open>eq_class x \<equiv> { y . x \<sim> y }\<close>
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_I[intro!]: \<open>x \<sim> y \<Longrightarrow> y \<in> eq_class x\<close>
by (auto simp: eq_class_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_D[dest!]: \<open>y \<in> eq_class x \<Longrightarrow> x \<sim> y\<close>
by (auto simp: eq_class_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_swap[simp]: \<open>y \<in> eq_class x \<longleftrightarrow> x \<in> eq_class y\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_unique[simp]: \<open>\<lbrakk> x \<in> eq_class y ; x \<in> eq_class z \<rbrakk> \<Longrightarrow> eq_class x = eq_class z\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_subset_P[intro!]: \<open>eq_class x \<subseteq> src.\<P>\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_endurant_cases[cases set]:
obtains
(eq_class_subst) \<open>eq_class x \<subseteq> src.\<S>\<close>
| (eq_class_moment) \<open>eq_class x \<subseteq> src.\<M>\<close>
proof (cases \<open>x \<in> src.\<P>\<close>)
assume \<open>x \<in> src.\<P>\<close>
then consider (substantial) \<open>x \<in> src.\<S>\<close> | (moment) \<open>x \<in> src.\<M>\<close>
by blast
then show ?thesis
proof (cases)
case substantial
then show ?thesis
apply (intro that(1))
using morph_preserves_substantials \<open>x \<in> src.\<P>\<close>
by (metis eq_class_D particular_struct_morphism.same_image_E particular_struct_morphism_axioms subsetI)
next
case moment
then show ?thesis
apply (intro that(2))
using morph_preserves_moments \<open>x \<in> src.\<P>\<close>
by (metis eq_class_D morph_preserves_moments_simp particular_struct_morphism.same_image_E particular_struct_morphism_axioms subsetI)
qed
next
assume \<open>x \<notin> src.\<P>\<close>
then have \<open>eq_class x = \<emptyset>\<close> by auto
then show ?thesis using that by auto
qed
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_moment[simp]: \<open>eq_class x \<subseteq> src.\<M> \<longleftrightarrow> x \<notin> src.\<S>\<close>
apply (cases x rule: eq_class_endurant_cases ; safe ; simp add: eq_class_def)
subgoal by blast
subgoal by auto
by blast
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_substantial[simp]: \<open>eq_class x \<subseteq> src.\<S> \<longleftrightarrow> x \<notin> src.\<M>\<close>
apply (cases x rule: eq_class_endurant_cases ; safe ; simp add: eq_class_def)
subgoal by blast
subgoal by auto
by (simp add: subset_iff)
definition \<open>eq_classes \<equiv> { eq_class x | x . x \<in> src.\<P> }\<close>
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_classes_I[intro]: \<open>\<lbrakk> x \<in> src.\<P> ; X = eq_class x \<rbrakk> \<Longrightarrow> X \<in> eq_classes \<close>
by (auto simp: eq_classes_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_classes_E[elim!]:
assumes \<open>X \<in> eq_classes\<close>
obtains x where \<open>x \<in> src.\<P>\<close> \<open>X = eq_class x\<close>
using assms by (auto simp: eq_classes_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_classes_disj: \<open>\<lbrakk> X \<in> eq_classes ; Y \<in> eq_classes ; x \<in> X ; x \<in> Y \<rbrakk> \<Longrightarrow> X = Y\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_classes_un: \<open>\<Union> eq_classes = src.\<P>\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_classes_non_empty[dest]: \<open>X \<in> eq_classes \<Longrightarrow> X \<noteq> \<emptyset>\<close>
by (auto simp: eq_classes_def eq_class_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_non_empty[simp]: \<open>eq_class x \<noteq> \<emptyset> \<longleftrightarrow> x \<in> src.\<P>\<close>
by (auto simp: eq_class_def)
definition \<open>subst_eq_classes \<equiv> { X . X \<in> eq_classes \<and> X \<subseteq> src.\<S>}\<close>
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_classes_I: \<open>\<lbrakk> X \<in> eq_classes ; X \<subseteq> src.\<S> \<rbrakk> \<Longrightarrow> X \<in> subst_eq_classes\<close>
by (auto simp: subst_eq_classes_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_classes_E:
assumes \<open>X \<in> subst_eq_classes\<close>
obtains \<open>X \<in> eq_classes\<close> \<open>X \<subseteq> src.\<S>\<close>
using assms by (auto simp: subst_eq_classes_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_classes_E1[elim!]:
assumes \<open>X \<in> subst_eq_classes\<close>
obtains x where \<open>X \<in> eq_classes\<close> \<open>x \<in> X\<close> \<open>\<And>x. x \<in> X \<Longrightarrow> x \<in> src.\<S>\<close>
using assms by (auto simp: subst_eq_classes_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_classes_iff: \<open>X \<in> subst_eq_classes \<longleftrightarrow> X \<in> eq_classes \<and> X \<subseteq> src.\<S>\<close>
by (auto simp: subst_eq_classes_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_classes_I1[intro]: \<open>\<lbrakk> x \<in> src.\<S> ; X = eq_class x \<rbrakk> \<Longrightarrow> X \<in> subst_eq_classes\<close>
by (auto simp: subst_eq_classes_def)
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_classes_D1: \<open>\<lbrakk> X \<in> subst_eq_classes ; x \<in> X \<rbrakk> \<Longrightarrow> x \<in> src.\<S>\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_classes_D2: \<open>X \<in> subst_eq_classes \<Longrightarrow> X \<in> eq_classes\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_classes_disj[simp]:
\<open>\<lbrakk> X \<in> subst_eq_classes ; Y \<in> subst_eq_classes ; x \<in> X ; x \<in> Y \<rbrakk> \<Longrightarrow> X = Y\<close>
using subst_eq_classes_D2 eq_classes_disj by metis
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_classes_un[simp]: \<open>\<Union> subst_eq_classes = src.\<S>\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> subst_eq_class_non_empty[dest]: \<open>X \<in> subst_eq_classes \<Longrightarrow> X \<noteq> \<emptyset>\<close>
by auto
end
context particular_struct_morphism_sig
begin
end
locale choice_function =
fixes f :: \<open>'a set \<Rightarrow> 'a\<close>
assumes f_in_X1: \<open>\<forall>X. X \<noteq> \<emptyset> \<longrightarrow> f X \<in> X\<close>
locale particular_struct_morphism_with_choice =
particular_struct_morphism where Typ\<^sub>p\<^sub>1 = Typ\<^sub>p\<^sub>1 and Typ\<^sub>p\<^sub>2 = Typ\<^sub>p\<^sub>2 and Typ\<^sub>q = Typ\<^sub>q +
choice_function f
for
f :: \<open>'p\<^sub>1 set \<Rightarrow> 'p\<^sub>1\<close> and
Typ\<^sub>p\<^sub>1 :: \<open>'p\<^sub>1 itself\<close> and
Typ\<^sub>p\<^sub>2 :: \<open>'p\<^sub>2 itself\<close> and
Typ\<^sub>q :: \<open>'q itself\<close>
begin
inductive_set delta :: \<open>('p\<^sub>2 \<times> 'p\<^sub>1) set\<close> (\<open>\<Delta>\<close>)
where
delta_substantial: \<open>X \<in> subst_eq_classes \<Longrightarrow> (\<phi> (f X), f X) \<in> \<Delta>\<close>
| delta_moment: \<open>\<lbrakk> (x\<^sub>1,y\<^sub>1) \<in> \<Delta> ; x\<^sub>2 \<in> src.\<P> ; \<phi> x\<^sub>2 \<triangleleft>\<^sub>t x\<^sub>1 \<rbrakk> \<Longrightarrow> (\<phi> x\<^sub>2,f (eq_class x\<^sub>2)) \<in> \<Delta>\<close>
lemma \<^marker>\<open>tag (proof) aponly\<close> f_in_X[simp]: \<open>f X \<in> X \<longleftrightarrow> X \<noteq> \<emptyset>\<close> using f_in_X1 by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> f_eq_class[simp]: \<open>x \<in> src.\<P> \<Longrightarrow> \<phi> (f (eq_class x)) = \<phi> x\<close>
by (metis eq_class_def eq_class_non_empty f_in_X1 mem_Collect_eq same_image_E)
lemma \<^marker>\<open>tag (proof) aponly\<close> f_eq_class_in_end[intro!,simp]: \<open>x \<in> src.\<P> \<Longrightarrow> f (eq_class x) \<in> src.\<P>\<close>
by (metis eq_class_non_empty eq_class_unique f_in_X1)
lemma \<^marker>\<open>tag (proof) aponly\<close> x_sim_f_eq_class[simp,intro!]: \<open>x \<sim> f (eq_class x)\<close> if \<open>x \<in> src.\<P>\<close> for x
by (auto simp: that)
lemma \<^marker>\<open>tag (proof) aponly\<close> delta_dom:
assumes \<open>(x,y) \<in> \<Delta>\<close>
shows \<open>x \<in> \<phi> ` src.\<P> \<and> y \<in> src.\<P>\<close>
using assms
by (induct rule: delta.induct ; safe? ; simp)
lemma \<^marker>\<open>tag (proof) aponly\<close> delta_domE:
assumes \<open>(x,y) \<in> \<Delta>\<close>
obtains z where \<open>z \<in> src.\<P>\<close> \<open>x = \<phi> z\<close> \<open>\<phi> z \<in> tgt.\<P>\<close> \<open>y \<in> src.\<P>\<close>
using assms[THEN delta_dom]
by (elim conjE imageE ; simp add: morph_preserves_particulars)
lemma \<^marker>\<open>tag (proof) aponly\<close> delta_img:
assumes \<open>(x,y) \<in> \<Delta>\<close>
shows \<open>\<phi> y = x\<close>
proof -
show ?thesis
using assms by (induct ; hypsubst_thin? ; simp)
qed
declare [[smt_timeout=600]]
lemma \<^marker>\<open>tag (proof) aponly\<close> delta_E1:
assumes \<open>(x,y) \<in> \<Delta>\<close>
obtains x\<^sub>s where \<open>x = \<phi> x\<^sub>s\<close> \<open>y = f (eq_class x\<^sub>s)\<close>
using assms apply (cases ; simp)
using f_eq_class by blast
lemma \<^marker>\<open>tag (proof) aponly\<close> delta_E2:
assumes \<open>(x,y) \<in> \<Delta>\<close>
obtains X where \<open>X \<in> eq_classes\<close> \<open>x = \<phi> y\<close> \<open>y = f X\<close>
using assms apply (cases ; simp)
subgoal by auto
by blast
lemma \<^marker>\<open>tag (proof) aponly\<close> delta_single:
assumes \<open>(x,y\<^sub>1) \<in> \<Delta>\<close> \<open>(x,y\<^sub>2) \<in> \<Delta>\<close>
shows \<open>y\<^sub>1 = y\<^sub>2\<close>
using assms
proof -
obtain doms[simp,intro!]: \<open>y\<^sub>1 \<in> src.\<P>\<close> \<open>y\<^sub>2 \<in> src.\<P>\<close> \<open>x \<in> \<phi> ` src.\<P>\<close>
and phi_y[simp]: \<open>\<phi> y\<^sub>1 = x\<close> \<open>\<phi> y\<^sub>2 = x\<close>
using assms
by (simp add: delta_dom delta_img)
show ?thesis
using assms doms phi_y
proof (induct arbitrary: y\<^sub>2)
show G1: \<open>f X = y\<^sub>2\<close>
if A: \<open>X \<in> subst_eq_classes\<close> \<open>(\<phi> (f X), y\<^sub>2) \<in> \<Delta>\<close>
and doms: \<open>f X \<in> src.\<P>\<close> \<open>y\<^sub>2 \<in> src.\<P>\<close>
\<open>\<phi> (f X) \<in> \<phi> ` src.\<P>\<close>
\<open>\<phi> (f X) = \<phi> (f X)\<close>
\<open>\<phi> y\<^sub>2 = \<phi> (f X)\<close>
for X y\<^sub>2
proof -
have B: \<open>f X \<in> src.\<S>\<close> using A by blast
then have C: \<open>\<phi> (f X) \<in> tgt.\<S>\<close> using morph_preserves_substantials by blast
from doms(2) show \<open>f X = y\<^sub>2\<close>
proof (cases y\<^sub>2 rule: src.endurant_cases)
assume substantial: \<open>y\<^sub>2 \<in> src.\<S>\<close>
then obtain D: \<open>\<phi> y\<^sub>2 \<in> tgt.\<S>\<close> \<open>\<phi> (f X) \<in> tgt.\<S>\<close>
using \<open>f X \<in> src.\<S>\<close> by (meson inherence_sig.\<S>_E morph_preserves_substantials)
show ?thesis using A(2)
apply (cases rule: delta.cases)
subgoal by (metis (mono_tags, lifting) A(1) B doms(2) eq_class_I eq_classes_I f_in_X1 mem_Collect_eq particular_struct_morphism.eq_classes_disj particular_struct_morphism.same_image_I particular_struct_morphism_axioms src.endurantI3 subst_eq_class_non_empty subst_eq_classes_def)
using C by auto
next
assume moment: \<open>y\<^sub>2 \<in> src.\<M>\<close>
show ?thesis using A(2)
apply (cases)
subgoal using moment by blast
by (metis C inherence_sig.\<M>_I inherence_sig.\<S>_E)
qed
qed
fix x\<^sub>3 y\<^sub>3 x\<^sub>4 y\<^sub>4
assume A: \<open>(x\<^sub>3, y\<^sub>3) \<in> \<Delta>\<close>
\<open>\<And>y\<^sub>2. \<lbrakk> (x\<^sub>3, y\<^sub>2) \<in> \<Delta>
; y\<^sub>3 \<in> src.\<P>
; y\<^sub>2 \<in> src.\<P>
; x\<^sub>3 \<in> \<phi> ` src.\<P>
; \<phi> y\<^sub>3 = x\<^sub>3
; \<phi> y\<^sub>2 = x\<^sub>3 \<rbrakk> \<Longrightarrow> y\<^sub>3 = y\<^sub>2\<close>
\<open>x\<^sub>4 \<in> src.\<P>\<close>
\<open>\<phi> x\<^sub>4 \<triangleleft>\<^sub>t x\<^sub>3\<close>
\<open>(\<phi> x\<^sub>4, y\<^sub>4) \<in> \<Delta>\<close>
\<open>f (eq_class x\<^sub>4) \<in> src.\<P>\<close>
\<open>y\<^sub>4 \<in> src.endurants\<close>
\<open>\<phi> x\<^sub>4 \<in> \<phi> ` src.endurants\<close>
\<open>\<phi> (f (eq_class x\<^sub>4)) = \<phi> x\<^sub>4\<close>
\<open>\<phi> y\<^sub>4 = \<phi> x\<^sub>4\<close>
then have \<open>eq_class x\<^sub>4 = eq_class y\<^sub>4\<close> using A(3) A(7) by auto
obtain Y where Y: \<open>Y \<in> eq_classes\<close> \<open>y\<^sub>4 = f Y\<close>
using delta_E2[OF \<open>(\<phi> x\<^sub>4, y\<^sub>4) \<in> \<Delta>\<close>] by metis
have \<open>Y = eq_class y\<^sub>4\<close> using Y(1) Y(2) eq_class_unique by blast
then have \<open>f (eq_class y\<^sub>4) = y\<^sub>4\<close> using Y(2) by simp
then show \<open>f (eq_class x\<^sub>4) = y\<^sub>4\<close>
using \<open>eq_class x\<^sub>4 = eq_class y\<^sub>4\<close> by simp
qed
qed
lemma \<^marker>\<open>tag (proof) aponly\<close> delta_range:
assumes \<open>x \<in> \<phi> ` src.\<P>\<close>
shows \<open>\<exists>y. (x,y) \<in> \<Delta>\<close>
using assms
apply (induct x rule: wfP_induct[OF tgt.inherence_is_noetherian] ; simp)
proof -
fix x
assume A: \<open>\<forall>y. x \<triangleleft>\<^sub>t y \<longrightarrow> y \<in> \<phi> ` src.\<P> \<longrightarrow> (\<exists>ya. (y, ya) \<in> \<Delta>)\<close>
\<open>x \<in> \<phi> ` src.\<P>\<close>
then obtain x\<^sub>s where B[simp]: \<open>x = \<phi> x\<^sub>s\<close> \<open>x\<^sub>s \<in> src.\<P>\<close> using A(2) by blast
have D: \<open>eq_class x\<^sub>s \<in> subst_eq_classes\<close> if \<open>x\<^sub>s \<in> src.\<S>\<close>
using that by auto
have E: \<open>\<phi> (f (eq_class x\<^sub>s)) = \<phi> x\<^sub>s\<close> using B(2) by simp
have substantial: \<open>\<exists>y. (\<phi> x\<^sub>s, y) \<in> \<Delta>\<close> if \<open>x\<^sub>s \<in> src.\<S>\<close>
proof-
have \<open>(\<phi> x\<^sub>s, f (eq_class x\<^sub>s)) \<in> \<Delta>\<close> if \<open>x\<^sub>s \<in> src.\<S>\<close>
using delta.intros(1)[OF D,simplified E,OF that] .
then show ?thesis
using that by blast
qed
have moment: \<open>\<exists>y. (\<phi> x\<^sub>s, y) \<in> \<Delta>\<close> if as: \<open>x\<^sub>s \<in> src.\<M>\<close>
proof-
obtain y\<^sub>s where F: \<open>x\<^sub>s \<triangleleft>\<^sub>s y\<^sub>s\<close> using as by blast
then obtain G: \<open>y\<^sub>s \<in> src.\<P>\<close> \<open>\<phi> x\<^sub>s \<triangleleft>\<^sub>t \<phi> y\<^sub>s\<close>
using morph_reflects_inherence by auto
then have H: \<open>\<phi> y\<^sub>s \<in> \<phi> ` src.\<P>\<close> by blast
obtain y where I: \<open>(\<phi> y\<^sub>s,y) \<in> \<Delta>\<close>
using A(1)[rule_format,simplified,OF G(2) H] by blast
have \<open>(\<phi> x\<^sub>s, f (eq_class x\<^sub>s)) \<in> \<Delta>\<close>
using delta.intros(2)[OF I B(2) G(2)] by blast
then show ?thesis by blast
qed
show \<open>\<exists>y. (x, y) \<in> \<Delta>\<close>
apply (simp)
using B(2) apply (cases x\<^sub>s rule: src.endurant_cases)
using substantial moment by auto
qed
lemma \<^marker>\<open>tag (proof) aponly\<close> delta_inj:
assumes \<open>(x\<^sub>1,y) \<in> \<Delta>\<close> \<open>(x\<^sub>2,y) \<in> \<Delta>\<close>
shows \<open>x\<^sub>1 = x\<^sub>2\<close>
using assms delta_img by auto
definition someInvMorph :: \<open>'p\<^sub>2 \<Rightarrow> 'p\<^sub>1\<close> where
\<open>someInvMorph x \<equiv> if x \<in> \<phi> ` src.\<P> then THE y. (x,y) \<in> \<Delta> else undefined\<close>
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_ex:
assumes \<open>x \<in> \<phi> ` src.\<P>\<close>
shows \<open>\<exists>!y. (x,y) \<in> \<Delta>\<close>
using assms
by (intro ex_ex1I ; simp add: delta_range delta_single)
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_eq_iff:
assumes \<open>x \<in> \<phi> ` src.\<P>\<close>
shows \<open>someInvMorph x = y \<longleftrightarrow> (x,y) \<in> \<Delta>\<close>
apply (simp add: someInvMorph_def assms)
apply (rule the1I2[OF someInvMorph_ex[OF assms]] ; intro iffI ; simp?)
using delta_single by simp
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_inj_phi_img: \<open>inj_on someInvMorph (\<phi> ` src.\<P>)\<close>
apply (intro inj_onI)
subgoal premises P for x y
supply S = P(1,2)[THEN someInvMorph_eq_iff,simplified P(3)]
using S(1)[simplified S(2)]
by (meson P(1) delta_range delta_inj)
done
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_delta_I[intro!]:
assumes \<open>x \<in> \<phi> ` src.\<P>\<close> \<open>y = someInvMorph x\<close>
shows \<open>(x,y) \<in> \<Delta>\<close>
using assms someInvMorph_eq_iff by blast
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_delta_E:
assumes \<open>(x,y) \<in> \<Delta>\<close>
obtains \<open>x \<in> \<phi> ` src.\<P>\<close> \<open>y = someInvMorph x\<close>
using assms
by (metis delta_dom someInvMorph_eq_iff)
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_phi_phi[simp]:
assumes \<open>x \<in> src.\<P>\<close>
shows \<open>\<phi> (someInvMorph (\<phi> x)) = \<phi> x\<close>
apply (simp add: someInvMorph_def imageI[OF assms])
apply (rule the1I2[of \<open>\<lambda>y. (\<phi> x, y) \<in> \<Delta>\<close>])
subgoal using assms by (simp add: someInvMorph_ex)
using assms delta_img by blast
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_as_inv[simp]: \<open>x \<in> \<phi> ` src.\<P> \<Longrightarrow> \<phi> (someInvMorph x) = x\<close>
by auto
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_delta_simp: \<open>(x,y) \<in> \<Delta> \<longleftrightarrow> x \<in> \<phi> ` src.\<P> \<and> y = someInvMorph x\<close>
using someInvMorph_delta_E by blast
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_image: \<open>someInvMorph ` \<phi> ` src.\<P> \<subseteq> src.\<P>\<close>
using delta_dom by blast
lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorphImgUnique:
assumes \<open>x \<sim> y\<close> \<open>x \<in> someInvMorph ` \<phi> ` src.\<P>\<close> \<open>y \<in> someInvMorph ` \<phi> ` src.\<P>\<close>
shows \<open>x = y\<close>
using assms
by auto
context
begin
interpretation img: particular_struct_surjection \<Gamma>\<^sub>1 \<open>MorphImg \<phi> \<Gamma>\<^sub>1\<close> \<phi>
by simp
declare img.morph_is_surjective[simp del]
interpretation some_inv_to_some_inv_img: particular_struct_bijection_1 \<open>MorphImg \<phi> \<Gamma>\<^sub>1\<close> someInvMorph \<open>TYPE('p\<^sub>2)\<close> \<open>TYPE('p\<^sub>1)\<close>
apply (intro img.tgt.inj_morph_img_isomorphism[simplified morph_image_tgt_struct])
subgoal using someInvMorph_inj_phi_img img.morph_is_surjective by auto
using src.injection_to_ZF_exist by blast
private lemma \<^marker>\<open>tag (proof) aponly\<close> A1[simp]: \<open>some_inv_to_some_inv_img.tgt.\<Q>\<S> = src.\<Q>\<S>\<close>
by auto
private lemma \<^marker>\<open>tag (proof) aponly\<close> S1[simp]: \<open>some_inv_to_some_inv_img.tgt.endurants = someInvMorph ` \<phi> ` src.\<P>\<close>
using img.morph_is_surjective
by auto
private lemma \<^marker>\<open>tag (proof) aponly\<close> C1: \<open>someInvMorph ` \<phi> ` src.\<P> \<subseteq> src.\<P>\<close>
using someInvMorph_image by blast
private lemma \<^marker>\<open>tag (proof) aponly\<close> A2: \<open>x \<in> src.\<P>\<close> if \<open>x \<in> someInvMorph ` \<phi> ` src.\<P>\<close> for x
using C1 that by blast
private lemma \<^marker>\<open>tag (proof) aponly\<close> A3[simp]: \<open>some_inv_to_some_inv_img.img_inheres_in x y \<longleftrightarrow>
(\<exists>x\<^sub>1 y\<^sub>1. img.src_inheres_in x\<^sub>1 y\<^sub>1 \<and> x = someInvMorph (\<phi> x\<^sub>1) \<and> y = someInvMorph(\<phi> y\<^sub>1))\<close> for x y
apply (intro iffI ; (elim exE conjE)? ; hypsubst_thin?)
subgoal by (metis delta_dom morph_image_inheres_in_E morph_reflects_inherence
someInvMorph_as_inv someInvMorph_delta_I
some_inv_to_some_inv_img.inv_inheres_in_reflects
some_inv_to_some_inv_img.inv_morph_morph
some_inv_to_some_inv_img.tgt.inherence_scope)
subgoal for x\<^sub>1 y\<^sub>1
by blast
done
private lemma \<^marker>\<open>tag (proof) aponly\<close> A4[simp]: \<open>some_inv_to_some_inv_img.img_towards x y \<longleftrightarrow>
(\<exists>x\<^sub>1 y\<^sub>1. img.src_towards x\<^sub>1 y\<^sub>1 \<and> x = someInvMorph (\<phi> x\<^sub>1) \<and> y = someInvMorph(\<phi> y\<^sub>1))\<close> for x y
apply (intro iffI ; (elim exE conjE)? ; hypsubst_thin?)
subgoal
by (metis S1 delta_dom img.I_img_eq_tgt_I morph_image_def morph_image_towards_D
morph_reflects_towardness someInvMorph_as_inv someInvMorph_delta_I
some_inv_to_some_inv_img.inv_morph_morph
some_inv_to_some_inv_img.inv_towardness_reflects
some_inv_to_some_inv_img.morph_image_towards_D(1,2))
subgoal for x\<^sub>1 y\<^sub>1
apply (simp only: particular_struct_morphism_image_simps)
by blast
done
private lemma \<^marker>\<open>tag (proof) aponly\<close> A5[simp]: \<open>some_inv_to_some_inv_img.img_assoc_quale x q \<longleftrightarrow>
(\<exists>y. img.src_assoc_quale y q \<and> x = someInvMorph (\<phi> y))\<close> for x q
apply (intro iffI ; (elim exE conjE)? ; hypsubst_thin?)
subgoal
by (metis delta_E2 delta_dom img.I_img_eq_tgt_I morph_image_def morph_image_dests(9)
morph_reflects_quale_assoc someInvMorph_delta_I
some_inv_to_some_inv_img.I_img_eq_tgt_I
some_inv_to_some_inv_img.morph_image_E
some_inv_to_some_inv_img.morph_image_iff
some_inv_to_some_inv_img.morph_reflects_quale_assoc
some_inv_to_some_inv_img.tgt.assoc_quale_scopeD(1)
src.\<P>_def)
subgoal for x\<^sub>1
apply (simp only: particular_struct_morphism_image_simps)
by blast
done
private lemma \<^marker>\<open>tag (proof) aponly\<close> ex_simp1: \<open>(\<exists>x \<in> X. y = x) \<longleftrightarrow> y \<in> X\<close> for y :: 'p\<^sub>1 and X by blast
private lemma \<^marker>\<open>tag (proof) aponly\<close> A6: \<open>\<exists>y\<in>someInvMorph ` \<phi> ` src.\<P>. z = y\<close>
if as: \<open>x \<in> someInvMorph ` \<phi> ` src.\<P>\<close> \<open>img.src_towards x z\<close> for x z
proof (simp only: ex_simp1)
obtain y where Y: \<open>y \<in> src.\<P>\<close> \<open>x = someInvMorph (\<phi> y)\<close>
using as imageE by blast
have AA: \<open>img.src_towards (someInvMorph (\<phi> y)) z\<close> using as(2) Y by simp
then have \<open>img.tgt_towards (\<phi> (someInvMorph (\<phi> y))) (\<phi> z)\<close>
using Y(1)
by (meson particular_struct_morphism_image_simps(5))
then have \<open>img.tgt_towards (\<phi> y) (\<phi> z)\<close>
using someInvMorph_phi_phi Y by simp
then have \<open>some_inv_to_some_inv_img.tgt_towards (someInvMorph (\<phi> y)) (someInvMorph (\<phi> z))\<close>
apply (simp only: particular_struct_morphism_image_simps ; elim exE conjE)
by blast
then have \<open>some_inv_to_some_inv_img.tgt_towards x (someInvMorph (\<phi> z))\<close>
using Y(2) by simp
then have BB: \<open>img.src_towards x (someInvMorph (\<phi> z))\<close>
by (metis A2 Y(2) \<open>some_inv_to_some_inv_img.src_towards (\<phi> (someInvMorph (\<phi> y))) (\<phi> z)\<close>
img.I_img_eq_tgt_I img.morph_reflects_towardness morph_image_def
morph_image_towards_D(2) someInvMorph_as_inv
some_inv_to_some_inv_img.morph_image_towards_D(2) that(1))
have CC: \<open>someInvMorph (\<phi> z) = z\<close>
using src.towardness_single as(2) BB by simp
then show \<open>z \<in> someInvMorph ` \<phi> ` src.endurants\<close>
using CC
by (metis \<open>some_inv_to_some_inv_img.src_towards (\<phi> y) (\<phi> z)\<close>
image_eqI morph_image_towards_E)
qed
interpretation some_inv_img_to_src: pre_particular_struct_morphism \<open>MorphImg (someInvMorph \<circ> \<phi>) \<Gamma>\<^sub>1\<close> \<Gamma>\<^sub>1 id \<open>TYPE('p\<^sub>1)\<close> \<open>TYPE('p\<^sub>1)\<close>
proof -
show \<open>pre_particular_struct_morphism (MorphImg (someInvMorph \<circ> \<phi>) \<Gamma>\<^sub>1) \<Gamma>\<^sub>1 id\<close>
apply (simp only: morph_img_comp ; unfold_locales ; (simp only: id_def A1 S1 A3 A4 A5)?)
subgoal AX2 using A2 .
subgoal AX3 for x y
by (metis A2 S1 img.I_img_eq_tgt_I img.morph_reflects_inherence morph_image_def
someInvMorph_as_inv someInvMorph_phi_phi some_inv_to_some_inv_img.I_img_eq_tgt_I
some_inv_to_some_inv_img.morph_image_E src.inherence_scope)
subgoal AX4 for x z
by (metis AX2 S1 img.morph_preserves_particulars img.morph_reflects_inherence
someInvMorph_phi_phi some_inv_to_some_inv_img.I_img_eq_tgt_I
some_inv_to_some_inv_img.morph_image_I src.endurantI2 src.moment_non_migration)
subgoal AX5 for x y
(* slow *)
by (metis (no_types, lifting) A4 AX2 S1 img.morph_reflects_towardness morph_image_particulars
particular_struct_morphism_sig.morph_image_iff someInvMorph_as_inv
some_inv_to_some_inv_img.I_img_eq_tgt_I
some_inv_to_some_inv_img.morph_reflects_towardness)
subgoal AX6 for x z using A6 .
subgoal AX7 for x q
by (metis A2 S1 morph_image_particulars morph_reflects_quale_assoc someInvMorph_as_inv
someInvMorph_phi_phi some_inv_to_some_inv_img.I_img_eq_tgt_I
some_inv_to_some_inv_img.morph_image_iff src.assoc_quale_scopeD(1))
done
qed
private lemma \<^marker>\<open>tag (proof) aponly\<close> A7: \<open>\<exists>w\<^sub>s \<in> src.\<W>. w \<subseteq> w\<^sub>s\<close> if as: \<open>w \<in> some_inv_to_some_inv_img.\<W>\<^sub>\<phi>\<close> for w
proof (rule ccontr ; simp)
assume AA: \<open>\<forall>x\<in>src.\<W>. \<not> w \<subseteq> x\<close>
then have BB: False if \<open>w\<^sub>1 \<in> src.\<W>\<close> \<open>w \<subseteq> w\<^sub>1\<close> for w\<^sub>1 using that by metis
obtain w\<^sub>2 where CC: \<open>some_inv_to_some_inv_img.world_corresp w\<^sub>2 w\<close> using as by blast
then obtain DD: \<open>w\<^sub>2 \<in> some_inv_to_some_inv_img.src.\<W>\<close>
\<open>\<And>x. x \<in> some_inv_to_some_inv_img.src.\<P> \<Longrightarrow> x \<in> w\<^sub>2 \<longleftrightarrow> someInvMorph x \<in> w\<close>
using some_inv_to_some_inv_img.world_corresp_E[OF CC] by metis
have EE: \<open>w\<^sub>2 \<in> ((`) \<phi>) ` src.\<W>\<close> using DD(1) by (simp add: morph_image_worlds_src)
then obtain w\<^sub>3 where FF: \<open>w\<^sub>2 = \<phi> ` w\<^sub>3\<close> \<open>w\<^sub>3 \<in> src.\<W>\<close> by blast
have GG: \<open>\<And>x. x \<in> \<phi> ` src.\<P> \<Longrightarrow> x \<in> \<phi> ` w\<^sub>3 \<longleftrightarrow> someInvMorph x \<in> w\<close>
using DD(2) img.morph_is_surjective by (simp only: FF(1) ; simp)
have HH: \<open>\<And>x. x \<in> \<phi> ` w\<^sub>3 \<Longrightarrow> someInvMorph x \<in> w\<close>
using GG src.worlds_are_made_of_particulars FF(2) by blast
then have II: \<open>\<And>x. x \<in> w\<^sub>3 \<Longrightarrow> someInvMorph (\<phi> x) \<in> w\<close> by blast
then have JJ: \<open>\<And>x. x \<in> w\<^sub>3 \<Longrightarrow> \<phi> (someInvMorph (\<phi> x)) \<in> \<phi> ` w\<close> by blast
then have KK: \<open>\<And>x. x \<in> w\<^sub>3 \<Longrightarrow> (\<phi> x) \<in> \<phi> ` w\<close>
using FF(2) src.\<P>_I by auto
show False
by (metis A2 BB DD(1) DD(2) img.morph_worlds_correspond_tgt_src
img.world_corresp_def morph_image_particulars
someInvMorph_as_inv some_inv_to_some_inv_img.I_img_eq_tgt_I
some_inv_to_some_inv_img.morph_image_iff
some_inv_to_some_inv_img.morph_is_surjective some_inv_to_some_inv_img.tgt.\<P>_I
subsetI that)
qed
private abbreviation srcWorlds (\<open>\<W>\<^sub>A\<close>) where \<open>\<W>\<^sub>A \<equiv> src.\<W>\<close>
private abbreviation srcParticulars (\<open>\<P>\<^sub>A\<close>) where \<open>\<P>\<^sub>A \<equiv> src.\<P>\<close>
private abbreviation srcInheresIn (infix \<open>\<triangleleft>\<^sub>A\<close> 75) where
\<open>(\<triangleleft>\<^sub>A) \<equiv> src_inheres_in\<close>
private abbreviation srcAssocQuale (infix \<open>\<leadsto>\<^sub>A\<close> 75) where
\<open>(\<leadsto>\<^sub>A) \<equiv> src_assoc_quale\<close>
private abbreviation srcQualia (\<open>\<Q>\<^sub>A\<close>) where
\<open>\<Q>\<^sub>A \<equiv> img.\<Q>\<^sub>s\<close>
private abbreviation srcQualitySpaces (\<open>\<Q>\<S>\<^sub>A\<close>) where
\<open>\<Q>\<S>\<^sub>A \<equiv> some_inv_img_to_src.tgt.\<Q>\<S>\<close>
private abbreviation revImageParticulars (\<open>\<P>\<^sub>R\<close>) where \<open>\<P>\<^sub>R \<equiv> some_inv_to_some_inv_img.tgt.endurants\<close>
private abbreviation revImageInheresIn (infix \<open>\<triangleleft>\<^sub>R\<close> 75) where
\<open>(\<triangleleft>\<^sub>R) \<equiv> some_inv_img_to_src.src_inheres_in\<close>
private lemma \<^marker>\<open>tag (proof) aponly\<close> some_inv_to_some_inv_img_img_inheres_in_eq: \<open>some_inv_to_some_inv_img.img_inheres_in = (\<triangleleft>\<^sub>R)\<close>
by (intro ext ; simp)
private abbreviation revImageAssocQuale (infix \<open>\<leadsto>\<^sub>R\<close> 75) where
\<open>(\<leadsto>\<^sub>R) \<equiv> some_inv_to_some_inv_img.img_assoc_quale\<close>
private abbreviation revImageQualia (\<open>\<Q>\<^sub>R\<close>) where
\<open>\<Q>\<^sub>R \<equiv> some_inv_img_to_src.\<Q>\<^sub>s\<close>
private abbreviation revImageWorldCorresp (infix \<open>\<Leftrightarrow>\<^sub>R\<close> 75) where
\<open>(\<Leftrightarrow>\<^sub>R) \<equiv> some_inv_img_to_src.world_corresp\<close>
private abbreviation revImageWorlds (\<open>\<W>\<^sub>R\<close>) where
\<open>\<W>\<^sub>R \<equiv> some_inv_img_to_src.src.\<W>\<close>
private lemma \<^marker>\<open>tag (proof) aponly\<close> some_inv_to_some_inv_img_\<W>\<^sub>\<phi>: \<open>some_inv_to_some_inv_img.\<W>\<^sub>\<phi> = \<W>\<^sub>R\<close>
by auto
private abbreviation imageInheresIn (infix \<open>\<triangleleft>\<^sub>I\<close> 75) where
\<open>(\<triangleleft>\<^sub>I) \<equiv> img.tgt_inheres_in\<close>
private abbreviation imageWorlds (\<open>\<W>\<^sub>I\<close>) where
\<open>\<W>\<^sub>I \<equiv> img.tgt.\<W>\<close>
private abbreviation imageAssocQuale (infix \<open>\<leadsto>\<^sub>I\<close> 75) where
\<open>(\<leadsto>\<^sub>I) \<equiv> img.tgt_assoc_quale\<close>
private abbreviation imageQualia (\<open>\<Q>\<^sub>I\<close>) where
\<open>\<Q>\<^sub>I \<equiv> img.\<Q>\<^sub>t\<close>
private abbreviation imageParticulars (\<open>\<P>\<^sub>I\<close>) where \<open>\<P>\<^sub>I \<equiv> img.tgt.\<P>\<close>
private abbreviation imageWorldCorresp (infix \<open>\<Leftrightarrow>\<^sub>I\<close> 75) where
\<open>(\<Leftrightarrow>\<^sub>I) \<equiv> img.world_corresp\<close>
private abbreviation someInvMorphAbbrev (\<open>\<phi>\<^sub>\<leftarrow>\<close>) where
\<open>\<phi>\<^sub>\<leftarrow> \<equiv> someInvMorph\<close>
interpretation some_inv_img_to_src: particular_struct_morphism \<open>MorphImg (someInvMorph \<circ> \<phi>) \<Gamma>\<^sub>1\<close> \<Gamma>\<^sub>1 id \<open>TYPE('p\<^sub>1)\<close> \<open>TYPE('p\<^sub>1)\<close>
apply (unfold_locales)
subgoal G1 for w\<^sub>s
apply (auto ; simp only: particular_struct_morphism_sig.world_corresp_def)
apply (simp only: particular_struct_morphism_image_simps
possible_worlds_sig.\<P>_def id_def ; simp)
apply (elim exE conjE ; simp)
subgoal for w\<^sub>1 w\<^sub>2
apply (intro exI[of _ w\<^sub>2]) (* apply (intro exI[of _ w\<^sub>s]) *)
apply (intro conjI allI impI ballI iffI ; (elim exE conjE)? ; simp? ; hypsubst_thin?)
subgoal for _ y _ w\<^sub>4
by (metis A2 S1 img.I_img_eq_tgt_I img.world_corresp_def morph_image_def someInvMorph_as_inv some_inv_to_some_inv_img.I_img_eq_tgt_I some_inv_to_some_inv_img.morph_image_iff some_inv_to_some_inv_img.src_world_corresp_image some_inv_to_some_inv_img.tgt.\<P>_I some_inv_to_some_inv_img.world_corresp_def src_to_img1_world_corresp)
subgoal for _ y _ w\<^sub>4
by (smt image_iff img.I_img_eq_tgt_I img.world_preserve_img morph_image_def someInvMorph_as_inv some_inv_to_some_inv_img.I_img_eq_tgt_I some_inv_to_some_inv_img.morph_image_iff some_inv_to_some_inv_img.tgt.\<P>_I some_inv_to_some_inv_img.world_preserve_img)
done
done
subgoal G2 for w\<^sub>t
apply (auto ; simp only: particular_struct_morphism_sig.world_corresp_def)
apply (simp only: particular_struct_morphism_image_simps
possible_worlds_sig.\<P>_def id_def )
subgoal premises P
apply (rule exE[OF img.morph_worlds_correspond_src_tgt[OF P]])
subgoal for w\<^sub>1
apply (elim img.world_corresp_E)
subgoal premises Q
apply (rule exE[OF some_inv_to_some_inv_img.morph_worlds_correspond_src_tgt[OF Q(2)]])
subgoal for w\<^sub>2
apply (elim some_inv_to_some_inv_img.world_corresp_E)
subgoal premises T
apply (rule T(2)[THEN A7, THEN bexE])
subgoal premises V for w\<^sub>3
apply (intro exI[of _ w\<^sub>2] conjI ballI iffI
; (intro CollectI)? ; (elim UnionE CollectE exE conjE)?
; simp)
prefer 2
subgoal G2_2 using P Q T V(1) V(2)[THEN subsetD]
by (metis A2 S1 morph_image_particulars someInvMorph_as_inv some_inv_to_some_inv_img.I_img_eq_tgt_I some_inv_to_some_inv_img.morph_image_iff some_inv_to_some_inv_img.tgt.\<P>_I)
prefer 2
subgoal G2_3 using P Q T V(1) V(2)[THEN subsetD]
by (metis img.world_preserve_img morph_image_particulars someInvMorph_as_inv some_inv_to_some_inv_img.I_img_eq_tgt_I some_inv_to_some_inv_img.morph_image_iff some_inv_to_some_inv_img.tgt.\<P>_I some_inv_to_some_inv_img.world_preserve_img src.\<P>_I)
subgoal G2_1
apply (intro exI[of _ w\<^sub>1] conjI exI[of _ w\<^sub>t] )
subgoal G2_1_1
using P Q T V apply auto
subgoal G2_1_1_1 for x
by (metis some_inv_to_some_inv_img.morph_image_E some_inv_to_some_inv_img.morph_image_def some_inv_to_some_inv_img.morph_is_surjective some_inv_to_some_inv_img.src_world_corresp_image some_inv_to_some_inv_img.tgt.\<P>_I some_inv_to_some_inv_img.world_corresp_E)
subgoal G2_1_1_2 for x by blast
done
subgoal G2_1_2
using P Q T V apply auto
subgoal for x
by (smt A2 imageI morph_image_particulars possible_worlds_sig.\<P>_I someInvMorph_as_inv)
done
subgoal G2_1_3
using P Q T V by auto
done
done
done
done
done
done
done
done
done
private lemma \<^marker>\<open>tag (proof) aponly\<close> lemma1: \<open>particular_struct_morphism \<Gamma>\<^sub>1 \<Gamma>\<^sub>1 (id \<circ> someInvMorph \<circ> \<phi>)\<close>
apply (intro particular_struct_morphism_comp[of _ \<open>MorphImg (someInvMorph \<circ> \<phi>) \<Gamma>\<^sub>1\<close>]
particular_struct_morphism_comp[of _ \<open>MorphImg \<phi> \<Gamma>\<^sub>1\<close>])
using img.particular_struct_morphism_axioms
some_inv_to_some_inv_img.particular_struct_morphism_axioms
some_inv_img_to_src.particular_struct_morphism_axioms
by auto
interpretation src_to_src: particular_struct_morphism \<Gamma>\<^sub>1 \<Gamma>\<^sub>1 \<open>someInvMorph \<circ> \<phi>\<close> \<open>TYPE('p\<^sub>1)\<close> \<open>TYPE('p\<^sub>1)\<close>
using lemma1 by simp
private lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_to_endomorphism: \<open>particular_struct_endomorphism \<Gamma>\<^sub>1 (someInvMorph \<circ> \<phi>)\<close>
by (intro_locales)
private lemma \<^marker>\<open>tag (proof) aponly\<close> someInvMorph_to_eq_class_choice: \<open>(someInvMorph \<circ> \<phi>) x = f (eq_class x)\<close> if \<open>x \<in> \<P>\<^sub>A\<close>
using that apply simp
by (smt delta_E2 delta_dom f_in_X1 img.eq_class_I img.eq_classes_I img.eq_classes_disj
img.eq_classes_non_empty morph_image_I morph_image_def
particular_struct_morphism.same_image_I particular_struct_morphism_axioms
someInvMorph_delta_I)
lemma \<^marker>\<open>tag (proof) aponly\<close> eq_class_choice_inv_morph_ex: \<open>\<exists>\<sigma>. particular_struct_endomorphism \<Gamma>\<^sub>1 \<sigma> \<and> (\<forall>x \<in> src.\<P>. \<sigma> x = f (eq_class x))\<close>
by (intro exI[of _ \<open>someInvMorph \<circ> \<phi>\<close>] conjI ballI someInvMorph_to_endomorphism someInvMorph_to_eq_class_choice ; simp)
end
lemmas eq_class_choice = eq_class_choice_inv_morph_ex
end
context particular_struct_morphism
begin
lemma \<^marker>\<open>tag (proof) aponly\<close> choice_from_choice_ex:
assumes \<open>x \<in> src.\<P>\<close> \<open>y \<in> src.\<P>\<close> \<open>\<phi> x = \<phi> y\<close>
\<open>\<exists>(f :: 'p\<^sub>1 set \<Rightarrow> 'p\<^sub>1). particular_struct_morphism_with_choice \<Gamma>\<^sub>1 \<Gamma>\<^sub>2 \<phi> f\<close>
shows \<open>\<exists>(\<sigma> :: 'p\<^sub>1 \<Rightarrow> 'p\<^sub>1). particular_struct_endomorphism \<Gamma>\<^sub>1 \<sigma> \<and> \<sigma> x = y \<and> \<sigma> y = y\<close>
proof -
obtain f :: \<open>'p\<^sub>1 set \<Rightarrow> 'p\<^sub>1\<close> where \<open>particular_struct_morphism_with_choice \<Gamma>\<^sub>1 \<Gamma>\<^sub>2 \<phi> f\<close>
using assms by blast
then interpret f_choice: particular_struct_morphism_with_choice \<open>\<Gamma>\<^sub>1\<close> \<open>\<Gamma>\<^sub>2\<close> \<open>\<phi>\<close> \<open>f\<close> by simp
define g where \<open>g X \<equiv> if y \<in> X then y else f X\<close> for X
interpret g_choice: particular_struct_morphism_with_choice \<open>\<Gamma>\<^sub>1\<close> \<open>\<Gamma>\<^sub>2\<close> \<phi> g
apply (unfold_locales)
apply (intro allI impI ; simp only: g_def)
subgoal for X
by (cases \<open>y \<in> X\<close> ; simp)
done
obtain \<sigma> where A: \<open>particular_struct_endomorphism \<Gamma>\<^sub>1 \<sigma>\<close>
\<open>\<And>x. x \<in> src.\<P> \<Longrightarrow> \<sigma> x = g (eq_class x)\<close>
using g_choice.eq_class_choice by blast
have B: \<open>\<sigma> x = y\<close> by (auto simp: g_def A(2) assms)
have C: \<open>\<sigma> y = y\<close> by (auto simp: g_def A(2) assms)
show ?thesis
by (intro exI[of _ \<sigma>] conjI A(1) B C)
qed
lemma \<^marker>\<open>tag (proof) aponly\<close> choice_exists:
\<open>\<exists>(f :: 'p\<^sub>1 set \<Rightarrow> 'p\<^sub>1). particular_struct_morphism_with_choice \<Gamma>\<^sub>1 \<Gamma>\<^sub>2 \<phi> f\<close>
apply (intro exI[of _ "\<lambda>X. SOME x. x \<in> X"])
apply (unfold_locales ; auto)
subgoal for X x
using someI[of \<open>\<lambda>x. x \<in> X\<close>,of x] by blast
done
lemma \<^marker>\<open>tag (proof) aponly\<close> choice:
assumes \<open>x \<in> src.\<P>\<close> \<open>y \<in> src.\<P>\<close> \<open>\<phi> x = \<phi> y\<close>
shows \<open>\<exists>(\<sigma> :: 'p\<^sub>1 \<Rightarrow> 'p\<^sub>1). particular_struct_endomorphism \<Gamma>\<^sub>1 \<sigma> \<and> \<sigma> x = y \<and> \<sigma> y = y\<close>
using choice_from_choice_ex[OF assms choice_exists] by blast
end
end
|
lemma topological_basis_prod: assumes A: "topological_basis A" and B: "topological_basis B" shows "topological_basis ((\<lambda>(a, b). a \<times> b) ` (A \<times> B))" |
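# Experiment 5b: bar plot of execution times [s] for three parallel configurations
# (MT-Ops, MT-PFor, Dist-PFor) on 1x USCensus; reads column 3 of
# results/Experiment5b_times.dat (divided by 1000) and writes plots/Experiment5b.pdf.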
require(graphics)
require(Matrix)
pdf(file="plots/Experiment5b.pdf",
width=3.7, height=4.0, family="serif", pointsize=14)
data = as.matrix(read.table("results/Experiment5b_times.dat", sep=",")[3])/1000
plot_colors <- c("orangered","orange","cornflowerblue")
barplot( data,
space = c(0.1,0.1),
xlab = "",
ylab = "",
col=plot_colors,
ylim = c(0,1350),
log = "",
axes = FALSE,
names.arg = c("MT-Ops","","Dist-PFor"),
        args.legend = list(x="topleft", bty="n", ncol=1),
        beside = TRUE
  )
axis(2, las=1)
mtext(2,text="Execution Time [s]",line=2.8)
mtext(1,text="MT-PFor",line=0.2)
mtext(1,text="Parallel Configurations",line=2)
text(2.73,800,"scale-out")
text(2.5,1200,"1x USCensus")
box() # box around plot
dev.off()
|
From mathcomp
Require Import ssreflect ssrbool ssrnat eqtype seq ssrfun.
From fcsl
Require Import prelude pred pcm unionmap heap.
From HTT
Require Import stmod stsep stlog stlogR.
From SSL
Require Import core.
From Hammer Require Import Hammer.
(* Configure Hammer *)
Set Hammer ATPLimit 60.
Unset Hammer Eprover.
Unset Hammer Vampire.
Add Search Blacklist "fcsl.".
Add Search Blacklist "HTT.".
Add Search Blacklist "Coq.ssr.ssrfun".
Add Search Blacklist "mathcomp.ssreflect.ssrfun".
Add Search Blacklist "mathcomp.ssreflect.bigop".
Add Search Blacklist "mathcomp.ssreflect.choice".
Add Search Blacklist "mathcomp.ssreflect.div".
Add Search Blacklist "mathcomp.ssreflect.finfun".
Add Search Blacklist "mathcomp.ssreflect.fintype".
Add Search Blacklist "mathcomp.ssreflect.path".
Add Search Blacklist "mathcomp.ssreflect.tuple".
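(* [sll x s h]: the heap [h] stores a null-terminated singly linked list with
   head pointer [x] whose payloads, in order, form the sequence [s]; every
   node occupies two heap cells, the value at [x] and the next pointer at
   [x .+ 1]. *)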
Inductive sll (x : ptr) (s : seq nat) (h : heap) : Prop :=
| sll_1 of (x) == (null) of
(s) == (@nil nat) /\ h = empty
| sll_2 of ~~ ((x) == (null)) of
exists (v : nat) (s1 : seq nat) (nxt : ptr),
exists h_sll_nxts1_0,
(s) == (([:: v]) ++ (s1)) /\ h = x :-> (v) \+ x .+ 1 :-> (nxt) \+ h_sll_nxts1_0 /\ sll nxt s1 h_sll_nxts1_0.
|
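-- The following modules exercise user-supplied POLARITY pragmas. These
-- pragmas are trusted rather than checked, so an annotation that does not
-- match the actual use of an argument (e.g. _⇒_ below is declared strictly
-- positive in its first argument, although lambda/apply make it behave like
-- a function space) lets the positivity checker accept data types such as
-- M₂.D, and ultimately ⊥ becomes inhabited (see bad).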
data ⊥ : Set where
module M₁ where
postulate
∥_∥ : Set → Set
{-# POLARITY ∥_∥ ++ #-}
data D : Set where
c : ∥ D ∥ → D
module M₂ where
postulate
_⇒_ : Set → Set → Set
lambda : {A B : Set} → (A → B) → A ⇒ B
apply : {A B : Set} → A ⇒ B → A → B
{-# POLARITY _⇒_ ++ #-}
data D : Set where
c : D ⇒ ⊥ → D
not-inhabited : D → ⊥
not-inhabited (c f) = apply f (c f)
d : D
d = c (lambda not-inhabited)
bad : ⊥
bad = not-inhabited d
postulate
F₁ : Set → Set → Set₁ → Set₁ → Set₁ → Set
{-# POLARITY F₁ _ ++ + - * #-}
data D₁ : Set where
c : F₁ (D₁ → ⊥) D₁ Set Set Set → D₁
postulate
F₂ : ∀ {a} → Set a → Set a → Set a
{-# POLARITY F₂ * * ++ #-}
data D₂ (A : Set) : Set where
c : F₂ A (D₂ A) → D₂ A
module _ (A : Set₁) where
postulate
F₃ : Set → Set
{-# POLARITY F₃ ++ #-}
data D₃ : Set where
c : F₃ Set D₃ → D₃
postulate
F₄ : ∀ {a} → Set a → Set a → Set a
{-# POLARITY F₄ * ++ #-}
data D₄ (A : Set) : Set where
c : F₄ (D₄ A) A → D₄ A
postulate
F₅ : ⦃ _ : Set ⦄ → Set
{-# POLARITY F₅ ++ #-}
data D₅ : Set where
c : F₅ ⦃ D₅ ⦄ → D₅
|
(* Author: Tobias Nipkow, 2007 *)
theory QElin_inf
imports LinArith
begin
subsection {*Quantifier elimination with infinitesimals \label{sec:lin-inf}*}
text{* This section formalizes Loos and Weispfenning's quantifier
elimination procedure based on (the simulation of)
infinitesimals~\cite{LoosW93}. *}
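(* Informally, asubst\<^sub>+ (r,cs) performs the virtual substitution of the
   value "lower bound plus an infinitesimal", r + <cs,xs> + epsilon, into an
   atom: an equation with non-zero leading coefficient can never hold for such
   a value (hence FalseF), and a strict inequality becomes strict or non-strict
   depending on the sign of the leading coefficient d. *)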
fun asubst_peps :: "real * real list \<Rightarrow> atom \<Rightarrow> atom fm" ("asubst\<^sub>+") where
"asubst_peps (r,cs) (Less s (d#ds)) =
(if d=0 then Atom(Less s ds) else
let u = s - d*r; v = d *\<^sub>s cs + ds; less = Atom(Less u v)
in if d<0 then less else Or less (Atom(Eq u v)))" |
"asubst_peps rcs (Eq r (d#ds)) = (if d=0 then Atom(Eq r ds) else FalseF)" |
"asubst_peps rcs a = Atom a"
abbreviation subst_peps :: "atom fm \<Rightarrow> real * real list \<Rightarrow> atom fm" ("subst\<^sub>+")
where "subst\<^sub>+ \<phi> rcs \<equiv> amap\<^bsub>fm\<^esub> (asubst\<^sub>+ rcs) \<phi>"
definition "nolb f xs l x = (\<forall>y\<in>{l<..<x}. y \<notin> LB f xs)"
lemma nolb_And[simp]:
"nolb (And f g) xs l x = (nolb f xs l x \<and> nolb g xs l x)"
apply(clarsimp simp:nolb_def)
apply blast
done
lemma nolb_Or[simp]:
"nolb (Or f g) xs l x = (nolb f xs l x \<and> nolb g xs l x)"
apply(clarsimp simp:nolb_def)
apply blast
done
declare[[simp_depth_limit=4]]
definition "EQ2 = EQ"
lemma EQ2_Or[simp]: "EQ2 (Or f g) xs = (EQ2 f xs Un EQ2 g xs)"
by(auto simp:EQ2_def)
lemma EQ2_And[simp]: "EQ2 (And f g) xs = (EQ2 f xs Un EQ2 g xs)"
by(auto simp:EQ2_def)
lemma innermost_intvl2:
"\<lbrakk> nqfree f; nolb f xs l x; l < x; x \<notin> EQ2 f xs; R.I f (x#xs); l < y; y \<le> x\<rbrakk>
\<Longrightarrow> R.I f (y#xs)"
unfolding EQ2_def by(blast intro:innermost_intvl)
lemma I_subst_peps2:
"nqfree f \<Longrightarrow> r+\<langle>cs,xs\<rangle> < x \<Longrightarrow> nolb f xs (r+\<langle>cs,xs\<rangle>) x
\<Longrightarrow> \<forall>y \<in> {r+\<langle>cs,xs\<rangle> <.. x}. R.I f (y#xs) \<and> y \<notin> EQ2 f xs
\<Longrightarrow> R.I (subst\<^sub>+ f (r,cs)) xs"
proof(induct f)
case FalseF thus ?case
by simp (metis linorder_antisym_conv1 linorder_neq_iff)
next
case (Atom a)
show ?case
proof(cases "((r,cs),a)" rule:asubst_peps.cases)
case (1 r cs s d ds)
{ assume "d=0" hence ?thesis using Atom 1 by auto }
moreover
{ assume "d<0"
have "s < d*x + \<langle>ds,xs\<rangle>" using Atom 1 by simp
moreover have "d*x < d*(r + \<langle>cs,xs\<rangle>)" using `d<0` Atom 1
by (simp add: mult_strict_left_mono_neg)
ultimately have "s < d * (r + \<langle>cs,xs\<rangle>) + \<langle>ds,xs\<rangle>" by(simp add:algebra_simps)
hence ?thesis using 1
by (auto simp add: iprod_left_add_distrib algebra_simps)
} moreover
{ let ?L = "(s - \<langle>ds,xs\<rangle>) / d" let ?U = "r + \<langle>cs,xs\<rangle>"
assume "d>0"
hence "?U < x" and "\<forall>y. ?U < y \<and> y < x \<longrightarrow> y \<noteq> ?L"
and "\<forall>y. ?U < y \<and> y \<le> x \<longrightarrow> ?L < y" using Atom 1
by(simp_all add:nolb_def depends\<^sub>R_def Ball_def field_simps)
hence "?L < ?U \<or> ?L = ?U"
by (metis linorder_neqE_linordered_idom order_refl)
hence ?thesis using Atom 1 `d>0`
by (simp add: iprod_left_add_distrib field_simps)
} ultimately show ?thesis by force
next
case 2 thus ?thesis using Atom
by (fastforce simp: nolb_def EQ2_def depends\<^sub>R_def field_simps split: split_if_asm)
qed (insert Atom, auto)
next
case Or thus ?case by(simp add:Ball_def)(metis order_refl innermost_intvl2)
qed simp_all
declare[[simp_depth_limit=50]]
lemma I_subst_peps:
"nqfree f \<Longrightarrow> R.I (subst\<^sub>+ f (r,cs)) xs \<Longrightarrow>
(\<exists>leps>r+\<langle>cs,xs\<rangle>. \<forall>x. r+\<langle>cs,xs\<rangle> < x \<and> x \<le> leps \<longrightarrow> R.I f (x#xs))"
proof(induct f)
case TrueF thus ?case by simp (metis less_add_one)
next
case (Atom a)
show ?case
proof (cases "((r,cs),a)" rule: asubst_peps.cases)
case (1 r cs s d ds)
{ assume "d=0" hence ?thesis using Atom 1 by auto (metis less_add_one) }
moreover
{ assume "d<0"
with Atom 1 have "r + \<langle>cs,xs\<rangle> < (s - \<langle>ds,xs\<rangle>)/d" (is "?a < ?b")
by(simp add:field_simps iprod_left_add_distrib)
then obtain x where "?a < x" "x < ?b" by(metis dense)
hence " \<forall>y. ?a < y \<and> y \<le> x \<longrightarrow> s < d*y + \<langle>ds,xs\<rangle>"
using `d<0` by (simp add:field_simps)
(metis add_le_cancel_right mult_le_cancel_left order_antisym linear mult.commute xt1(8))
hence ?thesis using 1 `?a<x` by auto
} moreover
{ let ?a = "s - d * r" let ?b = "\<langle>d *\<^sub>s cs + ds,xs\<rangle>"
assume "d>0"
with Atom 1 have "?a < ?b \<or> ?a = ?b" by auto
hence ?thesis
proof
assume "?a = ?b"
thus ?thesis using `d>0` Atom 1
by(simp add:field_simps iprod_left_add_distrib)
(metis add_0_left add_less_cancel_right distrib_left mult.commute mult_strict_left_mono)
next
assume "?a < ?b"
{ fix x assume "r+\<langle>cs,xs\<rangle> < x \<and> x \<le> r+\<langle>cs,xs\<rangle> + 1"
hence "d*(r + \<langle>cs,xs\<rangle>) < d*x"
using `d>0` by(metis mult_strict_left_mono)
hence "s < d*x + \<langle>ds,xs\<rangle>" using `d>0` `?a < ?b`
by (simp add:algebra_simps iprod_left_add_distrib)
}
thus ?thesis using 1 `d>0`
by(force simp: iprod_left_add_distrib)
qed
} ultimately show ?thesis by (metis less_linear)
qed (insert Atom, auto split:split_if_asm intro: less_add_one)
next
case And thus ?case
apply clarsimp
apply(rule_tac x="min leps lepsa" in exI)
apply simp
done
next
case Or thus ?case by force
qed simp_all
definition
"qe_eps\<^sub>1(f) =
(let as = R.atoms\<^sub>0 f; lbs = lbounds as; ebs = ebounds as
in list_disj (inf\<^sub>- f # map (subst\<^sub>+ f) lbs @ map (subst f) ebs))"
theorem I_eps1:
assumes "nqfree f" shows "R.I (qe_eps\<^sub>1 f) xs = (\<exists>x. R.I f (x#xs))"
(is "?QE = ?EX")
proof
let ?as = "R.atoms\<^sub>0 f" let ?ebs = "ebounds ?as"
assume ?QE
{ assume "R.I (inf\<^sub>- f) xs"
hence ?EX using `?QE` min_inf[of f xs] `nqfree f`
by(auto simp add:qe_eps\<^sub>1_def amap_fm_list_disj)
} moreover
{ assume "\<forall>x \<in> EQ f xs. \<not>R.I f (x#xs)"
"\<not> R.I (inf\<^sub>- f) xs"
with `?QE` `nqfree f` obtain r cs where "R.I (subst\<^sub>+ f (r,cs)) xs"
by (fastforce simp: qe_eps\<^sub>1_def set_ebounds diff_divide_distrib eval_def I_subst `nqfree f`)
then obtain leps where "R.I f (leps#xs)"
using I_subst_peps[OF `nqfree f`] by fastforce
hence ?EX .. }
ultimately show ?EX by blast
next
let ?as = "R.atoms\<^sub>0 f" let ?ebs = "ebounds ?as"
assume ?EX
then obtain x where x: "R.I f (x#xs)" ..
{ assume "R.I (inf\<^sub>- f) xs"
hence ?QE using `nqfree f` by(auto simp:qe_eps\<^sub>1_def)
} moreover
{ assume "\<exists>rcs \<in> set ?ebs. R.I (subst f rcs) xs"
hence ?QE by(auto simp:qe_eps\<^sub>1_def) } moreover
{ assume "\<not> R.I (inf\<^sub>- f) xs"
and "\<forall>rcs \<in> set ?ebs. \<not> R.I (subst f rcs) xs"
hence noE: "\<forall>e \<in> EQ f xs. \<not> R.I f (e#xs)" using `nqfree f`
by (force simp:set_ebounds I_subst diff_divide_distrib eval_def split:split_if_asm)
hence "x \<notin> EQ f xs" using x by fastforce
obtain l where "l \<in> LB f xs" "l < x"
using LBex[OF `nqfree f` x `\<not> R.I(inf\<^sub>- f) xs` `x \<notin> EQ f xs`] ..
have "\<exists>l\<in>LB f xs. l<x \<and> nolb f xs l x \<and>
(\<forall>y. l < y \<and> y \<le> x \<longrightarrow> R.I f (y#xs))"
using dense_interval[where P = "\<lambda>x. R.I f (x#xs)", OF finite_LB `l\<in>LB f xs` `l<x` x] x innermost_intvl[OF `nqfree f` _ _ `x \<notin> EQ f xs`]
by (simp add:nolb_def)
then obtain r c cs
where *: "Less r (c#cs) \<in> set(R.atoms\<^sub>0 f) \<and> c>0 \<and>
(r - \<langle>cs,xs\<rangle>)/c < x \<and> nolb f xs ((r - \<langle>cs,xs\<rangle>)/c) x
\<and> (\<forall>y. (r - \<langle>cs,xs\<rangle>)/c < y \<and> y \<le> x \<longrightarrow> R.I f (y#xs))"
by blast
then have "R.I (subst\<^sub>+ f (r/c, (-1/c) *\<^sub>s cs)) xs" using noE
by(auto intro!: I_subst_peps2[OF `nqfree f`]
simp:EQ2_def diff_divide_distrib algebra_simps)
with * have ?QE
by(simp add:qe_eps\<^sub>1_def bex_Un set_lbounds) metis
} ultimately show ?QE by blast
qed
lemma qfree_asubst_peps: "qfree (asubst\<^sub>+ rcs a)"
by(cases "(rcs,a)" rule:asubst_peps.cases) simp_all
lemma qfree_subst_peps: "nqfree \<phi> \<Longrightarrow> qfree (subst\<^sub>+ \<phi> rcs)"
by(induct \<phi>) (simp_all add:qfree_asubst_peps)
lemma qfree_qe_eps\<^sub>1: "nqfree \<phi> \<Longrightarrow> qfree(qe_eps\<^sub>1 \<phi>)"
apply(simp add:qe_eps\<^sub>1_def)
apply(rule qfree_list_disj)
apply (auto simp:qfree_min_inf qfree_subst_peps qfree_map_fm)
done
definition "qe_eps = R.lift_nnf_qe qe_eps\<^sub>1"
lemma qfree_qe_eps: "qfree(qe_eps \<phi>)"
by(simp add: qe_eps_def R.qfree_lift_nnf_qe qfree_qe_eps\<^sub>1)
lemma I_qe_eps: "R.I (qe_eps \<phi>) xs = R.I \<phi> xs"
by(simp add:qe_eps_def R.I_lift_nnf_qe qfree_qe_eps\<^sub>1 I_eps1)
end
|
!move_nodes.f90
!Community detection algorithm for networks, based on the paper:
!------------------------------------------------------------------
!Paper: Fast unfolding of communities in large networks
!Authors: Vincent D Blondel, Jean-Loup Guillaume, Renaud Lambiotte and Etienne Lefebvre
!doi:10.1088/1742-5468/2008/10/P10008
!------------------------------------------------------------------
!This program is composed of three parts: louvain.f90; move_nodes.f90; aggregate.f90
!The variable ki(i) in this subroutine is different from the one used in louvain.f90
!Shared variables used only by move_nodes.f90 and its functions
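!------------------------------------------------------------------
!The modularity gain tested in move_nodes below is
!   gain_q = (1/m) * ( e_ic(C,i) - ki(i)*kc(C,i)/(2*m) )
!where e_ic(C,i) is the total weight of the links between node i and
!community C, ki(i) is the (weighted) degree of node i, and kc(C,i) is the
!total degree of community C, not counting i itself when i already belongs
!to C (see the functions kc and e_ic at the end of this file).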
module variaveis_move
integer*8, allocatable :: ki(:)
end module
subroutine move_nodes()
use variaveis
use variaveis_move
real*8 gain_q, q, best_q, best_c
integer*8 kc, e_ic
integer*8 i, j, move, n
!-------degree of the nodes (ki) of graph G1---
allocate (ki(numb_comun))
do i=1,numb_comun
ki(i) = 0
do j =1,numb_comun
ki(i) = ki(i)+G1(i,j)
end do
end do
!---------------------------------------
deallocate(C1, C2)
allocate (C1(numb_comun),C2(numb_comun))
do i=1,numb_comun !Assign node i to community Ci
C2(i)=i
C1(i)=C2(i)
end do
move = numb_comun
do while (move.ne.0) !keep moving nodes while there is a modularity gain
move = numb_comun
do i=1,numb_comun
! i= int(rand(0)*(numb_comun+1-1))+1
best_q = -1000
best_c = C2(i)
do j=1,numb_comun
if(G1(i,j).gt.0) then
gain_q = (1.d0/m)*(e_ic(C2(j),i)- &
(ki(i)*kc(C2(j),i)/(2.d0*m)))
if(best_q.lt.gain_q) then
best_q = gain_q
best_c = C2(j)
end if
end if
end do
C2(i) = best_c
if(C1(i).eq.C2(i)) then
move = move - 1
end if
C1(i) = C2(i)
end do
end do
deallocate (ki)
end subroutine move_nodes
!-------degree of the community (kc)--------------
function kc(cmt,node)
use variaveis
use variaveis_move
integer*8 i, kc,node, cmt
kc = 0
do i = 1,numb_comun
if (C2(i).eq.cmt) then
kc = kc+ki(i)
end if
end do
if (C2(node).eq.cmt) then
kc = kc - ki(node)
end if
return
end function kc
!-sum of the weights of the links between node i and community C(cmt)-
function e_ic(cmt,node)
use variaveis
use variaveis_move
integer*8 j, e_ic, cmt, node
e_ic = 0
do j = 1,numb_comun
if(node.ne.j) then
if ( (C2(j).eq.cmt).and.(G1(node,j).gt.0) ) then
e_ic = e_ic+G1(node,j)
end if
end if
end do
return
end function e_ic
|
State Before: case empty
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
⊢ ∀ (c : n → R),
(∀ (i : n), ¬i ∈ ∅ → c i = 0) → ∀ (k : n), ¬k ∈ ∅ → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B State After: case empty
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
⊢ det A = det B Tactic: rintro c hs k - A_eq State Before: case empty
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
⊢ det A = det B State After: case empty
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
this : ∀ (i : n), c i = 0
⊢ det A = det B Tactic: have : ∀ i, c i = 0 := by
intro i
specialize hs i
contrapose! hs
simp [hs] State Before: case empty
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
this : ∀ (i : n), c i = 0
⊢ det A = det B State After: case empty.e_M
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
this : ∀ (i : n), c i = 0
⊢ A = B Tactic: congr State Before: case empty.e_M
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
this : ∀ (i : n), c i = 0
⊢ A = B State After: case empty.e_M.a.h
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
this : ∀ (i : n), c i = 0
i j : n
⊢ A i j = B i j Tactic: ext (i j) State Before: case empty.e_M.a.h
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
this : ∀ (i : n), c i = 0
i j : n
⊢ A i j = B i j State After: no goals Tactic: rw [A_eq, this, MulZeroClass.zero_mul, add_zero] State Before: m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
⊢ ∀ (i : n), c i = 0 State After: m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
i : n
⊢ c i = 0 Tactic: intro i State Before: m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
hs : ∀ (i : n), ¬i ∈ ∅ → c i = 0
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
i : n
⊢ c i = 0 State After: m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
i : n
hs : ¬i ∈ ∅ → c i = 0
⊢ c i = 0 Tactic: specialize hs i State Before: m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
i : n
hs : ¬i ∈ ∅ → c i = 0
⊢ c i = 0 State After: m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
i : n
hs : c i ≠ 0
⊢ ¬i ∈ ∅ ∧ c i ≠ 0 Tactic: contrapose! hs State Before: m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A B : Matrix n n R
c : n → R
k : n
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
i : n
hs : c i ≠ 0
⊢ ¬i ∈ ∅ ∧ c i ≠ 0 State After: no goals Tactic: simp [hs] State Before: case insert
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
⊢ ∀ (c : n → R),
(∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0) →
∀ (k : n), ¬k ∈ insert i s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B State After: case insert
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
⊢ det A = det B Tactic: intro c hs k hk A_eq State Before: case insert
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
⊢ det A = det B State After: case insert
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ det A = det B Tactic: have hAi : A i = B i + c i • B k := funext (A_eq i) State Before: case insert
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ det A = det B State After: case insert.hij
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ i ≠ k
case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ ∀ (i_1 : n), ¬i_1 ∈ s → update c i 0 i_1 = 0
case insert.k
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ n
case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ ¬?insert.k ∈ s
case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ ∀ (i_1 j : n), A i_1 j = updateRow B i (A i) i_1 j + update c i 0 i_1 * updateRow B i (A i) ?insert.k j Tactic: rw [@ih (updateRow B i (A i)) (Function.update c i 0), hAi, det_updateRow_add_smul_self] State Before: case insert.hij
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ i ≠ k State After: no goals Tactic: exact mt (fun h => show k ∈ insert i s from h ▸ Finset.mem_insert_self _ _) hk State Before: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ ∀ (i_1 : n), ¬i_1 ∈ s → update c i 0 i_1 = 0 State After: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' : n
hi' : ¬i' ∈ s
⊢ update c i 0 i' = 0 Tactic: intro i' hi' State Before: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' : n
hi' : ¬i' ∈ s
⊢ update c i 0 i' = 0 State After: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' : n
hi' : ¬i' ∈ s
⊢ (if i' = i then 0 else c i') = 0 Tactic: rw [Function.update_apply] State Before: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' : n
hi' : ¬i' ∈ s
⊢ (if i' = i then 0 else c i') = 0 State After: case insert.x.inl
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' : n
hi' : ¬i' ∈ s
hi'i : i' = i
⊢ 0 = 0
case insert.x.inr
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' : n
hi' : ¬i' ∈ s
hi'i : ¬i' = i
⊢ c i' = 0 Tactic: split_ifs with hi'i State Before: case insert.x.inl
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' : n
hi' : ¬i' ∈ s
hi'i : i' = i
⊢ 0 = 0 State After: no goals Tactic: rfl State Before: case insert.x.inr
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' : n
hi' : ¬i' ∈ s
hi'i : ¬i' = i
⊢ c i' = 0 State After: no goals Tactic: exact hs i' fun h => hi' ((Finset.mem_insert.mp h).resolve_left hi'i) State Before: case insert.k
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ n State After: no goals Tactic: exact k State Before: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ ¬k ∈ s State After: no goals Tactic: exact fun h => hk (Finset.mem_insert_of_mem h) State Before: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
⊢ ∀ (i_1 j : n), A i_1 j = updateRow B i (A i) i_1 j + update c i 0 i_1 * updateRow B i (A i) k j State After: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' j' : n
⊢ A i' j' = updateRow B i (A i) i' j' + update c i 0 i' * updateRow B i (A i) k j' Tactic: intro i' j' State Before: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' j' : n
⊢ A i' j' = updateRow B i (A i) i' j' + update c i 0 i' * updateRow B i (A i) k j' State After: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' j' : n
⊢ A i' j' = (if i' = i then A i j' else B i' j') + (if i' = i then 0 else c i') * updateRow B i (A i) k j' Tactic: rw [updateRow_apply, Function.update_apply] State Before: case insert.x
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' j' : n
⊢ A i' j' = (if i' = i then A i j' else B i' j') + (if i' = i then 0 else c i') * updateRow B i (A i) k j' State After: case insert.x.inl
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' j' : n
hi'i : i' = i
⊢ A i' j' = A i j' + 0 * updateRow B i (A i) k j'
case insert.x.inr
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' j' : n
hi'i : ¬i' = i
⊢ A i' j' = B i' j' + c i' * updateRow B i (A i) k j' Tactic: split_ifs with hi'i State Before: case insert.x.inr
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' j' : n
hi'i : ¬i' = i
⊢ A i' j' = B i' j' + c i' * updateRow B i (A i) k j' State After: no goals Tactic: rw [A_eq, updateRow_ne fun h : k = i => hk <| h ▸ Finset.mem_insert_self k s] State Before: case insert.x.inl
m : Type ?u.1702450
n : Type u_1
inst✝⁴ : DecidableEq n
inst✝³ : Fintype n
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
A : Matrix n n R
i : n
s : Finset n
_hi : ¬i ∈ s
ih :
∀ {B : Matrix n n R} (c : n → R),
(∀ (i : n), ¬i ∈ s → c i = 0) → ∀ (k : n), ¬k ∈ s → (∀ (i j : n), A i j = B i j + c i * B k j) → det A = det B
B : Matrix n n R
c : n → R
hs : ∀ (i_1 : n), ¬i_1 ∈ insert i s → c i_1 = 0
k : n
hk : ¬k ∈ insert i s
A_eq : ∀ (i j : n), A i j = B i j + c i * B k j
hAi : A i = B i + c i • B k
i' j' : n
hi'i : i' = i
⊢ A i' j' = A i j' + 0 * updateRow B i (A i) k j' State After: no goals Tactic: simp [hi'i] |
Require Import VerdiRaft.Raft.
Local Arguments update {_} {_} _ _ _ _ _ : simpl never.
Require Import VerdiRaft.CommonTheorems.
Require Import VerdiRaft.SpecLemmas.
Require Import VerdiRaft.AppendEntriesReplySublogInterface.
Require Import VerdiRaft.SortedInterface.
Require Import VerdiRaft.NextIndexSafetyInterface.
Section NextIndexSafety.
Context {orig_base_params : BaseParams}.
Context {one_node_params : OneNodeParams orig_base_params}.
Context {raft_params : RaftParams orig_base_params}.
Context {aersi : append_entries_reply_sublog_interface}.
Context {si : sorted_interface}.
Lemma nextIndex_safety_init :
raft_net_invariant_init nextIndex_safety.
Proof using.
unfold raft_net_invariant_init, nextIndex_safety.
intros.
discriminate.
Qed.
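(* nextIndex_preserved st st' : whenever the new state st' is a Leader, the
   old state st was already a Leader, the maximum index of its log has not
   decreased, and nextIndex is unchanged. *)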
Definition nextIndex_preserved st st' :=
(type st' = Leader ->
type st = Leader /\
maxIndex (log st) <= maxIndex (log st') /\
nextIndex st' = nextIndex st).
Lemma nextIndex_safety_preserved :
forall st st',
(forall h',
type st = Leader ->
Nat.pred (getNextIndex st h') <= maxIndex (log st)) ->
nextIndex_preserved st st' ->
(forall h',
type st' = Leader ->
Nat.pred (getNextIndex st' h') <= maxIndex (log st')).
Proof using.
unfold getNextIndex, nextIndex_preserved in *.
intuition.
repeat find_rewrite.
auto.
unfold assoc_default in *.
specialize (H h').
break_match.
- eauto using Nat.le_trans.
- lia.
Qed.
Theorem handleClientRequest_nextIndex_preserved :
forall h st client id c out st' ps,
handleClientRequest h st client id c = (out, st', ps) ->
nextIndex_preserved st st'.
Proof using.
unfold handleClientRequest, nextIndex_preserved.
intros.
repeat break_match; repeat find_inversion; simpl in *; try congruence.
intuition.
Qed.
Lemma nextIndex_safety_client_request :
raft_net_invariant_client_request nextIndex_safety.
Proof using.
unfold raft_net_invariant_client_request, nextIndex_safety.
simpl.
intros.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- eauto using nextIndex_safety_preserved, handleClientRequest_nextIndex_preserved.
- auto.
Qed.
Lemma handleTimeout_nextIndex_preserved :
forall h d out d' l,
handleTimeout h d = (out, d', l) ->
nextIndex_preserved d d'.
Proof using.
unfold handleTimeout, tryToBecomeLeader, nextIndex_preserved.
intros.
repeat break_match; repeat find_inversion; simpl in *; try congruence.
auto.
Qed.
Lemma nextIndex_safety_timeout :
raft_net_invariant_timeout nextIndex_safety.
Proof using.
unfold raft_net_invariant_timeout, nextIndex_safety.
simpl.
intros.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- eauto using nextIndex_safety_preserved, handleTimeout_nextIndex_preserved.
- auto.
Qed.
Lemma handleAppendEntries_nextIndex_preserved :
forall h st t n pli plt es ci st' ps,
handleAppendEntries h st t n pli plt es ci = (st', ps) ->
nextIndex_preserved st st'.
Proof using.
unfold handleAppendEntries, nextIndex_preserved, advanceCurrentTerm.
intros.
repeat break_match; repeat find_inversion; simpl in *; try congruence; auto.
Qed.
Lemma nextIndex_safety_append_entries :
raft_net_invariant_append_entries nextIndex_safety.
Proof using.
unfold raft_net_invariant_append_entries, nextIndex_safety.
simpl.
intros.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- eauto using nextIndex_safety_preserved, handleAppendEntries_nextIndex_preserved.
- auto.
Qed.
Lemma handleAppendEntriesReply_nextIndex :
forall h st st' m t es res h',
handleAppendEntriesReply h st h' t es res = (st', m) ->
type st' = Leader ->
type st = Leader /\
((nextIndex st' = nextIndex st \/
(res = true /\
currentTerm st = t /\
nextIndex st' =
(assoc_set name_eq_dec (nextIndex st) h'
(Nat.max (getNextIndex st h') (S (maxIndex es)))))) \/
(res = false /\
currentTerm st = t /\
nextIndex st' =
(assoc_set name_eq_dec (nextIndex st) h'
(pred (getNextIndex st h'))))).
Proof using.
unfold handleAppendEntriesReply, advanceCurrentTerm.
intros.
repeat break_match; repeat find_inversion; do_bool; simpl in *; intuition; congruence.
Qed.
Lemma nextIndex_safety_append_entries_reply :
raft_net_invariant_append_entries_reply nextIndex_safety.
Proof using si aersi.
unfold raft_net_invariant_append_entries_reply, nextIndex_safety, getNextIndex.
simpl.
intros.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- erewrite handleAppendEntriesReply_log by eauto.
find_copy_apply_lem_hyp handleAppendEntriesReply_nextIndex; auto.
intuition; repeat find_rewrite.
+ auto.
+ destruct (name_eq_dec h' (pSrc p)).
* subst. rewrite get_set_same_default.
unfold getNextIndex.
apply Nat.max_case; auto.
{ destruct es; simpl.
* lia.
* pose proof append_entries_reply_sublog_invariant _ ltac:(eauto).
unfold append_entries_reply_sublog in *.
eapply_prop_hyp pBody pBody; simpl; eauto.
apply maxIndex_is_max; auto.
apply logs_sorted_invariant; auto.
}
* rewrite get_set_diff_default by auto.
auto.
+ destruct (name_eq_dec h' (pSrc p)).
* subst. rewrite get_set_same_default.
unfold getNextIndex.
apply NPeano.Nat.le_le_pred.
auto.
* rewrite get_set_diff_default by auto.
auto.
- auto.
Qed.
Lemma handleRequestVote_nextIndex_preserved :
forall st h h' t lli llt st' m,
handleRequestVote h st t h' lli llt = (st', m) ->
nextIndex_preserved st st'.
Proof using.
unfold handleRequestVote, nextIndex_preserved, advanceCurrentTerm.
intros.
repeat break_match; repeat find_inversion; simpl in *; auto; try congruence.
Qed.
Lemma nextIndex_safety_request_vote :
raft_net_invariant_request_vote nextIndex_safety.
Proof using.
unfold raft_net_invariant_request_vote, nextIndex_safety.
simpl.
intros.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- eauto using nextIndex_safety_preserved, handleRequestVote_nextIndex_preserved.
- auto.
Qed.
Lemma handleRequestVoteReply_matchIndex :
forall n st src t v,
type (handleRequestVoteReply n st src t v) = Leader ->
type st = Leader /\
nextIndex (handleRequestVoteReply n st src t v) =
nextIndex st \/
nextIndex (handleRequestVoteReply n st src t v) = [].
Proof using.
unfold handleRequestVoteReply.
intros.
repeat break_match; repeat find_inversion; simpl in *; auto; try congruence.
Qed.
Lemma nextIndex_safety_request_vote_reply :
raft_net_invariant_request_vote_reply nextIndex_safety.
Proof using.
unfold raft_net_invariant_request_vote_reply, nextIndex_safety.
simpl.
intros.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- find_copy_apply_lem_hyp handleRequestVoteReply_matchIndex.
unfold getNextIndex in *.
erewrite handleRequestVoteReply_log in * by eauto.
intuition; repeat find_rewrite.
+ auto.
+ unfold assoc_default. simpl.
auto using NPeano.Nat.le_le_pred.
- auto.
Qed.
Lemma doLeader_nextIndex_preserved :
forall st h os st' ms,
doLeader st h = (os, st', ms) ->
nextIndex_preserved st st'.
Proof using.
unfold doLeader, nextIndex_preserved.
intros.
repeat break_match; repeat find_inversion; auto; try congruence.
Qed.
Lemma nextIndex_safety_do_leader :
raft_net_invariant_do_leader nextIndex_safety.
Proof using.
unfold raft_net_invariant_do_leader, nextIndex_safety.
simpl.
intros.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- eauto using nextIndex_safety_preserved, doLeader_nextIndex_preserved.
- auto.
Qed.
Lemma doGenericServer_nextIndex_preserved :
forall h st os st' ms,
doGenericServer h st = (os, st', ms) ->
nextIndex_preserved st st'.
Proof using.
unfold doGenericServer, nextIndex_preserved.
intros.
repeat break_match; repeat find_inversion; simpl in *; auto; try congruence;
use_applyEntries_spec; subst; simpl in *; auto.
Qed.
Lemma nextIndex_safety_do_generic_server :
raft_net_invariant_do_generic_server nextIndex_safety.
Proof using.
unfold raft_net_invariant_do_generic_server, nextIndex_safety.
simpl.
intros.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- eauto using nextIndex_safety_preserved, doGenericServer_nextIndex_preserved.
- auto.
Qed.
Lemma nextIndex_safety_state_same_packet_subset :
raft_net_invariant_state_same_packet_subset nextIndex_safety.
Proof using.
unfold raft_net_invariant_state_same_packet_subset, nextIndex_safety.
simpl.
intros.
repeat find_reverse_higher_order_rewrite.
auto.
Qed.
Lemma nextIndex_safety_reboot :
raft_net_invariant_reboot nextIndex_safety.
Proof using.
unfold raft_net_invariant_reboot, nextIndex_safety, reboot.
simpl.
intros.
subst.
repeat find_higher_order_rewrite.
update_destruct_simplify.
- unfold getNextIndex, assoc_default. simpl. lia.
- auto.
Qed.
Lemma nextIndex_safety_invariant :
forall net,
raft_intermediate_reachable net ->
nextIndex_safety net.
Proof using si aersi.
intros.
apply raft_net_invariant; auto.
- apply nextIndex_safety_init.
- apply nextIndex_safety_client_request.
- apply nextIndex_safety_timeout.
- apply nextIndex_safety_append_entries.
- apply nextIndex_safety_append_entries_reply.
- apply nextIndex_safety_request_vote.
- apply nextIndex_safety_request_vote_reply.
- apply nextIndex_safety_do_leader.
- apply nextIndex_safety_do_generic_server.
- apply nextIndex_safety_state_same_packet_subset.
- apply nextIndex_safety_reboot.
Qed.
Instance nisi : nextIndex_safety_interface.
Proof.
split.
exact nextIndex_safety_invariant.
Qed.
End NextIndexSafety.
|
It was with great surprise that an email and telephone call has led to this posting. On Wednesday, April 10, 2013, I was notified by Niko Wahl, curator of the Mauthausen Memorial, about 90 miles from Vienna, Austria, that a permanent display of “This Is Your Life, Hanna Bloch Kohner” would be opening along with the an entirely new exhibition at the memorial on Sunday, May 5.
The museum, which was closed for many years, has been restructured and modernized to enable visitors to bear witness to the testimonies and artifacts from this infamous concentration camp. The result of the contact made by Mr. Wahl led to an invitation by the Austrian Ministry of Culture, for myself and my husband, Steve, to come to Mauthausen and take part in the ceremonial re-dedication of the memorial and museum.
With more than one thousand invited guests, including the Heads of State of Austria, Poland, and Hungary, Ministers from Israel and Russia, and the American Ambassador to Austria, William Eacho, we had the opportunity to witness the dedication speeches, the introduction of 30 Mauthausen survivors, and ceremonial commemorations to be included in a time capsule that will remain in place at the memorial for at least 100 years.
I was honored to see the prominent place that Hanna’s story has, just off the main museum entrance. Hanna’s display features an eight-minute segment of the “This Is Your Life” episode. As I observed the many guests in attendance who came through the permanent exhibit, I had the opportunity to share Hanna’s story in more detail. This was the experience of a lifetime, and the work we do at Voices of the Generations continues to be rewarding, fulfilling, and ever more important.
Julie Kohner with American Ambassador to Austria, William Eacho and his wife, Donna Eacho, at the re-dedication of the Mauthausen Memorial.
The State of Israel memorial at Mauthausen. |
[GOAL]
⊢ |exp 1 - 2244083 / 825552| ≤ 1 / 10 ^ 10
[PROOFSTEP]
apply exp_approx_start
[GOAL]
case h
⊢ |exp 1 - expNear 0 1 (2244083 / 825552)| ≤ |1| ^ 0 / ↑(Nat.factorial 0) * (1 / 10 ^ 10)
[PROOFSTEP]
iterate 13 refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
case h
⊢ |exp 1 - expNear 0 1 (2244083 / 825552)| ≤ |1| ^ 0 / ↑(Nat.factorial 0) * (1 / 10 ^ 10)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 0 + 1 = ?m.664
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 1 = ?m.664
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑1 = ?m.675
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 1 1 ((2244083 / 825552 - 1) * 1)| ≤ |1| ^ 1 / ↑(Nat.factorial 1) * (1 / 10 ^ 10 * 1)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 1 + 1 = ?m.1007
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 2 = ?m.1007
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑2 = ?m.1009
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 2 1 (((2244083 / 825552 - 1) * 1 - 1) * 2)| ≤ |1| ^ 2 / ↑(Nat.factorial 2) * (1 / 10 ^ 10 * 1 * 2)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 2 + 1 = ?m.1077
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 3 = ?m.1077
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑3 = ?m.1079
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 3 1 ((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3)| ≤
|1| ^ 3 / ↑(Nat.factorial 3) * (1 / 10 ^ 10 * 1 * 2 * 3)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 3 + 1 = ?m.1147
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 4 = ?m.1147
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑4 = ?m.1149
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 4 1 (((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4)| ≤
|1| ^ 4 / ↑(Nat.factorial 4) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 4 + 1 = ?m.1217
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 5 = ?m.1217
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑5 = ?m.1219
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 5 1 ((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5)| ≤
|1| ^ 5 / ↑(Nat.factorial 5) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 5 + 1 = ?m.1287
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 6 = ?m.1287
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑6 = ?m.1289
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 6 1 (((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6)| ≤
|1| ^ 6 / ↑(Nat.factorial 6) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5 * 6)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 6 + 1 = ?m.1357
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 7 = ?m.1357
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑7 = ?m.1359
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 7 1 ((((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7)| ≤
|1| ^ 7 / ↑(Nat.factorial 7) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5 * 6 * 7)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 7 + 1 = ?m.1427
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 8 = ?m.1427
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑8 = ?m.1429
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 8 1
(((((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) * 8)| ≤
|1| ^ 8 / ↑(Nat.factorial 8) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 8 + 1 = ?m.1497
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 9 = ?m.1497
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑9 = ?m.1499
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 9 1
((((((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) * 8 - 1) *
9)| ≤
|1| ^ 9 / ↑(Nat.factorial 9) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 9 + 1 = ?m.1567
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 10 = ?m.1567
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑10 = ?m.1569
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 10 1
(((((((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) * 8 - 1) * 9 -
1) *
10)| ≤
|1| ^ 10 / ↑(Nat.factorial 10) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 10 + 1 = ?m.1637
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 11 = ?m.1637
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑11 = ?m.1639
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 11 1
((((((((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) * 8 - 1) *
9 -
1) *
10 -
1) *
11)| ≤
|1| ^ 11 / ↑(Nat.factorial 11) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 11 + 1 = ?m.1707
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 12 = ?m.1707
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑12 = ?m.1709
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 12 1
(((((((((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) * 8 - 1) *
9 -
1) *
10 -
1) *
11 -
1) *
12)| ≤
|1| ^ 12 / ↑(Nat.factorial 12) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 12 + 1 = ?m.1777
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 13 = ?m.1777
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑13 = ?m.1779
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 13 1
((((((((((((((2244083 / 825552 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) * 8 - 1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13)| ≤
|1| ^ 13 / ↑(Nat.factorial 13) * (1 / 10 ^ 10 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13)
[PROOFSTEP]
norm_num1
[GOAL]
case h
⊢ |exp 1 - expNear 13 1 (5 / 7)| ≤ |1| ^ 13 / ↑(Nat.factorial 13) * (243243 / 390625)
[PROOFSTEP]
refine' exp_approx_end' _ (by norm_num1; rfl) _ (by norm_cast) (by simp) _
[GOAL]
⊢ 13 + 1 = ?m.2354
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 14 = ?m.2354
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑14 = ?m.2356
[PROOFSTEP]
norm_cast
[GOAL]
⊢ |1| ≤ 1
[PROOFSTEP]
simp
[GOAL]
case h
⊢ |1 - 5 / 7| ≤ 243243 / 390625 - |1| / 14 * ((14 + 1) / 14)
[PROOFSTEP]
rw [_root_.abs_one, abs_of_pos]
[GOAL]
case h
⊢ 1 - 5 / 7 ≤ 243243 / 390625 - 1 / 14 * ((14 + 1) / 14)
[PROOFSTEP]
norm_num1
[GOAL]
case h
⊢ 0 < 1 - 5 / 7
[PROOFSTEP]
norm_num1
[GOAL]
⊢ |exp 1 - 363916618873 / 133877442384| ≤ 1 / 10 ^ 20
[PROOFSTEP]
apply exp_approx_start
[GOAL]
case h
⊢ |exp 1 - expNear 0 1 (363916618873 / 133877442384)| ≤ |1| ^ 0 / ↑(Nat.factorial 0) * (1 / 10 ^ 20)
[PROOFSTEP]
iterate 21 refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
case h
⊢ |exp 1 - expNear 0 1 (363916618873 / 133877442384)| ≤ |1| ^ 0 / ↑(Nat.factorial 0) * (1 / 10 ^ 20)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 0 + 1 = ?m.3624
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 1 = ?m.3624
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑1 = ?m.3635
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 1 1 ((363916618873 / 133877442384 - 1) * 1)| ≤ |1| ^ 1 / ↑(Nat.factorial 1) * (1 / 10 ^ 20 * 1)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 1 + 1 = ?m.3967
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 2 = ?m.3967
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑2 = ?m.3969
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 2 1 (((363916618873 / 133877442384 - 1) * 1 - 1) * 2)| ≤
|1| ^ 2 / ↑(Nat.factorial 2) * (1 / 10 ^ 20 * 1 * 2)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 2 + 1 = ?m.4037
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 3 = ?m.4037
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑3 = ?m.4039
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 3 1 ((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3)| ≤
|1| ^ 3 / ↑(Nat.factorial 3) * (1 / 10 ^ 20 * 1 * 2 * 3)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 3 + 1 = ?m.4107
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 4 = ?m.4107
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑4 = ?m.4109
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 4 1 (((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4)| ≤
|1| ^ 4 / ↑(Nat.factorial 4) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 4 + 1 = ?m.4177
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 5 = ?m.4177
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑5 = ?m.4179
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 5 1 ((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5)| ≤
|1| ^ 5 / ↑(Nat.factorial 5) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 5 + 1 = ?m.4247
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 6 = ?m.4247
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑6 = ?m.4249
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 - expNear 6 1 (((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6)| ≤
|1| ^ 6 / ↑(Nat.factorial 6) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 6 + 1 = ?m.4317
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 7 = ?m.4317
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑7 = ?m.4319
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 7 1
((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7)| ≤
|1| ^ 7 / ↑(Nat.factorial 7) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 7 + 1 = ?m.4387
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 8 = ?m.4387
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑8 = ?m.4389
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 8 1
(((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) *
8)| ≤
|1| ^ 8 / ↑(Nat.factorial 8) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 8 + 1 = ?m.4457
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 9 = ?m.4457
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑9 = ?m.4459
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 9 1
((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) *
8 -
1) *
9)| ≤
|1| ^ 9 / ↑(Nat.factorial 9) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 9 + 1 = ?m.4527
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 10 = ?m.4527
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑10 = ?m.4529
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 10 1
(((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) *
8 -
1) *
9 -
1) *
10)| ≤
|1| ^ 10 / ↑(Nat.factorial 10) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 10 + 1 = ?m.4597
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 11 = ?m.4597
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑11 = ?m.4599
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 11 1
((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) *
8 -
1) *
9 -
1) *
10 -
1) *
11)| ≤
|1| ^ 11 / ↑(Nat.factorial 11) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 11 + 1 = ?m.4667
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 12 = ?m.4667
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑12 = ?m.4669
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 12 1
(((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 - 1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12)| ≤
|1| ^ 12 / ↑(Nat.factorial 12) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 12 + 1 = ?m.4737
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 13 = ?m.4737
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑13 = ?m.4739
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 13 1
((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13)| ≤
|1| ^ 13 / ↑(Nat.factorial 13) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 13 + 1 = ?m.4807
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 14 = ?m.4807
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑14 = ?m.4809
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 14 1
(((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13 -
1) *
14)| ≤
|1| ^ 14 / ↑(Nat.factorial 14) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 14 + 1 = ?m.4877
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 15 = ?m.4877
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑15 = ?m.4879
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 15 1
((((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13 -
1) *
14 -
1) *
15)| ≤
|1| ^ 15 / ↑(Nat.factorial 15) * (1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 15 + 1 = ?m.4947
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 16 = ?m.4947
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑16 = ?m.4949
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 16 1
(((((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13 -
1) *
14 -
1) *
15 -
1) *
16)| ≤
|1| ^ 16 / ↑(Nat.factorial 16) *
(1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 16 + 1 = ?m.5017
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 17 = ?m.5017
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑17 = ?m.5019
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 17 1
((((((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) * 7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13 -
1) *
14 -
1) *
15 -
1) *
16 -
1) *
17)| ≤
|1| ^ 17 / ↑(Nat.factorial 17) *
(1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 17 + 1 = ?m.5087
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 18 = ?m.5087
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑18 = ?m.5089
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 18 1
(((((((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) *
7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13 -
1) *
14 -
1) *
15 -
1) *
16 -
1) *
17 -
1) *
18)| ≤
|1| ^ 18 / ↑(Nat.factorial 18) *
(1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 18 + 1 = ?m.5157
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 19 = ?m.5157
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑19 = ?m.5159
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 19 1
((((((((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) *
7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13 -
1) *
14 -
1) *
15 -
1) *
16 -
1) *
17 -
1) *
18 -
1) *
19)| ≤
|1| ^ 19 / ↑(Nat.factorial 19) *
(1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18 * 19)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 19 + 1 = ?m.5227
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 20 = ?m.5227
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑20 = ?m.5229
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 20 1
(((((((((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) *
7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13 -
1) *
14 -
1) *
15 -
1) *
16 -
1) *
17 -
1) *
18 -
1) *
19 -
1) *
20)| ≤
|1| ^ 20 / ↑(Nat.factorial 20) *
(1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18 * 19 * 20)
[PROOFSTEP]
refine' exp_1_approx_succ_eq (by norm_num1; rfl) (by norm_cast) _
[GOAL]
⊢ 20 + 1 = ?m.5297
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 21 = ?m.5297
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑21 = ?m.5299
[PROOFSTEP]
norm_cast
[GOAL]
case h
⊢ |exp 1 -
expNear 21 1
((((((((((((((((((((((363916618873 / 133877442384 - 1) * 1 - 1) * 2 - 1) * 3 - 1) * 4 - 1) * 5 - 1) * 6 - 1) *
7 -
1) *
8 -
1) *
9 -
1) *
10 -
1) *
11 -
1) *
12 -
1) *
13 -
1) *
14 -
1) *
15 -
1) *
16 -
1) *
17 -
1) *
18 -
1) *
19 -
1) *
20 -
1) *
21)| ≤
|1| ^ 21 / ↑(Nat.factorial 21) *
(1 / 10 ^ 20 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18 * 19 * 20 * 21)
[PROOFSTEP]
norm_num1
[GOAL]
case h
⊢ |exp 1 - expNear 21 1 (36295539 / 44271641)| ≤ |1| ^ 21 / ↑(Nat.factorial 21) * (311834363841 / 610351562500)
[PROOFSTEP]
refine' exp_approx_end' _ (by norm_num1; rfl) _ (by norm_cast) (by simp) _
[GOAL]
⊢ 21 + 1 = ?m.5994
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 22 = ?m.5994
[PROOFSTEP]
rfl
[GOAL]
⊢ ↑22 = ?m.5996
[PROOFSTEP]
norm_cast
[GOAL]
⊢ |1| ≤ 1
[PROOFSTEP]
simp
[GOAL]
case h
⊢ |1 - 36295539 / 44271641| ≤ 311834363841 / 610351562500 - |1| / 22 * ((22 + 1) / 22)
[PROOFSTEP]
rw [_root_.abs_one, abs_of_pos]
[GOAL]
case h
⊢ 1 - 36295539 / 44271641 ≤ 311834363841 / 610351562500 - 1 / 22 * ((22 + 1) / 22)
[PROOFSTEP]
norm_num1
[GOAL]
case h
⊢ 0 < 1 - 36295539 / 44271641
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 2.7182818283 < 2244083 / 825552 - 1 / 10 ^ 10
[PROOFSTEP]
norm_num
[GOAL]
⊢ 1 / 10 ^ 10 + 2244083 / 825552 < 2.7182818286
[PROOFSTEP]
norm_num
[GOAL]
⊢ 0.36787944116 < exp (-1)
[PROOFSTEP]
rw [exp_neg, lt_inv _ (exp_pos _)]
[GOAL]
⊢ exp 1 < 0.36787944116⁻¹
⊢ 0 < 0.36787944116
[PROOFSTEP]
refine' lt_of_le_of_lt (sub_le_iff_le_add.1 (abs_sub_le_iff.1 exp_one_near_10).1) _
[GOAL]
⊢ 1 / 10 ^ 10 + 2244083 / 825552 < 0.36787944116⁻¹
⊢ 0 < 0.36787944116
[PROOFSTEP]
all_goals norm_num
[GOAL]
⊢ 1 / 10 ^ 10 + 2244083 / 825552 < 0.36787944116⁻¹
[PROOFSTEP]
norm_num
[GOAL]
⊢ 0 < 0.36787944116
[PROOFSTEP]
norm_num
[GOAL]
⊢ exp (-1) < 0.3678794412
[PROOFSTEP]
rw [exp_neg, inv_lt (exp_pos _)]
[GOAL]
⊢ 0.3678794412⁻¹ < exp 1
⊢ 0 < 0.3678794412
[PROOFSTEP]
refine' lt_of_lt_of_le _ (sub_le_comm.1 (abs_sub_le_iff.1 exp_one_near_10).2)
[GOAL]
⊢ 0.3678794412⁻¹ < 2244083 / 825552 - 1 / 10 ^ 10
⊢ 0 < 0.3678794412
[PROOFSTEP]
all_goals norm_num
[GOAL]
⊢ 0.3678794412⁻¹ < 2244083 / 825552 - 1 / 10 ^ 10
[PROOFSTEP]
norm_num
[GOAL]
⊢ 0 < 0.3678794412
[PROOFSTEP]
norm_num
[GOAL]
⊢ |log 2 - 287209 / 414355| ≤ 1 / 10 ^ 10
[PROOFSTEP]
suffices |log 2 - 287209 / 414355| ≤ 1 / 17179869184 + (1 / 10 ^ 10 - 1 / 2 ^ 34)
by
norm_num1 at *
assumption
[GOAL]
this : |log 2 - 287209 / 414355| ≤ 1 / 17179869184 + (1 / 10 ^ 10 - 1 / 2 ^ 34)
⊢ |log 2 - 287209 / 414355| ≤ 1 / 10 ^ 10
[PROOFSTEP]
norm_num1 at *
[GOAL]
this : |log 2 - 287209 / 414355| ≤ 1 / 10000000000
⊢ |log 2 - 287209 / 414355| ≤ 1 / 10000000000
[PROOFSTEP]
assumption
[GOAL]
⊢ |log 2 - 287209 / 414355| ≤ 1 / 17179869184 + (1 / 10 ^ 10 - 1 / 2 ^ 34)
[PROOFSTEP]
have t : |(2⁻¹ : ℝ)| = 2⁻¹ := by rw [abs_of_pos]; norm_num
[GOAL]
⊢ |2⁻¹| = 2⁻¹
[PROOFSTEP]
rw [abs_of_pos]
[GOAL]
⊢ 0 < 2⁻¹
[PROOFSTEP]
norm_num
[GOAL]
t : |2⁻¹| = 2⁻¹
⊢ |log 2 - 287209 / 414355| ≤ 1 / 17179869184 + (1 / 10 ^ 10 - 1 / 2 ^ 34)
[PROOFSTEP]
have z := Real.abs_log_sub_add_sum_range_le (show |(2⁻¹ : ℝ)| < 1 by rw [t]; norm_num) 34
[GOAL]
t : |2⁻¹| = 2⁻¹
⊢ |2⁻¹| < 1
[PROOFSTEP]
rw [t]
[GOAL]
t : |2⁻¹| = 2⁻¹
⊢ 2⁻¹ < 1
[PROOFSTEP]
norm_num
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |(Finset.sum (range 34) fun i => 2⁻¹ ^ (i + 1) / (↑i + 1)) + log (1 - 2⁻¹)| ≤ |2⁻¹| ^ (34 + 1) / (1 - |2⁻¹|)
⊢ |log 2 - 287209 / 414355| ≤ 1 / 17179869184 + (1 / 10 ^ 10 - 1 / 2 ^ 34)
[PROOFSTEP]
rw [t] at z
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |(Finset.sum (range 34) fun i => 2⁻¹ ^ (i + 1) / (↑i + 1)) + log (1 - 2⁻¹)| ≤ 2⁻¹ ^ (34 + 1) / (1 - 2⁻¹)
⊢ |log 2 - 287209 / 414355| ≤ 1 / 17179869184 + (1 / 10 ^ 10 - 1 / 2 ^ 34)
[PROOFSTEP]
norm_num1 at z
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |(Finset.sum (range 34) fun x => (1 / 2) ^ (x + 1) / (↑x + 1)) + log (1 / 2)| ≤ 1 / 17179869184
⊢ |log 2 - 287209 / 414355| ≤ 1 / 17179869184 + (1 / 10 ^ 10 - 1 / 2 ^ 34)
[PROOFSTEP]
rw [one_div (2 : ℝ), log_inv, ← sub_eq_add_neg, _root_.abs_sub_comm] at z
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |log 2 - Finset.sum (range 34) fun x => 2⁻¹ ^ (x + 1) / (↑x + 1)| ≤ 1 / 17179869184
⊢ |log 2 - 287209 / 414355| ≤ 1 / 17179869184 + (1 / 10 ^ 10 - 1 / 2 ^ 34)
[PROOFSTEP]
apply le_trans (_root_.abs_sub_le _ _ _) (add_le_add z _)
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |log 2 - Finset.sum (range 34) fun x => 2⁻¹ ^ (x + 1) / (↑x + 1)| ≤ 1 / 17179869184
⊢ |(Finset.sum (range 34) fun x => 2⁻¹ ^ (x + 1) / (↑x + 1)) - 287209 / 414355| ≤ 1 / 10 ^ 10 - 1 / 2 ^ 34
[PROOFSTEP]
simp_rw [sum_range_succ]
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |log 2 - Finset.sum (range 34) fun x => 2⁻¹ ^ (x + 1) / (↑x + 1)| ≤ 1 / 17179869184
⊢ |(Finset.sum (range 0) fun x => 2⁻¹ ^ (x + 1) / (↑x + 1)) + 2⁻¹ ^ (0 + 1) / (↑0 + 1) + 2⁻¹ ^ (1 + 1) / (↑1 + 1) +
2⁻¹ ^ (2 + 1) / (↑2 + 1) +
2⁻¹ ^ (3 + 1) / (↑3 + 1) +
2⁻¹ ^ (4 + 1) / (↑4 + 1) +
2⁻¹ ^ (5 + 1) / (↑5 + 1) +
2⁻¹ ^ (6 + 1) / (↑6 + 1) +
2⁻¹ ^ (7 + 1) / (↑7 + 1) +
2⁻¹ ^ (8 + 1) / (↑8 + 1) +
2⁻¹ ^ (9 + 1) / (↑9 + 1) +
2⁻¹ ^ (10 + 1) / (↑10 + 1) +
2⁻¹ ^ (11 + 1) / (↑11 + 1) +
2⁻¹ ^ (12 + 1) / (↑12 + 1) +
2⁻¹ ^ (13 + 1) / (↑13 + 1) +
2⁻¹ ^ (14 + 1) / (↑14 + 1) +
2⁻¹ ^ (15 + 1) / (↑15 + 1) +
2⁻¹ ^ (16 + 1) / (↑16 + 1) +
2⁻¹ ^ (17 + 1) / (↑17 + 1) +
2⁻¹ ^ (18 + 1) / (↑18 + 1) +
2⁻¹ ^ (19 + 1) / (↑19 + 1) +
2⁻¹ ^ (20 + 1) / (↑20 + 1) +
2⁻¹ ^ (21 + 1) / (↑21 + 1) +
2⁻¹ ^ (22 + 1) / (↑22 + 1) +
2⁻¹ ^ (23 + 1) / (↑23 + 1) +
2⁻¹ ^ (24 + 1) / (↑24 + 1) +
2⁻¹ ^ (25 + 1) / (↑25 + 1) +
2⁻¹ ^ (26 + 1) / (↑26 + 1) +
2⁻¹ ^ (27 + 1) / (↑27 + 1) +
2⁻¹ ^ (28 + 1) / (↑28 + 1) +
2⁻¹ ^ (29 + 1) / (↑29 + 1) +
2⁻¹ ^ (30 + 1) / (↑30 + 1) +
2⁻¹ ^ (31 + 1) / (↑31 + 1) +
2⁻¹ ^ (32 + 1) / (↑32 + 1) +
2⁻¹ ^ (33 + 1) / (↑33 + 1) -
287209 / 414355| ≤
1 / 10 ^ 10 - 1 / 2 ^ 34
[PROOFSTEP]
norm_num
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |log 2 - Finset.sum (range 34) fun x => 2⁻¹ ^ (x + 1) / (↑x + 1)| ≤ 1 / 17179869184
⊢ |30417026706710207 / 51397301678363663775930777600| ≤ 7011591 / 167772160000000000
[PROOFSTEP]
rw [abs_of_pos]
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |log 2 - Finset.sum (range 34) fun x => 2⁻¹ ^ (x + 1) / (↑x + 1)| ≤ 1 / 17179869184
⊢ 30417026706710207 / 51397301678363663775930777600 ≤ 7011591 / 167772160000000000
[PROOFSTEP]
norm_num
[GOAL]
t : |2⁻¹| = 2⁻¹
z : |log 2 - Finset.sum (range 34) fun x => 2⁻¹ ^ (x + 1) / (↑x + 1)| ≤ 1 / 17179869184
⊢ 0 < 30417026706710207 / 51397301678363663775930777600
[PROOFSTEP]
norm_num
[GOAL]
⊢ 0.6931471803 < 287209 / 414355 - 1 / 10 ^ 10
[PROOFSTEP]
norm_num1
[GOAL]
⊢ 1 / 10 ^ 10 + 287209 / 414355 < 0.6931471808
[PROOFSTEP]
norm_num
|
State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
⊢ Mono f ↔ Function.Injective ↑f State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
⊢ Mono f → Function.Injective ↑f Tactic: refine' ⟨_, ConcreteCategory.mono_of_injective f⟩ State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
⊢ Mono f → Function.Injective ↑f State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
⊢ Function.Injective ↑f Tactic: intro State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
⊢ Function.Injective ↑f State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
⊢ a₁ = a₂ Tactic: intro a₁ a₂ h State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
⊢ a₁ = a₂ State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
⊢ a₁ = a₂ Tactic: let X := NonemptyFinLinOrdCat.of (ULift (Fin 1)) State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
⊢ a₁ = a₂ State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
⊢ a₁ = a₂ Tactic: let g₁ : X ⟶ A := ⟨fun _ => a₁, fun _ _ _ => by rfl⟩ State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
⊢ a₁ = a₂ State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
⊢ a₁ = a₂ Tactic: let g₂ : X ⟶ A := ⟨fun _ => a₂, fun _ _ _ => by rfl⟩ State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
⊢ a₁ = a₂ State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
⊢ ↑g₁ { down := 0 } = ↑g₂ { down := 0 } Tactic: change g₁ (ULift.up (0 : Fin 1)) = g₂ (ULift.up (0 : Fin 1)) State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
⊢ ↑g₁ { down := 0 } = ↑g₂ { down := 0 } State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
eq : g₁ ≫ f = g₂ ≫ f
⊢ ↑g₁ { down := 0 } = ↑g₂ { down := 0 } Tactic: have eq : g₁ ≫ f = g₂ ≫ f := by
ext
exact h State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
eq : g₁ ≫ f = g₂ ≫ f
⊢ ↑g₁ { down := 0 } = ↑g₂ { down := 0 } State After: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
eq : g₁ = g₂
⊢ ↑g₁ { down := 0 } = ↑g₂ { down := 0 } Tactic: rw [cancel_mono] at eq State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
eq : g₁ = g₂
⊢ ↑g₁ { down := 0 } = ↑g₂ { down := 0 } State After: no goals Tactic: rw [eq] State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
x✝² x✝¹ : ↑X
x✝ : x✝² ≤ x✝¹
⊢ (fun x => a₁) x✝² ≤ (fun x => a₁) x✝¹ State After: no goals Tactic: rfl State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
x✝² x✝¹ : ↑X
x✝ : x✝² ≤ x✝¹
⊢ (fun x => a₂) x✝² ≤ (fun x => a₂) x✝¹ State After: no goals Tactic: rfl State Before: A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
⊢ g₁ ≫ f = g₂ ≫ f State After: case w
A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
x✝ : (forget NonemptyFinLinOrdCat).obj X
⊢ (forget NonemptyFinLinOrdCat).map (g₁ ≫ f) x✝ = (forget NonemptyFinLinOrdCat).map (g₂ ≫ f) x✝ Tactic: ext State Before: case w
A B : NonemptyFinLinOrdCat
f : A ⟶ B
a✝ : Mono f
a₁ a₂ : ↑A
h : ↑f a₁ = ↑f a₂
X : NonemptyFinLinOrdCat := of (ULift (Fin 1))
g₁ : X ⟶ A := { toFun := fun x => a₁, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₁) x ≤ (fun x => a₁) x) }
g₂ : X ⟶ A := { toFun := fun x => a₂, monotone' := (_ : ∀ (x x_1 : ↑X), x ≤ x_1 → (fun x => a₂) x ≤ (fun x => a₂) x) }
x✝ : (forget NonemptyFinLinOrdCat).obj X
⊢ (forget NonemptyFinLinOrdCat).map (g₁ ≫ f) x✝ = (forget NonemptyFinLinOrdCat).map (g₂ ≫ f) x✝ State After: no goals Tactic: exact h |
Players listed with no appearances have been in the matchday squad but only as unused substitutes .
|
using BinDeps
WORD_SIZE = 32
@BinDeps.setup
group = library_group("libav")
deps = [
libavcodec = library_dependency("libavcodec", aliases=[["libavcodec-ffmpeg.so.","libavcodec.","libavcodec.so.","libavcodec.ffmpeg.so.","avcodec-"].*["53" "54" "55" "56"]...], group = group)
libavformat = library_dependency("libavformat", aliases=[["libavformat-ffmpeg.so.","libavformat.","libavformat.so.","libavformat.ffmpeg.so.","avformat-"].*["53" "54" "55" "56"]...], group = group)
libavutil = library_dependency("libavutil", aliases=[["libavutil-ffmpeg.so.", "libavutil.","libavutil.so.", "libavutil.ffmpeg.so.", "avutil-"].*["51" "52" "54"]...], group = group)
libswscale = library_dependency("libswscale", aliases=[["libswscale-ffmpeg.so.","libswscale.","libswscale.so.","libswscale.ffmpeg.so.","swscale-"].*["2" "3"]...], group = group)
libavfilter = library_dependency("libavfilter", aliases=[["libavfilter-ffmpeg.so.","libavfilter.","libavfilter.so.","libavfilter.ffmpeg.so.","avfilter-"].*["2" "3" "4" "5"]...], group = group)
libavdevice = library_dependency("libavdevice", aliases=[["libavdevice-ffmpeg.so.","libavdevice.","libavdevice.so.","libavdevice.ffmpeg.so.","avdevice-"].*["53" "54" "55" "56"]...], group = group)
]
libav_libs = [libavutil, libavcodec, libavformat, libavfilter, libswscale, libavdevice]
# if (have_avresample = dlopen_e("libavresample") != C_NULL)
# libavresample = library_dependency("libavresample", aliases=[["libavresample.so.","avresample-"].*["1"]...], group = group)
# push!(deps, libavresample)
# end
# if (have_swresample = dlopen_e("libswresample") != C_NULL)
# libswresample = library_dependency("libswresample", aliases=[["libswresample.so.","libswresample.ffmpeg.so.","swresample-"].*["0"]...], group = group)
# push!(deps, libswresample)
# push!(libav_libs, libswresample)
# end
if is_windows()
provides(Binaries, URI("http://www.daniel-thiem.de/ffmpeg/ffmpeg-2.2.3-win64-shared.7z"),
libav_libs,
os = :Windows,
unpacked_dir="ffmpeg-2.2.3-win64-shared/bin")
end
if is_apple()
using Homebrew
provides( Homebrew.HB, "ffmpeg", libav_libs, os = :Darwin )
end
# System Package Managers
apt_packages = Dict(
"libavcodec-extra-53" => libavcodec,
"libavcodec53" => libavcodec,
"libavcodec-extra-54" => libavcodec,
"libavcodec54" => libavcodec,
"libavcodec-extra-55" => libavcodec,
"libavcodec55" => libavcodec,
"libavdevice53" => libavdevice,
"libavfilter2" => libavfilter,
"libavfilter3" => libavfilter,
"libavfilter4" => libavfilter,
"libavformat53" => libavformat,
"libavformat54" => libavformat,
"libavformat55" => libavformat,
#"libavresample1" => libavresample,
"libavutil51" => libavutil,
"libavutil52" => libavutil,
"libswscale2" => libswscale,
## Available from https://launchpad.net/~jon-severinsson/+archive/ubuntu/ffmpeg
"libavcodec55-ffmpeg" => libavcodec,
"libavdevice55-ffmpeg" => libavdevice,
"libavfilter4-ffmpeg" => libavfilter,
"libavformat55-ffmpeg" => libavformat,
"libavutil52-ffmpeg" => libavutil,
#"libswresample0-ffmpeg" => libswresample,
"libswscale2-ffmpeg" => libswscale,
# ffmpeg is again available in standard packages as of ubuntu 15.04
"libavcodec-ffmpeg56" => libavcodec,
"libavdevice-ffmpeg56" => libavdevice,
"libavfilter-ffmpeg5" => libavfilter,
"libavformat-ffmpeg56" => libavformat,
"libavutil-ffmpeg54" => libavutil,
"libswscale-ffmpeg3" => libswscale,
#"libavresample-ffmpeg2" => libavresample,
)
#if have_swresample
# apt_packages["libswresample0-ffmpeg"] = libswresample
#end
#if have_avresample
# apt_packages["libavresample1"] = libavresample
#end
provides(AptGet,
apt_packages)
provides(Yum,
Dict("ffmpeg" => libav_libs))
provides(Pacman,
Dict("ffmpeg" => libav_libs))
provides(Sources,
URI("http://www.ffmpeg.org/releases/ffmpeg-2.3.2.tar.gz"),
libav_libs)
provides(BuildProcess, Autotools(configure_options=["--enable-gpl"]), libav_libs, os = :Unix)
@BinDeps.install Dict(
:libavcodec => :libavcodec,
:libavformat => :libavformat,
:libavutil => :libavutil,
:libswscale => :libswscale,
:libavfilter => :libavfilter,
:libavdevice => :libavdevice
)
|
source("common.r")
df <- read.csv("../results/determinism.csv")
df <- df %>% filter(Commutativity == "true") %>% select(-Commutativity)
maxTime <- max(df %>% select(Time) %>% filter(!((Time == "memout") | (Time == "timeout"))) %>% transform(Time = strtoi(Time)))
outValue <-((maxTime %/% 30) + 10) * 30
df <- df %>%
transform(Time = ifelse(Time == "memout" | Time == "timeout", outValue, strtoi(Time)) / 1000) %>%
transform(Pruning = gsub("true", "Yes", gsub("false", "No", Pruning)))
range <- seq(0, max(df %>% select(Time)), by=0.5)
ticks <- range
print(ticks)
ticks[length(ticks)] <- "Timeout"
df <- ddply(df, c("Name", "Pruning"), summarise,
Mean = mean(Time),
Trials = length(Time),
Sd = sd(Time),
Se = Sd / sqrt(Trials))
plot <- ggplot(df, aes(x=Name,y=Mean,fill=Pruning)) +
mytheme() +
scale_fill_manual(values=c("red", "blue")) +
theme(legend.title = element_text(size = 8),
legend.position = c(0.85, 0.8),
axis.text.x=element_text(angle=50, vjust=0.5)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=Mean-Se,ymax=Mean+Se,width=.4), position=position_dodge(0.9)) +
scale_y_continuous(breaks = range, labels = ticks) +
labs(x = "Benchmark", y = "Time (s)")
mysave("../results/determinism.pdf", plot)
|
Formal statement is: lemma lift_prime_elem_poly: assumes "prime_elem (c :: 'a :: semidom)" shows "prime_elem [:c:]" Informal statement is: If $c$ is a prime element, then $[:c:]$ is a prime element. |
[STATEMENT]
lemma iteration_mono_eq: assumes xn: "x ^ p = (n :: int)"
shows "(n div x ^ pm + x * int pm) div int p = x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (n div x ^ pm + x * int pm) div int p = x
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (n div x ^ pm + x * int pm) div int p = x
[PROOF STEP]
have [simp]: "\<And> n. (x + x * n) = x * (1 + n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. x + x * n = x * (1 + n)
[PROOF STEP]
by (auto simp: field_simps)
[PROOF STATE]
proof (state)
this:
x + x * ?n = x * (1 + ?n)
goal (1 subgoal):
1. (n div x ^ pm + x * int pm) div int p = x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (n div x ^ pm + x * int pm) div int p = x
[PROOF STEP]
unfolding xn[symmetric] p
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x ^ Suc pm div x ^ pm + x * int pm) div int (Suc pm) = x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(n div x ^ pm + x * int pm) div int p = x
goal:
No subgoals!
[PROOF STEP]
qed |
#include <gsl/gsl_test.h>
#include <gsl/gsl_ieee_utils.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_cblas.h>
#include "tests.h"
void
test_spr (void) {
const double flteps = 1e-4, dbleps = 1e-6;
{
int order = 101;
int uplo = 121;
int N = 2;
float alpha = -0.3f;
float Ap[] = { -0.764f, -0.257f, -0.064f };
float X[] = { 0.455f, -0.285f };
int incX = -1;
float Ap_expected[] = { -0.788367f, -0.218097f, -0.126108f };
cblas_sspr(order, uplo, N, alpha, X, incX, Ap);
{
int i;
for (i = 0; i < 3; i++) {
gsl_test_rel(Ap[i], Ap_expected[i], flteps, "sspr(case 1426)");
}
};
};
{
int order = 101;
int uplo = 122;
int N = 2;
float alpha = -0.3f;
float Ap[] = { -0.764f, -0.257f, -0.064f };
float X[] = { 0.455f, -0.285f };
int incX = -1;
float Ap_expected[] = { -0.788367f, -0.218097f, -0.126108f };
cblas_sspr(order, uplo, N, alpha, X, incX, Ap);
{
int i;
for (i = 0; i < 3; i++) {
gsl_test_rel(Ap[i], Ap_expected[i], flteps, "sspr(case 1427)");
}
};
};
{
int order = 102;
int uplo = 121;
int N = 2;
float alpha = -0.3f;
float Ap[] = { -0.764f, -0.257f, -0.064f };
float X[] = { 0.455f, -0.285f };
int incX = -1;
float Ap_expected[] = { -0.788367f, -0.218097f, -0.126108f };
cblas_sspr(order, uplo, N, alpha, X, incX, Ap);
{
int i;
for (i = 0; i < 3; i++) {
gsl_test_rel(Ap[i], Ap_expected[i], flteps, "sspr(case 1428)");
}
};
};
{
int order = 102;
int uplo = 122;
int N = 2;
float alpha = -0.3f;
float Ap[] = { -0.764f, -0.257f, -0.064f };
float X[] = { 0.455f, -0.285f };
int incX = -1;
float Ap_expected[] = { -0.788367f, -0.218097f, -0.126108f };
cblas_sspr(order, uplo, N, alpha, X, incX, Ap);
{
int i;
for (i = 0; i < 3; i++) {
gsl_test_rel(Ap[i], Ap_expected[i], flteps, "sspr(case 1429)");
}
};
};
{
int order = 101;
int uplo = 121;
int N = 2;
double alpha = -1;
double Ap[] = { 0.819, 0.175, -0.809 };
double X[] = { -0.645, -0.222 };
int incX = -1;
double Ap_expected[] = { 0.769716, 0.03181, -1.225025 };
cblas_dspr(order, uplo, N, alpha, X, incX, Ap);
{
int i;
for (i = 0; i < 3; i++) {
gsl_test_rel(Ap[i], Ap_expected[i], dbleps, "dspr(case 1430)");
}
};
};
{
int order = 101;
int uplo = 122;
int N = 2;
double alpha = -1;
double Ap[] = { 0.819, 0.175, -0.809 };
double X[] = { -0.645, -0.222 };
int incX = -1;
double Ap_expected[] = { 0.769716, 0.03181, -1.225025 };
cblas_dspr(order, uplo, N, alpha, X, incX, Ap);
{
int i;
for (i = 0; i < 3; i++) {
gsl_test_rel(Ap[i], Ap_expected[i], dbleps, "dspr(case 1431)");
}
};
};
{
int order = 102;
int uplo = 121;
int N = 2;
double alpha = -1;
double Ap[] = { 0.819, 0.175, -0.809 };
double X[] = { -0.645, -0.222 };
int incX = -1;
double Ap_expected[] = { 0.769716, 0.03181, -1.225025 };
cblas_dspr(order, uplo, N, alpha, X, incX, Ap);
{
int i;
for (i = 0; i < 3; i++) {
gsl_test_rel(Ap[i], Ap_expected[i], dbleps, "dspr(case 1432)");
}
};
};
{
int order = 102;
int uplo = 122;
int N = 2;
double alpha = -1;
double Ap[] = { 0.819, 0.175, -0.809 };
double X[] = { -0.645, -0.222 };
int incX = -1;
double Ap_expected[] = { 0.769716, 0.03181, -1.225025 };
cblas_dspr(order, uplo, N, alpha, X, incX, Ap);
{
int i;
for (i = 0; i < 3; i++) {
gsl_test_rel(Ap[i], Ap_expected[i], dbleps, "dspr(case 1433)");
}
};
};
}
|
module Language.LSP.CodeAction.MakeLemma
import Core.Context
import Core.Core
import Core.Env
import Core.Metadata
import Core.UnifyState
import Data.List
import Data.List1
import Data.String
import Idris.REPL.Opts
import Idris.Syntax
import Idris.Resugar
import Language.JSON
import Language.LSP.CodeAction
import Language.LSP.Message
import Libraries.Data.List.Extra
import Libraries.Data.PosMap
import Parser.Unlit
import Server.Configuration
import Server.Log
import Server.Utils
import TTImp.TTImp
import TTImp.Interactive.MakeLemma
buildCodeAction : Name -> URI -> List TextEdit -> CodeAction
buildCodeAction name uri edits =
let workspaceEdit = MkWorkspaceEdit { changes = Just (singleton uri edits)
, documentChanges = Nothing
, changeAnnotations = Nothing
}
in MkCodeAction { title = "Make lemma of \{show name}"
, kind = Just RefactorExtract
, diagnostics = Just []
, isPreferred = Just False
, disabled = Nothing
, edit = Just workspaceEdit
, command = Nothing
, data_ = Nothing
}
findBlankLine : List String -> Int -> Int
findBlankLine [] acc = acc
findBlankLine (x :: xs) acc = if trim x == "" then acc else findBlankLine xs (acc - 1)
export
makeLemma : Ref LSPConf LSPConfiguration
=> Ref MD Metadata
=> Ref Ctxt Defs
=> Ref UST UState
=> Ref Syn SyntaxInfo
=> Ref ROpts REPLOpts
=> CodeActionParams -> Core (Maybe CodeAction)
makeLemma params = do
let True = params.range.start.line == params.range.end.line
| _ => do logString Debug "makeLemma: start and end line were different"
pure Nothing
[] <- searchCache params.range MakeLemma
| action :: _ => do logString Debug "makeLemma: found cached action"
pure (Just action)
nameLocs <- gets MD nameLocMap
let line = params.range.start.line
let col = params.range.start.character
let Just (loc@(_, nstart, nend), name) = findPointInTreeLoc (line, col) nameLocs
| Nothing => do logString Debug "makeLemma: couldn't find name at \{show (line, col)}"
pure Nothing
context <- gets Ctxt gamma
toBeBracketed <- gets Syn bracketholes
let toBrack = elemBy (\x, y => dropNS x == dropNS y) name toBeBracketed
[(n, nidx, Hole locs _, ty)] <- lookupNameBy (\g => (definition g, type g)) name context
| _ => do logString Debug $ "makeLemma: \{show name} is not a metavariable"
pure Nothing
(lty, lapp) <- makeLemma replFC name locs ty
lemmaTy <- pterm lty
papp <- pterm lapp
let lemmaApp = show $ the PTerm $ if toBrack then addBracket replFC papp else papp
src <- lines <$> getSource
let Just srcLine = elemAt src (integerToNat (cast line))
| Nothing => do logString Debug $ "makeLemma: error while fetching the referenced line"
pure Nothing
let (markM, _) = isLitLine srcLine
  -- TODO: currently it has the same behaviour as the compiler: it puts the new
  --       definition on the first blank line above the hole. We want to put it
  --       exactly above the clause or declaration that uses the hole; however,
  --       this information is only available within the compiler, so this is
  --       left for future PRs.
let blank = findBlankLine (reverse $ take (integerToNat (cast line)) src) line
let appEdit = MkTextEdit (MkRange (uncurry MkPosition nstart) (uncurry MkPosition nend)) lemmaApp
let tyEdit = MkTextEdit (MkRange (MkPosition blank 0) (MkPosition blank 0))
(relit markM "\{show $ dropNS name} : \{show lemmaTy}\n\n")
let action = buildCodeAction name params.textDocument.uri [tyEdit, appEdit]
modify LSPConf (record { cachedActions $= insert (cast loc, MakeLemma, [action]) })
pure $ Just action
|
How to make replayer look like actual tables?
Hi can you please give me instructions on how to accomplish this? I looked in the manual but there was nothing there and a search on this site came up empty for me... Thanks!
I'm sorry I should have been more clear. It says I can skin the replayer to look exactly like my tables. I use Mercury theme on stars and it only has Nova and Grinder as options.
Unfortunately, there are no other skins at the moment. Our development department currently has a lot of more important tasks, but as soon as new themes appear they will be added to the list immediately.
OK, TY! The wording in the manual made it seem like you could skin them yourself, which was why I was asking.
Thanks for replying to all my questions. Much appreciated. |
# In the program below we compute the correlation coefficients of a and b.
# The output matrix has 4 values because its shape is n*n,
# where n is the number of input arrays (here n = 2).
import numpy as np
a=np.array([5,6,4,3,2])
b=np.array([4,9,4,3,1])
print()
c=np.corrcoef(a,b)
print(c)
print()
# Correlation coefficients of three arrays: the output is a 3*3 matrix
print()
a=np.array([[5,6,4,3,2],[4,9,4,3,1],[1,8,4,3,9]])
c=np.corrcoef(a)
print(c)
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
-----------------------------------------------------------------------------
-- |
-- Module :
-- Copyright : (c) 2013 Boyun Tang
-- License : BSD-style
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : ghc
--
--
--
-----------------------------------------------------------------------------
module Main where
import Control.Arrow
import Data.Colour.Names
import Data.Colour.Palette.BrewerSet
import qualified Data.HashMap.Strict as H
import qualified Data.List as L
import qualified Data.Text as T
import qualified Data.Text.IO as T
import qualified Data.Text.Read as T
import qualified Data.Vector as V
import qualified Data.Vector.Unboxed as UV
import Diagrams.Backend.Cairo
import Diagrams.HeatMap
import Diagrams.HeatMap.Type
import Diagrams.Prelude
import Statistics.Quantile
import Statistics.Sample
import System.Environment
import System.FilePath
cluDiffOpt :: ClustOpt
cluDiffOpt = ClustOpt
{ colorOpt = color2
, rowCluster = Nothing -- Just (eucDis,UPGMA,LeftTree)
, colCluster = Just (eucDis,UPGMA,BottomTree)}
color1 = let cSet = brewerSet RdBu 11
in Three (cSet !! 9) white (cSet !! 1)
color2 = Three green black red
mkPara w h i j (a,b,c) = Para
{ clustOpt = cluDiffOpt
, colorVal = ColorVal a b c
, matrixHeight = h
, matrixWidth = w
, rowTreeHeight = 0.2 * min w h
, colTreeHeight = 0.2 * min w h
, rowFontSize = 0.8 * h / i
, colFontSize = 0.6 * w / j
, legendFontSize = 0.5 * w * 0.1
, fontName = "Arial"
, colorBarPos = Horizontal
, tradeOff = Quality
, colTreeLineWidth = 0.5
, rowTreeLineWidth = 0.5
}
eucDis :: (UV.Unbox a, Num a) => UV.Vector a -> UV.Vector a -> a
eucDis vec1 vec2 = UV.sum $ UV.zipWith (\v1 v2 -> (v1-v2)*(v1-v2)) vec1 vec2
toZscore :: UV.Vector Double -> UV.Vector Double
toZscore vec =
let (m,v) = meanVarianceUnb vec
in UV.map (\e -> (e - m) / sqrt v ) vec
parseTSV :: Bool -> FilePath -> FilePath -> IO Dataset
parseTSV doNormalization datFile labelFile = do
sampleHash <- T.readFile labelFile >>=
return . H.fromList .
map (((!! 0) &&& (!! 1)) . T.split (== '\t')) .
T.lines .
T.filter (/= '\r')
T.readFile datFile >>=
return .
(\(h:ts) ->
let sIDs = V.fromList $ tail h
grIDs = V.map (sampleHash `myIdx`) sIDs
gIDs = V.fromList $ map head ts
j = V.length sIDs
i = V.length gIDs
func = if doNormalization
then toZscore
else id
datum = UV.concat $ map (func . UV.fromList . map (fst . fromRight . T.double) . tail) ts
m = Matrix i j RowMajor datum
in Dataset (Just gIDs) (Just sIDs) (Just grIDs) m
) . map (T.split (== '\t')) . T.lines . T.filter (/= '\r')
where
myIdx h k = if k `H.member` h
then h H.! k
else error $ show k
fromRight :: Show a => Either a b -> b
fromRight (Right b) = b
fromRight (Left a) = error $ show a
main :: IO ()
main = do
doNormal:w:h:datFile:labelFile:_ <- getArgs
dataset <- parseTSV (read doNormal) datFile labelFile
let vMin = continuousBy medianUnbiased 1 100 $ dat $ datM $ dataset
vMean = if (read doNormal)
then 0
else continuousBy medianUnbiased 50 100 $ dat $ datM $ dataset
vMax = continuousBy medianUnbiased 99 100 $ dat $ datM $ dataset
j = fromIntegral $ nCol $ datM dataset
i = fromIntegral $ nRow $ datM dataset
para = mkPara (read w) (read h) i j (vMin,vMean,vMax)
renderCairo (replaceExtension datFile "pdf") (mkWidth 600) $ pad 1.03 $ fst $ plotHeatMap para dataset
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% REPORT OF THE PROJECT
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[a4paper,11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[english]{babel}
\usepackage{amsmath,amssymb,amsthm,amsopn}
\usepackage{mathrsfs}
\usepackage{graphicx}
%\usepackage{tikz}
%\usepackage{array}
%\usepackage[top=1cm,bottom=1cm]{geometry}
%\usepackage{listings}
%\usepackage{xcolor}
\usepackage{hyperref}
\hypersetup{
colorlinks=true,
linkcolor=blue,
citecolor=red,
}
% fancy headers and footers
%\usepackage{fancyhdr}
%\pagestyle{fancy}
%\fancyhead[L]{BCPST 2 - Lycée Jacques Prévert}
%\fancyhead[R]{Rappels d'analyse}
%\pagenumbering{gobble} % no page numbering
% Création des labels Théorème, Lemme, etc...
\newtheoremstyle{break}%
{}{}%
{\itshape}{}%
{\bfseries}{}% % Note that final punctuation is omitted.
{\newline}{}
\newtheoremstyle{sc}%
{}{}%
{}{}%
{\scshape}{}% % Note that final punctuation is omitted.
{\newline}{}
\theoremstyle{break}
\newtheorem{thm}{Theorem}[section]
\newtheorem{lm}[thm]{Lemma}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{cor}[thm]{Corollary}
\theoremstyle{sc}
\newtheorem{exo}{Exercice}
\theoremstyle{definition}
\newtheorem{defi}[thm]{Definition}
\newtheorem{ex}[thm]{Example}
\theoremstyle{remark}
\newtheorem{rem}[thm]{Remark}
% Raccourcis pour les opérateurs mathématiques (les espaces avant-après sont modifiés pour mieux rentrer dans les codes mathématiques usuels)
\DeclareMathOperator{\Ker}{Ker}
\DeclareMathOperator{\Id}{Id}
\DeclareMathOperator{\Img}{Im}
\DeclareMathOperator{\Card}{Card}
\DeclareMathOperator{\Vect}{Vect}
\DeclareMathOperator{\Tr}{Tr}
\DeclareMathOperator{\Mod}{mod}
\DeclareMathOperator{\Ord}{Ord}
\DeclareMathOperator{\lcm}{lcm}
% Nouvelles commandes
\newcommand{\ps}[2]{\left\langle#1,#2\right\rangle}
\newcommand{\ent}[2]{[\![#1,#2]\!]}
\newcommand{\diff}{\mathop{}\!\mathrm{d}}
\newcommand{\ie}{\emph{i.e. }}
% opening
\title{Normal bases generation in C}
\author{Édouard \textsc{Rousseau}\\Supervised by Michaël \textsc{Quisquater}}
\begin{document}
\maketitle
\begin{abstract}
This is the report of a C project about the generation of normal elements in
finite fields. Given a field extension $\mathbb{F}_{p^d}/\mathbb{F}_p$, we
say that $\alpha\in\mathbb{F}_{p^d}$ is normal if $\left\{
\alpha,\alpha^p,\alpha^{p^2},\cdots,\alpha^{p^{d-1}} \right\}$ is a basis of
$\mathbb{F}_{p^d}$ over $\mathbb{F}_p$. We first give some theory to
characterize normal elements, then we describe three algorithms to compute
normal elements: a randomized algorithm, Lüneburg's algorithm, and
Lenstra's algorithm. Finally, we give experimental results about our
implementation. All this work is based on Gao's PhD thesis~\cite{Ga93}.
\end{abstract}
\tableofcontents
\clearpage
\section{Introduction}
\subsection{Normal bases}
With the rise of electronics and computer science, areas like
cryptography and coding theory have been studied extensively. In both these
domains, finite fields often play a fundamental role. Normal bases can be
used to implement finite field arithmetic efficiently in hardware,
consuming less power than other bases. But normal bases can also be used as a
theoretical tool to understand field extensions, which is probably why normal bases have been
studied since the $19^\textrm{th}$ century. Gauss, for example, used normal
bases to study when regular polygons can be drawn with ruler and compass
alone, a \emph{very} old problem. The notion of a normal basis is not restricted
to finite fields: if $\mathbb{K}$ is a field and $\mathbb{L}$ is a finite
Galois extension of $\mathbb{K}$ of Galois group $G$, a normal basis of
$\mathbb{L}$ over $\mathbb{K}$ is a basis of the form $\left\{
\sigma(\alpha)\;|\;\sigma\in G
\right\}$ where $\alpha\in\mathbb{L}$. In other words, it is a basis composed
of an element $\alpha$ and all its conjugates. In the case of finite fields,
the definition that we give is indeed the same as this one, since, in this case,
$G$ is generated by the Frobenius automorphism. Given a finite Galois extension
$\mathbb{L}/\mathbb{K}$, normal bases can also be used to realize the Galois
correspondence of $\mathbb{L}/\mathbb{K}$. Theory tells us that there exists a
correspondence between intermediate extensions of $\mathbb{L}/\mathbb{K}$ and subgroups
of $G$, but it does not give an \emph{effective} way of realising that
correspondence. Normal bases are a way to solve that problem. In this report, we
focus on the problem of finding normal elements in finite fields. That is
why, from now on, we do not look at cases other than finite extensions
of finite fields (\ie extensions of type
$\mathbb{F}_{q^d}/\mathbb{F}_q$).
\subsection{Recalls and notations}
In all the text, we denote by $\mathbb{F}_n$ the field with $n$ elements. We
recall that $n$ must be a \emph{prime power} (\ie $n=p^d$ where $p$ is a prime
number and $d$ is a positive integer). We have
$\mathbb{F}_{p^d}\cong\mathbb{F}_p[X]/(P)$, where $P$ is an irreducible
polynomial of degree $d$ in $\mathbb{F}_p[X]$, and this is the representation
that will be used in the C code. From now on, we write
$X$ both for the indeterminate $X$ and for its image $\bar X$ in the
quotient $\mathbb{F}_p[X]/(P)$. $\mathbb{F}_{p^d}$ is a vector space over
$\mathbb{F}_p$ of dimension $d$; the basis $\left\{ 1, X, X^2, \dots,
X^{d-1} \right\}$ is called the \emph{polynomial basis} in the following. We also recall
that $\mathbb{F}_{p^d}$ is a \emph{field extension} of $\mathbb{F}_p$ and the
characteristic of $\mathbb{F}_{p^d}$ is $p$. Lastly, we denote by $\sigma$ the
\emph{Frobenius map}, defined by $\sigma(x)=x^p$ for all
$x\in\mathbb{F}_{p^d}$. This map is an $\mathbb{F}_p$-automorphism of
$\mathbb{F}_{p^d}$ (\ie $\sigma$ is a field morphism, a bijection, and $\forall
y\in\mathbb{F}_p,\;\sigma(y)=y$); as a consequence, $\sigma$ is also a linear
map.
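\begin{ex}
For instance, take $p=3$, $d=2$ and $P=X^2+1$, which is irreducible over
$\mathbb{F}_3$. Then $\mathbb{F}_9=\mathbb{F}_3[X]/(P)$, the polynomial basis
is $\{1,X\}$, and since $X^2=-1$ we have $X^3=-X$, so the Frobenius map is
given by $\sigma(a+bX)=(a+bX)^3=a^3+b^3X^3=a-bX$ for all $a,b\in\mathbb{F}_3$.
\end{ex}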
Note that we look only
at \emph{prime extensions} (\ie extensions of type
$\mathbb{F}_{p^d}/\mathbb{F}_p$). This choice has been made because the theory
of normal elements in extensions of type $\mathbb{F}_{q^n}/\mathbb{F}_q$ is not
different (replacing $p$ by $q$ in the following demonstrations would be
sufficient), but the implementation of the algorithms is simpler in the case of
prime extensions. Speaking of implementation, we must introduce Flint
(Fast Library for Number Theory), because \emph{every} function written in this
project uses Flint.
\subsection{Flint}
Flint~\cite{Ha10} is a C library maintained by William Hart, and developed
by him and many others since 2007. In Flint, we use the type
\texttt{fq\_t}, where the elements of $\mathbb{F}_{p^d}\cong\mathbb{F}_p[X]/(P)$ are
represented as polynomials of degree less than $d$. The underlying data
structure is \texttt{fmpz\_poly\_t}, that is a \texttt{struct} containing
\begin{enumerate}
\item a pointer to arbitrary large integers \texttt{fmpz}, representing the
coefficients of the polynomial;
\item the number of memory allocations for the pointer, of type
\texttt{slong}, Flint's own signed \texttt{long};
\item the degree of the polynomial, a \texttt{slong} too.
\end{enumerate}
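To give an idea of how this is used in practice, here is a minimal sketch (not
taken from the project code) that builds $\mathbb{F}_{3^4}$ and applies the
Frobenius map to its generator. The calls are those of the \texttt{fq} module
as we understand them from the Flint documentation; the exact headers and
signatures should be checked against the installed version of Flint.
\begin{verbatim}
#include <flint/fmpz.h>
#include <flint/fq.h>

int main(void)
{
    fmpz_t p;
    fq_ctx_t ctx;
    fq_t a, b;

    fmpz_init_set_ui(p, 3);        /* characteristic p = 3                */
    fq_ctx_init(ctx, p, 4, "X");   /* F_{3^4} = F_3[X]/(P), P chosen by Flint */

    fq_init(a, ctx);
    fq_init(b, ctx);
    fq_gen(a, ctx);                /* a = X, image of the indeterminate   */
    fq_frobenius(b, a, 1, ctx);    /* b = sigma(a) = a^3                  */

    fq_print_pretty(b, ctx);
    flint_printf("\n");

    fq_clear(a, ctx); fq_clear(b, ctx);
    fq_ctx_clear(ctx); fmpz_clear(p);
    return 0;
}
\end{verbatim}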
In Flint, it is possible to work with finite fields of arbitrarily large
order and with polynomials or matrices over these fields. The basic functions
are already implemented, such as gcd or derivative for polynomials, or reduced
row echelon form for matrices. Using Flint saves a lot of time, but it also has
its limits. It is possible to deal with arbitrary \emph{prime}
extensions. For extensions of type $\mathbb{F}_{q^n}/\mathbb{F}_q$, it is still
possible, using polynomial arithmetic over $\mathbb{F}_q$. But using
polynomials
over $\mathbb{F}_{q^n}$ would have required polynomial in two
indeterminates, and everything would be more complicated. That is the reason we
look only at prime extensions.
All these recalls being made, and the true hero
(Flint) of our functions being introduced, we can begin our journey.
\section{Basics on normal bases}
Throughout this section, we fix a prime number $p$ and a positive integer $d$. We
work with the extension $\mathbb{F}_{p^d}/\mathbb{F}_p$ and the Frobenius
morphism $\sigma$ defined above. We are now able to give a formal definition
of a normal element and a normal basis.
\begin{defi}[normal element, normal basis]
Let $\alpha\in\mathbb{F}_{p^d}$, we say that $\alpha$ is a \emph{normal
element} if $\left\{ \alpha, \sigma(\alpha),\dots,\sigma^{d-1}(\alpha)
\right\}=\left\{ \alpha, \alpha^{p}, \dots, \alpha^{p^{d-1}} \right\}$ is a
basis of $\mathbb{F}_{p^d}$. Such a basis is called a \emph{normal basis}.
\end{defi}
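For example, in $\mathbb{F}_4=\mathbb{F}_2[X]/(X^2+X+1)$ the element $X$ is
normal: its conjugates $\left\{ X, \sigma(X) \right\}=\left\{ X, X+1 \right\}$
form a basis of $\mathbb{F}_4$ over $\mathbb{F}_2$. On the contrary, for
$d\geq2$ the element $1$ is never normal, since $\sigma(1)=1$ and the
conjugates of $1$ only span a line.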
In order to recognize a normal element $\alpha$, we can compute the dimension of the
linear span of $\left\{ \sigma^i(\alpha) \;|\; 0\leq i < d \right\}$. A way of
doing that is to construct the matrix $M$ whose columns are the coordinates of the
elements $\sigma^i(\alpha)$ in the polynomial basis, and to check if $M$ is
nonsingular. This can be done using Gaussian elimination. This method is not
efficient, so we work on other characterizations of normal elements. We need
two more definitions to be able to state our results.
\begin{defi}[trace function]
The \emph{trace function}
$\Tr_{p^d|p}:\mathbb{F}_{p^d}\to\mathbb{F}_p$ of the extension
$\mathbb{F}_{p^d}/\mathbb{F}_p$ is the function defined by
\[
\Tr_{p^d|p}(\alpha) = \sum_{i=0}^{d-1}\alpha^{p^i}.
\]
It takes its values in $\mathbb{F}_p$ since $(\Tr_{p^d|p}(\alpha))^p
= \Tr_{p^d|p}(\alpha)$.
\end{defi}
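As a small illustration (again, this is a sketch and not the project's code),
the trace can be computed directly from this definition with Flint; the
functions \texttt{fq\_frobenius} and \texttt{fq\_add} are taken from the Flint
documentation, and Flint's own \texttt{fq\_trace} can be used to cross-check
the result.
\begin{verbatim}
#include <flint/fq.h>

/* Tr(alpha) = alpha + alpha^p + ... + alpha^(p^(d-1)), computed from the
   definition above; the result lies in the prime field F_p.            */
void trace_by_definition(fq_t res, const fq_t alpha, slong d,
                         const fq_ctx_t ctx)
{
    fq_t conj;
    slong i;

    fq_init(conj, ctx);
    fq_zero(res, ctx);
    for (i = 0; i < d; i++)
    {
        fq_frobenius(conj, alpha, i, ctx);   /* conj = alpha^(p^i) */
        fq_add(res, res, conj, ctx);
    }
    fq_clear(conj, ctx);
}
\end{verbatim}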
The trace is a tool that permits us to determine whether a family of elements $\alpha_1,
\dots, \alpha_d$ forms a basis of $\mathbb{F}_{p^d}$ over
$\mathbb{F}_p$, using the discriminant of these elements.
\begin{defi}[discriminant]
Let $\alpha_1, \dots, \alpha_d$ be elements in $\mathbb{F}_{p^d}$, the
\emph{discriminant} $\Delta(\alpha_1, \dots, \alpha_d)$ of these elements
is the determinant
\[
\Delta(\alpha_1, \dots, \alpha_d)=\det
\begin{pmatrix}
\Tr(\alpha_1\alpha_1) & \Tr(\alpha_1\alpha_2) & \dots &
\Tr(\alpha_1\alpha_d) \\
\Tr(\alpha_2\alpha_1) & \Tr(\alpha_2\alpha_2) & \dots &
\Tr(\alpha_2\alpha_d) \\
\vdots & \vdots & & \vdots \\
\Tr(\alpha_d\alpha_1) & \Tr(\alpha_d\alpha_2) & \dots &
\Tr(\alpha_d\alpha_d)
\end{pmatrix}
\]
where $\Tr$ is the same trace as before. We omit the indices because
the extension is always the same.
\end{defi}
With this last tool, we can state our first theorem.
\begin{thm}[Theorem 2.2.1, \cite{Ga93}]
The elements $\alpha_1, \dots, \alpha_d$ of
$\mathbb{F}_{p^d}$ form a basis of $\mathbb{F}_{p^d}$ over
$\mathbb{F}_p$ if and only if $\Delta(\alpha_1, \dots,
\alpha_d)\neq0$.
\end{thm}
\begin{proof}
First assume that $\alpha_1, \dots, \alpha_d$ form a basis of
$\mathbb{F}_{p^d}$ over $\mathbb{F}_p$. We prove that
$\Delta(\alpha_1, \dots, \alpha_d)\neq0$ by showing that the row vectors
$L_1, \dots, L_d$ of
the matrix in the definition of $\Delta(\alpha_1, \dots, \alpha_d)$ are
linearly independent over $\mathbb{F}_p$, and thus the matrix is
nonsingular. Suppose that there exist $c_1, \dots,
c_d\in\mathbb{F}_p$ with $\sum_i c_iL_i = 0$; this means
\[
c_1\Tr(\alpha_1\alpha_j) + \dots + c_d\Tr(\alpha_d\alpha_j) =
0\textrm{ for }1\leq j\leq d.
\]
Then with $\beta=c_1\alpha_1 + \dots + c_d\alpha_d$, by linearity of the
trace function, we get $\Tr(\beta\alpha_j) = 0$ for $1\leq j\leq d$. Since
$\alpha_1, \dots, \alpha_d$ is a basis of $\mathbb{F}_{p^d}$, it follows
that $\Tr(\beta\alpha) = 0$ for all $\alpha\in\mathbb{F}_{p^d}$. If
$\beta\neq0$, it means that $\Tr(\gamma)=0$ for all
$\gamma\in\mathbb{F}_{p^d}$: we can see that this is impossible by thinking
of $\Tr(\gamma)$ as the trace of the multiplication-by-$\gamma$ linear map. So we have $\beta=0$, and then
$c_1\alpha_1 + \dots + c_d\alpha_d = 0$ implies $c_1=\dots=c_d=0$.
Conversely, assume that $\Delta(\alpha_1, \dots, \alpha_d)\neq0$ and
$c_1\alpha_1+\dots+c_d\alpha_d=0$ for some $c_1, \dots,
c_d\in\mathbb{F}_p$. By multiplying by $\alpha_j$, we have
\[
c_1\alpha_1\alpha_j+\dots+c_d\alpha_d\alpha_j=0\textrm{ for }1\leq j \leq d,
\]
and by applying the trace function, we get
\[
c_1\Tr(\alpha_1\alpha_j)+\dots+c_d\Tr(\alpha_d\alpha_j)=0\textrm{ for }1\leq
j\leq d.
\]
But this is the same as $\sum c_iL_i=0$ where the $L_i$ are the rows of the
matrix in $\Delta(\alpha_1, \dots, \alpha_d)$. Since this matrix is
nonsingular by hypothesis, its rows are linearly independent and it follows
that $c_1=\dots=c_d=0$. Therefore $\alpha_1, \dots, \alpha_d$ are linearly
independent over $\mathbb{F}_p$, and form a family of $d$ vectors in a
$d$-dimensional vector space, so $\alpha_1, \dots, \alpha_d$ form a basis of
$\mathbb{F}_{p^d}$ over $\mathbb{F}_p$.
\end{proof}
This characterization is interesting in itself, but the matrix defined in
$\Delta(\alpha_1, \dots, \alpha_d)$ is quite complicated. Fortunately,
we can use a much simpler matrix.
\begin{cor}
\label{circulantCor}
The elements $\alpha_1, \dots, \alpha_d$ form a basis of
$\mathbb{F}_{p^d}$ over $\mathbb{F}_p$ if and only if the matrix $A$ is
nonsingular, where
\[
A =
\begin{pmatrix}
\alpha_1 & \alpha_2 & \dots & \alpha_d \\
\alpha_1^p & \alpha_2^p & \dots & \alpha_d^p \\
\vdots & \vdots & & \vdots \\
\alpha_1^{p^{d-1}} & \alpha_2^{p^{d-1}} & \dots &
\alpha_d^{p^{d-1}}
\end{pmatrix}.
\]
\end{cor}
\begin{proof}
Since $(A^tA)_{ij}=\sum_{k=0}^{d-1}\alpha_i^{p^k}\alpha_j^{p^k}
=\sum_{k=0}^{d-1}(\alpha_i\alpha_j)^{p^k}=\Tr(\alpha_i\alpha_j)$, we have
$\Delta(\alpha_1, \dots, \alpha_d)=\det(A^tA)=(\det A)^2$, and the result
follows from the previous theorem.
\end{proof}
We now have a simpler matrix to study, but it is still a matrix, so the nature
of the problem is the same as before. The next lemma allows us to change the nature
of the problem, by working with polynomials.
\begin{lm}
\label{henselLm}
For any $d$ elements $a_0, a_1, \dots, a_{d-1}\in\mathbb{F}_{p^d}$, the
$d\times d$ circulant matrix
\[
c[a_0, a_1, \dots, a_{d-1}] =
\begin{pmatrix}
a_0 & a_1 & a_2 & \dots & a_{d-1} \\
a_{d-1} & a_0 & a_1 & \dots & a_{d-2} \\
a_{d-2} & a_{d-1} & a_0 & \dots & a_{d-3} \\
\vdots & \vdots & \vdots & & \vdots \\
a_1 & a_2 & a_3 & \dots & a_0
\end{pmatrix}
\]
is nonsingular if and only if the polynomial $\sum a_iX^i$ is relatively
prime to $X^d-1$.
\end{lm}
\begin{proof}
Let $A$ be the following $d\times d$ permutation matrix
\[
\begin{pmatrix}
0 & 1 & 0 & \dots & 0 & 0 \\
0 & 0 & 1 & \dots & 0 & 0 \\
\vdots & \vdots & \vdots & & \vdots & \vdots \\
0 & 0 & 0 & \dots & 0 & 1 \\
1 & 0 & 0 & \dots & 0 & 0
\end{pmatrix}.
\]
Then we see that $c[a_0, a_1, \dots, a_{d-1}] =
\sum_{i=0}^{d-1}a_iA^i=f(A)$, where $f(X)=\sum_{i=0}^{d-1}a_iX^i$. On the
first line of $A^i$, there is only one nonzero value, it is a $1$ in
position $i+1$. Hence the matrices $A^0, A, \dots, A^{d-1}$ are linearly
independent, and since $A^d=I_d$, we know that the minimal polynomial
of $A$ is $X^d-1$. Assume that $f(X)$ is relatively prime to $X^d-1$. Then
there are polynomials $a(X), b(X)$ such that
\[
a(X)f(X) + b(X)(X^d-1)=1,
\]
and so
\[
a(A)f(A) = I_d,
\]
as $A^d-I_d=0$. This implies that $f(A)$ is invertible and so
nonsingular. Now assume that $f(X)$ and $X^d-1$ are not relatively prime, and
denote by $u(X)\neq1$ their gcd. Let $f(X)=f_1(X)u(X)$ and $X^d-1=h(X)u(X)$.
Since $\deg h < d$, we have $h(A)\neq0$. But $h(A)u(A)=0$, so we see that
$u(A)$ is singular. Therefore $f(A)=f_1(A)u(A)$ is also singular. We have shown
that $f(A)$ is nonsingular if and only if $f(X)$ is relatively prime to $X^d-1$.
\end{proof}
We are now able to state the last result of this section.
\begin{thm}[Hensel, 1888]
\label{hensel}
Let $\alpha\in\mathbb{F}_{p^d}$, $\alpha$ is a normal element of the
extension $\mathbb{F}_{p^d}/\mathbb{F}_p$ if and only if the polynomial
$\alpha^{p^{d-1}}X^{d-1}+\dots+\alpha^pX+\alpha\in\mathbb{F}_{p^d}[X]$
is relatively prime to $X^d-1$.
\end{thm}
\begin{proof}
By definition, $\alpha$ is a normal element if and only if the elements $\alpha,
\alpha^p, \dots, \alpha^{p^{d-1}}$ form a basis of
$\mathbb{F}_{p^d}$ over $\mathbb{F}_p$. By
Corollary~\ref{circulantCor}, this is
true if and only if the matrix
\[
A =
\begin{pmatrix}
\alpha & \alpha^p & \alpha^{p^2} & \dots & \alpha^{p^{d-1}} \\
\alpha^p & \alpha^{p^2} & \alpha^{p^3} & \dots & \alpha \\
\vdots & \vdots & \vdots & & \vdots \\
\alpha^{p^{d-1}} & \alpha & \alpha^p & \dots & \alpha^{p^{d-2}}
\end{pmatrix}
\]
is nonsingular. But, reversing the order of the rows in $A$, from the second
row to the last, we get the matrix $c[\alpha, \alpha^p, \dots,
\alpha^{p^{d-1}}]$, which is nonsingular if and only if $A$ is
nonsingular. By Lemma~\ref{henselLm}, $c[\alpha, \alpha^p, \dots,
\alpha^{p^{d-1}}]$ is nonsingular if and only if $X^d-1$ and
$\alpha^{p^{d-1}}X^{d-1}+\dots+\alpha^pX+\alpha$ are relatively prime.
This proves the theorem.
\end{proof}
We now have a new characterization of normal elements. We can decide if an
element is normal or not by computing a gcd. It is more efficient than the
naive matrix method because we have efficient algorithms to compute the gcd.
The function \texttt{is\_normal} is exactly the implementation of this
method. The code of this function, and of all the others, is available at
\url{github.com/erou/normalBases}.
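To make this concrete, here is a sketch of the gcd test with Flint's
\texttt{fq\_poly} module. It is \emph{not} the actual code of
\texttt{is\_normal} (which is available at the address above); the Flint
function names are believed correct but should be checked against the
installed version.
\begin{verbatim}
#include <flint/fq.h>
#include <flint/fq_poly.h>

/* Hensel's criterion: alpha is normal iff
   gcd(alpha + alpha^p X + ... + alpha^(p^(d-1)) X^(d-1), X^d - 1) = 1. */
int is_normal_sketch(const fq_t alpha, slong d, const fq_ctx_t ctx)
{
    fq_poly_t f, g, gcd;
    fq_t c, one;
    slong i;
    int result;

    fq_poly_init(f, ctx); fq_poly_init(g, ctx); fq_poly_init(gcd, ctx);
    fq_init(c, ctx); fq_init(one, ctx);
    fq_one(one, ctx);

    /* f = alpha + alpha^p X + ... + alpha^(p^(d-1)) X^(d-1) */
    for (i = 0; i < d; i++)
    {
        fq_frobenius(c, alpha, i, ctx);
        fq_poly_set_coeff(f, i, c, ctx);
    }

    /* g = X^d - 1 */
    fq_poly_set_coeff(g, d, one, ctx);
    fq_neg(c, one, ctx);
    fq_poly_set_coeff(g, 0, c, ctx);

    fq_poly_gcd(gcd, f, g, ctx);
    result = (fq_poly_degree(gcd, ctx) == 0);  /* gcd is a nonzero constant */

    fq_poly_clear(f, ctx); fq_poly_clear(g, ctx); fq_poly_clear(gcd, ctx);
    fq_clear(c, ctx); fq_clear(one, ctx);
    return result;
}
\end{verbatim}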
\section{Computation of normal bases}
Before trying to compute normal elements, a natural question is to wonder whether
normal elements always exist in extensions of type
$\mathbb{F}_{p^{d}}/\mathbb{F}_p$. We do not prove this result here (it is done
in~\cite{Ga93}), but the answer is yes. Now that this natural fear has been
eliminated, we give some ways to compute normal elements. We use the same
notations as in the previous section.
\subsection{Randomized algorithm}
First, we give a randomized algorithm. Such algorithms are often based on the
same strategy.
\begin{enumerate}
\item Take a random element, following a certain protocol.
\item Check if this element verifies the wanted property.
\end{enumerate}
Our algorithm also uses this strategy, which is why our first function
\texttt{is\_normal} is very important. But the protocol is also essential.
For example, if we just take an element completely at random in
$\mathbb{F}_{p^d}$, our algorithm will not be very efficient. The following
theorem helps us define a better protocol.
\begin{thm}
Let $f(X)$ be an irreducible polynomial of degree $d$ over
$\mathbb{F}_p$ and $\alpha$ a root of $f(X)$. Let
\[
g(X)=\cfrac{f(X)}{(X-\alpha)f'(\alpha)}.
\]
Then there are at least $p -d(d-1)$ elements $u$ in $\mathbb{F}_p$ such that
$g(u)$ is a normal element of $\mathbb{F}_{p^d}$ over $\mathbb{F}_p$.
\end{thm}
\begin{proof}
Let $\sigma_i=\sigma^i$ be the automorphism
$\theta\to\theta^{p^i},\theta\in\mathbb{F}_{p^d}$, for $0\leq i <d$. Then
$\alpha_i=\sigma_i(\alpha)$ is also a root of $f(X)$, for $0\leq i <d$. The
automorphism $\sigma_i:\mathbb{F}_{p^d}\to\mathbb{F}_{p^d}$ can be extended
into a ring morphism
$\sigma_i:\mathbb{F}_{p^d}[X]\to\mathbb{F}_{p^d}[X]$ by setting
$\sigma_i(X)=X$. We get
\[
g_i(X):=\sigma_i(g(X)) = \cfrac{f(X)}{(X-\alpha_i)f'(\alpha_i)},
\]
and we note that $\sigma_i\sigma_j(g(X)) = \sigma_{i+j}(g(X))$. Then
$g_i(X)$ is a polynomial in $\mathbb{F}_{p^d}[X]$ having $\alpha_k$ as a root
for $k\neq i$ and $g_i(\alpha_i)=1$. Hence, for $i\neq k$, we have that every
root of $f(X)$ is also a root of $g_i(X)g_k(X)$, so
\begin{equation}
g_i(X)g_k(X) \equiv 0 \;(\Mod f(X)), \textrm{ for }i\neq k.
\label{3.1}
\end{equation}
Note that
\begin{equation}
g_0(X)+g_1(X)+\dots+g_{d-1}(X)-1=0,
\label{3.2}
\end{equation}
since the left side is a polynomial of degree at most $d-1$ (all the
$g_i$
are of degree $d-1$) and has $\alpha_0,
\alpha_1, \dots, \alpha_{d-1}$ as roots. Multiplying (\ref{3.2}) by $g_i(X)$
and using (\ref{3.1}) to simplify, we have
\begin{equation}
g_i(X)^2 \equiv g_i(X)\;(\Mod f(X)).
\label{3.3}
\end{equation}
We next set the matrix
\[
D=(\sigma_{i+j}(g(X)))_{0\leq i,j <d}
\]
and we study its determinant, $\Delta(X)$. From the equations
(\ref{3.1}), (\ref{3.2}) and (\ref{3.3}), we see that the entries of $D^tD$
modulo $f(X)$ are all $0$, except on the main diagonal where they are all
$1$. It follows that
\[
\Delta(X)^2 = \det(D^tD) \equiv 1\;(\Mod f(X)).
\]
This proves that $\Delta(X)$ is a nonzero polynomial of degree at most
$d(d-1)$, since it is a sum of products of $d$ polynomials of degree at most
$d-1$. Therefore $\Delta(X)$ has at most $d(d-1)$ roots in
$\mathbb{F}_p$. The result follows from Corollary~\ref{circulantCor}, since the matrix
$D(u)$ is exactly the one defined in the corollary applied to the elements
$g(u), \sigma(g(u)),\dots,\sigma^{d-1}(g(u))$.
\end{proof}
Now we have our protocol. We take $u\in\mathbb{F}_p$ at random, then we check
if $g(u)$ is normal. If $p$ is large enough, for example $p>2d(d-1)$, then
$g(u)$ is normal with probability at least $1/2$. We will not discuss it, but
the entire computation takes $O((n+\log q)(n\log q)^2)$ bit operations;
a reference for this result is given in~\cite{Ga93}. As always, we are not
\emph{completely} satisfied with randomized algorithms, so we also give two
deterministic algorithms in the next subsection.
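Before that, here is a rough sketch of the randomized protocol just described,
using Flint's \texttt{fq} and \texttt{fq\_poly} modules. It is not the
project's code: \texttt{is\_normal} stands for the normality test of the
previous section, the polynomial $f$ is passed already lifted to
$\mathbb{F}_{p^d}[X]$, and the Flint function names should be checked against
the installed version.
\begin{verbatim}
#include <flint/flint.h>
#include <flint/ulong_extras.h>
#include <flint/fq.h>
#include <flint/fq_poly.h>

/* Draw u in F_p and test g(u) = f(u) / ((u - alpha) f'(alpha)) until a
   normal element is found; alpha is a root of f in F_{p^d}.           */
void random_normal_element(fq_t res, const fq_poly_t f, const fq_t alpha,
                           ulong p, slong d, flint_rand_t state,
                           const fq_ctx_t ctx)
{
    fq_t u, c, num, den;
    fq_poly_t fprime;

    fq_init(u, ctx); fq_init(c, ctx); fq_init(num, ctx); fq_init(den, ctx);
    fq_poly_init(fprime, ctx);

    fq_poly_derivative(fprime, f, ctx);
    fq_poly_evaluate_fq(c, fprime, alpha, ctx);
    fq_inv(c, c, ctx);                           /* c = 1 / f'(alpha)   */

    do
    {
        fq_set_ui(u, n_randint(state, p), ctx);  /* random u in F_p     */
        fq_poly_evaluate_fq(num, f, u, ctx);     /* num = f(u)          */
        fq_sub(den, u, alpha, ctx);              /* den = u - alpha     */
        fq_inv(den, den, ctx);
        fq_mul(res, num, den, ctx);
        fq_mul(res, res, c, ctx);                /* res = g(u)          */
    } while (!is_normal(res, d, ctx));           /* assumed normality test */

    fq_clear(u, ctx); fq_clear(c, ctx); fq_clear(num, ctx); fq_clear(den, ctx);
    fq_poly_clear(fprime, ctx);
}
\end{verbatim}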
\subsection{Deterministic algorithms}
First, we define the $\sigma$-Order of an element
$\theta\in\mathbb{F}_{p^d}$, since both Lenstra's and Lüneburg's algorithms use it.
\subsubsection{The $\sigma$-Order polynomials}
\begin{defi}[$\sigma$-Order]
Let $\theta\in\mathbb{F}_{p^d}$ be an arbitrary element. Let $k$ be the least
positive integer such that $\sigma^k(\theta)=\theta^{p^k}$ belongs to the
$\mathbb{F}_p$-linear span of $\left\{ \sigma^i\theta\;|\;0\leq i < k
\right\}$. If $\sigma^k\theta=\sum_{i=0}^{k-1}c_i\sigma^i\theta$ for that
$k$, then the \emph{$\sigma$-Order} of $\theta$ is the polynomial
\[
\Ord_\theta(X)=X^k-\sum_{i=0}^{k-1}c_iX^i.
\]
\end{defi}
The $\sigma$-Order polynomials are widely used in our functions. We compute
them using Gaussian elimination and the reduced row echelon form. To know if
$\sigma^j(\theta)$ is in the linear span of $\left\{
\sigma^i(\theta)\;|\; 0\leq i <j \right\}$, we construct the matrix $M$ whose columns are the
vectors $\theta, \sigma(\theta), \dots, \sigma^{j}(\theta)$, and we compute
its rank using Gaussian elimination. If the rank is $j+1$ then $\theta,
\sigma(\theta), \dots, \sigma^j(\theta)$ are linearly independent. In order to
compute $k$, we do this operation for $j=1$ and while the vectors are linearly
independent, we increase the value of $j$ by $1$. When we get that the
vectors are dependent, we use the reduced row echelon form to get a
$d\times(k+1)$ matrix of type
\[
\begin{pmatrix}
1 & 0 & 0 & \dots & 0 & c_0 \\
0 & 1 & 0 & \dots & 0 & c_1 \\
0 & 0 & 1 & \dots & 0 & c_2 \\
\vdots & \vdots & & \ddots & & \vdots \\
0 & 0 & 0 & \dots & 1 & c_{k-1} \\
0 & 0 & 0 & \dots & 0 & 0 \\
\vdots & \vdots & \vdots & & \vdots & \vdots \\
0 & 0 & 0 & \dots & 0 & 0 \\
\end{pmatrix}
\]
where the coefficients $c_i$ are in the last column. This strategy is
implemented in the function \texttt{sigma\_order}.
We also see that an element
$\alpha\in\mathbb{F}_{p^d}$ is normal if and only if
$\Ord_\alpha(X)=X^d-1$. This characterization is the same as the
definition, thus it is not very interesting. The following property is used in
further demonstrations.
\begin{prop}
\label{orderDiv}
Let $P\in\mathbb{F}_p[X]$ and $\zeta\in\mathbb{F}_{p^d}$. If $P(\sigma)\zeta=0$, then $P$ is divisible
by $\Ord_{\zeta}$.
\end{prop}
\begin{proof}
Let $I_\zeta$ be the set defined by $I_\zeta=\left\{P\in\mathbb{F}_p[X]\;|\; P(\sigma)\zeta
= 0
\right\}$. This is an ideal of $\mathbb{F}_p[X]$, so $I_\zeta$ is principal. The
polynomial
$\Ord_\zeta$ belongs to $I_\zeta$ and has minimum degree in $I_\zeta$ by definition.
Hence $I_\zeta=(\Ord_\zeta)$.
\end{proof}
We always have $(X^d-1)(\sigma)\theta=0$, so for any
$\theta\in\mathbb{F}_{p^d}$, we have that $X^d-1$ is divisible by
$\Ord_\theta$. The result below is also used both in Lenstra's and Lüneburg's
algorithm.
\begin{prop}
\label{orderLcm}
Let $\alpha$ and $\beta$ be two elements in $\mathbb{F}_{p^d}$. Then we
have \[
\Ord_{\alpha+\beta}\;|\;\lcm(\Ord_\alpha,\Ord_\beta).
\]
\end{prop}
\begin{proof}
Let $P=\lcm(\Ord_\alpha,\Ord_\beta)$. By linearity, we have
\[
P(\sigma)(\alpha+\beta)=P(\sigma)\alpha+P(\sigma)\beta=0+0=0.
\]
It follows from Proposition~\ref{orderDiv} that $\Ord_{\alpha+\beta}$ divides
$P$.
\end{proof}
\begin{cor}
\label{orderMul}
Let $\alpha$ and $\beta$ be two elements in $\mathbb{F}_{p^d}$ such that
$\Ord_\alpha$ and $\Ord_\beta$ are relatively prime. Then we have
$\Ord_{\alpha+\beta}=\Ord_\alpha\Ord_\beta$.
\end{cor}
\begin{proof}
Since $\Ord_\alpha$ and $\Ord_\beta$ are relatively prime, we have
$\lcm(\Ord_\alpha, \Ord_\beta)=\Ord_\alpha\Ord_\beta$. It follows from
Proposition~\ref{orderLcm} that $\Ord_{\alpha+\beta}$ divides
$\Ord_\alpha\Ord_\beta$. Now, note that for any
$\theta\in\mathbb{F}_{p^d}$, $\Ord_{-\theta}=\Ord_\theta$, then we also have
\[
\Ord_{\alpha}\;|\;\lcm(\Ord_{\alpha+\beta}, \Ord_\beta)
\]
\[
\Ord_{\beta}\;|\;\lcm(\Ord_{\alpha+\beta}, \Ord_\alpha).
\]
Since $\Ord_\alpha$ and $\Ord_\beta$ are relatively prime, it follows
that
\[
\Ord_{\alpha}\;|\;\Ord_{\alpha+\beta}
\]
\[
\Ord_{\beta}\;|\;\Ord_{\alpha+\beta}.
\]
Therefore, $\Ord_\alpha\Ord_\beta$ divides $\Ord_{\alpha+\beta}$. Both
polynomials are monic, so we have the desired equality.
\end{proof}
\subsubsection{Lenstra's algorithm}
Lenstra's algorithm is based on linear algebra, so our implementation is based
on the matrices module \texttt{fq\_mat} of Flint. Before describing the
algorithm, we need two lemmas.
\begin{lm}
\label{lmExist}
Let $\theta\in\mathbb{F}_{p^d}$ with $\Ord_\theta(X)\neq X^d-1$. Let
$g(X)=(X^d-1)/\Ord_\theta(X)$. Then there exists
$\beta\in\mathbb{F}_{p^d}$ such that
\begin{equation}
g(\sigma)\beta=\theta.
\label{3.4}
\end{equation}
\end{lm}
\begin{proof}
Let $\gamma$ be a normal element of $\mathbb{F}_{p^d}$ over
$\mathbb{F}_p$. Then, by definition of being a normal element, there
exists a polynomial $f(X)\in\mathbb{F}_{p}[X]$ such that
$f(\sigma)\gamma=\theta$. Since $\Ord_\theta(\sigma)\theta=0$, we have
$(\Ord_\theta(\sigma)f(\sigma))\gamma=0$. So, by
Proposition~\ref{orderDiv}, $\Ord_\theta(X)f(X)$ is
divisible by $X^d-1$. We have $g(X)=(X^d-1)/\Ord_\theta(X)$ and $X^d-1|\Ord_\theta(X)f(X)$, so
$g(X)|f(X)$. Let $f(X)=g(X)h(X)$. Then
\[
g(\sigma)(h(\sigma)\gamma)=\theta.
\]
This proves that $\beta=h(\sigma)\gamma$ is a solution of (\ref{3.4}).
\end{proof}
\begin{lm}
\label{lmDegree}
Let $\theta\in\mathbb{F}_{p^d}$ with $\Ord_\theta(X)\neq X^d-1$. Assume
that there exists a solution $\beta$ of (\ref{3.4}) such that
$\deg\Ord_{\beta}\leq\deg\Ord_{\theta}$. Then there exists a nonzero
element $\zeta\in\mathbb{F}_{p^d}$ such that
\begin{equation}
g(\sigma)\zeta=0,
\label{3.5}
\end{equation}
where $g(X)=(X^d-1)/\Ord_\theta(X)$. Moreover any such $\zeta$ has the
property that
\begin{equation}
\deg\Ord_{\theta+\zeta}>\deg\Ord_\theta
\label{3.6}
\end{equation}
\end{lm}
\begin{proof}
Let $\gamma$ be a normal element in $\mathbb{F}_{p^d}$ over
$\mathbb{F}_p$. We can see that
$\zeta=\Ord_\theta(\sigma)\gamma\neq0$ is a solution of~(\ref{3.5}). In
fact, we prove that~(\ref{3.6}) is true for any solution $\zeta$
of~(\ref{3.5}). From~(\ref{3.4}) and Proposition~\ref{orderDiv}, it follows
that $\Ord_\theta$ divides $\Ord_\beta$, so the hypothesis that
$\deg\Ord_\beta\leq\deg\Ord_\theta$ implies that
$\Ord_\beta=\Ord_\theta$. It follows that $g$ and $\Ord_\theta$ are relatively
prime: suppose $\gcd(g, \Ord_\theta)=h$ with $\deg h>0$, let
$\Ord_\theta=\Ord_\theta'h=\Ord_\beta$ and $g=g'h$, then
$\Ord_\theta'(\sigma)\theta=(\Ord_\theta'
g)(\sigma)\beta=(\Ord_\beta g')(\sigma)\beta=0$ and
$\deg\Ord_\theta'<\deg\Ord_\beta$,
that is a contradiction. Since $\Ord_\zeta$ is a divisor of $g$,
$\Ord_\zeta$ and $\Ord_\theta$ are also relatively prime. Therefore,
by Corollary~\ref{orderMul}
\[
\Ord_{\theta+\zeta}=\Ord_\theta\Ord_\zeta,
\]
and then (\ref{3.6}) follows from the fact that $\zeta\neq0$ and then
$\deg\Ord_\zeta\geq1$.
\end{proof}
We now have a deterministic algorithm to compute a normal element of
$\mathbb{F}_{p^d}$ over $\mathbb{F}_p$.
\begin{enumerate}
\item Take an element $\theta\in\mathbb{F}_{p^d}$ and compute
$\Ord_\theta$.
\item If $\Ord_\theta(X)=X^d-1$ then the algorithm stops ($\theta$ is
normal).
\item Calculate $g(X)=(X^d-1)/\Ord_\theta$ and then find
$\beta\in\mathbb{F}_{p^d}$ such that $g(\sigma)\beta=\theta$.
\item Determine $\Ord_\beta$. If $\deg\Ord_\beta>\deg\Ord_\theta$ then
replace $\theta$ by $\beta$ and go to $2$; otherwise if
$\deg\Ord_\beta\leq\deg\Ord_\theta$ then find a nonzero element $\zeta$
such that $g(\sigma)\zeta=0$, replace $\theta$ by $\theta+\zeta$ and
determine the $\sigma$-Order of the new $\theta$, then go to $2$.
\end{enumerate}
This algorithm ends because with each replacement of $\theta$, the degree of
$\Ord_\theta$ increases by at least one, by Lemma~\ref{lmDegree}, and because
this degree is bounded by $d$ since the only possible $\sigma$-Order of degree
$d$ is $X^d-1$. The existence of the element $\beta$ of step $3$ is guaranteed
by Lemma~\ref{lmExist} and the existence of $\zeta$ of step $4$ by
Lemma~\ref{lmDegree}. Since $g(\sigma)$ is a linear map, we compute $\beta$ and
$\zeta$ using Gaussian elimination and the reduced row echelon form again. Computing
elements of type $g(\sigma)\theta$ is not just polynomial evaluation,
and is widely used in our algorithms, so we have a special function
\texttt{sigma\_composition} to do it. The rest of the algorithm is
essentially linear algebra, it uses Flint matrices and is in the function
\texttt{lenstra}.
\subsubsection{Lüneburg's algorithm}
Lüneburg's algorithm is more elementary: it is not based on a powerful theory
like linear algebra. Let $f$ be an irreducible polynomial of degree $d$ over
$\mathbb{F}_p$ and $\alpha$ a root of $f$. Then $\left\{ 1, \alpha, \dots,
\alpha^{d-1}
\right\}$ is a basis of $\mathbb{F}_{p^d}$ over $\mathbb{F}_p$. Let
$f_i=\Ord_{\alpha^i}$, for $0\leq i < d$. Let
$\gamma=\sum_{i=0}^{d-1}a_i\alpha^i$ be a normal element of
$\mathbb{F}_{p^d}$ over $\mathbb{F}_p$. Note that for any
$a_i\in\mathbb{F}_p$, $\Ord_{a_i\alpha^i}$ divides $\Ord_{\alpha^i}=f_i$, and as seen in the
demonstration of Corollary~\ref{orderMul}, $\Ord_\gamma=X^d-1$ divides
$\lcm(f_0, \dots, f_{d-1})$. Since for all $i$, $f_i$ divides $X^d-1$, we have that
$\lcm(f_0, \dots, f_{d-1})$ divides $X^d-1$. Therefore $\lcm(f_0, \dots,
f_{d-1})=X^d-1$. Then we apply the \emph{factor
refinement}~\cite{BaDrSh93}. It is an algorithm that, given a list of
polynomials $f_0, \dots, f_{d-1}$, computes a new list $g_1, \dots, g_r$ of
polynomials, pairwise relatively prime, such that
\[ \prod_{i=0}^{d-1} f_i=\prod_{j=1}^{r} g_j^{e_j}. \]
The factor refinement works as follows: we first set $h_i=f_i$ and $e_i=1$ for $0\leq i
<d$ and we consider the list $(h_i, e_i)_{i}$. While there exist $i\neq j$ with $c:=\gcd(h_i, h_j)\neq1$, we delete
$(h_i,e_i)$ and $(h_j,e_j)$ from the list and we add $(c, e_i+e_j), (h_i/c,
e_i), (h_j/c, e_j)$, except for the cases where the first entry is $1$. This
algorithm terminates and computes what we expect. In our case, we use it to compute
the pairwise relatively prime polynomials $g_j$ $(1\leq j \leq r)$, and then
we compute the integers $e_{ij}$ such that
\[
f_i=\prod_{1\leq j \leq r}g_j^{e_{ij}}\textrm{, for all }0\leq i<d.
\]
This algorithm is implemented in the function
\texttt{factor\_refinement}, it uses Flint's type
\texttt{fq\_poly\_factor} to represent the list $(h_i, e_i)_i$ and to let Flint
deal with the memory allocation.
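To illustrate the procedure on a tiny example, take $f_0=X^2-1$ and
$f_1=X^2+X$ over $\mathbb{F}_5$. Starting from the list $((X^2-1,1),
(X^2+X,1))$, the gcd of the two entries is $X+1\neq1$, so both pairs are
removed and replaced by $(X+1,\,2)$, $(X-1,\,1)$ and $(X,\,1)$. These three
polynomials are pairwise relatively prime, so the algorithm stops with
$g_1=X+1$, $g_2=X-1$, $g_3=X$, and indeed
\[
f_0f_1=(X+1)^2(X-1)X=g_1^{2}g_2g_3, \qquad f_0=g_1g_2, \qquad f_1=g_1g_3.
\]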
Next, for each $j$, $1\leq j \leq r$, we find an index $i(j)$ such that
$e_{ij}$ is maximized. Let
\[
h_j=f_{i(j)}/g_j^{e_{i(j)j}}
\]
and take $\beta_j=h_j(\sigma)\alpha^{i(j)}$. Then
\[
\beta=\sum_{j=1}^r\beta_j
\]
is a normal element of $\mathbb{F}_{p^d}$ over $\mathbb{F}_p$. In fact, the
$\sigma$-Order of $\beta_j$ is $g_j^{e_{i(j)j}}$ ($\beta_j$ has been
constructed for that purpose) for $1\leq j \leq r$. As $g_1, \dots, g_r$ are
pairwise relatively prime, Corollary~\ref{orderMul} states that the
$\sigma$-Order of $\beta$ must be
\[
\prod_{j=1}^rg_j^{e_{i(j)j}}=\lcm(f_0, \dots, f_{d-1})=X^d-1,
\]
meaning that $\beta$ is a normal element.
To implement this algorithm, we use our functions
\texttt{sigma\_composition},
\texttt{sigma\_order}, \texttt{factor\_refinement}, and Flint's basic functions to deal with
polynomials. The implementation is a translation of the steps we followed
to obtain $\beta$.
\section{Experimental results}
In the source code, one can find two directories named \texttt{fq} and
\texttt{fq\_nmod} with very few differences between the files inside these
directories. The name of the file determines the type used in the functions. The
type \texttt{fq} supports arbitrarily large integers $p$ for the characteristic of
the fields $\mathbb{F}_{p^d}$, whereas \texttt{fq\_nmod} supports only small
integers for $p$ but is faster.
\subsection{Random algorithms}
We implemented the naive algorithm for computing normal elements, in order to
compare this algorithm to the more elaborate one. Surprisingly, as we see in
Figure~\ref{NaiveElab}, the speed is
often the same, because when we choose an element randomly in
$\mathbb{F}_{p^d}$, it is often a normal element. If the naive algorithm finds a
normal element with its first pick, there is no way that the elaborate one is
faster, since it does more computation. There are still a few cases where the naive
algorithm does not find a normal element easily, and where the elaborate one is
faster.
The time spent in those algorithms is essentially the time spent performing the
normality tests.
\begin{figure}
\begin{center}
\includegraphics[scale=0.6]{../benchmarks/benchNaiveElab.pdf}
\end{center}
\caption{Benchmark of naive and elaborate algorithms with $p=90001$.}
\label{NaiveElab}
\end{figure}
\subsection{Lüneburg's and Lenstra's algorithms}
Lenstra's algorithm is faster than Lüneburg's algorithm, though the complexity
obtained seems to match the theory in both cases. Also, Lenstra's
algorithm is much more stable than Lüneburg's: the latter can have really
different behaviours for inputs of the same size. The core of
both Lüneburg's and Lenstra's algorithms is the computation of the
$\sigma$-Order polynomials.
\begin{figure}
\begin{center}
\begin{tabular}{ccc}
\textbf{d} & \textbf{Lüneburg} & \textbf{Lenstra} \\
$50$ & $1.87$ s & $0.65$ s \\
$114$ & $93.0$ s & $11.7$ s \\
$205$ & $290$ s & $187$ s \\
$249$ & $1960$ s & $253$ s
\end{tabular}
\caption{Benchmark of Lüneburg's and Lenstra's algorithms with $p=90001$.}
\label{LuneburgLenstra}
\end{center}
\end{figure}
\clearpage
\bibliographystyle{unsrt}
\bibliography{biblio}
\end{document}
|
/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* Set of C routines to use FFTW1.3 library
* 2D- FFT's for any size
*
*
* JLP
* Version 12/12/2006 (from my BORLAND version)
*--------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>   /* for malloc(), free() and exit() */
#include <math.h>
#include "jlp_fftw.h"
#include "jlp_num_rename.h"
/* JLP2001: To be compatible with "matlab"
* I should not normalize by sqrt(nx*ny): */
/*
#define NORMALIZE_SQRT
*/
/* Content of jlp_fftw.h:
#include <fftw.h>
int FFTW_1D_Y_FLT(float *re, float *im, INT4 *nx, INT4 *ny, INT4 *idim,
INT4 *direct);
int fftw_1D_Y_float(float *re, float *im, int nx, int ny, int direct);
int FFTW_1D_Y_DBLE(double *re, double *im, INT4 *nx, INT4 *ny, INT4 *idim,
INT4 *direct);
int fftw_1D_Y_double(double *re, double *im, int nx, int ny, int direct);
int FFTW_2D_DBLE(double *re, double *im, int *nx, int *ny, int *direct);
int fftw_2D_double(double *re, double *im, int nx, int ny, int direct);
int FFTW_2D_FLT(float *re, float *im, INT4 *nx, INT4 *ny, INT4 *idim,
INT4 *kod);
int fftw_2D_float(float *re, float *im, int nx, int ny, int direct);
int fftw_setup(int nx, int ny);
int FFTW_SETUP(int *nx, int *ny);
int fftw_fast(FFTW_COMPLEX *image, int nx, int ny, int direct);
int fftw_shutdown();
*/
/* Static variables: */
static fftwnd_plan plan_fwd, plan_bkwd;
/*
#define MAIN_TEST
*/
#ifdef MAIN_TEST
int main()
{
register int i;
int nx = 9, ny = 8;
double re[128], im[128];
FFTW_COMPLEX* image;
char s[80];
for(i = 0; i < nx * ny; i++)
{
re[i] = i;
im[i] = -i;
}
fftw_2D_double(re, im, nx, ny, 1);
fftw_2D_double(re, im, nx, ny, -1);
for(i = 0; i < nx * ny; i++)
{
printf(" i=%d re=%f im=%f \n", i, re[i], im[i]);
}
printf(" Now going to fast fft...");
fgets(s, sizeof(s), stdin);
nx = 388; ny = 128;
image = (FFTW_COMPLEX *) malloc(nx * ny * sizeof(FFTW_COMPLEX));
for(i = 0; i < nx * ny; i++)
{
c_re(image[i]) = i;
c_im(image[i]) = -i;
}
printf("OK, I go on, with nx=%d ny=%d",nx,ny);
fftw_setup(nx, ny);
fftw_fast(image, nx, ny, 1);
fftw_fast(image, nx, ny, -1);
fftw_shutdown();
for(i = 0; i < 200; i++)
{
printf(" i=%d re=%f im=%f \n", i, c_re(image[i]), c_im(image[i]));
}
fgets(s, sizeof(s), stdin);
free(image);
return(0);
}
#endif
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* Interface with Fortran routines (with arrays of first dimension = idim)
* kod=1, or 2 direct
* kod=-1, or -2 inverse
* kod=-2, or 2: power spectrum, and phase
*--------------------------------------------------------------*/
int FFTW_2D_FLT(float *re, float *im, INT4 *nx, INT4 *ny, INT4 *idim,
INT4 *kod)
{
int nx1, ny1, direct, status;
if(*idim != *nx)
{printf("FFTW_2D/Fatal error idim=%d while nx=%d \n", *idim, *nx);
exit(-1);
}
if(*kod != 1 && *kod != -1)
{printf("FFTW_2D/Fatal error kod=%d (option not yet implemented)\n", *kod);
exit(-1);
}
nx1 = *nx; ny1 = *ny; direct = *kod;
status = fftw_2D_float(re, im, nx1, ny1, direct);
return(status);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* Interface with Fortran routines
* kod=1, or 2 direct
* kod=-1, or -2 inverse
* kod=-2, or 2: power spectrum, and phase
* --------------------------------------------------------------*/
int FFTW_1D_Y_DBLE(double *re, double *im, INT4 *nx, INT4 *ny,
INT4 *idim, INT4 *kod)
{
int nx1, ny1, direct, status;
if(*idim != *nx)
{printf("FFTW_1D_Y/Fatal error idim=%d while nx=%d \n", *idim, *nx);
exit(-1);
}
if(*kod != 1 && *kod != -1)
{printf("FFTW_1D_Y/Fatal error kod=%d (option not yet implemented)\n", *kod);
exit(-1);
}
nx1 = *nx; ny1 = *ny; direct = *kod;
printf("FFTW_1D_Y/ nx1=%d ny1=%d direct=%d\n",nx1,ny1,direct);
status = fftw_1D_Y_double(re, im, nx1, ny1, direct);
return(status);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* Interface with Fortran routines
* kod=1, or 2 direct
* kod=-1, or -2 inverse
* kod=-2, or 2: power spectrum, and phase
* --------------------------------------------------------------*/
int FFTW_1D_Y_FLT(float *re, float *im, INT4 *nx, INT4 *ny,
INT4 *idim, INT4 *kod)
{
int nx1, ny1, direct, status;
#ifndef TOTO_
double sum;
register int i;
#endif
if(*idim != *nx)
{printf("FFTW_1D_Y_FLT/Fatal error idim=%d while nx=%d \n", *idim, *nx);
exit(-1);
}
if(*kod != 1 && *kod != -1)
{printf("FFTW_1D_Y_FLT/Fatal error kod=%d (option not yet implemented)\n", *kod);
exit(-1);
}
nx1 = *nx; ny1 = *ny; direct = *kod;
#ifdef DEBUG
printf("FFTW_1D_Y_FLT/ nx1=%d ny1=%d direct=%d (fftw)\n",nx1,ny1,direct);
#endif
#ifndef TOTO_
sum =0;
for(i=0; i < ny1; i++) sum += re[nx1/2 + i * nx1];
printf(" Sum of central line = %e and sum/sqrt(ny)=%e \n",
sum,sum/sqrt((double)ny1));
#endif
status = fftw_1D_Y_float(re, im, nx1, ny1, direct);
/* JLP99: to check that FFT is correct: */
/* Not recentred yet !! */
#ifndef TOTO_
for(i=0; i <= 2; i++)
printf("re,im [ixc,%d]: %e %e \n",i,
re[nx1/2 + i*nx1],im[nx1/2 + i*nx1]);
#endif
return(status);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* 2D FFT routine for double precision arrays
* direct = 1 : direct or forward
* direct = -1 : reverse or backward
* (FORTRAN version)
* --------------------------------------------------------------*/
int FFTW_2D_DBLE(double *re, double *im, int *nx, int *ny, int *direct)
{
int status;
status = fftw_2D_double(re, im, *nx, *ny, *direct);
return(status);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* 2D FFT routine for double precision arrays
* direct = 1 : direct or forward
* direct = -1 : reverse or backward
*
* --------------------------------------------------------------*/
int fftw_2D_double(double *re, double *im, int nx, int ny, int direct)
{
register int i;
double norm;
int isize;
fftwnd_plan plan;
fftw_direction dir;
FFTW_COMPLEX* in_out;
isize = nx * ny;
in_out = (FFTW_COMPLEX *) malloc(isize * sizeof(FFTW_COMPLEX));
/* Transfer to complex array structure: */
for(i = 0; i < nx * ny; i++)
{
c_re(in_out[i]) = re[i];
c_im(in_out[i]) = im[i];
}
/* direct = 1 : forward
direct = -1 : backward
*/
dir = (direct == 1) ? FFTW_FORWARD : FFTW_BACKWARD;
/* Warning: inversion nx-ny here: */
plan = fftw2d_create_plan(ny, nx, dir,
FFTW_ESTIMATE | FFTW_IN_PLACE);
if(plan == NULL)
{printf("FFTW_2D_DBLE: fatal error creating plan\n"); exit(-1);}
/* Compute the FFT: */
fftwnd(plan, 1, in_out, 1, 0, 0, 0, 0);
/* Transfer back to original arrays (and normalize by sqrt(nx * ny)): */
#ifdef NORMALIZE_SQRT
norm = nx * ny; norm = sqrt(norm);
for(i = 0; i < nx * ny; i++)
{
re[i] = c_re(in_out[i]) /norm;
im[i] = c_im(in_out[i]) /norm;
}
#else
if(direct == 1)
{
for(i = 0; i < nx * ny; i++)
{
re[i] = c_re(in_out[i]);
im[i] = c_im(in_out[i]);
}
}
else
{
norm = nx * ny;
for(i = 0; i < nx * ny; i++)
{
re[i] = c_re(in_out[i]) /norm;
im[i] = c_im(in_out[i]) /norm;
}
}
#endif
free(in_out);
/* Delete plan: */
fftwnd_destroy_plan(plan);
return(0);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* 2D FFT routine for single precision arrays (for which idim=nx)
* (well suited to C arrays since nx=idim...)
* direct = 1 : direct or forward
* direct = -1 : reverse or backward
* --------------------------------------------------------------*/
int fftw_2D_float(float *re, float *im, int nx, int ny, int direct)
{
register int i;
double norm;
int isize;
fftwnd_plan plan;
fftw_direction dir;
FFTW_COMPLEX* in_out;
isize = nx * ny;
in_out = (FFTW_COMPLEX *) malloc(isize * sizeof(FFTW_COMPLEX));
/* Transfer to complex array structure: */
for(i = 0; i < nx * ny; i++)
{
c_re(in_out[i]) = re[i];
c_im(in_out[i]) = im[i];
}
/* direct = 1 : forward
direct = -1 : backward
*/
dir = (direct == 1) ? FFTW_FORWARD : FFTW_BACKWARD;
/* Warning: inversion nx-ny here: */
plan = fftw2d_create_plan(ny, nx, dir,
FFTW_ESTIMATE | FFTW_IN_PLACE);
if(plan == NULL)
{printf("fftw_2D_float: fatal error creating plan\n"); exit(-1);}
/* Compute the FFT: */
fftwnd(plan, 1, in_out, 1, 0, 0, 0, 0);
/* Transfer back to original arrays (and normalize by sqrt(nx * ny)): */
#ifdef NORMALIZE_SQRT
norm = nx * ny; norm = sqrt(norm);
for(i = 0; i < nx * ny; i++)
{
re[i] = c_re(in_out[i]) /norm;
im[i] = c_im(in_out[i]) /norm;
}
#else
if(direct == 1)
{
for(i = 0; i < nx * ny; i++)
{
re[i] = c_re(in_out[i]);
im[i] = c_im(in_out[i]);
}
}
else
{
norm = nx * ny;
for(i = 0; i < nx * ny; i++)
{
re[i] = c_re(in_out[i]) /norm;
im[i] = c_im(in_out[i]) /norm;
}
}
#endif
free(in_out);
/* Delete plan: */
fftwnd_destroy_plan(plan);
return(0);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* 1D FFT routine for single precision arrays
* along the columns (for spectroscopic mode)
* direct = 1 : direct or forward
* direct = -1 : reverse or backward
*
* --------------------------------------------------------------*/
int fftw_1D_Y_float(float *re, float *im, int nx, int ny, int direct)
{
register int i, j;
double norm;
fftw_plan plan;
fftw_direction dir;
FFTW_COMPLEX* in_out;
in_out = (FFTW_COMPLEX *) malloc(ny * sizeof(FFTW_COMPLEX));
if(in_out == NULL)
{printf("fftw_1D_Y_float: fatal error allocating memory\n"); exit(-1);}
/* direct = 1 : forward
direct = -1 : backward
*/
dir = (direct == 1) ? FFTW_FORWARD : FFTW_BACKWARD;
plan = fftw_create_plan(ny, dir, FFTW_ESTIMATE | FFTW_IN_PLACE);
if(plan == NULL)
{printf("fftw_1D_Y_float: fatal error creating plan\n"); exit(-1);}
for(i = 0; i < nx; i++)
{
/* Transfer to complex array structure: */
for(j = 0; j < ny; j++)
{
c_re(in_out[j]) = re[i + j * nx];
c_im(in_out[j]) = im[i + j * nx];
}
/* Compute the FFT: */
fftw(plan, 1, in_out, 1, 0, 0, 0, 0);
/* Transfer back to original arrays (and normalize by sqrt(ny)): */
#ifdef NORMALIZE_SQRT
norm = sqrt((double)ny);
for(j = 0; j < ny; j++)
{
re[i + j * nx] = c_re(in_out[j]) /norm;
im[i + j * nx] = c_im(in_out[j]) /norm;
}
#else
if(direct == 1)
{
for(j = 0; j < ny; j++)
{
re[i + j * nx] = c_re(in_out[j]);
im[i + j * nx] = c_im(in_out[j]);
}
}
else
{
norm = (double)ny;
for(j = 0; j < ny; j++)
{
re[i + j * nx] = c_re(in_out[j]) /norm;
im[i + j * nx] = c_im(in_out[j]) /norm;
}
}
#endif
/* End of loop on i (from 0 to nx-1) */
}
free(in_out);
/* Delete plan: */
fftw_destroy_plan(plan);
return(0);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* 1D FFT routine for double precision arrays
* along the columns (for spectroscopic mode)
* direct = 1 : direct or forward
* direct = -1 : reverse or backward
*
* --------------------------------------------------------------*/
int fftw_1D_Y_double(double *re, double *im, int nx, int ny, int direct)
{
register int i, j;
double norm;
fftw_plan plan;
fftw_direction dir;
FFTW_COMPLEX* in_out;
in_out = (FFTW_COMPLEX *) malloc(ny * sizeof(FFTW_COMPLEX));
/* direct = 1 : forward
direct = -1 : backward
*/
dir = (direct == 1) ? FFTW_FORWARD : FFTW_BACKWARD;
plan = fftw_create_plan(ny, dir, FFTW_ESTIMATE | FFTW_IN_PLACE);
if(plan == NULL)
{printf("fftw_1D_Y: fatal error creating plan\n"); exit(-1);}
for(i = 0; i < nx; i++)
{
/* Transfer to complex array structure: */
for(j = 0; j < ny; j++)
{
c_re(in_out[j]) = re[i + j * nx];
c_im(in_out[j]) = im[i + j * nx];
}
/* Compute the FFT: */
fftw(plan, 1, in_out, 1, 0, 0, 0, 0);
/* Transfer back to original arrays (and normalize by sqrt(ny)): */
#ifdef NORMALIZE_SQRT
norm = sqrt((double)ny);
for(j = 0; j < ny; j++)
{
re[i + j * nx] = c_re(in_out[j]) /norm;
im[i + j * nx] = c_im(in_out[j]) /norm;
}
#else
if(direct == 1)
{
for(j = 0; j < ny; j++)
{
re[i + j * nx] = c_re(in_out[j]);
im[i + j * nx] = c_im(in_out[j]);
}
}
else
{
norm = (double)ny;
for(j = 0; j < ny; j++)
{
re[i + j * nx] = c_re(in_out[j]) /norm;
im[i + j * nx] = c_im(in_out[j]) /norm;
}
}
#endif
/* End of loop on i (from 0 to nx-1) */
}
free(in_out);
/* Delete plan: */
fftw_destroy_plan(plan);
return(0);
}
/****************************************************************
* Fortran interface to fftw_setup
****************************************************************/
int FFTW_FSETUP(int *nx, int *ny)
{
fftw_setup(*nx, *ny);
return(0);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// fftw_setup, to initialize FFTW routines
// Create forward and backward plans
//
// --------------------------------------------------------------*/
int fftw_setup(int nx, int ny)
{
char fwd_name[60], bkwd_name[60];
FILE *fp_wisd;
int ToOutput;
/********************** With "forward" ***************************/
#ifdef BORLAND
sprintf(fwd_name,"c:\\tc\\fftw13\\jlp\\fwd_%d.wis",nx);
#else
sprintf(fwd_name,"/d/fftw/fwd_%d%d.wis",nx,ny);
#endif
/* Check if wisdom file already exists: */
printf("fftw_setup/Read wisdom file: %s\n",fwd_name);
if((fp_wisd = fopen(fwd_name,"r")) == NULL)
{
printf("fftw_setup/Failure to open wisdom file: %s\n",fwd_name);
ToOutput = 1;
}
else
{
ToOutput = 0;
if(fftw_import_wisdom_from_file(fp_wisd) == FFTW_FAILURE) ToOutput = 1;
fclose(fp_wisd);
}
/* Create "plan" for fftw (speed is wanted for subsequent FFT's) */
/* Warning: inversion nx-ny here: */
plan_fwd = fftw2d_create_plan(ny, nx, FFTW_FORWARD,
FFTW_MEASURE | FFTW_IN_PLACE | FFTW_USE_WISDOM);
/* Output wisdom file if needed: */
if(ToOutput)
{
printf("fftw_setup/Write wisdom file: %s\n",fwd_name);
if((fp_wisd = fopen(fwd_name,"w")) != NULL)
fftw_export_wisdom_to_file(fp_wisd);
fclose(fp_wisd);
}
/********************** With "backward" ***************************/
#ifdef BORLAND
sprintf(bkwd_name,"c:\\tc\\fftw13\\jlp\\bk_%d%d.wis",nx,ny);
#else
sprintf(bkwd_name,"/d/fftw/bk_%d%d.wis",nx,ny);
#endif
/* Check if wisdom file already exists: */
printf("fftw_setup/Read wisdom file: %s\n",bkwd_name);
if((fp_wisd = fopen(bkwd_name,"r")) == NULL)
{
printf("fftw_setup/Failure to open wisdom file: %s\n",bkwd_name);
ToOutput = 1;
}
else
{
ToOutput = 0;
if(fftw_import_wisdom_from_file(fp_wisd) == FFTW_FAILURE) ToOutput = 1;
fclose(fp_wisd);
}
/* Create "plan" for fftw (speed is wanted for subsequent FFT's) */
/* Warning: inversion nx-ny here: */
plan_bkwd = fftw2d_create_plan(ny, nx, FFTW_BACKWARD,
FFTW_MEASURE | FFTW_IN_PLACE | FFTW_USE_WISDOM);
/* Output wisdom file if needed: */
if(ToOutput)
{
printf("fftw_setup/Write wisdom file: %s\n",bkwd_name);
if((fp_wisd = fopen(bkwd_name,"w")) != NULL)
  {
  fftw_export_wisdom_to_file(fp_wisd);
  fclose(fp_wisd);
  }
}
return(0);
}
/*************************************************************
*
**************************************************************/
int FFTW_SHUTDOWN()
{
/* Delete plans: */
fftwnd_destroy_plan(plan_fwd);
fftwnd_destroy_plan(plan_bkwd);
return(0);
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* 2D FFT routine for double precision fftw_complex arrays
* direction = 1 : forward
* direction = -1 : backward
* Should be called after fftw_setup and before fftw_shutdown
* --------------------------------------------------------------*/
int fftw_fast(FFTW_COMPLEX *image, int nx, int ny, int dir)
{
register int i;
double norm;
/* Compute the FFT: */
if(dir == 1)
fftwnd(plan_fwd, 1, image, 1, 0, 0, 0, 0);
else
fftwnd(plan_bkwd, 1, image, 1, 0, 0, 0, 0);
/* Normalize by sqrt(nx * ny): */
#ifdef NORMALIZE_SQRT
norm = nx * ny; norm = sqrt(norm);
for(i = 0; i < nx * ny; i++)
{
c_re(image[i]) /= norm;
c_im(image[i]) /= norm;
}
#else
if(dir == -1)
{
norm = (double)(nx * ny);
for(i = 0; i < nx * ny; i++)
{
c_re(image[i]) /= norm;
c_im(image[i]) /= norm;
}
}
#endif
return(0);
}
|
{-# OPTIONS --safe --without-K #-}
module Categories.Examples.Functor.Sets where
import Data.List as List
import Data.List.Properties
open import Data.Nat using (ℕ)
import Data.Product as Product
import Data.Vec as Vec
import Data.Vec.Properties
open import Function using (id; λ-; _$-)
open import Relation.Binary.PropositionalEquality using (refl)
open import Categories.Category.Discrete using (Discrete)
open import Categories.Category.Instance.Sets
open import Categories.Functor using (Endofunctor)
open import Categories.Functor.Bifunctor using (Bifunctor; appʳ)
List : ∀ {o} → Endofunctor (Sets o)
List = record
{ F₀ = List.List
; F₁ = List.map
; identity = map-id $-
; homomorphism = map-compose $-
; F-resp-≈ = λ f≈g → map-cong (λ- f≈g) $-
}
where
open Data.List.Properties
Vec′ : ∀ {o} → Bifunctor (Sets o) (Discrete ℕ) (Sets o)
Vec′ = record
{ F₀ = Product.uncurry Vec.Vec
; F₁ = λ { (f , refl) → Vec.map f }
; identity = map-id $-
; homomorphism = λ { {_} {_} {_} {f , refl} {g , refl} → map-∘ g f _}
; F-resp-≈ = λ { {_} {_} {_} {_ , refl} (g , refl) → map-cong (λ- g) _}
}
where
open Product using (_,_)
open Data.Vec.Properties
Vec : ∀ {o} → ℕ → Endofunctor (Sets o)
Vec = appʳ Vec′
|
[STATEMENT]
theorem thmD12:
assumes induct: "items_le k (\<J> k u) = Gen (paths_le k (\<P> k u))"
assumes induct_tokens: "\<T> k u = \<Z> k u"
shows "items_le k (\<J> k (Suc u)) \<supseteq> Gen (paths_le k (\<P> k (Suc u)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
fix x :: item
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
assume x_dom: "x \<in> Gen (paths_le k (\<P> k (Suc u)))"
[PROOF STATE]
proof (state)
this:
x \<in> Gen (paths_le k (\<P> k (Suc u)))
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
have "\<exists> q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
[PROOF STEP]
have "\<And>i I n. i \<in> I \<or> i \<notin> items_le n I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i I n. i \<in> I \<or> i \<notin> items_le n I
[PROOF STEP]
by (meson items_le_is_filter subsetCE)
[PROOF STATE]
proof (state)
this:
?i \<in> ?I \<or> ?i \<notin> items_le ?n ?I
goal (1 subgoal):
1. \<exists>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?i \<in> ?I \<or> ?i \<notin> items_le ?n ?I
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
?i \<in> ?I \<or> ?i \<notin> items_le ?n ?I
goal (1 subgoal):
1. \<exists>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
[PROOF STEP]
by (metis Gen_implies_pvalid x_dom items_le_fix_D items_le_idempotent items_le_paths_le
pvalid_item_end)
[PROOF STATE]
proof (state)
this:
\<exists>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
[PROOF STEP]
obtain q where q: "pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k"
[PROOF STATE]
proof (prove)
using this:
\<exists>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
goal (1 subgoal):
1. (\<And>q. pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
have "q \<in> \<P> k u \<or> q \<notin> \<P> k u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. q \<in> \<P> k u \<or> q \<notin> \<P> k u
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
q \<in> \<P> k u \<or> q \<notin> \<P> k u
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
q \<in> \<P> k u \<or> q \<notin> \<P> k u
[PROOF STEP]
have "x \<in> items_le k (\<J> k (Suc u))"
[PROOF STATE]
proof (prove)
using this:
q \<in> \<P> k u \<or> q \<notin> \<P> k u
goal (1 subgoal):
1. x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
proof (induct rule: disjCases2)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. q \<in> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
2. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
q \<in> \<P> k u
goal (2 subgoals):
1. q \<in> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
2. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
with q
[PROOF STATE]
proof (chain)
picking this:
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
q \<in> \<P> k u
[PROOF STEP]
have "x \<in> Gen (paths_le k (\<P> k u))"
[PROOF STATE]
proof (prove)
using this:
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
q \<in> \<P> k u
goal (1 subgoal):
1. x \<in> Gen (paths_le k (\<P> k u))
[PROOF STEP]
apply (auto simp add: Gen_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>q \<in> \<P> k u; pvalid q x; q \<in> limit (Append (\<Y> (\<Z> k u) (\<P> k u) k) k) (\<P> k u); length (chars q) \<le> k\<rbrakk> \<Longrightarrow> \<exists>p. p \<in> paths_le k (\<P> k u) \<and> pvalid p x
[PROOF STEP]
apply (rule_tac x=q in exI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>q \<in> \<P> k u; pvalid q x; q \<in> limit (Append (\<Y> (\<Z> k u) (\<P> k u) k) k) (\<P> k u); length (chars q) \<le> k\<rbrakk> \<Longrightarrow> q \<in> paths_le k (\<P> k u) \<and> pvalid q x
[PROOF STEP]
by (simp add: paths_le_def)
[PROOF STATE]
proof (state)
this:
x \<in> Gen (paths_le k (\<P> k u))
goal (2 subgoals):
1. q \<in> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
2. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
with induct
[PROOF STATE]
proof (chain)
picking this:
items_le k (\<J> k u) = Gen (paths_le k (\<P> k u))
x \<in> Gen (paths_le k (\<P> k u))
[PROOF STEP]
have "x \<in> items_le k (\<J> k u)"
[PROOF STATE]
proof (prove)
using this:
items_le k (\<J> k u) = Gen (paths_le k (\<P> k u))
x \<in> Gen (paths_le k (\<P> k u))
goal (1 subgoal):
1. x \<in> items_le k (\<J> k u)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x \<in> items_le k (\<J> k u)
goal (2 subgoals):
1. q \<in> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
2. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> items_le k (\<J> k u)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
x \<in> items_le k (\<J> k u)
goal (1 subgoal):
1. x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
using LocalLexing.items_le_def LocalLexing_axioms \<J>_subset_Suc_u
[PROOF STATE]
proof (prove)
using this:
x \<in> items_le k (\<J> k u)
LocalLexing ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel \<Longrightarrow> items_le ?k ?I = {x \<in> ?I. item_end x \<le> ?k}
LocalLexing \<NN> \<TT> \<RR> \<SS> Lex Sel
\<J> ?k ?u \<subseteq> \<J> ?k (Suc ?u)
goal (1 subgoal):
1. x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
x \<in> items_le k (\<J> k (Suc u))
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
case 2
[PROOF STATE]
proof (state)
this:
q \<notin> \<P> k u
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
have q_is_limit: "q \<in> limit (Append (\<Y> (\<Z> k u) (\<P> k u) k) k) (\<P> k u)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. q \<in> limit (Append (\<Y> (\<Z> k u) (\<P> k u) k) k) (\<P> k u)
[PROOF STEP]
using q
[PROOF STATE]
proof (prove)
using this:
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
goal (1 subgoal):
1. q \<in> limit (Append (\<Y> (\<Z> k u) (\<P> k u) k) k) (\<P> k u)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
q \<in> limit (Append (\<Y> (\<Z> k u) (\<P> k u) k) k) (\<P> k u)
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
from limit_Append_path_nonelem_split[OF q_is_limit 2]
[PROOF STATE]
proof (chain)
picking this:
\<exists>qa ts. q = qa @ ts \<and> qa \<in> \<P> k u \<and> charslength qa = k \<and> admissible (qa @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
[PROOF STEP]
obtain p ts where p_ts:
"q = p @ ts \<and>
p \<in> \<P> k u \<and>
charslength p = k \<and>
admissible (p @ ts) \<and>
(\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])"
[PROOF STATE]
proof (prove)
using this:
\<exists>qa ts. q = qa @ ts \<and> qa \<in> \<P> k u \<and> charslength qa = k \<and> admissible (qa @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
goal (1 subgoal):
1. (\<And>p ts. q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = []) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
[PROOF STEP]
have ts_nonempty: "ts \<noteq> []"
[PROOF STATE]
proof (prove)
using this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
goal (1 subgoal):
1. ts \<noteq> []
[PROOF STEP]
using 2
[PROOF STATE]
proof (prove)
using this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
q \<notin> \<P> k u
goal (1 subgoal):
1. ts \<noteq> []
[PROOF STEP]
using self_append_conv
[PROOF STATE]
proof (prove)
using this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
q \<notin> \<P> k u
(?y = ?y @ ?ys) = (?ys = [])
goal (1 subgoal):
1. ts \<noteq> []
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
ts \<noteq> []
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
obtain T where T: "T = \<Y> (\<Z> k u) (\<P> k u) k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>T. T = \<Y> (\<Z> k u) (\<P> k u) k \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
T = \<Y> (\<Z> k u) (\<P> k u) k
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
obtain J where J: "J = \<pi> k T (Gen (paths_le k (\<P> k u)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>J. J = \<pi> k T (Gen (paths_le k (\<P> k u))) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
J = \<pi> k T (Gen (paths_le k (\<P> k u)))
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
from q p_ts
[PROOF STATE]
proof (chain)
picking this:
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
[PROOF STEP]
have chars_of_token_is_empty: "\<And> t. t\<in>set ts \<Longrightarrow> chars_of_token t = []"
[PROOF STATE]
proof (prove)
using this:
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
goal (1 subgoal):
1. \<And>t. t \<in> set ts \<Longrightarrow> chars_of_token t = []
[PROOF STEP]
using charslength_appendix_is_empty chars_append charslength.simps le_add1 le_imp_less_Suc
le_neq_implies_less length_append not_less_eq
[PROOF STATE]
proof (prove)
using this:
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
\<lbrakk>charslength (?p @ ?ts) = charslength ?p; ?t \<in> set ?ts\<rbrakk> \<Longrightarrow> chars_of_token ?t = []
chars (?a @ ?b) = chars ?a @ chars ?b
charslength ?cs = length (chars ?cs)
?n \<le> ?n + ?m
?m \<le> ?n \<Longrightarrow> ?m < Suc ?n
\<lbrakk>?m \<le> ?n; ?m \<noteq> ?n\<rbrakk> \<Longrightarrow> ?m < ?n
length (?xs @ ?ys) = length ?xs + length ?ys
(\<not> ?m < ?n) = (?n < Suc ?m)
goal (1 subgoal):
1. \<And>t. t \<in> set ts \<Longrightarrow> chars_of_token t = []
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
?t \<in> set ts \<Longrightarrow> chars_of_token ?t = []
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
?t \<in> set ts \<Longrightarrow> chars_of_token ?t = []
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
fix sss :: "token list"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
have "is_prefix sss ts \<Longrightarrow> pvalid (p @ sss) x \<Longrightarrow> x \<in> J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>is_prefix sss ts; pvalid (p @ sss) x\<rbrakk> \<Longrightarrow> x \<in> J
[PROOF STEP]
proof (induct "length sss" arbitrary: sss x rule: less_induct)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>sss x. \<lbrakk>\<And>sssa x. \<lbrakk>length sssa < length sss; is_prefix sssa ts; pvalid (p @ sssa) x\<rbrakk> \<Longrightarrow> x \<in> J; is_prefix sss ts; pvalid (p @ sss) x\<rbrakk> \<Longrightarrow> x \<in> J
[PROOF STEP]
case less
[PROOF STATE]
proof (state)
this:
\<lbrakk>length ?sss < length sss; is_prefix ?sss ts; pvalid (p @ ?sss) ?x\<rbrakk> \<Longrightarrow> ?x \<in> J
is_prefix sss ts
pvalid (p @ sss) x
goal (1 subgoal):
1. \<And>sss x. \<lbrakk>\<And>sssa x. \<lbrakk>length sssa < length sss; is_prefix sssa ts; pvalid (p @ sssa) x\<rbrakk> \<Longrightarrow> x \<in> J; is_prefix sss ts; pvalid (p @ sss) x\<rbrakk> \<Longrightarrow> x \<in> J
[PROOF STEP]
have "sss = [] \<or> sss \<noteq> []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sss = [] \<or> sss \<noteq> []
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
sss = [] \<or> sss \<noteq> []
goal (1 subgoal):
1. \<And>sss x. \<lbrakk>\<And>sssa x. \<lbrakk>length sssa < length sss; is_prefix sssa ts; pvalid (p @ sssa) x\<rbrakk> \<Longrightarrow> x \<in> J; is_prefix sss ts; pvalid (p @ sss) x\<rbrakk> \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
sss = [] \<or> sss \<noteq> []
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
sss = [] \<or> sss \<noteq> []
goal (1 subgoal):
1. x \<in> J
[PROOF STEP]
proof (induct rule: disjCases2)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. sss = [] \<Longrightarrow> x \<in> J
2. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
sss = []
goal (2 subgoals):
1. sss = [] \<Longrightarrow> x \<in> J
2. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
with less
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>length ?sss < length sss; is_prefix ?sss ts; pvalid (p @ ?sss) ?x\<rbrakk> \<Longrightarrow> ?x \<in> J
is_prefix sss ts
pvalid (p @ sss) x
sss = []
[PROOF STEP]
have pvalid_p_x: "pvalid p x"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>length ?sss < length sss; is_prefix ?sss ts; pvalid (p @ ?sss) ?x\<rbrakk> \<Longrightarrow> ?x \<in> J
is_prefix sss ts
pvalid (p @ sss) x
sss = []
goal (1 subgoal):
1. pvalid p x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
pvalid p x
goal (2 subgoals):
1. sss = [] \<Longrightarrow> x \<in> J
2. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have charslength_p: "charslength p \<le> k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. charslength p \<le> k
[PROOF STEP]
using p_ts
[PROOF STATE]
proof (prove)
using this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
goal (1 subgoal):
1. charslength p \<le> k
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
charslength p \<le> k
goal (2 subgoals):
1. sss = [] \<Longrightarrow> x \<in> J
2. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
with p_ts
[PROOF STATE]
proof (chain)
picking this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
charslength p \<le> k
[PROOF STEP]
have "p \<in> paths_le k (\<P> k u)"
[PROOF STATE]
proof (prove)
using this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
charslength p \<le> k
goal (1 subgoal):
1. p \<in> paths_le k (\<P> k u)
[PROOF STEP]
by (simp add: paths_le_def)
[PROOF STATE]
proof (state)
this:
p \<in> paths_le k (\<P> k u)
goal (2 subgoals):
1. sss = [] \<Longrightarrow> x \<in> J
2. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
with pvalid_p_x
[PROOF STATE]
proof (chain)
picking this:
pvalid p x
p \<in> paths_le k (\<P> k u)
[PROOF STEP]
have "x \<in> Gen (paths_le k (\<P> k u))"
[PROOF STATE]
proof (prove)
using this:
pvalid p x
p \<in> paths_le k (\<P> k u)
goal (1 subgoal):
1. x \<in> Gen (paths_le k (\<P> k u))
[PROOF STEP]
using Gen_def mem_Collect_eq
[PROOF STATE]
proof (prove)
using this:
pvalid p x
p \<in> paths_le k (\<P> k u)
Gen ?P = {uu_. \<exists>x p. uu_ = x \<and> p \<in> ?P \<and> pvalid p x}
(?a \<in> Collect ?P) = ?P ?a
goal (1 subgoal):
1. x \<in> Gen (paths_le k (\<P> k u))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> Gen (paths_le k (\<P> k u))
goal (2 subgoals):
1. sss = [] \<Longrightarrow> x \<in> J
2. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> Gen (paths_le k (\<P> k u))
[PROOF STEP]
have "x \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))"
[PROOF STATE]
proof (prove)
using this:
x \<in> Gen (paths_le k (\<P> k u))
goal (1 subgoal):
1. x \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
[PROOF STEP]
using \<pi>_apply_setmonotone
[PROOF STATE]
proof (prove)
using this:
x \<in> Gen (paths_le k (\<P> k u))
?x \<in> ?I \<Longrightarrow> ?x \<in> \<pi> ?k ?T ?I
goal (1 subgoal):
1. x \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
goal (2 subgoals):
1. sss = [] \<Longrightarrow> x \<in> J
2. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
[PROOF STEP]
show "x \<in> J"
[PROOF STATE]
proof (prove)
using this:
x \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
goal (1 subgoal):
1. x \<in> J
[PROOF STEP]
using pvalid_item_end q J LocalLexing.items_le_def
LocalLexing_axioms charslength_p mem_Collect_eq pvalid_p_x
[PROOF STATE]
proof (prove)
using this:
x \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
pvalid ?p ?x \<Longrightarrow> item_end ?x = charslength ?p
pvalid q x__ \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
J = \<pi> k T (Gen (paths_le k (\<P> k u)))
LocalLexing ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel \<Longrightarrow> items_le ?k ?I = {x \<in> ?I. item_end x \<le> ?k}
LocalLexing \<NN> \<TT> \<RR> \<SS> Lex Sel
charslength p \<le> k
(?a \<in> Collect ?P) = ?P ?a
pvalid p x
goal (1 subgoal):
1. x \<in> J
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> J
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
case 2
[PROOF STATE]
proof (state)
this:
sss \<noteq> []
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
sss \<noteq> []
[PROOF STEP]
have "\<exists> a ss. sss = ss@[a]"
[PROOF STATE]
proof (prove)
using this:
sss \<noteq> []
goal (1 subgoal):
1. \<exists>a ss. sss = ss @ [a]
[PROOF STEP]
using rev_exhaust
[PROOF STATE]
proof (prove)
using this:
sss \<noteq> []
\<lbrakk>?xs = [] \<Longrightarrow> ?P; \<And>ys y. ?xs = ys @ [y] \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. \<exists>a ss. sss = ss @ [a]
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>a ss. sss = ss @ [a]
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>a ss. sss = ss @ [a]
[PROOF STEP]
obtain a ss where snoc: "sss = ss@[a]"
[PROOF STATE]
proof (prove)
using this:
\<exists>a ss. sss = ss @ [a]
goal (1 subgoal):
1. (\<And>ss a. sss = ss @ [a] \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
sss = ss @ [a]
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
obtain p' where p': "p' = p @ ss"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>p'. p' = p @ ss \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
p' = p @ ss
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p' = p @ ss
[PROOF STEP]
have "pvalid_left (p'@[a]) x"
[PROOF STATE]
proof (prove)
using this:
p' = p @ ss
goal (1 subgoal):
1. pvalid_left (p' @ [a]) x
[PROOF STEP]
using snoc less pvalid_left
[PROOF STATE]
proof (prove)
using this:
p' = p @ ss
sss = ss @ [a]
\<lbrakk>length ?sss < length sss; is_prefix ?sss ts; pvalid (p @ ?sss) ?x\<rbrakk> \<Longrightarrow> ?x \<in> J
is_prefix sss ts
pvalid (p @ sss) x
pvalid ?p ?x = pvalid_left ?p ?x
goal (1 subgoal):
1. pvalid_left (p' @ [a]) x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
pvalid_left (p' @ [a]) x
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
from iffD1[OF pvalid_left_def this]
[PROOF STATE]
proof (chain)
picking this:
\<exists>u \<gamma>. wellformed_tokens (p' @ [a]) \<and> wellformed_item x \<and> u \<le> length (p' @ [a]) \<and> charslength (p' @ [a]) = item_end x \<and> charslength (take u (p' @ [a])) = item_origin x \<and> is_leftderivation (terminals (take u (p' @ [a])) @ [item_nonterminal x] @ \<gamma>) \<and> leftderives (item_\<alpha> x) (terminals (drop u (p' @ [a])))
[PROOF STEP]
obtain r \<omega> where pvalid:
"wellformed_tokens (p' @ [a]) \<and>
wellformed_item x \<and>
r \<le> length (p' @ [a]) \<and>
charslength (p' @ [a]) = item_end x \<and>
charslength (take r (p' @ [a])) = item_origin x \<and>
is_leftderivation (terminals (take r (p' @ [a])) @ [item_nonterminal x] @ \<omega>) \<and>
leftderives (item_\<alpha> x) (terminals (drop r (p' @ [a])))"
[PROOF STATE]
proof (prove)
using this:
\<exists>u \<gamma>. wellformed_tokens (p' @ [a]) \<and> wellformed_item x \<and> u \<le> length (p' @ [a]) \<and> charslength (p' @ [a]) = item_end x \<and> charslength (take u (p' @ [a])) = item_origin x \<and> is_leftderivation (terminals (take u (p' @ [a])) @ [item_nonterminal x] @ \<gamma>) \<and> leftderives (item_\<alpha> x) (terminals (drop u (p' @ [a])))
goal (1 subgoal):
1. (\<And>r \<omega>. wellformed_tokens (p' @ [a]) \<and> wellformed_item x \<and> r \<le> length (p' @ [a]) \<and> charslength (p' @ [a]) = item_end x \<and> charslength (take r (p' @ [a])) = item_origin x \<and> is_leftderivation (terminals (take r (p' @ [a])) @ [item_nonterminal x] @ \<omega>) \<and> leftderives (item_\<alpha> x) (terminals (drop r (p' @ [a]))) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
wellformed_tokens (p' @ [a]) \<and> wellformed_item x \<and> r \<le> length (p' @ [a]) \<and> charslength (p' @ [a]) = item_end x \<and> charslength (take r (p' @ [a])) = item_origin x \<and> is_leftderivation (terminals (take r (p' @ [a])) @ [item_nonterminal x] @ \<omega>) \<and> leftderives (item_\<alpha> x) (terminals (drop r (p' @ [a])))
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
obtain q' where q': "q' = p'@[a]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>q'. q' = p' @ [a] \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
q' = p' @ [a]
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have is_prefix_ss_ts: "is_prefix ss ts"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_prefix ss ts
[PROOF STEP]
using snoc less
[PROOF STATE]
proof (prove)
using this:
sss = ss @ [a]
\<lbrakk>length ?sss < length sss; is_prefix ?sss ts; pvalid (p @ ?sss) ?x\<rbrakk> \<Longrightarrow> ?x \<in> J
is_prefix sss ts
pvalid (p @ sss) x
goal (1 subgoal):
1. is_prefix ss ts
[PROOF STEP]
by (simp add: is_prefix_append)
[PROOF STATE]
proof (state)
this:
is_prefix ss ts
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
is_prefix ss ts
[PROOF STEP]
have "is_prefix (p@ss) q"
[PROOF STATE]
proof (prove)
using this:
is_prefix ss ts
goal (1 subgoal):
1. is_prefix (p @ ss) q
[PROOF STEP]
using p' snoc p_ts
[PROOF STATE]
proof (prove)
using this:
is_prefix ss ts
p' = p @ ss
sss = ss @ [a]
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
goal (1 subgoal):
1. is_prefix (p @ ss) q
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
is_prefix (p @ ss) q
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
is_prefix (p @ ss) q
[PROOF STEP]
have "is_prefix p' q"
[PROOF STATE]
proof (prove)
using this:
is_prefix (p @ ss) q
goal (1 subgoal):
1. is_prefix p' q
[PROOF STEP]
using p'
[PROOF STATE]
proof (prove)
using this:
is_prefix (p @ ss) q
p' = p @ ss
goal (1 subgoal):
1. is_prefix p' q
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
is_prefix p' q
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
is_prefix p' q
[PROOF STEP]
have h1: "p' \<in> \<PP>"
[PROOF STATE]
proof (prove)
using this:
is_prefix p' q
goal (1 subgoal):
1. p' \<in> \<PP>
[PROOF STEP]
using q \<PP>_covers_\<P> prefixes_are_paths' subsetCE
[PROOF STATE]
proof (prove)
using this:
is_prefix p' q
pvalid q x__ \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
\<P> ?k ?u \<subseteq> \<PP>
\<lbrakk>?p \<in> \<PP>; is_prefix ?q ?p\<rbrakk> \<Longrightarrow> ?q \<in> \<PP>
\<lbrakk>?A \<subseteq> ?B; ?c \<notin> ?A \<Longrightarrow> ?P; ?c \<in> ?B \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. p' \<in> \<PP>
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
p' \<in> \<PP>
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have charslength_ss: "charslength ss = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. charslength ss = 0
[PROOF STEP]
apply (rule empty_tokens_have_charslength_0)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>t. t \<in> set ss \<Longrightarrow> chars_of_token t = []
[PROOF STEP]
by (metis is_prefix_ss_ts append_is_Nil_conv chars_append chars_of_token_is_empty
charslength.simps charslength_0 is_prefix_def length_greater_0_conv list.size(3))
[PROOF STATE]
proof (state)
this:
charslength ss = 0
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
charslength ss = 0
[PROOF STEP]
have h2: "charslength p' = k"
[PROOF STATE]
proof (prove)
using this:
charslength ss = 0
goal (1 subgoal):
1. charslength p' = k
[PROOF STEP]
using p' p_ts
[PROOF STATE]
proof (prove)
using this:
charslength ss = 0
p' = p @ ss
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
goal (1 subgoal):
1. charslength p' = k
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
charslength p' = k
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have a_in_ts: "a \<in> set ts"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<in> set ts
[PROOF STEP]
by (metis in_set_dropD is_prefix_append is_prefix_cons list.set_intros(1)
snoc less(2))
[PROOF STATE]
proof (state)
this:
a \<in> set ts
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a \<in> set ts
[PROOF STEP]
have h3: "a \<in> T"
[PROOF STATE]
proof (prove)
using this:
a \<in> set ts
goal (1 subgoal):
1. a \<in> T
[PROOF STEP]
using T p_ts
[PROOF STATE]
proof (prove)
using this:
a \<in> set ts
T = \<Y> (\<Z> k u) (\<P> k u) k
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
goal (1 subgoal):
1. a \<in> T
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
a \<in> T
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have h4: "T \<subseteq> \<X> k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T \<subseteq> \<X> k
[PROOF STEP]
using LocalLexing.\<Z>.simps(2) LocalLexing_axioms T \<Z>_subset_\<X>
[PROOF STATE]
proof (prove)
using this:
LocalLexing ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel \<Longrightarrow> LocalLexing.\<Z> ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel ?Doc ?k (Suc ?u) = LocalLexing.\<Y> ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel ?Doc (LocalLexing.\<Z> ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel ?Doc ?k ?u) (LocalLexing.\<P> ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel ?Doc ?k ?u) ?k
LocalLexing \<NN> \<TT> \<RR> \<SS> Lex Sel
T = \<Y> (\<Z> k u) (\<P> k u) k
\<Z> ?k ?n \<subseteq> \<X> ?k
goal (1 subgoal):
1. T \<subseteq> \<X> k
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
T \<subseteq> \<X> k
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
note h5 = q'
[PROOF STATE]
proof (state)
this:
q' = p' @ [a]
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
obtain N where N: "N = item_nonterminal x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>N. N = item_nonterminal x \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
N = item_nonterminal x
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
obtain \<alpha> where \<alpha>: "\<alpha> = item_\<alpha> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>\<alpha>. \<alpha> = item_\<alpha> x \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<alpha> = item_\<alpha> x
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
obtain \<beta> where \<beta>: "\<beta> = item_\<beta> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>\<beta>. \<beta> = item_\<beta> x \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<beta> = item_\<beta> x
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have item_rule_x: "item_rule x = (N, \<alpha> @ \<beta>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. item_rule x = (N, \<alpha> @ \<beta>)
[PROOF STEP]
using N \<alpha> \<beta> item_nonterminal_def item_rhs_def item_rhs_split
[PROOF STATE]
proof (prove)
using this:
N = item_nonterminal x
\<alpha> = item_\<alpha> x
\<beta> = item_\<beta> x
item_nonterminal ?x = fst (item_rule ?x)
item_rhs ?x = snd (item_rule ?x)
item_rhs ?x = item_\<alpha> ?x @ item_\<beta> ?x
goal (1 subgoal):
1. item_rule x = (N, \<alpha> @ \<beta>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
item_rule x = (N, \<alpha> @ \<beta>)
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have "wellformed_item x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_item x
[PROOF STEP]
using pvalid
[PROOF STATE]
proof (prove)
using this:
wellformed_tokens (p' @ [a]) \<and> wellformed_item x \<and> r \<le> length (p' @ [a]) \<and> charslength (p' @ [a]) = item_end x \<and> charslength (take r (p' @ [a])) = item_origin x \<and> is_leftderivation (terminals (take r (p' @ [a])) @ [item_nonterminal x] @ \<omega>) \<and> leftderives (item_\<alpha> x) (terminals (drop r (p' @ [a])))
goal (1 subgoal):
1. wellformed_item x
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
wellformed_item x
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
wellformed_item x
[PROOF STEP]
have h6: "(N, \<alpha>@\<beta>) \<in> \<RR>"
[PROOF STATE]
proof (prove)
using this:
wellformed_item x
goal (1 subgoal):
1. (N, \<alpha> @ \<beta>) \<in> \<RR>
[PROOF STEP]
using item_rule_x
[PROOF STATE]
proof (prove)
using this:
wellformed_item x
item_rule x = (N, \<alpha> @ \<beta>)
goal (1 subgoal):
1. (N, \<alpha> @ \<beta>) \<in> \<RR>
[PROOF STEP]
by (simp add: wellformed_item_def)
[PROOF STATE]
proof (state)
this:
(N, \<alpha> @ \<beta>) \<in> \<RR>
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have h7: "r \<le> length q'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r \<le> length q'
[PROOF STEP]
using pvalid q'
[PROOF STATE]
proof (prove)
using this:
wellformed_tokens (p' @ [a]) \<and> wellformed_item x \<and> r \<le> length (p' @ [a]) \<and> charslength (p' @ [a]) = item_end x \<and> charslength (take r (p' @ [a])) = item_origin x \<and> is_leftderivation (terminals (take r (p' @ [a])) @ [item_nonterminal x] @ \<omega>) \<and> leftderives (item_\<alpha> x) (terminals (drop r (p' @ [a])))
q' = p' @ [a]
goal (1 subgoal):
1. r \<le> length q'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
r \<le> length q'
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have h8: "leftderives [\<SS>] (terminals (take r q') @ [N] @ \<omega>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. leftderives [\<SS>] (terminals (take r q') @ [N] @ \<omega>)
[PROOF STEP]
using N is_leftderivation_def pvalid q'
[PROOF STATE]
proof (prove)
using this:
N = item_nonterminal x
is_leftderivation ?u = leftderives [\<SS>] ?u
wellformed_tokens (p' @ [a]) \<and> wellformed_item x \<and> r \<le> length (p' @ [a]) \<and> charslength (p' @ [a]) = item_end x \<and> charslength (take r (p' @ [a])) = item_origin x \<and> is_leftderivation (terminals (take r (p' @ [a])) @ [item_nonterminal x] @ \<omega>) \<and> leftderives (item_\<alpha> x) (terminals (drop r (p' @ [a])))
q' = p' @ [a]
goal (1 subgoal):
1. leftderives [\<SS>] (terminals (take r q') @ [N] @ \<omega>)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
leftderives [\<SS>] (terminals (take r q') @ [N] @ \<omega>)
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have h9: "leftderives \<alpha> (terminals (drop r q'))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. leftderives \<alpha> (terminals (drop r q'))
[PROOF STEP]
using \<alpha> pvalid q'
[PROOF STATE]
proof (prove)
using this:
\<alpha> = item_\<alpha> x
wellformed_tokens (p' @ [a]) \<and> wellformed_item x \<and> r \<le> length (p' @ [a]) \<and> charslength (p' @ [a]) = item_end x \<and> charslength (take r (p' @ [a])) = item_origin x \<and> is_leftderivation (terminals (take r (p' @ [a])) @ [item_nonterminal x] @ \<omega>) \<and> leftderives (item_\<alpha> x) (terminals (drop r (p' @ [a])))
q' = p' @ [a]
goal (1 subgoal):
1. leftderives \<alpha> (terminals (drop r q'))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
leftderives \<alpha> (terminals (drop r q'))
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have h10: "k = k + length (chars_of_token a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. k = k + length (chars_of_token a)
[PROOF STEP]
by (simp add: a_in_ts chars_of_token_is_empty)
[PROOF STATE]
proof (state)
this:
k = k + length (chars_of_token a)
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have h11: "x = Item (N, \<alpha> @ \<beta>) (length \<alpha>) (charslength (take r q')) k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x = Item (N, \<alpha> @ \<beta>) (length \<alpha>) (charslength (take r q')) k
[PROOF STEP]
by (metis \<alpha> charslength_ss a_in_ts append_Nil2 chars.simps(2) chars_append
chars_of_token_is_empty charslength.simps h2 item.collapse item_dot_is_\<alpha>_length
item_rule_x length_greater_0_conv list.size(3) pvalid q')
[PROOF STATE]
proof (state)
this:
x = Item (N, \<alpha> @ \<beta>) (length \<alpha>) (charslength (take r q')) k
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have x_dom: "x \<in> items_le k (\<pi> k {} (Scan T k (Gen (Prefixes p'))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> items_le k (\<pi> k {} (Scan T k (Gen (Prefixes p'))))
[PROOF STEP]
using thmD11[OF h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11]
[PROOF STATE]
proof (prove)
using this:
?I = items_le k (\<pi> k {} (Scan T k (Gen (Prefixes p')))) \<Longrightarrow> x \<in> ?I
goal (1 subgoal):
1. x \<in> items_le k (\<pi> k {} (Scan T k (Gen (Prefixes p'))))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> items_le k (\<pi> k {} (Scan T k (Gen (Prefixes p'))))
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
x \<in> items_le k (\<pi> k {} (Scan T k (Gen (Prefixes p'))))
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
fix y :: item
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
fix toks :: "token list"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
assume pvalid_toks_y: "pvalid toks y"
[PROOF STATE]
proof (state)
this:
pvalid toks y
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
assume is_prefix_toks_p': "is_prefix toks p'"
[PROOF STATE]
proof (state)
this:
is_prefix toks p'
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
is_prefix toks p'
[PROOF STEP]
have charslength_toks: "charslength toks \<le> k"
[PROOF STATE]
proof (prove)
using this:
is_prefix toks p'
goal (1 subgoal):
1. charslength toks \<le> k
[PROOF STEP]
using charslength_of_prefix h2
[PROOF STATE]
proof (prove)
using this:
is_prefix toks p'
is_prefix ?a ?b \<Longrightarrow> charslength ?a \<le> charslength ?b
charslength p' = k
goal (1 subgoal):
1. charslength toks \<le> k
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
charslength toks \<le> k
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
charslength toks \<le> k
[PROOF STEP]
have item_end_y: "item_end y \<le> k"
[PROOF STATE]
proof (prove)
using this:
charslength toks \<le> k
goal (1 subgoal):
1. item_end y \<le> k
[PROOF STEP]
using pvalid_item_end pvalid_toks_y
[PROOF STATE]
proof (prove)
using this:
charslength toks \<le> k
pvalid ?p ?x \<Longrightarrow> item_end ?x = charslength ?p
pvalid toks y
goal (1 subgoal):
1. item_end y \<le> k
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
item_end y \<le> k
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have "is_prefix toks p \<or> (\<exists> ss'. is_prefix ss' ss \<and> toks = p@ss')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_prefix toks p \<or> (\<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss')
[PROOF STEP]
using is_prefix_of_append is_prefix_toks_p' p'
[PROOF STATE]
proof (prove)
using this:
is_prefix ?p (?a @ ?b) \<Longrightarrow> is_prefix ?p ?a \<or> (\<exists>b'. b' \<noteq> [] \<and> is_prefix b' ?b \<and> ?p = ?a @ b')
is_prefix toks p'
p' = p @ ss
goal (1 subgoal):
1. is_prefix toks p \<or> (\<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss')
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
is_prefix toks p \<or> (\<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss')
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
is_prefix toks p \<or> (\<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss')
[PROOF STEP]
have "y \<in> J"
[PROOF STATE]
proof (prove)
using this:
is_prefix toks p \<or> (\<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss')
goal (1 subgoal):
1. y \<in> J
[PROOF STEP]
proof (induct rule: disjCases2)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. is_prefix toks p \<Longrightarrow> y \<in> J
2. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
is_prefix toks p
goal (2 subgoals):
1. is_prefix toks p \<Longrightarrow> y \<in> J
2. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
is_prefix toks p
[PROOF STEP]
have "toks \<in> \<P> k u"
[PROOF STATE]
proof (prove)
using this:
is_prefix toks p
goal (1 subgoal):
1. toks \<in> \<P> k u
[PROOF STEP]
using p_ts prefixes_are_paths
[PROOF STATE]
proof (prove)
using this:
is_prefix toks p
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
\<lbrakk>?p \<in> \<P> ?k ?u; is_prefix ?x ?p\<rbrakk> \<Longrightarrow> ?x \<in> \<P> ?k ?u
goal (1 subgoal):
1. toks \<in> \<P> k u
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
toks \<in> \<P> k u
goal (2 subgoals):
1. is_prefix toks p \<Longrightarrow> y \<in> J
2. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
with charslength_toks
[PROOF STATE]
proof (chain)
picking this:
charslength toks \<le> k
toks \<in> \<P> k u
[PROOF STEP]
have "toks \<in> paths_le k (\<P> k u)"
[PROOF STATE]
proof (prove)
using this:
charslength toks \<le> k
toks \<in> \<P> k u
goal (1 subgoal):
1. toks \<in> paths_le k (\<P> k u)
[PROOF STEP]
by (simp add: paths_le_def)
[PROOF STATE]
proof (state)
this:
toks \<in> paths_le k (\<P> k u)
goal (2 subgoals):
1. is_prefix toks p \<Longrightarrow> y \<in> J
2. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
toks \<in> paths_le k (\<P> k u)
[PROOF STEP]
have "y \<in> Gen (paths_le k (\<P> k u))"
[PROOF STATE]
proof (prove)
using this:
toks \<in> paths_le k (\<P> k u)
goal (1 subgoal):
1. y \<in> Gen (paths_le k (\<P> k u))
[PROOF STEP]
using pvalid_toks_y
Gen_def mem_Collect_eq
[PROOF STATE]
proof (prove)
using this:
toks \<in> paths_le k (\<P> k u)
pvalid toks y
Gen ?P = {uu_. \<exists>x p. uu_ = x \<and> p \<in> ?P \<and> pvalid p x}
(?a \<in> Collect ?P) = ?P ?a
goal (1 subgoal):
1. y \<in> Gen (paths_le k (\<P> k u))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
y \<in> Gen (paths_le k (\<P> k u))
goal (2 subgoals):
1. is_prefix toks p \<Longrightarrow> y \<in> J
2. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
y \<in> Gen (paths_le k (\<P> k u))
[PROOF STEP]
have "y \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))"
[PROOF STATE]
proof (prove)
using this:
y \<in> Gen (paths_le k (\<P> k u))
goal (1 subgoal):
1. y \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
[PROOF STEP]
using \<pi>_apply_setmonotone
[PROOF STATE]
proof (prove)
using this:
y \<in> Gen (paths_le k (\<P> k u))
?x \<in> ?I \<Longrightarrow> ?x \<in> \<pi> ?k ?T ?I
goal (1 subgoal):
1. y \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
y \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
goal (2 subgoals):
1. is_prefix toks p \<Longrightarrow> y \<in> J
2. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
y \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
[PROOF STEP]
show "y \<in> J"
[PROOF STATE]
proof (prove)
using this:
y \<in> \<pi> k T (Gen (paths_le k (\<P> k u)))
goal (1 subgoal):
1. y \<in> J
[PROOF STEP]
by (simp add: J items_le_def item_end_y)
[PROOF STATE]
proof (state)
this:
y \<in> J
goal (1 subgoal):
1. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
case 2
[PROOF STATE]
proof (state)
this:
\<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss'
goal (1 subgoal):
1. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss'
[PROOF STEP]
obtain ss' where ss': "is_prefix ss' ss \<and> toks = p@ss'"
[PROOF STATE]
proof (prove)
using this:
\<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss'
goal (1 subgoal):
1. (\<And>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
is_prefix ss' ss \<and> toks = p @ ss'
goal (1 subgoal):
1. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
is_prefix ss' ss \<and> toks = p @ ss'
[PROOF STEP]
have l1: "length ss' < length sss"
[PROOF STATE]
proof (prove)
using this:
is_prefix ss' ss \<and> toks = p @ ss'
goal (1 subgoal):
1. length ss' < length sss
[PROOF STEP]
using append_eq_conv_conj append_self_conv is_prefix_length length_append
less_le_trans nat_neq_iff not_Cons_self2 not_add_less1 snoc
[PROOF STATE]
proof (prove)
using this:
is_prefix ss' ss \<and> toks = p @ ss'
(?xs @ ?ys = ?zs) = (?xs = take (length ?xs) ?zs \<and> ?ys = drop (length ?xs) ?zs)
(?xs @ ?ys = ?xs) = (?ys = [])
is_prefix ?a ?b \<Longrightarrow> length ?a \<le> length ?b
length (?xs @ ?ys) = length ?xs + length ?ys
\<lbrakk>?x < ?y; ?y \<le> ?z\<rbrakk> \<Longrightarrow> ?x < ?z
(?m \<noteq> ?n) = (?m < ?n \<or> ?n < ?m)
?x # ?xs \<noteq> ?xs
\<not> ?i + ?j < ?i
sss = ss @ [a]
goal (1 subgoal):
1. length ss' < length sss
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
length ss' < length sss
goal (1 subgoal):
1. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
have l2: "is_prefix ss' ts"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_prefix ss' ts
[PROOF STEP]
using ss' is_prefix_ss_ts
[PROOF STATE]
proof (prove)
using this:
is_prefix ss' ss \<and> toks = p @ ss'
is_prefix ss ts
goal (1 subgoal):
1. is_prefix ss' ts
[PROOF STEP]
by (metis append_dropped_prefix is_prefix_append)
[PROOF STATE]
proof (state)
this:
is_prefix ss' ts
goal (1 subgoal):
1. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
have l3: "pvalid (p @ ss') y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pvalid (p @ ss') y
[PROOF STEP]
using ss' pvalid_toks_y
[PROOF STATE]
proof (prove)
using this:
is_prefix ss' ss \<and> toks = p @ ss'
pvalid toks y
goal (1 subgoal):
1. pvalid (p @ ss') y
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
pvalid (p @ ss') y
goal (1 subgoal):
1. \<exists>ss'. is_prefix ss' ss \<and> toks = p @ ss' \<Longrightarrow> y \<in> J
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<in> J
[PROOF STEP]
using less.hyps[OF l1 l2 l3]
[PROOF STATE]
proof (prove)
using this:
y \<in> J
goal (1 subgoal):
1. y \<in> J
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
y \<in> J
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
y \<in> J
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>pvalid ?toks2 ?y2; is_prefix ?toks2 p'\<rbrakk> \<Longrightarrow> ?y2 \<in> J
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>pvalid ?toks2 ?y2; is_prefix ?toks2 p'\<rbrakk> \<Longrightarrow> ?y2 \<in> J
[PROOF STEP]
have "Gen (Prefixes p') \<subseteq> J"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>pvalid ?toks2 ?y2; is_prefix ?toks2 p'\<rbrakk> \<Longrightarrow> ?y2 \<in> J
goal (1 subgoal):
1. Gen (Prefixes p') \<subseteq> J
[PROOF STEP]
by (meson Gen_implies_pvalid Prefixes_is_prefix subsetI)
[PROOF STATE]
proof (state)
this:
Gen (Prefixes p') \<subseteq> J
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
with x_dom
[PROOF STATE]
proof (chain)
picking this:
x \<in> items_le k (\<pi> k {} (Scan T k (Gen (Prefixes p'))))
Gen (Prefixes p') \<subseteq> J
[PROOF STEP]
have r0: "x \<in> items_le k (\<pi> k {} (Scan T k J))"
[PROOF STATE]
proof (prove)
using this:
x \<in> items_le k (\<pi> k {} (Scan T k (Gen (Prefixes p'))))
Gen (Prefixes p') \<subseteq> J
goal (1 subgoal):
1. x \<in> items_le k (\<pi> k {} (Scan T k J))
[PROOF STEP]
by (metis (no_types, lifting) LocalLexing.items_le_def LocalLexing_axioms
mem_Collect_eq mono_Scan mono_\<pi> mono_subset_elem subsetI)
[PROOF STATE]
proof (state)
this:
x \<in> items_le k (\<pi> k {} (Scan T k J))
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> items_le k (\<pi> k {} (Scan T k J))
[PROOF STEP]
have x_in_\<pi>: "x \<in> \<pi> k {} (Scan T k J)"
[PROOF STATE]
proof (prove)
using this:
x \<in> items_le k (\<pi> k {} (Scan T k J))
goal (1 subgoal):
1. x \<in> \<pi> k {} (Scan T k J)
[PROOF STEP]
using LocalLexing.items_le_is_filter LocalLexing_axioms subsetCE
[PROOF STATE]
proof (prove)
using this:
x \<in> items_le k (\<pi> k {} (Scan T k J))
LocalLexing ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel \<Longrightarrow> items_le ?k ?I \<subseteq> ?I
LocalLexing \<NN> \<TT> \<RR> \<SS> Lex Sel
\<lbrakk>?A \<subseteq> ?B; ?c \<notin> ?A \<Longrightarrow> ?P; ?c \<in> ?B \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
goal (1 subgoal):
1. x \<in> \<pi> k {} (Scan T k J)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> \<pi> k {} (Scan T k J)
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have r1: "Scan T k J = J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Scan T k J = J
[PROOF STEP]
by (simp add: J Scan_\<pi>_fix)
[PROOF STATE]
proof (state)
this:
Scan T k J = J
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
have r2: "\<pi> k {} J = J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<pi> k {} J = J
[PROOF STEP]
using \<pi>_idempotent'
[PROOF STATE]
proof (prove)
using this:
\<pi> ?k {} (\<pi> ?k ?T ?I) = \<pi> ?k ?T ?I
goal (1 subgoal):
1. \<pi> k {} J = J
[PROOF STEP]
using J
[PROOF STATE]
proof (prove)
using this:
\<pi> ?k {} (\<pi> ?k ?T ?I) = \<pi> ?k ?T ?I
J = \<pi> k T (Gen (paths_le k (\<P> k u)))
goal (1 subgoal):
1. \<pi> k {} J = J
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<pi> k {} J = J
goal (1 subgoal):
1. sss \<noteq> [] \<Longrightarrow> x \<in> J
[PROOF STEP]
from x_in_\<pi> r1 r2
[PROOF STATE]
proof (chain)
picking this:
x \<in> \<pi> k {} (Scan T k J)
Scan T k J = J
\<pi> k {} J = J
[PROOF STEP]
show "x \<in> J"
[PROOF STATE]
proof (prove)
using this:
x \<in> \<pi> k {} (Scan T k J)
Scan T k J = J
\<pi> k {} J = J
goal (1 subgoal):
1. x \<in> J
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> J
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<in> J
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<lbrakk>is_prefix sss ts; pvalid (p @ sss) x\<rbrakk> \<Longrightarrow> x \<in> J
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>is_prefix ?sss2 ts; pvalid (p @ ?sss2) x\<rbrakk> \<Longrightarrow> x \<in> J
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
note th = this
[PROOF STATE]
proof (state)
this:
\<lbrakk>is_prefix ?sss2 ts; pvalid (p @ ?sss2) x\<rbrakk> \<Longrightarrow> x \<in> J
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
have x_in_J: "x \<in> J"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> J
[PROOF STEP]
apply (rule th[of ts])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. is_prefix ts ts
2. pvalid (p @ ts) x
[PROOF STEP]
apply (simp add: is_prefix_eq_proper_prefix)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pvalid (p @ ts) x
[PROOF STEP]
using p_ts q
[PROOF STATE]
proof (prove)
using this:
q = p @ ts \<and> p \<in> \<P> k u \<and> charslength p = k \<and> admissible (p @ ts) \<and> (\<forall>t\<in>set ts. t \<in> \<Y> (\<Z> k u) (\<P> k u) k) \<and> (\<forall>t\<in>set (butlast ts). chars_of_token t = [])
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
goal (1 subgoal):
1. pvalid (p @ ts) x
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> J
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
have \<T>_eq_\<Z>: "\<T> k (Suc u) = \<Z> k (Suc u)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<T> k (Suc u) = \<Z> k (Suc u)
[PROOF STEP]
using induct induct_tokens \<T>_equals_\<Z>_induct_step
[PROOF STATE]
proof (prove)
using this:
items_le k (\<J> k u) = Gen (paths_le k (\<P> k u))
\<T> k u = \<Z> k u
\<lbrakk>items_le ?k (\<J> ?k ?u) = Gen (paths_le ?k (\<P> ?k ?u)); \<T> ?k ?u = \<Z> ?k ?u\<rbrakk> \<Longrightarrow> \<T> ?k (Suc ?u) = \<Z> ?k (Suc ?u)
goal (1 subgoal):
1. \<T> k (Suc u) = \<Z> k (Suc u)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<T> k (Suc u) = \<Z> k (Suc u)
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
have T_alt: "T = \<T> k (Suc u)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T = \<T> k (Suc u)
[PROOF STEP]
using \<T>_eq_\<Z> T
[PROOF STATE]
proof (prove)
using this:
\<T> k (Suc u) = \<Z> k (Suc u)
T = \<Y> (\<Z> k u) (\<P> k u) k
goal (1 subgoal):
1. T = \<T> k (Suc u)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
T = \<T> k (Suc u)
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
have "J = \<pi> k T (items_le k (\<J> k u))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. J = \<pi> k T (items_le k (\<J> k u))
[PROOF STEP]
using induct J
[PROOF STATE]
proof (prove)
using this:
items_le k (\<J> k u) = Gen (paths_le k (\<P> k u))
J = \<pi> k T (Gen (paths_le k (\<P> k u)))
goal (1 subgoal):
1. J = \<pi> k T (items_le k (\<J> k u))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
J = \<pi> k T (items_le k (\<J> k u))
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
J = \<pi> k T (items_le k (\<J> k u))
[PROOF STEP]
have "J \<subseteq> \<pi> k T (\<J> k u)"
[PROOF STATE]
proof (prove)
using this:
J = \<pi> k T (items_le k (\<J> k u))
goal (1 subgoal):
1. J \<subseteq> \<pi> k T (\<J> k u)
[PROOF STEP]
by (simp add: items_le_is_filter monoD mono_\<pi>)
[PROOF STATE]
proof (state)
this:
J \<subseteq> \<pi> k T (\<J> k u)
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
with T_alt
[PROOF STATE]
proof (chain)
picking this:
T = \<T> k (Suc u)
J \<subseteq> \<pi> k T (\<J> k u)
[PROOF STEP]
have "J \<subseteq> \<J> k (Suc u)"
[PROOF STATE]
proof (prove)
using this:
T = \<T> k (Suc u)
J \<subseteq> \<pi> k T (\<J> k u)
goal (1 subgoal):
1. J \<subseteq> \<J> k (Suc u)
[PROOF STEP]
using \<J>.simps(2)
[PROOF STATE]
proof (prove)
using this:
T = \<T> k (Suc u)
J \<subseteq> \<pi> k T (\<J> k u)
\<J> ?k (Suc ?u) = \<pi> ?k (\<T> ?k (Suc ?u)) (\<J> ?k ?u)
goal (1 subgoal):
1. J \<subseteq> \<J> k (Suc u)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
J \<subseteq> \<J> k (Suc u)
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
with x_in_J
[PROOF STATE]
proof (chain)
picking this:
x \<in> J
J \<subseteq> \<J> k (Suc u)
[PROOF STEP]
have "x \<in> \<J> k (Suc u)"
[PROOF STATE]
proof (prove)
using this:
x \<in> J
J \<subseteq> \<J> k (Suc u)
goal (1 subgoal):
1. x \<in> \<J> k (Suc u)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> \<J> k (Suc u)
goal (1 subgoal):
1. q \<notin> \<P> k u \<Longrightarrow> x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> \<J> k (Suc u)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
x \<in> \<J> k (Suc u)
goal (1 subgoal):
1. x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
using LocalLexing.items_le_def LocalLexing_axioms pvalid_item_end q
[PROOF STATE]
proof (prove)
using this:
x \<in> \<J> k (Suc u)
LocalLexing ?\<NN> ?\<TT> ?\<RR> ?\<SS> ?Lex ?Sel \<Longrightarrow> items_le ?k ?I = {x \<in> ?I. item_end x \<le> ?k}
LocalLexing \<NN> \<TT> \<RR> \<SS> Lex Sel
pvalid ?p ?x \<Longrightarrow> item_end ?x = charslength ?p
pvalid q x \<and> q \<in> \<P> k (Suc u) \<and> charslength q \<le> k
goal (1 subgoal):
1. x \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> items_le k (\<J> k (Suc u))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<in> items_le k (\<J> k (Suc u))
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
?x2 \<in> Gen (paths_le k (\<P> k (Suc u))) \<Longrightarrow> ?x2 \<in> items_le k (\<J> k (Suc u))
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?x2 \<in> Gen (paths_le k (\<P> k (Suc u))) \<Longrightarrow> ?x2 \<in> items_le k (\<J> k (Suc u))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
?x2 \<in> Gen (paths_le k (\<P> k (Suc u))) \<Longrightarrow> ?x2 \<in> items_le k (\<J> k (Suc u))
goal (1 subgoal):
1. Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Gen (paths_le k (\<P> k (Suc u))) \<subseteq> items_le k (\<J> k (Suc u))
goal:
No subgoals!
[PROOF STEP]
qed |
-- An ATP hint must be used with functions.
-- This error is detected by TypeChecking.Rules.Decl.
module ATPBadHint2 where
data Bool : Set where
  false true : Bool
{-# ATP hint Bool #-}
|
"""
Author: Mark Bonney
"""
import numpy as np
from Layer import Layer
class NeuralNet(object):
    def __init__(self, learning_rate):
        self.learning_rate = learning_rate
        self.input_layer, self.output_layer = None, None
        self.num_inputs, self.num_outputs = 0, 0
        self.loss = None
        self.hidden_layers = []  # List of Layer objects
        self.bias_neuron = 1  # Constant bias of 1 added to every layer's pre-activation

    def read_input_data(self, inputs, targets):
        """
        Read input data and populate input and output layers
        :param inputs: Input data to the model
        :param targets: Desired targets
        """
        self.input_layer = np.loadtxt(inputs, delimiter=",", ndmin=2)
        self.output_layer = np.loadtxt(targets, delimiter=",", ndmin=2)
        self.num_inputs = self.input_layer.shape[0]
        self.num_outputs = self.output_layer.shape[0]

    def initialise_weights(self):
        """
        Randomly initialise weight matrices for each layer in the ANN.
        Assumes hidden_layers[0] acts as the input layer; this method appends
        the output layer before initialising the weights of every later layer.
        """
        np.random.seed(42)
        self.add_layer(self.num_outputs)
        previous_layer = self.hidden_layers[0].num_neurons
        self.hidden_layers[0].output = self.input_layer
        for layer in self.hidden_layers[1:]:
            layer.weights = np.random.rand(layer.num_neurons, previous_layer)
            previous_layer = layer.num_neurons

    def add_layer(self, num_neurons):
        """
        Add a hidden layer to the ANN
        :param num_neurons: Number of neurons in the layer
        """
        new_layer = Layer(num_neurons)
        self.hidden_layers.append(new_layer)

    def forward_pass(self):
        """
        Compute forward pass using matrices
        :return: Output matrix of ANN
        """
        previous_layer = self.input_layer
        for layer in self.hidden_layers[1:]:
            layer.output = self.sigmoid_function(np.dot(layer.weights, previous_layer) + self.bias_neuron)
            previous_layer = layer.output
        self.loss = self.output_layer - self.hidden_layers[-1].output
        return self.hidden_layers[-1].output

    @staticmethod
    def sigmoid_function(x):
        return 1.0 / (1.0 + np.exp(-x))

    @staticmethod
    def gradient_sigmoid(f):
        return (1 - f) * f

    def back_propagation(self):
        """Compute gradients and perform back propagation"""
        # Calculate delta at the output layer, using the loss
        self.hidden_layers[-1].deltas = np.array(self.loss * self.gradient_sigmoid(self.hidden_layers[-1].output))
        previous_layer = self.hidden_layers[-1]
        # Walk backwards from the layer below the output, propagating deltas
        # and updating the weights of the layer above
        for layer in self.hidden_layers[::-1][1:]:
            layer.output_gradient = self.gradient_sigmoid(layer.output)
            layer.deltas = np.dot(previous_layer.weights.T, previous_layer.deltas) * layer.output_gradient
            update = np.dot(layer.output, previous_layer.deltas.T)
            previous_layer.weights += update.T * self.learning_rate
            previous_layer = layer
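

# --- Minimal usage sketch (illustration only) --------------------------------
# Everything below is hypothetical: the CSV file names and layer sizes are made
# up, and the Layer class is assumed to expose the attributes used above
# (num_neurons, output, weights, deltas, output_gradient).
if __name__ == "__main__":
    net = NeuralNet(0.1)                     # learning rate
    net.read_input_data("inputs.csv", "targets.csv")
    net.add_layer(net.num_inputs)            # hidden_layers[0] acts as the input layer
    net.add_layer(4)                         # one hidden layer with 4 neurons
    net.initialise_weights()                 # appends the output layer and sets weights
    for _ in range(1000):
        net.forward_pass()
        net.back_propagation()
    print(net.forward_pass())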
|
-----------------------------------------------------------------------------
--
-- Module : Drool.Utils.Conversions
-- Copyright :
-- License : AllRightsReserved
--
-- Maintainer :
-- Stability :
-- Portability :
--
-- |
--
-----------------------------------------------------------------------------
module Drool.Utils.Conversions (
gtkColorToGLColor3,
gtkColorToGLColor4,
glColor3ToGtkColor,
glColor4ToGtkColor,
freqToMs,
msToFreq,
floatToComplexDouble,
floatsToComplexDoubles,
complexDoubleToFloat,
complexDoublesToFloats,
listToCArray,
listFromCArray,
interleave,
interleaveArrays,
aMax,
aLength,
aZip,
adjustBufferSize,
adjustBufferSizeBack,
blendModeSourceFromIndex,
blendModeFrameBufferFromIndex,
blendModeSourceIndex,
blendModeFrameBufferIndex,
) where
import qualified Graphics.UI.Gtk as Gtk
import Graphics.Rendering.OpenGL
import Data.Complex
import Data.Array ( Array )
import Data.Array.IArray ( (!), bounds, listArray, ixmap, amap, rangeSize )
import Data.Array.CArray ( createCArray, elems )
import Foreign.Marshal.Array ( pokeArray, peekArray )
import GHC.Float
import Data.Word ( Word16 )
gtkColorToGLColor3 :: Gtk.Color -> Color3 GLfloat
gtkColorToGLColor3 (Gtk.Color r g b) = Color3 r' g' b'
  where r' = ((fromIntegral r) / 65535.0) :: GLfloat
        g' = ((fromIntegral g) / 65535.0) :: GLfloat
        b' = ((fromIntegral b) / 65535.0) :: GLfloat
gtkColorToGLColor4 :: Gtk.Color -> Word16 -> Color4 GLfloat
gtkColorToGLColor4 (Gtk.Color r g b) a = Color4 r' g' b' a'
  where r' = ((fromIntegral r) / 65535.0) :: GLfloat
        g' = ((fromIntegral g) / 65535.0) :: GLfloat
        b' = ((fromIntegral b) / 65535.0) :: GLfloat
        a' = ((fromIntegral a) / 65535.0) :: GLfloat
glColor3ToGtkColor :: Color3 GLfloat -> Gtk.Color
glColor3ToGtkColor (Color3 r g b) = Gtk.Color r' g' b'
  where r' = round(r * 65535.0)
        g' = round(g * 65535.0)
        b' = round(b * 65535.0)
glColor4ToGtkColor :: Color4 GLfloat -> ( Gtk.Color, Word16 )
glColor4ToGtkColor (Color4 r g b a) = (Gtk.Color r' g' b', a')
  where r' = round(r * 65535.0)
        g' = round(g * 65535.0)
        b' = round(b * 65535.0)
        a' = round(a * 65535.0)
freqToMs :: Int -> Int
freqToMs f = round(1000.0 / fromIntegral f)
msToFreq :: Int -> Int
msToFreq ms = round(1000.0 / fromIntegral ms)
floatToComplexDouble :: Float -> Complex Double
floatToComplexDouble f = (float2Double f :+ 0.0) :: Complex Double
floatsToComplexDoubles :: [Float] -> [Complex Double]
floatsToComplexDoubles fs = (map (\x -> floatToComplexDouble x) fs)
-- Returns magnitude of complex double as float, with magnitude = (re^2 + im^2)^0.5
complexDoubleToFloat :: Complex Double -> Float
complexDoubleToFloat cd = realToFrac(sqrt (realPart cd * realPart cd + imagPart cd * imagPart cd)) :: Float
-- complexDoubleToFloat cd = realToFrac(sqrt (realPart cd * realPart cd)) :: Float
complexDoublesToFloats :: [Complex Double] -> [Float]
complexDoublesToFloats cds = map (\x -> (complexDoubleToFloat x) / n) cds
  where n = fromIntegral $ length cds
listToCArray lst = createCArray (0,(length lst)-1) ( \ptr -> do { pokeArray ptr lst } )
listFromCArray carr = elems carr
interleave :: [a] -> [a] -> [a]
interleave xsA xsB = foldl (\acc (a,b) -> acc ++ [a,b] ) [] (zip xsA xsB)
aLength a = rangeSize $ bounds a
aMax a = snd $ bounds a
interleaveArrays :: Array Int e -> Array Int e -> Array Int e
interleaveArrays a b = listArray (0,(aLength a) + (aMax b)) (concat [ [(a ! i),(b ! i)] | i <- [0..(min (aMax a) (aMax b))] ])
aZip a b = [ ((a ! i),(b ! i)) | i <- [0..(min (aMax a) (aMax b))] ]
adjustBufferSize :: [a] -> Int -> [a]
adjustBufferSize buf maxLen = drop ((length buf)-maxLen) buf -- drop does not alter list for values <= 0
adjustBufferSizeBack :: [a] -> Int -> [a]
adjustBufferSizeBack buf maxLen = take maxLen buf -- take does not alter lists shorter than maxLen
blendSourceModes :: [ BlendingFactor ]
blendSourceModes = [ Zero, One, DstColor, OneMinusDstColor, SrcAlpha,
                     OneMinusSrcAlpha, DstAlpha, OneMinusDstAlpha,
                     SrcAlphaSaturate ]
blendFrameBufferModes :: [ BlendingFactor ]
blendFrameBufferModes = [ Zero, One, SrcColor, OneMinusSrcColor, SrcAlpha,
                          OneMinusSrcAlpha, DstAlpha, OneMinusDstAlpha ]
blendModeSourceFromIndex :: Int -> BlendingFactor
blendModeSourceFromIndex i = blendSourceModes !! i
blendModeFrameBufferFromIndex :: Int -> BlendingFactor
blendModeFrameBufferFromIndex i = blendFrameBufferModes !! i
blendModeSourceIndex :: BlendingFactor -> Int
blendModeSourceIndex bm = blendModeSourceIndex' bm 0 blendSourceModes
blendModeSourceIndex' :: BlendingFactor -> Int -> [ BlendingFactor ] -> Int
blendModeSourceIndex' bm i (mode:modes) = index
  where index = if bm == mode then i else blendModeSourceIndex' bm (i+1) modes
blendModeSourceIndex' _ _ [] = 0
blendModeFrameBufferIndex :: BlendingFactor -> Int
blendModeFrameBufferIndex bm = blendModeFrameBufferIndex' bm 0 blendFrameBufferModes
blendModeFrameBufferIndex' :: BlendingFactor -> Int -> [ BlendingFactor ] -> Int
blendModeFrameBufferIndex' bm i (mode:modes) = index
  where index = if bm == mode then i else blendModeFrameBufferIndex' bm (i+1) modes
blendModeFrameBufferIndex' _ _ [] = 0
|
function [centres, options, post, errlog] = kmeans(centres, data, options)
%KMEANS Trains a k means cluster model.
%
% Description
% CENTRES = KMEANS(CENTRES, DATA, OPTIONS) uses the batch K-means
% algorithm to set the centres of a cluster model. The matrix DATA
% represents the data which is being clustered, with each row
% corresponding to a vector. The sum of squares error function is used.
% The point at which a local minimum is achieved is returned as
% CENTRES. The error value at that point is returned in OPTIONS(8).
%
% [CENTRES, OPTIONS, POST, ERRLOG] = KMEANS(CENTRES, DATA, OPTIONS)
% also returns the cluster number (in a one-of-N encoding) for each
% data point in POST and a log of the error values after each cycle in
% ERRLOG. The optional parameters have the following
% interpretations.
%
% OPTIONS(1) is set to 1 to display error values; also logs error
% values in the return argument ERRLOG. If OPTIONS(1) is set to 0, then
% only warning messages are displayed. If OPTIONS(1) is -1, then
% nothing is displayed.
%
% OPTIONS(2) is a measure of the absolute precision required for the
% value of CENTRES at the solution. If the absolute difference between
% the values of CENTRES between two successive steps is less than
% OPTIONS(2), then this condition is satisfied.
%
% OPTIONS(3) is a measure of the precision required of the error
% function at the solution. If the absolute difference between the
% error functions between two successive steps is less than OPTIONS(3),
% then this condition is satisfied. Both this and the previous
% condition must be satisfied for termination.
%
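% OPTIONS(5) is set to 1 to initialise the centres from a randomly chosen
% subset of the data points; otherwise the centres passed in are used as the
% starting point.
%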
% OPTIONS(14) is the maximum number of iterations; default 100.
%
% See also
% GMMINIT, GMMEM
%
% Copyright (c) Ian T Nabney (1996-2001)
[ndata, data_dim] = size(data);
[ncentres, dim] = size(centres);
if dim ~= data_dim
error('Data dimension does not match dimension of centres')
end
if (ncentres > ndata)
error('More centres than data')
end
% Sort out the options
if (options(14))
niters = options(14);
else
niters = 100;
end
store = 0;
if (nargout > 3)
store = 1;
errlog = zeros(1, niters);
end
% Check if centres and posteriors need to be initialised from data
if (options(5) == 1)
% Do the initialisation
perm = randperm(ndata);
perm = perm(1:ncentres);
% Assign first ncentres (permuted) data points as centres
centres = data(perm, :);
end
% Matrix to make unit vectors easy to construct
id = eye(ncentres);
% Main loop of algorithm
for n = 1:niters
% Save old centres to check for termination
old_centres = centres;
% Calculate posteriors based on existing centres
d2 = dist2(data, centres);
% Assign each point to nearest centre
[minvals, index] = min(d2', [], 1);
post = id(index,:);
num_points = sum(post, 1);
% Adjust the centres based on new posteriors
for j = 1:ncentres
if (num_points(j) > 0)
centres(j,:) = sum(data(find(post(:,j)),:), 1)/num_points(j);
end
end
% Error value is total squared distance from cluster centres
e = sum(minvals);
if store
errlog(n) = e;
end
if options(1) > 0
fprintf(1, 'Cycle %4d Error %11.6f\n', n, e);
end
if n > 1
% Test for termination
if max(max(abs(centres - old_centres))) < options(2) & ...
abs(old_e - e) < options(3)
options(8) = e;
return;
end
end
old_e = e;
end
% If we get here, then we haven't terminated in the given number of
% iterations.
options(8) = e;
if (options(1) >= 0)
disp(maxitmess);
end
|
@testset "K8sClusterManager" begin
@testset "pods not found" begin
try
K8sClusterManager(1)
catch ex
# Show the original stacktrace if an unexpected error occurred.
ex isa KubeError || rethrow()
@test ex isa KubeError
@test length(Base.catch_stack()) == 1
end
end
end
@testset "TimeoutException" begin
e = K8sClusterManagers.TimeoutException("time out!")
@test sprint(showerror, e) == "TimeoutException: time out!"
end
|
# Solving the Cournot Oligopoly Model by Collocation
**DEMAPP09 Cournot Oligopolist Problem**
<br>
This example is taken from Section 6.8.1, pages 159-162 of:
Miranda, M. J., & Fackler, P. L. (2002). *Applied Computational Economics and Finance*. Cambridge, MA: MIT Press.
<br>
To illustrate the implementation of the collocation method for implicit function problems, consider the case of a Cournot oligopoly. In the standard microeconomic model of the firm, the firm maximizes its profits by equating marginal revenue to marginal cost (MC). An oligopolistic firm, recognizing that its actions affect the price, knows that its marginal revenue is $p + q \frac{dp}{dq}$, where $p$ is the price, $q$ the quantity produced, and $\frac{dp}{dq}$ is the marginal impact of output on the market price. Cournot's assumption is that the firm acts as if changes in its own output will provoke no reaction from its competitors. This implies that:
\begin{equation}
\frac{dp}{dq} = \frac{1}{D'(p)} \tag{1}
\end{equation}
where $D(p)$ is the market demand curve.
<br>
Suppose we want to derive the firm's effective supply function, which specifies the amount $q = S(p)$ that it will supply at each price. The effective supply function of the firm is characterized by the functional equation
\begin{equation}
p + \frac{S(p)}{D'(p)} - MC(S(p)) = 0 \tag{2}
\end{equation}
for every price $p>0$. In simple cases, this function can be found explicitly. However, in more complicated cases, there is no explicit solution. Suppose for example that demand and marginal cost are given by
\begin{equation*}
D(p) = p^{-\eta} \qquad\qquad MC(q) = \alpha\sqrt{q} + q^2
\end{equation*}
Since $D'(p) = -\eta p^{-\eta-1}$, we have $1/D'(p) = -p^{\eta+1}/\eta$, so the functional equation to be solved for $S(p)$ is
\begin{equation} \label{eq:funcional}
\left[p - \frac{S(p)p^{\eta+1}}{\eta}\right] - \left[\alpha\sqrt{S(p)} + S(p)^2\right] = 0 \tag{3}
\end{equation}
## The collocation method
In equation (3), the unknown is the supply *function* $S(p)$, which makes (3) an infinite-dimensional equation. Instead of solving the equation directly, we will approximate its solution using $n$ Chebyshev polynomials $\phi_k(x)$, which are defined recursively for $x \in [-1,1]$ as:
\begin{align*}
\phi_0(x) & = 1 \\
\phi_1(x) & = x \\
\phi_{k+1}(x) & = 2x \phi_k(x) - \phi_{k-1}(x), \qquad \text{for} \; k = 1,2, \dots
\end{align*}
<br>
In addition, instead of requiring that both sides of the equation be exactly equal over the entire domain of $p \in \Re^+$, we will choose $n$ Chebyshev nodes $p_i$ in the interval $[a, b]$:
\begin{equation} \label{eq:chebynodes}
p_i = \frac{a + b}{2} + \frac{b - a}{2}\cos\left(\frac{n-i + 0.5}{n}\pi\right), \qquad\text{for } i = 1,2, \dots, n \tag{4}
\end{equation}
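As an aside (not part of the original demo), the nodes in (4) and the recursion above are easy to reproduce directly with NumPy. The short sketch below is only for intuition; the `BasisChebyshev` class used in the next section handles all of this internally, and the helper names here are made up for illustration.
```python
import numpy as np

def cheb_nodes(n, a, b):
    """Chebyshev collocation nodes on [a, b], equation (4)."""
    i = np.arange(1, n + 1)
    return (a + b) / 2 + (b - a) / 2 * np.cos((n - i + 0.5) / n * np.pi)

def cheb_basis(x, n, a, b):
    """Basis matrix with columns phi_0(x), ..., phi_{n-1}(x), built via the recursion."""
    z = 2 * (x - a) / (b - a) - 1          # map [a, b] onto [-1, 1]
    phi = np.empty((len(x), n))
    phi[:, 0] = 1.0
    if n > 1:
        phi[:, 1] = z
    for k in range(1, n - 1):
        phi[:, k + 1] = 2 * z * phi[:, k] - phi[:, k - 1]
    return phi

nodes = cheb_nodes(25, 0.1, 3.0)        # the 25 nodes used below on [0.1, 3.0]
Phi = cheb_basis(nodes, 25, 0.1, 3.0)   # 25 x 25 interpolation matrix
```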
<br>
Thus, the supply is approximated by
\begin{equation*}
S(p_i) = \sum_{k = 0}^{n-1} c_{k}\phi_k(p_i)
\end{equation*}
Substituting this last expression into (3) at each of the collocation nodes (Chebyshev nodes in this case) results in a nonlinear system of $n$ equations (one for each node) in $n$ unknowns $c_k$ (one for each Chebyshev polynomial), which in principle can be solved by Newton's method, as in the last example. Thus, in practice, the system to be solved is
\begin{equation} \label{eq:collocation}
\left[p_i - \frac{\left(\sum_{k=0}^{n-1}c_{k}\phi_k(p_i)\right)p_i^{\eta+1}}{\eta}\right] - \left[\alpha\sqrt{\sum_{k=0}^{n-1}c_{k}\phi_k(p_i)} + \left(\sum_{k=0}^{n-1}c_{k}\phi_k(p_i)\right)^2\right] = 0 \tag{5}
\end{equation}
for $i=1,2,\dots,n$, where the unknowns are the coefficients $c_k$, $k=0,1,\dots,n-1$.
## Solving the model with Python
To solve this model we start a new Python session:
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from compecon import BasisChebyshev, NLP, nodeunif
from compecon.demos import demo
```
and set the $\alpha$ and $\eta$ parameters
```python
alpha = 1.0
eta = 1.5
```
For convenience, we define a `lambda` function to represent the demand. **Note: a lambda function is a small anonymous function in Python that can take any number of arguments but may contain only one expression. If you are curious to learn more, search for "lambda functions in Python".**
```python
D = lambda p: p ** (-eta)
```
We will approximate the solution for prices in the $p\in [a, b]$ interval, using 25 collocation nodes. The `compecon` library provides the `BasisChebyshev` class to make computations with Chebyshev bases:
```python
n = 25
a = 0.1
b = 3.0
S = BasisChebyshev(n, a, b, labels=['price'], l=['supply'])
```
Let's assume that our first guess is $S(p)=1$. To that end, we set the value of `S` to one at each of the nodes
```python
p = S.nodes
S.y = np.ones_like(p)
```
It is important to highlight that in this problem the unknowns are the $c_k$ coefficients of the Chebyshev basis; however, an object of the `BasisChebyshev` class automatically adjusts those coefficients so that they are consistent with the values we set for the function at the nodes (here indicated by the `.y` property).
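As a quick, hypothetical sanity check (not part of the original demo), we can confirm that the coefficients now reproduce our guess exactly at the nodes:
```python
print(S.c.shape)                           # one coefficient per basis function
print(np.allclose(S(p), np.ones_like(p)))  # True: the basis interpolates the guess at the nodes
```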
<br>
We are now ready to define the objective function, which we will call `resid`. This function takes as its argument a vector with the 25 Chebyshev basis coefficients and returns the left-hand side of the 25 equations defined by (5).
```python
def resid(c):
    S.c = c      # update interpolation coefficients
    q = S(p)     # compute quantity supplied at price nodes
    return p - q * p**(eta + 1) / eta - alpha * np.sqrt(q) - q**2
```
Note that the `resid` function takes a single argument (the coefficients for the Chebyshev basis). All other objects (`S`, `p`, `eta`, `alpha`) must be declared in the main script, where Python will find their values.
<br>
To use Newton's method, it is necessary to compute the Jacobian matrix of the function whose roots we are looking for. In some cases, like the problem we are dealing with here, coding the computation of this Jacobian matrix correctly can be quite cumbersome. The `NLP` class provides, besides Newton's method (which we used in the last example), [Broyden's method](http://www.matthewaaronlooney.com/Optim_Class_Fall2019/Broydens_Method.html), whose main appeal is that it does not require coding the Jacobian matrix (the method itself approximates it). To learn more about Broyden's method, click on the hyperlink above and see Quasi-Newton Methods in section 3.4, pages 39-42 of the text.
```python
cournot = NLP(resid)
S.c = cournot.broyden(S.c, tol=1e-12)
```
After 20 iterations, Broyden's method converges to the desired solution. We can visualize this in Figure 3, which shows the value of the residual function at 501 points in the approximation interval. Notice that the residual crosses the horizontal axis 25 times; this occurs precisely at the collocation nodes (represented by red dots). The figure also shows the precision of the approximation: away from the nodes, the residual stays within roughly $1\times10^{-17}$ of zero.
<br>
One of the advantages of working with the `BasisChebyshev` class is that, once the collocation coefficients have been found, we can evaluate the supply function by calling the `S` object as if it were a Python function. Thus, for example, to find out the quantity supplied by the firm when the price is 1.2, we simply evaluate `print(S(1.2))`, which returns `0.3950`. We use this feature next to compute the effective supply curve when there are 5 identical firms in the market; the result is shown in Figure 2.
##### Figure 2 Supply and demand when there are 5 firms
```python
nFirms = 5
pplot = nodeunif(501, a, b)
demo.figure('Cournot Effective Firm Supply Function',
            'Quantity', 'Price', [0, nFirms], [a, b])
plt.plot(nFirms * S(pplot), pplot, D(pplot), pplot)
plt.legend(('Supply', 'Demand'))
plt.show();
```
##### Figure 3: Approximation residuals for equation (5)
This block generates Figure 3.
```python
p = pplot    # resid evaluates supply at the global p, so point it at the fine grid
demo.figure('Residual Function for Cournot Problem',
            'Quantity', 'Residual')
plt.hlines(0, a, b, 'k', '--', lw=2)
plt.plot(pplot, resid(S.c))
plt.plot(S.nodes, np.zeros_like(S.nodes), 'r*');
plt.show();
```
##### Figure 4: Change in the effective supply as the number of firms increases
We now plot the effective supply for a varying number of firms; the result is shown in Figure 4.
```python
m = np.array([1, 3, 5, 10, 15, 20])
demo.figure('Supply and Demand Functions', 'Quantity', 'Price', [0, 13])
plt.plot(np.outer(S(pplot), m), pplot)
plt.plot(D(pplot), pplot, linewidth=2, color='black')
plt.legend(['m=1', 'm=3', 'm=5', 'm=10', 'm=15', 'm=20', 'demand']);
plt.show();
```
In Figure 4 notice how the equilibrium price and quantity change as the number of firms increases.
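Before building Figure 5, it may help to see what is being computed: for each number of firms $m$, the equilibrium price solves $m\,S(p) = D(p)$. As a hedged alternative to the bisection loop used below (and not part of the original code), a standard root finder such as `scipy.optimize.brentq` gives the answer for a single industry size; the names `m_firms`, `excess`, and `p_star` are illustrative, and we assume `S(p)` evaluates to a scalar-like value for a scalar price, as the `S(1.2)` example above suggests.
```python
from scipy.optimize import brentq

m_firms = 5                                        # illustrative industry size
excess = lambda p: m_firms * float(S(p)) - D(p)    # excess supply at price p
p_star = brentq(excess, a, b)                      # root of m*S(p) - D(p) on [a, b]
print(p_star, D(p_star))                           # equilibrium price and traded quantity
```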
##### Figure 5: Equilibrium price as a function of the number of firms
The last figure in this example (Figure 5) shows the equilibrium price as a function of the number of firms. The price for every industry size is found simultaneously by bisection on the excess-supply condition $m\,S(p) - D(p) = 0$.
```python
pp = (b + a) / 2                  # start at the midpoint of the price interval
dp = (b - a) / 2                  # half-width; halved at every bisection step
m = np.arange(1, 26)              # industry sizes 1..25
for i in range(50):               # bisection on the excess supply m*S(p) - D(p)
    dp /= 2
    pp = pp - np.sign(S(pp) * m - D(pp)) * dp
demo.figure('Cournot Equilibrium Price as Function of Industry Size',
            'Number of Firms', 'Price')
plt.bar(m, pp);
plt.show();
```
# References
Miranda, M. J., & Fackler, P. L. (2002). *Applied Computational Economics and Finance*. Cambridge, MA: MIT Press.
Miranda, M. J., Fackler, P. L., & Romero-Aguilar, R. (2002, 2016). CompEcon Toolbox (MATLAB).
|
(** * Decide: Programming with Decision Procedures *)
Set Warnings "-notation-overridden,-parsing".
From VFA Require Import Perm.
(* ################################################################# *)
(** * Using [reflect] to characterize decision procedures *)
(** Thus far in _Verified Functional Algorithms_ we have been using
- propositions ([Prop]) such as [a<b] (which is Notation for [lt a b])
- booleans ([bool]) such as [a<?b] (which is Notation for [ltb a b]). *)
Check Nat.lt. (* : nat -> nat -> Prop *)
Check Nat.ltb. (* : nat -> nat -> bool *)
(** The [Perm] chapter defined a tactic called [bdestruct] that
does case analysis on (x <? y) while giving you hypotheses (above
the line) of the form (x<y). This tactic is built using the [reflect]
type and the [blt_reflect] theorem. *)
Print reflect.
(* Inductive reflect (P : Prop) : bool -> Set :=
| ReflectT : P -> reflect P true
| ReflectF : ~ P -> reflect P false *)
Check blt_reflect. (* : forall x y, reflect (x<y) (x <? y) *)
(** The name [reflect] for this type is a reference to _computational
reflection_, a technique in logic. One takes a logical formula, or
proposition, or predicate, and designs a syntactic embedding of
this formula as an "object value" in the logic. That is, _reflect_ the
formula back into the logic. Then one can design computations
expressible inside the logic that manipulate these syntactic object
values. Finally, one proves that the computations make transformations
that are equivalent to derivations (or equivalences) in the logic.
The first use of computational reflection was by Goedel, in 1931:
his syntactic embedding encoded formulas as natural numbers, a
"Goedel numbering." The second and third uses of reflection were
by Church and Turing, in 1936: they encoded (respectively)
lambda-expressions and Turing machines.
In Coq it is easy to do reflection, because the Calculus of Inductive
Constructions (CiC) has Inductive data types that can easily encode
syntax trees. We could, for example, take some of our propositional
operators such as [and], [or], and make an [Inductive] type that is an
encoding of these, and build a computational reasoning system for
boolean satisfiability.
But in this chapter I will show something much simpler. When
reasoning about less-than comparisons on natural numbers, we have
the advantage that [nat] is already an inductive type; it is "pre-reflected,"
in some sense. (The same for [Z], [list], [bool], etc.) *)
(** Now, let's examine how [reflect] expresses the coherence between
[lt] and [ltb]. Suppose we have a value [v] whose type is
[reflect (3<7) (3<?7)]. What is [v]? Either it is
- ReflectT [P] (3<?7), where [P] is a proof of [3<7], and [3<?7] is [true], or
- ReflectF [Q] (3<?7), where [Q] is a proof of [~(3<7)], and [3<?7] is [false].
In the case of [3,7], we are well advised to use [ReflectT], because
(3<?7) cannot match the [false] required by [ReflectF]. *)
Goal (3<?7 = true). Proof. reflexivity. Qed.
(** So [v] cannot be [ReflectF Q (3<?7)] for any [Q], because that would
not type-check. Now, the next question: must there exist a value
of type [reflect (3<7) (3<?7)] ? The answer is yes; that is the
[blt_reflect] theorem. The result of [Check blt_reflect], above, says that
for any [x,y], there does exist a value (blt_reflect x y) whose type
is exactly [reflect (x<y)(x<?y)]. So let's look at that value! That is,
examine what [H], and [P], and [Q] are equal to at "Case 1" and "Case 2": *)
Theorem three_less_seven_1: 3<7.
Proof.
assert (H := blt_reflect 3 7).
remember (3<?7) as b.
destruct H as [P|Q] eqn:?.
* (* Case 1: H = ReflectT (3<7) P *)
apply P.
* (* Case 2: H = ReflectF (3<7) Q *)
compute in Heqb.
inversion Heqb.
Qed.
(** Here is another proof that uses [inversion] instead of [destruct].
The [ReflectF] case is eliminated automatically by [inversion]
because [3<?7] does not match [false]. *)
Theorem three_less_seven_2: 3<7.
Proof.
assert (H := blt_reflect 3 7).
inversion H as [P|Q].
apply P.
Qed.
(** The [reflect] inductive data type is a way of relating a _decision
procedure_ (a function from X to [bool]) with a predicate (a function
from X to [Prop]). The convenience of [reflect], in the verification
of functional programs, is that we can do [destruct (blt_reflect a b)],
which relates [a<?b] (in the program) to the [a<b] (in the proof).
That's just how the [bdestruct] tactic works; you can go back
to [Perm.v] and examine how it is implemented in the [Ltac]
tactic-definition language. *)
(* ################################################################# *)
(** * Using [sumbool] to Characterize Decision Procedures *)
Module ScratchPad.
(** An alternate way to characterize decision procedures,
widely used in Coq, is via the inductive type [sumbool].
Suppose [Q] is a proposition, that is, [Q: Prop]. We say [Q] is
_decidable_ if there is an algorithm for computing a proof of
[Q] or [~Q]. More generally, when [P] is a predicate (a function
from some type [T] to [Prop]), we say [P] is decidable when
[forall x:T, decidable (P x)].
We represent this concept in Coq by an inductive datatype: *)
Inductive sumbool (A B : Prop) : Set :=
| left : A -> sumbool A B
| right : B -> sumbool A B.
(** Let's consider [sumbool] applied to two propositions: *)
Definition t1 := sumbool (3<7) (3>2).
Lemma less37: 3<7. Proof. omega. Qed.
Lemma greater23: 3>2. Proof. omega. Qed.
Definition v1a: t1 := left (3<7) (3>2) less37.
Definition v1b: t1 := right (3<7) (3>2) greater23.
(** A value of type [sumbool (3<7) (3>2)] is either one of:
- [left] applied to a proof of (3<7), or
- [right] applied to a proof of (3>2). *)
(** Now let's consider: *)
Definition t2 := sumbool (3<7) (2>3).
Definition v2a: t2 := left (3<7) (2>3) less37.
(** A value of type [sumbool (3<7) (2>3)] is either one of:
- [left] applied to a proof of (3<7), or
- [right] applied to a proof of (2>3).
But since there are no proofs of 2>3, only [left] values (such as [v2a])
exist. That's OK. *)
(** [sumbool] is in the Coq standard library, where there is [Notation]
for it: the expression [ {A}+{B} ] means [sumbool A B]. *)
Notation "{ A } + { B }" := (sumbool A B) : type_scope.
(** A very common use of [sumbool] is on a proposition and its negation.
For example, *)
Definition t4 := forall a b, {a<b}+{~(a<b)}.
(** That expression, [forall a b, {a<b}+{~(a<b)}], says that for any
natural numbers [a] and [b], either [a<b] or [a>=b]. But it is _more_
than that! Because [sumbool] is an Inductive type with two constructors
[left] and [right], then given the [{3<7}+{~(3<7)}] you can pattern-match
on it and learn _constructively_ which thing is true. *)
Definition v3: {3<7}+{~(3<7)} := left _ _ less37.
Definition is_3_less_7: bool :=
match v3 with
| left _ _ _ => true
| right _ _ _ => false
end.
Eval compute in is_3_less_7. (* = true : bool *)
Print t4. (* = forall a b : nat, {a < b} + {~ a < b} *)
(** Suppose there existed a value [lt_dec] of type [t4]. That would be a
_decision procedure_ for the less-than function on natural numbers.
For any nats [a] and [b], you could calculate [lt_dec a b], which would
be either [left ...] (if [a<b] was provable) or [right ...] (if [~(a<b)] was
provable).
Let's go ahead and implement [lt_dec]. We can base it on the function
[ltb: nat -> nat -> bool] which calculates whether [a] is less than [b],
as a boolean. We already have a theorem that this function on booleans
is related to the proposition [a<b]; that theorem is called [blt_reflect]. *)
Check blt_reflect. (* : forall x y, reflect (x<y) (x<?y) *)
(** It's not too hard to use [blt_reflect] to define [lt_dec] *)
Definition lt_dec (a: nat) (b: nat) : {a<b}+{~(a<b)} :=
match blt_reflect a b with
| ReflectT _ P => left (a < b) (~ a < b) P
| ReflectF _ Q => right (a < b) (~ a < b) Q
end.
(** Another, equivalent way to define [lt_dec] is to use
definition-by-tactic: *)
Definition lt_dec' (a: nat) (b: nat) : {a<b}+{~(a<b)}.
destruct (blt_reflect a b) as [P|Q]. left. apply P. right. apply Q.
Defined.
Print lt_dec.
Print lt_dec'.
Theorem lt_dec_equivalent: forall a b, lt_dec a b = lt_dec' a b.
Proof.
intros.
unfold lt_dec, lt_dec'.
reflexivity.
Qed.
(** Warning: these definitions of [lt_dec] are not as nice as the
definition in the Coq standard library, because these are not
fully computable. See the discussion below. *)
End ScratchPad.
(* ================================================================= *)
(** ** [sumbool] in the Coq Standard Library *)
Module ScratchPad2.
Locate sumbool. (* Coq.Init.Specif.sumbool *)
Print sumbool.
(** The output of [Print sumbool] explains that the first two arguments
of [left] and [right] are implicit. We use them as follows (notice that
[left] has only one explicit argument, [P]): *)
Definition lt_dec (a: nat) (b: nat) : {a<b}+{~(a<b)} :=
match blt_reflect a b with
| ReflectT _ P => left P
| ReflectF _ Q => right Q
end.
Definition le_dec (a: nat) (b: nat) : {a<=b}+{~(a<=b)} :=
match ble_reflect a b with
| ReflectT _ P => left P
| ReflectF _ Q => right Q
end.
(** Now, let's use [le_dec] directly in the implementation of insertion
sort, without mentioning [ltb] at all. *)
Fixpoint insert (x:nat) (l: list nat) :=
match l with
| nil => x::nil
| h::t => if le_dec x h then x::h::t else h :: insert x t
end.
Fixpoint sort (l: list nat) : list nat :=
match l with
| nil => nil
| h::t => insert h (sort t)
end.
Inductive sorted: list nat -> Prop :=
| sorted_nil:
sorted nil
| sorted_1: forall x,
sorted (x::nil)
| sorted_cons: forall x y l,
x <= y -> sorted (y::l) -> sorted (x::y::l).
(** **** Exercise: 2 stars (insert_sorted_le_dec) *)
Lemma insert_sorted:
forall a l, sorted l -> sorted (insert a l).
Proof.
intros a l H.
induction H.
- constructor.
- unfold insert.
destruct (le_dec a x) as [ Hle | Hgt].
(** Look at the proof state now. In the first subgoal, we have
above the line, [Hle: a <= x]. In the second subgoal, we have
[Hgt: ~ (a < x)]. These are put there automatically by the
[destruct (le_dec a x)]. Now, the rest of the proof can proceed
as it did in [Sort.v], but using [destruct (le_dec _ _)] instead of
[bdestruct (_ <=? _)]. *)
(* FILL IN HERE *) Admitted.
(** [] *)
(* ################################################################# *)
(** * Decidability and Computability *)
(** Before studying the rest of this chapter, it is helpful to study the
[ProofObjects] chapter of _Software Foundations volume 1_ if you
have not done so already.
A predicate [P: T->Prop] is _decidable_ if there is a computable
function [f: T->bool] such that, forall [x:T], [f x = true <-> P x].
The second and most famous example of an _undecidable_ predicate
is the Halting Problem (Turing, 1936): [T] is the type of Turing-machine
descriptions, and [P(x)] is, Turing machine [x] halts. The first, and not
as famous, example is due to Church, 1936 (six months earlier): test
whether a lambda-expression has a normal form. In 1936-37, as a
first-year PhD student before beginning his PhD thesis work, Turing
proved these two problems are equivalent.
Classical logic contains the axiom [forall P, P \/ ~P]. This is not provable
in core Coq, that is, in the bare Calculus of Inductive Constructions. But
its negation is not provable either. You could add this axiom to Coq
and the system would still be consistent (i.e., no way to prove [False]).
But [P \/ ~P] is a weaker statement than [ {P}+{~P} ], that is,
[sumbool P (~P)]. From [ {P}+{~P} ] you can actually _calculate_ or
[compute] either [left (x:P)] or [right(y: ~P)]. From [P \/ ~P] you cannot
[compute] whether [P] is true. Yes, you can [destruct] it in a proof,
but not in a calculation.
For most purposes it's unnecessary to add the axiom [P \/ ~P] to Coq,
because for specific predicates there's a specific way to prove [P \/ ~P]
as a theorem. For example, less-than on natural numbers is decidable,
and the existence of [blt_reflect] or [lt_dec] (as a theorem, not as an axiom)
is a demonstration of that.
Furthermore, in this "book" we are interested in _algorithms_. An axiom
[P \/ ~P] does not give us an algorithm to compute whether P is true. As
you saw in the definition of [insert] above, we can use [lt_dec] not only as
a theorem that either [3<7] or [~(3<7)] holds, but also as a function to
compute whether [3<7].  In Coq, you can't compute with axioms!
Let's try it: *)
Axiom lt_dec_axiom_1: forall i j: nat, i<j \/ ~(i<j).
(** Now, can we use this axiom to compute with? *)
(* Uncomment and try this:
Definition max (i j: nat) : nat :=
if lt_dec_axiom_1 i j then j else i.
*)
(** That doesn't work, because an [if] statement requires an [Inductive]
data type with exactly two constructors; but [lt_dec_axiom_1 i j] has
type [i<j \/ ~(i<j)], which is not Inductive. But let's try a different axiom: *)
Axiom lt_dec_axiom_2: forall i j: nat, {i<j} + {~(i<j)}.
Definition max_with_axiom (i j: nat) : nat :=
if lt_dec_axiom_2 i j then j else i.
(** This typechecks, because [lt_dec_axiom_2 i j] belongs to type
[sumbool (i<j) (~(i<j))] (also written [ {i<j} + {~(i<j)} ]), which does have
two constructors.
Now, let's use this function: *)
Eval compute in max_with_axiom 3 7.
(* = if lt_dec_axiom_2 3 7 then 7 else 3
: nat *)
(** This [compute] didn't compute very much! Let's try to evaluate it
using [unfold]: *)
Lemma prove_with_max_axiom: max_with_axiom 3 7 = 7.
Proof.
unfold max_with_axiom.
try reflexivity. (* does not do anything, reflexivity fails *)
(* uncomment this line and try it:
unfold lt_dec_axiom_2.
*)
destruct (lt_dec_axiom_2 3 7).
reflexivity.
contradiction n. omega.
Qed.
(** It is dangerous to add Axioms to Coq: if you add one that's inconsistent,
then it leads to the ability to prove [False]. While that's a convenient way
to get a lot of things proved, it's unsound; the proofs are useless.
The Axioms above, [lt_dec_axiom_1] and [lt_dec_axiom_2], are safe enough:
they are consistent. But they don't help in computation. Axioms are not
useful here. *)
End ScratchPad2.
(* ################################################################# *)
(** * Opacity of [Qed] *)
(** This lemma [prove_with_max_axiom] turned out to be _provable_, but the proof
could not go by _computation_. In contrast, let's use [lt_dec], which was built
without any axioms: *)
Lemma compute_with_lt_dec: (if ScratchPad2.lt_dec 3 7 then 7 else 3) = 7.
Proof.
compute.
(* uncomment this line and try it:
unfold blt_reflect.
*)
Abort.
(** Unfortunately, even though [blt_reflect] was proved without any axioms, it
is an _opaque theorem_ (proved with [Qed] instead of with [Defined]), and
one cannot compute with opaque theorems. Not only that, but it is proved with
other opaque theorems such as [iff_sym] and [Nat.ltb_lt]. If we want to
compute with an implementation of [lt_dec] built from [blt_reflect], then
we will have to rebuild [blt_reflect] without using [Qed] anywhere, only [Defined].
Instead, let's use the version of [lt_dec] from the Coq standard library,
which _is_ carefully built without any opaque ([Qed]) theorems.
*)
Lemma compute_with_StdLib_lt_dec: (if lt_dec 3 7 then 7 else 3) = 7.
Proof.
compute.
reflexivity.
Qed.
(** The Coq standard library has many decidability theorems. You can
examine them by doing the following [Search] command. The results
shown here are only for the subset of the library that's currently
imported (by the [Import] commands above); there's even more out there. *)
Search ({_}+{~_}).
(*
reflect_dec: forall (P : Prop) (b : bool), reflect P b -> {P} + {~ P}
lt_dec: forall n m : nat, {n < m} + {~ n < m}
list_eq_dec:
forall A : Type,
(forall x y : A, {x = y} + {x <> y}) ->
forall l l' : list A, {l = l'} + {l <> l'}
le_dec: forall n m : nat, {n <= m} + {~ n <= m}
in_dec:
forall A : Type,
(forall x y : A, {x = y} + {x <> y}) ->
forall (a : A) (l : list A), {In a l} + {~ In a l}
gt_dec: forall n m : nat, {n > m} + {~ n > m}
ge_dec: forall n m : nat, {n >= m} + {~ n >= m}
eq_nat_decide: forall n m : nat, {eq_nat n m} + {~ eq_nat n m}
eq_nat_dec: forall n m : nat, {n = m} + {n <> m}
bool_dec: forall b1 b2 : bool, {b1 = b2} + {b1 <> b2}
Zodd_dec: forall n : Z, {Zodd n} + {~ Zodd n}
Zeven_dec: forall n : Z, {Zeven n} + {~ Zeven n}
Z_zerop: forall x : Z, {x = 0%Z} + {x <> 0%Z}
Z_lt_dec: forall x y : Z, {(x < y)%Z} + {~ (x < y)%Z}
Z_le_dec: forall x y : Z, {(x <= y)%Z} + {~ (x <= y)%Z}
Z_gt_dec: forall x y : Z, {(x > y)%Z} + {~ (x > y)%Z}
Z_ge_dec: forall x y : Z, {(x >= y)%Z} + {~ (x >= y)%Z}
*)
(** The type of [list_eq_dec] is worth looking at. It says that if you
have a decidable equality for an element type [A], then
[list_eq_dec] calculates for you a decidable equality for type [list A].
Try it out: *)
Definition list_nat_eq_dec:
(forall al bl : list nat, {al=bl}+{al<>bl}) :=
list_eq_dec eq_nat_dec.
Eval compute in if list_nat_eq_dec [1;3;4] [1;4;3] then true else false.
(* = false : bool *)
Eval compute in if list_nat_eq_dec [1;3;4] [1;3;4] then true else false.
(* = true : bool *)
(** **** Exercise: 2 stars (list_nat_in) *)
(** Use [in_dec] to build this function. *)
Definition list_nat_in: forall (i: nat) (al: list nat), {In i al}+{~ In i al}
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Example in_4_pi: (if list_nat_in 4 [3;1;4;1;5;9;2;6] then true else false) = true.
Proof.
simpl.
(* reflexivity. *)
(* FILL IN HERE *) Admitted.
(** [] *)
(** In general, beyond [list_eq_dec] and [in_dec], one can construct a
whole programmable calculus of decidability, using the
programs-as-proof language of Coq. But is it a good idea? Read on! *)
(* ################################################################# *)
(** * Advantages and Disadvantages of [reflect] Versus [sumbool] *)
(** I have shown two ways to program decision procedures in Coq,
one using [reflect] and the other using [{_}+{~_}], i.e., [sumbool].
- With [sumbool], you define _two_ things: the operator in [Prop]
such as [lt: nat -> nat -> Prop] and the decidability "theorem"
in [sumbool], such as [lt_dec: forall i j, {lt i j}+{~ lt i j}]. I say
"theorem" in quotes because it's not _just_ a theorem, it's also
a (nonopaque) computable function.
- With [reflect], you define _three_ things: the operator in [Prop],
the operator in [bool] (such as [ltb: nat -> nat -> bool]), and the
theorem that relates them (such as [blt_reflect]).
Defining three things seems like more work than defining two.
But it may be easier and more efficient. Programming in [bool],
you may have more control over how your functions are implemented,
you will have fewer difficult uses of dependent types, and you
will run into fewer difficulties with opaque theorems.
However, among Coq programmers, [sumbool] seems to be more
widely used, and it seems to have better support in the Coq standard
library. So you may encounter it, and it is worth understanding what
it does. Either of these two methods is a reasonable way of programming
with proof. *)
|
Prolonged skin contact with antimony dust may cause dermatitis. However, it was agreed at the European Union level that the skin rashes observed are not substance-specific, but most probably due to a physical blocking of sweat ducts (ECHA/PR/09/09, Helsinki, 6 July 2009). Antimony dust may also be explosive when dispersed in the air; when in a bulk solid it is not combustible.
|