module Data.Crypto.Encryption.Block.EncryptionMode
import Data.Bits
import Data.Crypto.Encryption
import Data.Crypto.Encryption.Block
import Data.Crypto.Encryption.Stream
import Data.Crypto.Util
%default total
%access public export
data ElectronicCookbook : Nat -> Type where
ECB : ElectronicCookbook n
-- This is ECB (Electronic Cookbook) - no initialization vector
-- ECB should be considered insecure regardless of the cipher used
implementation EncryptionMode ElectronicCookbook where
encryptBlocks key _ blocks = map (encryptBlock key) blocks
decryptBlocks key _ blocks = map (decryptBlock key) blocks
data CipherBlockChainingMode : Nat -> Type where
CBC : Bits n -> CipherBlockChainingMode n
implementation EncryptionMode CipherBlockChainingMode where
encryptBlocks _ _ [] = []
encryptBlocks key (CBC iv) (plain::rest) =
let ciph = encryptBlock key (plain `xor` iv)
in ciph :: encryptBlocks key (CBC ciph) rest
decryptBlocks _ _ [] = []
decryptBlocks key (CBC iv) (ciph::rest) =
(decryptBlock key ciph `xor` iv) :: decryptBlocks key (CBC ciph) rest
data PropagatingCipherBlockChainingMode : Nat -> Type where
PCBC : Bits n -> PropagatingCipherBlockChainingMode n
implementation EncryptionMode PropagatingCipherBlockChainingMode where
encryptBlocks _ _ [] = []
encryptBlocks key (PCBC iv) (plain::rest) =
let ciph = encryptBlock key (plain `xor` iv)
in ciph :: encryptBlocks key (PCBC (plain `xor` ciph)) rest
decryptBlocks _ _ [] = []
decryptBlocks key (PCBC iv) (ciph::rest) =
let plain = decryptBlock key ciph `xor` iv
in plain :: decryptBlocks key (PCBC (plain `xor` ciph)) rest
data CipherFeedbackMode : Nat -> Type where
CFB : Bits n -> CipherFeedbackMode n
implementation EncryptionMode CipherFeedbackMode where
encryptBlocks _ _ [] = []
encryptBlocks key (CFB iv) (plain::rest) =
let ciph = encryptBlock key iv `xor` plain
    in ciph :: encryptBlocks key (CFB ciph) rest
decryptBlocks _ _ [] = []
  decryptBlocks key (CFB iv) (ciph::rest) =
    -- CFB also uses the cipher's forward (encryption) function when decrypting
    (encryptBlock key iv `xor` ciph) :: decryptBlocks key (CFB ciph) rest
data OutputFeedbackMode : Nat -> Type where
OFB : Bits n -> OutputFeedbackMode n
implementation EncryptionMode OutputFeedbackMode where
encryptBlocks _ _ [] = []
encryptBlocks key (OFB iv) (plain::rest) =
let newIV = encryptBlock key iv
in (plain `xor` newIV) :: encryptBlocks key (OFB newIV) rest
decryptBlocks _ _ [] = []
decryptBlocks key (OFB iv) (ciph::rest) =
    let newIV = encryptBlock key iv -- keystream must match the one used for encryption
in (ciph `xor` newIV) :: decryptBlocks key (OFB newIV) rest
||| `OutputFeedbackMode` allows any `BlockCipher` to be treated as a
||| `StreamCipher`.
implementation BlockCipher b bitsPerBlock _ => StreamCipher (b, OutputFeedbackMode bitsPerBlock) bitsPerBlock where
generateKeystream (b, OFB iv) =
let newIV = encryptBlock b iv
in newIV :: (generateKeystream (b, OFB newIV))
||| Counter mode takes a nonce, an initial “counter” value, and a function to
||| get the next counter value.
data CounterMode : Nat -> Type where
CTR : Bits m -> Bits n -> (Bits n -> Bits n) -> CounterMode (m + n)
||| The most common counter mode starts at 0 and moves sequentially through the
||| natural numbers.
incrementalCTR : Bits m -> CounterMode (m + n)
incrementalCTR m = CTR m (intToBits 0) (plus (intToBits 1))
implementation EncryptionMode CounterMode where
encryptBlocks _ _ [] = []
encryptBlocks key (CTR nonce counter f) (plain::rest) =
(plain `xor` encryptBlock key (append nonce counter))
:: encryptBlocks key (CTR nonce (f counter) f) rest
decryptBlocks _ _ [] = []
decryptBlocks key (CTR nonce counter f) (ciph::rest) =
    (ciph `xor` encryptBlock key (append nonce counter))
:: decryptBlocks key (CTR nonce (f counter) f) rest
||| `CounterMode` allows any `BlockCipher` to be treated as a `StreamCipher`.
implementation BlockCipher b bitsPerBlock _ => StreamCipher (b, CounterMode bitsPerBlock) bitsPerBlock where
generateKeystream (b, CTR nonce counter f) =
encryptBlock b (append nonce counter)
:: generateKeystream (b, CTR nonce (f counter) f)
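The counter-mode instances above are easiest to follow with a concrete keystream walk-through. The following is a minimal, illustrative Python sketch of the same idea (it is not this library's API, and `toy_block_encrypt` is a hypothetical stand-in for a real block cipher): encrypt `nonce ++ counter` to obtain keystream blocks, XOR them with the data, and note that the identical operation decrypts.

```python
# Illustrative only: CTR mode turns a block "cipher" into a stream cipher by
# encrypting nonce||counter. The "cipher" here is a toy keyed PRF, NOT secure.
import hashlib

def toy_block_encrypt(key: bytes, block: bytes) -> bytes:
    # Stand-in for encryptBlock: a fixed-size pseudorandom output per block.
    return hashlib.sha256(key + block).digest()[:16]

def ctr_xor(key: bytes, nonce: bytes, data: bytes) -> bytes:
    nblocks = -(-len(data) // 16)  # ceiling division
    keystream = b"".join(
        toy_block_encrypt(key, nonce + counter.to_bytes(8, "big"))
        for counter in range(nblocks)
    )
    return bytes(a ^ b for a, b in zip(data, keystream))

msg = b"counter mode is its own inverse"
ct = ctr_xor(b"k" * 16, b"nonce123", msg)
assert ctr_xor(b"k" * 16, b"nonce123", ct) == msg  # same call decrypts
```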
|
(* Title: A Definitional Encoding of TLA in Isabelle/HOL
Authors: Gudmund Grov <ggrov at inf.ed.ac.uk>
Stephan Merz <Stephan.Merz at loria.fr>
Year: 2011
Maintainer: Gudmund Grov <ggrov at inf.ed.ac.uk>
*)
section \<open>Representing state in TLA*\<close>
theory State
imports Liveness
begin
text\<open>
We adopt the hidden state approach, as used in the existing
Isabelle/HOL TLA embedding \<^cite>\<open>"Merz98"\<close>. This approach is also used
in \<^cite>\<open>"Ehmety01"\<close>.
Here, a state space is defined by its projections, and everything else is
unknown. Thus, a variable is a projection of the state space, and has the same
type as a state function. Moreover, strong typing is achieved, since the projection
function may have any result type. To achieve this, the state space is represented
by an undefined type, which is an instance of the \<open>world\<close> class to enable
use with the \<open>Intensional\<close> theory.
\<close>
typedecl state
instance state :: world ..
type_synonym 'a statefun = "(state,'a) stfun"
type_synonym statepred = "bool statefun"
type_synonym 'a tempfun = "(state,'a) formfun"
type_synonym temporal = "state formula"
text \<open>
Formalizing type state would require formulas to be tagged with
their underlying state space and would result in a system that is
much harder to use. (Unlike Hoare logic or Unity, TLA has quantification
over state variables, and therefore one usually works with different
state spaces within a single specification.) Instead, state is just
an anonymous type whose only purpose is to provide Skolem constants.
Moreover, we do not define a type of state variables separate from that
of arbitrary state functions, again in order to simplify the definition
of flexible quantification later on. Nevertheless, we need to distinguish
state variables, mainly to define the enabledness of actions. The user
identifies (tuples of) ``base'' state variables in a specification via the
``meta predicate'' \<open>basevars\<close>, which is defined here.
\<close>
definition stvars :: "'a statefun \<Rightarrow> bool"
where basevars_def: "stvars \<equiv> surj"
syntax
"PRED" :: "lift \<Rightarrow> 'a" ("PRED _")
"_stvars" :: "lift \<Rightarrow> bool" ("basevars _")
translations
"PRED P" \<rightharpoonup> "(P::state => _)"
"_stvars" \<rightleftharpoons> "CONST stvars"
text \<open>
Base variables may be assigned arbitrary (type-correct) values.
In the following lemma, note that \<open>vs\<close> may be a tuple of variables.
The correct identification of base variables is up to the user who must
take care not to introduce an inconsistency. For example, @{term "basevars (x,x)"}
would definitely be inconsistent.
\<close>
lemma basevars: "basevars vs \<Longrightarrow> \<exists>u. vs u = c"
proof (unfold basevars_def surj_def)
assume "\<forall>y. \<exists>x. y = vs x"
then obtain x where "c = vs x" by blast
thus "\<exists>u. vs u = c" by blast
qed
lemma baseE:
assumes H1: "basevars v" and H2:"\<And>x. v x = c \<Longrightarrow> Q"
shows "Q"
using H1[THEN basevars] H2 by auto
text \<open>A variant written for sequences rather than single states.\<close>
lemma first_baseE:
assumes H1: "basevars v" and H2: "\<And>x. v (first x) = c \<Longrightarrow> Q"
shows "Q"
using H1[THEN basevars] H2 by (force simp: first_def)
lemma base_pair1:
assumes h: "basevars (x,y)"
shows "basevars x"
proof (auto simp: basevars_def)
fix c
from h[THEN basevars] obtain s where "(LIFT (x,y)) s = (c, arbitrary)" by auto
thus "c \<in> range x" by auto
qed
lemma base_pair2:
assumes h: "basevars (x,y)"
shows "basevars y"
proof (auto simp: basevars_def)
fix d
from h[THEN basevars] obtain s where "(LIFT (x,y)) s = (arbitrary, d)" by auto
thus "d \<in> range y" by auto
qed
lemma base_pair: "basevars (x,y) \<Longrightarrow> basevars x \<and> basevars y"
by (auto elim: base_pair1 base_pair2)
text \<open>
Since the @{typ unit} type has just one value, any state function of unit type
satisfies the predicate \<open>basevars\<close>. The following theorem can sometimes
be useful because it gives a trivial solution for \<open>basevars\<close> premises.
\<close>
lemma unit_base: "basevars (v::state \<Rightarrow> unit)"
by (auto simp: basevars_def)
text \<open>
A pair of the form \<open>(x,x)\<close> will generally not satisfy the predicate
\<open>basevars\<close> -- except for pathological cases such as \<open>x::unit\<close>.
\<close>
lemma
fixes x :: "state \<Rightarrow> bool"
assumes h1: "basevars (x,x)"
shows "False"
proof -
from h1 have "\<exists>u. (LIFT (x,x)) u = (False,True)" by (rule basevars)
thus False by auto
qed
lemma
fixes x :: "state \<Rightarrow> nat"
assumes h1: "basevars (x,x)"
shows "False"
proof -
from h1 have "\<exists>u. (LIFT (x,x)) u = (0,1)" by (rule basevars)
thus False by auto
qed
text \<open>
The following theorem reduces the reasoning about the existence of a
state sequence satisfying an enabledness predicate to finding a suitable
value \<open>c\<close> at the successor state for the base variables of the
specification. This rule is intended for reasoning about standard TLA
specifications, where \<open>Enabled\<close> is applied to actions, not arbitrary
pre-formulas.
\<close>
lemma base_enabled:
assumes h1: "basevars vs"
and h2: "\<And>u. vs (first u) = c \<Longrightarrow> ((first s) ## u) \<Turnstile> F"
shows "s \<Turnstile> Enabled F"
using h1 proof (rule first_baseE)
fix t
assume "vs (first t) = c"
hence "((first s) ## t) \<Turnstile> F" by (rule h2)
thus "s \<Turnstile> Enabled F" unfolding enabled_def by blast
qed
subsection "Temporal Quantifiers"
text\<open>
In \<^cite>\<open>"Lamport94"\<close>, Lamport gives a stuttering invariant definition
of quantification over (flexible) variables. It relies on similarity
of two sequences (as supported in our @{theory TLA.Sequence} theory), and
equivalence of two sequences up to a variable (the bound variable).
However, sequence equivalence up to a variable requires state
equivalence up to a variable. Our state representation above does not
support this, hence we cannot encode Lamport's definition in our theory.
Thus, we need to axiomatise quantification over (flexible) variables.
Note that with a state representation supporting this, our theory should
allow such an encoding.
\<close>
consts
EEx :: "('a statefun \<Rightarrow> temporal) \<Rightarrow> temporal" (binder "Eex " 10)
AAll :: "('a statefun \<Rightarrow> temporal) \<Rightarrow> temporal" (binder "Aall " 10)
syntax
"_EEx" :: "[idts, lift] => lift" ("(3\<exists>\<exists> _./ _)" [0,10] 10)
"_AAll" :: "[idts, lift] => lift" ("(3\<forall>\<forall> _./ _)" [0,10] 10)
translations
"_EEx v A" == "Eex v. A"
"_AAll v A" == "Aall v. A"
axiomatization where
eexI: "\<turnstile> F x \<longrightarrow> (\<exists>\<exists> x. F x)"
and eexE: "\<lbrakk>s \<Turnstile> (\<exists>\<exists> x. F x) ; basevars vs; (!! x. \<lbrakk> basevars (x,vs); s \<Turnstile> F x \<rbrakk> \<Longrightarrow> s \<Turnstile> G)\<rbrakk>
\<Longrightarrow> (s \<Turnstile> G)"
and all_def: "\<turnstile> (\<forall>\<forall> x. F x) = (\<not>(\<exists>\<exists> x. \<not>(F x)))"
and eexSTUT: "STUTINV F x \<Longrightarrow> STUTINV (\<exists>\<exists> x. F x)"
and history: "\<turnstile> (I \<and> \<box>[A]_v) = (\<exists>\<exists> h. ($h = ha) \<and> I \<and> \<box>[A \<and> h$=hb]_(h,v))"
lemmas eexI_unl = eexI[unlift_rule] \<comment> \<open>@{text "w \<Turnstile> F x \<Longrightarrow> w \<Turnstile> (\<exists>\<exists> x. F x)"}\<close>
text \<open>
\<open>tla_defs\<close> can be used to unfold TLA definitions into lowest predicate level.
This is particularly useful for reasoning about enabledness of formulas.
\<close>
lemmas tla_defs = unch_def before_def after_def first_def second_def suffix_def
tail_def nexts_def app_def angle_actrans_def actrans_def
end
|
#ifndef RNN_TYPE_HPP
#define RNN_TYPE_HPP
#include <cstddef>
#include <limits>
#include <Eigen/Core>
namespace rnn::generic {
// Select the scalar type once; defining USE_FLOAT switches to single precision.
#ifdef USE_FLOAT
using real = float;
typedef Eigen::MatrixXf MatD;
typedef Eigen::VectorXf VecD;
#else
using real = double;
typedef Eigen::MatrixXd MatD;
typedef Eigen::VectorXd VecD;
#endif
// Fixed-size aliases (Eigen::Vector requires Eigen >= 3.4).
template<std::size_t row_dim_t_, std::size_t col_dim_t_>
using rnn_matrix = Eigen::Matrix<real, row_dim_t_, col_dim_t_>;
template<std::size_t dim_t_>
using rnn_square_matrix = rnn_matrix<dim_t_, dim_t_>;
template<std::size_t dim_t_>
using rnn_vector = Eigen::Vector<real, dim_t_>;
typedef Eigen::MatrixXi MatI;
typedef Eigen::VectorXi VecI;
#define REAL_MAX std::numeric_limits<real>::max()
} // namespace rnn::generic
#endif |
lemma starlike_convex_tweak_boundary_points: fixes S :: "'a::euclidean_space set" assumes "convex S" "S \<noteq> {}" and ST: "rel_interior S \<subseteq> T" and TS: "T \<subseteq> closure S" shows "starlike T" |
/*************************************************************
* Copyright (c) David Pellow, Darya Filippova, Carl Kingsford
*************************************************************/
#ifndef LIB_HS_NODE
#define LIB_HS_NODE
#include <boost/heap/fibonacci_heap.hpp>
#include <cstdint>
#include <unordered_set>
typedef uint64_t kmer_t;
class HSNode {
private:
const kmer_t kmer;
unsigned degree = 0;
    std::unordered_set<std::unordered_set<kmer_t>*> neighbours;
public:
HSNode(kmer_t kmer) : kmer(kmer) {
}
kmer_t get_kmer() {
return kmer;
}
    std::unordered_set<std::unordered_set<kmer_t>*> get_neighbours() {
return neighbours;
}
unsigned get_degree() {
return degree;
}
    void add_neighbour(std::unordered_set<kmer_t>* N) {
neighbours.insert(N);
degree++;
}
    void remove_neighbour(std::unordered_set<kmer_t>* N) {
neighbours.erase(N);
degree--;
}
};
#endif
|
Formal statement is: lemma AE_not_in: "N \<in> null_sets M \<Longrightarrow> AE x in M. x \<notin> N" Informal statement is: If $N$ is a null set, then almost every point of $M$ is not in $N$. |
{-# OPTIONS --without-K #-}
module M-types.Base.Core where
open import Agda.Primitive public using (Level) renaming
(
lzero to ℓ-zero ;
lsuc to ℓ-suc ;
_⊔_ to ℓ-max
)
variable
ℓ ℓ₀ ℓ₁ ℓ₂ : Level
Ty : (ℓ : Level) → Set (ℓ-suc ℓ)
Ty ℓ = Set ℓ
|
lemma norm_inverse_le_norm: fixes x :: "'a::real_normed_div_algebra" shows "r \<le> norm x \<Longrightarrow> 0 < r \<Longrightarrow> norm (inverse x) \<le> inverse r" |
Wednesday 13th February – Australian Market Wrap | Beeks Downunder-Because not all financial news is boring!
One day, the wife comes home with a spectacular diamond ring.
“Where did you get that ring?” her husband asks.
A week later, his wife comes home with a long shiny fur coat. “Where did you get that coat?” her husband asks.
Her share of the lotto winnings….
That night, the wife asks her husband to run her a nice warm bath while she gets undressed.
When she enters the bathroom, she finds that there is barely enough water in the bath to cover the bath plug.
“What’s this?” she asks her husband. |
#=
test_array:
- Julia version: 1.5.0
- Author: shisa
- Date: 2020-08-11
=#
module TestArray
# packages
using Test
# external modules
include("array_initialization.jl")
include("array_operation.jl")
# methods
function test()
@testset "Array" begin
@testset "ArrayInit" begin
@test_nowarn ArrayInit.main()
end
@testset "ArrayOp" begin
@test_nowarn ArrayOp.main()
end
end
end
end
if abspath(PROGRAM_FILE) == @__FILE__
using .TestArray
TestArray.test()
end
|
lemma cone_hull_empty_iff: "S = {} \<longleftrightarrow> cone hull S = {}" |
```python
import sympy as sym
from sympy.physics.units.quantities import Quantity
from IPython.display import display, Math, Latex
import pandas as pd
import numpy as np
def func_str(function):
    # Render a SymPy expression as a string, with "^" for exponentiation.
    function_str = str(function)
    function_str = function_str.replace("**", "^")
    return function_str
class Lagrange:
def __init__(self, function, *args):
self.lamb = Quantity("λ")
self.variables = []
self.function = function
for variable in args:
self.variables.append(variable)
self.make_delta()
self.partial_deravative()
def make_delta(self):
summation = 0
for variable in self.variables:
summation += variable**2
self.delta = summation**0.5 + self.lamb * self.function
display(Math(r"min(\Delta) = " + func_str(self.delta)))
def partial_deravative(self):
self.partial_equations = []
for i in range(len(self.variables) + 1):
if i == len(self.variables):
der_function = sym.diff(self.delta, self.lamb)
self.partial_equations.append(sym.Eq(der_function, 0))
display(Math(r"\frac{\partial min(\Delta)}{\partial \lambda} = " + func_str(der_function) + str( " = 0")))
else:
der_function = sym.diff(self.delta, self.variables[i])
self.partial_equations.append(sym.Eq(der_function, 0))
display(Math(r"\frac{\partial min(\Delta)}{\partial y_%d} = " % (i+1) + func_str(der_function) + str( " = 0")))
def solve_equations(self):
self.unknows = []
for i in range(len(self.variables) + 1):
if i == len(self.variables):
self.unknows.append(self.lamb)
else:
self.unknows.append(self.variables[i])
self.result = sym.solve(self.partial_equations, self.unknows)
return self.result
def get_beta(self):
if hasattr(self, "result"):
return ((float(self.result[0][0])**2) + float((self.result[0][1])**2))**0.5
else:
self.result = self.solve_equations()
return self.get_beta()
y_1, y_2 = sym.symbols('y_1 y_2')
f = 10*y_1 - 4*y_2 + 12
lagrange = Lagrange(f, y_1, y_2)
lagrange.get_beta()
```
$\displaystyle min(\Delta) = λ*(10*y_1 - 4*y_2 + 12) + (y_1**2 + y_2**2)**0.5$
$\displaystyle \frac{\partial min(\Delta)}{\partial y_1} = 10*λ + 1.0*y_1*(y_1**2 + y_2**2)**(-0.5) = 0$
$\displaystyle \frac{\partial min(\Delta)}{\partial y_2} = -4*λ + 1.0*y_2*(y_1**2 + y_2**2)**(-0.5) = 0$
$\displaystyle \frac{\partial min(\Delta)}{\partial \lambda} = 10*y_1 - 4*y_2 + 12 = 0$
1.1141720290623114
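For reference, `get_beta` above computes the distance from the origin to the line $10 y_1 - 4 y_2 + 12 = 0$, which has the closed form $|c| / \lVert a \rVert = 12 / \sqrt{116} \approx 1.1142$. A minimal cross-check, not part of the original notebook and assuming only NumPy (already imported above as `np`):

```python
# Distance from the origin to the hyperplane a·y + c = 0 is |c| / ||a||.
a = np.array([10.0, -4.0])
c = 12.0
beta_closed_form = abs(c) / np.linalg.norm(a)
print(beta_closed_form)  # ≈ 1.1141720290623112, matching get_beta() up to rounding
```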
```python
print("hi")
```
|
### Example 5: Laplace equation
In this tutorial we will look at constructing the steady-state heat example using the Laplace equation. In contrast to the previous tutorials, this example is entirely driven by the prescribed Dirichlet and Neumann boundary conditions, instead of an initial condition. We will also demonstrate how to use Devito to solve a steady-state problem without time derivatives and how to switch buffers explicitly without having to re-compile the kernel.
First, we again define our governing equation:
$$\frac{\partial ^2 p}{\partial x^2} + \frac{\partial ^2 p}{\partial y^2} = 0$$
We are again discretizing second-order derivatives using a central difference scheme to construct a diffusion problem (see tutorial 3). This time we have no time-dependent term in our equation though, since there is no term $p_{i,j}^{n+1}$. This means that we are simply updating our field variable $p$ over and over again, until we have reached an equilibrium state. In a discretised form, after rearranging to update the central point $p_{i,j}^n$ we have
$$p_{i,j}^n = \frac{\Delta y^2(p_{i+1,j}^n+p_{i-1,j}^n)+\Delta x^2(p_{i,j+1}^n + p_{i,j-1}^n)}{2(\Delta x^2 + \Delta y^2)}$$
And, as always, we first re-create the original implementation to see what we are aiming for. Here we initialise the field $p$ to $0$ and apply the following boundary conditions:
$p=0$ at $x=0$
$p=y$ at $x=2$
$\frac{\partial p}{\partial y}=0$ at $y=0, \ 1$
**Developer note:**
The original tutorial stores the field data in the layout `(ny, nx)`. Until now we have used `(x, y)` notation for creating our Devito examples, but for this one we will adopt the `(y, x)` layout for compatibility reasons.
```python
from examples.cfd import plot_field
import numpy as np
%matplotlib inline
# Some variable declarations
nx = 31
ny = 31
c = 1
dx = 2. / (nx - 1)
dy = 1. / (ny - 1)
```
```python
def laplace2d(p, bc_right, dx, dy, l1norm_target):
l1norm = 1
pn = np.empty_like(p)
while l1norm > l1norm_target:
pn = p.copy()
p[1:-1, 1:-1] = ((dy**2 * (pn[1:-1, 2:] + pn[1:-1, 0:-2]) +
dx**2 * (pn[2:, 1:-1] + pn[0:-2, 1:-1])) /
(2 * (dx**2 + dy**2)))
p[:, 0] = 0 # p = 0 @ x = 0
p[:, -1] = bc_right # p = y @ x = 2
p[0, :] = p[1, :] # dp/dy = 0 @ y = 0
p[-1, :] = p[-2, :] # dp/dy = 0 @ y = 1
l1norm = (np.sum(np.abs(p[:]) - np.abs(pn[:])) /
np.sum(np.abs(pn[:])))
return p
```
```python
#NBVAL_IGNORE_OUTPUT
# Our initial condition is 0 everywhere, except at the boundary
p = np.zeros((ny, nx))
# Boundary conditions
bc_right = np.linspace(0, 1, ny)
p[:, 0] = 0 # p = 0 @ x = 0
p[:, -1] = bc_right # p = y @ x = 2
p[0, :] = p[1, :] # dp/dy = 0 @ y = 0
p[-1, :] = p[-2, :] # dp/dy = 0 @ y = 1
plot_field(p, ymax=1.0, view=(30, 225))
```
```python
#NBVAL_IGNORE_OUTPUT
p = laplace2d(p, bc_right, dx, dy, 1e-4)
plot_field(p, ymax=1.0, view=(30, 225))
```
Ok, nice. Now, to re-create this example in Devito we need to look a little bit further under the hood. There are two things that make this different to the examples we covered so far:
* We have no time dependence in the `p` field, but we still need to advance the state of `p` between iterations. So, instead of using `TimeFunction` objects that provide multiple data buffers for timestepping schemes, we will use `Function` objects that have no time dimension and only allocate a single buffer according to the space dimensions. However, since we are still implementing a pseudo-timestepping loop, we will need two objects, say `p` and `pn`, to act as alternating buffers.
* If we're using two different symbols to denote our buffers, any operator we create will only perform a single timestep. This is desired though, since we need to check a convergence criterion outside of the main stencil update to determine when we stop iterating. As a result, we will need to call the operator repeatedly after instantiating it outside the convergence loop.
So, how do we make sure our operator doesn't accidentally overwrite values in the same buffer? Well, we can again let SymPy reorganise our Laplace equation based on `pn` to generate the stencil, but when we create the update expression, we set the LHS to our second buffer variable `p`.
```python
from devito import Grid, Function, Eq, INTERIOR
from sympy import solve
# Create two explicit buffers for pseudo-timestepping
grid = Grid(shape=(nx, ny), extent=(1., 2.))
p = Function(name='p', grid=grid, space_order=2)
pn = Function(name='pn', grid=grid, space_order=2)
# Create Laplace equation base on `pn`
eqn = Eq(pn.laplace, region=INTERIOR)
# Let SymPy solve for the central stencil point
stencil = solve(eqn, pn)[0]
# Now we let our stencil populate our second buffer `p`
eq_stencil = Eq(p, stencil)
# In the resulting stencil `pn` is exclusively used on the RHS
# and `p` on the LHS is the grid the kernel will update
print("Update stencil:\n%s\n" % eq_stencil)
```
Update stencil:
Eq(p(x, y), 0.5*(h_x**2*pn(x, y - h_y) + h_x**2*pn(x, y + h_y) + h_y**2*pn(x - h_x, y) + h_y**2*pn(x + h_x, y))/(h_x**2 + h_y**2))
Now we can add our boundary conditions. We have already seen how to prescribe constant Dirichlet BCs by simply setting values using the low-level notation. This time we will go a little further by setting a prescribed profile, which we create first as a custom 1D symbol and supply it with the BC values. For this we need to create a `Function` object that has a different shape than our general `grid`, so instead of the grid we provide an explicit pair of dimension symbols and the corresponding shape for the data.
```python
x, y = grid.dimensions
bc_right = Function(name='bc_right', shape=(nx, ), dimensions=(x, ))
bc_right.data[:] = np.linspace(0, 1, nx)
```
Now we can create a set of expressions for the BCs again, where we set prescribed values on the right and left of our grid. For the Neumann BCs along the top and bottom boundaries we simply copy the second row from the outside into the outermost row, just as the original tutorial did. Using these expressions and our stencil update we can now create an operator.
```python
#NBVAL_IGNORE_OUTPUT
from devito import Operator
# Create boundary condition expressions
bc = [Eq(p.indexed[x, 0], 0.)] # p = 0 @ x = 0
bc += [Eq(p.indexed[x, ny-1], bc_right.indexed[x])] # p = y @ x = 2
bc += [Eq(p.indexed[0, y], p.indexed[1, y])] # dp/dy = 0 @ y = 0
bc += [Eq(p.indexed[nx-1, y], p.indexed[nx-2, y])] # dp/dy = 0 @ y = 1
# Now we can build the operator that we need
op = Operator(expressions=[eq_stencil] + bc)
```
We can now use this single-step operator repeatedly in a Python loop, where we can arbitrarily execute other code in between invocations. This allows us to update our L1 norm and check for convergence. Using our pre-compiled operator now comes down to a single function call that supplies the relevant data symbols. One thing to note is that we now do exactly the same thing as the original NumPy loop, in that we deep-copy the data between each iteration of the loop, which we will look at after this.
```python
#NBVAL_IGNORE_OUTPUT
# Silence the runtime performance logging
from devito import configuration
configuration['log_level'] = 'ERROR'
# Initialise the two buffer fields
p.data[:] = 0.
p.data[:, -1] = np.linspace(0, 1, ny)
pn.data[:] = 0.
pn.data[:, -1] = np.linspace(0, 1, ny)
# Visualize the initial condition
plot_field(p.data, ymax=1.0, view=(30, 225))
# Run the convergence loop with deep data copies
l1norm_target = 1.e-4
l1norm = 1
while l1norm > l1norm_target:
# This call implies a deep data copy
pn.data[:] = p.data[:]
op(p=p, pn=pn)
l1norm = (np.sum(np.abs(p.data[:]) - np.abs(pn.data[:])) /
np.sum(np.abs(pn.data[:])))
# Visualize the converged steady-state
plot_field(p.data, ymax=1.0, view=(30, 225))
```
One crucial detail about the code above is that the deep data copy between iterations would really hurt performance if we ran this on a large grid. However, we have already seen how we can match data symbols to symbolic names when calling the pre-compiled operator, which we can now use to actually switch the roles of `pn` and `p` between iterations, e.g. `op(p=pn, pn=p)`. Thus, we can implement a simple buffer-switching scheme by simply testing for odd and even time-steps, without ever having to shuffle data around.
```python
#NBVAL_IGNORE_OUTPUT
# Initialise the two buffer fields
p.data[:] = 0.
p.data[:, -1] = np.linspace(0, 1, ny)
pn.data[:] = 0.
pn.data[:, -1] = np.linspace(0, 1, ny)
# Visualize the initial condition
plot_field(p.data, ymax=1.0, view=(30, 225))
# Run the convergence loop by explicitly flipping buffers
l1norm_target = 1.e-4
l1norm = 1
counter = 0
while l1norm > l1norm_target:
# Determine buffer order
if counter % 2 == 0:
_p = p
_pn = pn
else:
_p = pn
_pn = p
# Apply operator
op(p=_p, pn=_pn)
# Compute L1 norm
l1norm = (np.sum(np.abs(_p.data[:]) - np.abs(_pn.data[:])) /
np.sum(np.abs(_pn.data[:])))
counter += 1
plot_field(p.data, ymax=1.0, view=(30, 225))
```
|
function diag_lobpcg( LF, ∇2, prec, Vpot, X0;
tol=1e-5, tol_avg=1e-7, maxit=200, verbose=false,
verbose_last=false )
#
ncols = size(X0)[2]
if ncols <= 0
@printf("diag_lobpcg requires at least one initial wave function!\n");
return
end
Npoints = size(X0)[1]
# orthonormalize the initial wave functions.
X = copy(X0)
ortho_gram_schmidt!(X)
HX = zeros(Float64, Npoints, ncols)
W = zeros(Float64, Npoints, ncols)
HW = zeros(Float64, Npoints, ncols)
for ic = 1:ncols
HX[:,ic] = op_H( LF, ∇2, Vpot, X[:,ic] )
end
nconv = 0
iter = 1
resnrm = ones(ncols,1)
sum_evals = 0.0
sum_evals_old = 0.0
conv = 0.0
NiterConv = 0
while iter <= maxit && nconv < ncols
# Rayleigh quotient (approximate eigenvalue, obj func)
S = X'*HX
lambda = eigvals(S)
#
# Check for convergence
#
sum_evals = sum(lambda)
conv = abs(sum_evals - sum_evals_old)/ncols
sum_evals_old = sum_evals
R = HX - X*S
if verbose
@printf("LOBPCG iter = %8d, %18.10e\n", iter, conv)
end
if conv <= tol_avg
NiterConv = iter
if verbose
@printf("LOBPCG convergence: tol_avg\n")
end
break
end
#
for ic = 1:ncols
resnrm[ic] = norm( R[:,ic] )
end
#
#W = R[:,:]
# apply preconditioner
for ic = 1:ncols
W[:,ic] = apply_prec_ilu0( prec, R[:,ic] )
end
for ic = 1:ncols
HW[:,ic] = op_H( LF, ∇2, Vpot, W[:,ic] )
end
#
C = W'*W
C = ( C + C' )/2
R = chol(C)
W = W/R
HW = HW/R
#
Q = [X W]
HQ = [HX HW]
if iter > 1
Q = [Q P]
HQ = [HQ HP]
end
T = Q'*(HQ)
T = (T+T')/2
G = Q'*Q
G = (G+G')/2
sd, S = eig( T, G ) # evals, evecs
U = S[:,1:ncols]
X = Q*U
HX = HQ*U
if iter > 1
set2 = ncols+1:2*ncols
set3 = 2*ncols+1:3*ncols
P = W*U[set2,:] + P*U[set3,:]
HP = HW*U[set2,:] + HP*U[set3,:]
C = P'*P
C = (C + C')/2
R = chol(C)
P = P/R
HP = HP/R
else
P = copy(W)
HP = copy(HW)
end
iter = iter + 1
end
S = X'*HX
S = (S+S')/2
lambda, Q = eig(S)
X = X*Q
if verbose_last || verbose
@printf("LOBPCG converges in %8d iterations.\n", NiterConv)
for j = 1:ncols
@printf("eigval[%2d] = %18.10f, resnrm = %18.10e\n", j, lambda[j], resnrm[j] )
end
end
return lambda, X
end
|
#ifndef HERMIT_SPIRIT_QI_IPV6_HPP
#define HERMIT_SPIRIT_QI_IPV6_HPP
#include <vector>
#include <utility>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/phoenix.hpp>
#include <boost/optional.hpp>
#include <hermit/spirit/qi/ipv4.hpp>
#include <boost/fusion/adapted/std_pair.hpp>
#include <boost/fusion/include/std_pair.hpp>
#include <hermit/ip.hpp>
namespace hermit {
namespace spirit {
namespace qi {
template< typename Iterator >
class ipv6 : public boost::spirit::qi::grammar<
Iterator,
hermit::ipv6()
> {
public:
ipv6() : ipv6::base_type( root ) {
namespace qi = boost::spirit::qi;
namespace phx = boost::phoenix;
ipv6part = ( hex4_p - ipv4address ) % ( qi::lit(':') - "::" );
ipv6_type1 = ( ipv6part >> "::" >> ipv6part >> ':' >> ipv4address )[
qi::_pass = phx::size( qi::_1 ) + phx::size( qi::_2 ) < 6ul,
phx::at_c< 0 >( qi::_val ) = qi::_1,
phx::at_c< 1 >( qi::_val ) = qi::_2,
phx::push_back( phx::at_c< 1 >( qi::_val ), ( qi::_3 >> 16 ) & 0xFFFF ),
phx::push_back( phx::at_c< 1 >( qi::_val ), qi::_3 & 0xFFFF )
];
ipv6_type2 = ( "::" >> ipv6part >> ':' >> ipv4address )[
qi::_pass = phx::size( qi::_1 ) < 6ul,
phx::at_c< 1 >( qi::_val ) = qi::_1,
phx::push_back( phx::at_c< 1 >( qi::_val ), ( qi::_2 >> 16 ) & 0xFFFF ),
phx::push_back( phx::at_c< 1 >( qi::_val ), qi::_2 & 0xFFFF )
];
ipv6_type3 = ( ipv6part >> "::" >> ipv4address )[
qi::_pass = phx::size( qi::_1 ) < 6ul,
phx::at_c< 0 >( qi::_val ) = qi::_1,
phx::push_back( phx::at_c< 1 >( qi::_val ), ( qi::_2 >> 16 ) & 0xFFFF ),
phx::push_back( phx::at_c< 1 >( qi::_val ), qi::_2 & 0xFFFF )
];
ipv6_type4 = ( ipv6part >> ":" >> ipv4address )[
qi::_pass = phx::size( qi::_1 ) == 6ul,
phx::at_c< 0 >( qi::_val ) = qi::_1,
phx::push_back( phx::at_c< 1 >( qi::_val ), ( qi::_2 >> 16 ) & 0xFFFF ),
phx::push_back( phx::at_c< 1 >( qi::_val ), qi::_2 & 0xFFFF )
];
ipv6_type5 = ( "::" >> ipv4address )[
phx::push_back( phx::at_c< 1 >( qi::_val ), ( qi::_1 >> 16 ) & 0xFFFF ),
phx::push_back( phx::at_c< 1 >( qi::_val ), qi::_1 & 0xFFFF )
];
ipv6_type6 = ( ipv6part >> "::" >> ipv6part )[
qi::_pass = phx::size( qi::_1 ) + phx::size( qi::_2 ) < 8ul,
phx::at_c< 0 >( qi::_val ) = qi::_1,
phx::at_c< 1 >( qi::_val ) = qi::_2
];
ipv6_type7 = ( "::" >> ipv6part )[
qi::_pass = phx::size( qi::_1 ) < 8ul,
phx::at_c< 1 >( qi::_val ) = qi::_1
];
ipv6_type8 = ( ipv6part >> "::" )[
qi::_pass = phx::size( qi::_1 ) < 8ul,
phx::at_c< 0 >( qi::_val ) = qi::_1
];
ipv6_type9 = ipv6part[
qi::_pass = phx::size( qi::_1 ) == 8ul,
phx::at_c< 0 >( qi::_val ) = qi::_1
];
ipv6_type0 = qi::lit( "::" )[
qi::_pass =true
];
ipv6address = (ipv6_type1|ipv6_type2|ipv6_type3|ipv6_type4|ipv6_type5|ipv6_type6|ipv6_type7|ipv6_type8|ipv6_type9|ipv6_type0 )[
phx::resize( qi::_a, 8ul ),
phx::fill( qi::_a, 0u ),
phx::copy( phx::at_c< 0 >( qi::_1 ), phx::begin( qi::_a ) ),
phx::reverse_copy( phx::at_c< 1 >( qi::_1 ), phx::rbegin( qi::_a ) ),
phx::at_c< 0 >( qi::_val ) = phx::at( qi::_a, 0ul ),
phx::at_c< 0 >( qi::_val ) <<= 16,
phx::at_c< 0 >( qi::_val ) |= phx::at( qi::_a, 1ul ),
phx::at_c< 0 >( qi::_val ) <<= 16,
phx::at_c< 0 >( qi::_val ) |= phx::at( qi::_a, 2ul ),
phx::at_c< 0 >( qi::_val ) <<= 16,
phx::at_c< 0 >( qi::_val ) |= phx::at( qi::_a, 3ul ),
phx::at_c< 1 >( qi::_val ) = phx::at( qi::_a, 4ul ),
phx::at_c< 1 >( qi::_val ) <<= 16,
phx::at_c< 1 >( qi::_val ) |= phx::at( qi::_a, 5ul ),
phx::at_c< 1 >( qi::_val ) <<= 16,
phx::at_c< 1 >( qi::_val ) |= phx::at( qi::_a, 6ul ),
phx::at_c< 1 >( qi::_val ) <<= 16,
phx::at_c< 1 >( qi::_val ) |= phx::at( qi::_a, 7ul )
];
root = ipv6address;
}
private:
boost::spirit::qi::uint_parser<uint16_t, 16, 1, 4> hex4_p;
ipv4< Iterator > ipv4address;
boost::spirit::qi::rule< Iterator, std::vector< uint16_t >() > ipv6part;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type1;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type2;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type3;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type4;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type5;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type6;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type7;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type8;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type9;
boost::spirit::qi::rule< Iterator, std::pair< std::vector< uint16_t >, std::vector< uint16_t > >() > ipv6_type0;
boost::spirit::qi::rule< Iterator, std::pair< std::uint64_t, std::uint64_t >(), boost::spirit::qi::locals< std::vector< uint16_t > > > ipv6address;
boost::spirit::qi::rule< Iterator, hermit::ipv6() > root;
};
}
}
}
#endif
|
function pass = test_coeffs2vals(pref)
% test various coeffs2vals, vals2coeffs, vals2vals, and coeffs2coeffs codes.
if ( nargin == 0 )
pref = chebfunpref();
end
tol = 1000*eps;
f = chebfun(@exp, pref);
N = length(f);
c_leg = legcoeffs(f);
c_cheb = chebcoeffs(f);
v_leg = f(legpts(N));
v_cheb1 = f(chebpts(N,1));
v_cheb2 = f(chebpts(N,2));
pass(1) = norm(c_leg - chebcoeffs2legcoeffs(c_cheb), inf) < tol;
pass(2) = norm(c_leg - legvals2legcoeffs(v_leg), inf) < tol;
pass(3) = norm(c_leg - chebvals2legcoeffs(v_cheb1, 1), inf) < tol;
pass(4) = norm(c_leg - chebvals2legcoeffs(v_cheb2, 2), inf) < tol;
pass(5) = norm(c_cheb - legcoeffs2chebcoeffs(c_leg), inf) < tol;
pass(6) = norm(c_cheb - legvals2chebcoeffs(v_leg), inf) < tol;
pass(7) = norm(c_cheb - chebvals2chebcoeffs(v_cheb1, 1), inf) < tol;
pass(8) = norm(c_cheb - chebvals2chebcoeffs(v_cheb2, 2), inf) < tol;
pass(9) = norm(v_leg - legcoeffs2legvals(c_leg), inf) < tol;
pass(10) = norm(v_leg - chebcoeffs2legvals(c_cheb), inf) < tol;
pass(11) = norm(v_leg - chebvals2legvals(v_cheb1, 1), inf) < tol;
pass(12) = norm(v_leg - chebvals2legvals(v_cheb2, 2), inf) < tol;
pass(13) = norm(v_cheb1 - legcoeffs2chebvals(c_leg, 1), inf) < tol;
pass(14) = norm(v_cheb1 - chebcoeffs2chebvals(c_cheb,1), inf) < tol;
pass(15) = norm(v_cheb1 - legvals2chebvals(v_leg, 1), inf) < tol;
pass(16) = norm(v_cheb1 - chebvals2chebvals(v_cheb2, 2, 1), inf) < tol;
pass(17) = norm(v_cheb2 - legcoeffs2chebvals(c_leg, 2), inf) < tol;
pass(18) = norm(v_cheb2 - chebcoeffs2chebvals(c_cheb,2), inf) < tol;
pass(19) = norm(v_cheb2 - legvals2chebvals(v_leg, 2), inf) < tol;
pass(20) = norm(v_cheb2 - chebvals2chebvals(v_cheb1, 1, 2), inf) < tol;
end |
[STATEMENT]
lemma lambda_system_sets: "x \<in> lambda_system \<Omega> M f \<Longrightarrow> x \<in> M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> lambda_system \<Omega> M f \<Longrightarrow> x \<in> M
[PROOF STEP]
by (simp add: lambda_system_def) |
import Decidable.Equality
0 foo : (i, j : Nat) -> Bool
foo i j = case decEq i j of
Yes pf => True
No pf => False
|
[STATEMENT]
lemma measurable_op_sem:
assumes "op_type oper t = Some t'"
shows "op_sem oper \<in> measurable (stock_measure t) (stock_measure t')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
[PROOF STEP]
proof (cases oper)
[PROOF STATE]
proof (state)
goal (18 subgoals):
1. oper = Fst \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Snd \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Equals \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 18 subgoals...
[PROOF STEP]
case Fst
[PROOF STATE]
proof (state)
this:
oper = Fst
goal (18 subgoals):
1. oper = Fst \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Snd \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Equals \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 18 subgoals...
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
op_type oper t = Some t'
oper = Fst
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
op_type oper t = Some t'
oper = Fst
goal (1 subgoal):
1. op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
[PROOF STEP]
by (simp split: pdf_type.split_asm)
[PROOF STATE]
proof (state)
this:
op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
goal (17 subgoals):
1. oper = Snd \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = Equals \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 17 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (17 subgoals):
1. oper = Snd \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = Equals \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 17 subgoals...
[PROOF STEP]
case Snd
[PROOF STATE]
proof (state)
this:
oper = Snd
goal (17 subgoals):
1. oper = Snd \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = Equals \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 17 subgoals...
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
op_type oper t = Some t'
oper = Snd
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
op_type oper t = Some t'
oper = Snd
goal (1 subgoal):
1. op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
[PROOF STEP]
by (simp split: pdf_type.split_asm)
[PROOF STATE]
proof (state)
this:
op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
goal (16 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = Equals \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 16 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (16 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = Equals \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 16 subgoals...
[PROOF STEP]
case Equals
[PROOF STATE]
proof (state)
this:
oper = Equals
goal (16 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = Equals \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 16 subgoals...
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
op_type oper t = Some t'
oper = Equals
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
op_type oper t = Some t'
oper = Equals
goal (1 subgoal):
1. op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
[PROOF STEP]
by (auto intro!: val_case_stock_measurable split: if_split_asm)
[PROOF STATE]
proof (state)
this:
op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
goal (15 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Exp \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 15 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (15 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Exp \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 15 subgoals...
[PROOF STEP]
case Pow
[PROOF STATE]
proof (state)
this:
oper = pdf_operator.Pow
goal (15 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = pdf_operator.Pow \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Exp \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 15 subgoals...
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
op_type oper t = Some t'
oper = pdf_operator.Pow
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
op_type oper t = Some t'
oper = pdf_operator.Pow
goal (1 subgoal):
1. op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
[PROOF STEP]
apply (auto intro!: val_case_stock_measurable split: pdf_type.splits)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>oper = pdf_operator.Pow; t = PRODUCT REAL INTEG; t' = REAL\<rbrakk> \<Longrightarrow> (\<lambda>(y, z). case y of IntVal x \<Rightarrow> case z of IntVal n \<Rightarrow> if n < 0 then IntVal 0 else IntVal (x ^ nat n) | RealVal x \<Rightarrow> case z of IntVal n \<Rightarrow> if n < 0 then RealVal 0 else RealVal (x ^ nat n)) \<in> stock_measure REAL \<Otimes>\<^sub>M stock_measure INTEG \<rightarrow>\<^sub>M stock_measure REAL
[PROOF STEP]
apply (subst measurable_cong[where
g="\<lambda>(x, n). if extract_int n < 0 then RealVal 0 else RealVal (extract_real x ^ nat (extract_int n))"])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>w. \<lbrakk>oper = pdf_operator.Pow; t = PRODUCT REAL INTEG; t' = REAL; w \<in> space (stock_measure REAL \<Otimes>\<^sub>M stock_measure INTEG)\<rbrakk> \<Longrightarrow> (case w of (IntVal x, IntVal n) \<Rightarrow> if n < 0 then IntVal 0 else IntVal (x ^ nat n) | (RealVal x, IntVal n) \<Rightarrow> if n < 0 then RealVal 0 else RealVal (x ^ nat n)) = (case w of (x, n) \<Rightarrow> if extract_int n < 0 then RealVal 0 else RealVal (extract_real x ^ nat (extract_int n)))
2. \<lbrakk>oper = pdf_operator.Pow; t = PRODUCT REAL INTEG; t' = REAL\<rbrakk> \<Longrightarrow> (\<lambda>(x, n). if extract_int n < 0 then RealVal 0 else RealVal (extract_real x ^ nat (extract_int n))) \<in> stock_measure REAL \<Otimes>\<^sub>M stock_measure INTEG \<rightarrow>\<^sub>M stock_measure REAL
[PROOF STEP]
apply (auto simp: space_pair_measure elim!: REAL_E INTEG_E)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
goal (14 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Exp \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = pdf_operator.Ln \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 14 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (14 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Exp \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = pdf_operator.Ln \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 14 subgoals...
[PROOF STEP]
case Less
[PROOF STATE]
proof (state)
this:
oper = Less
goal (14 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = Less \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = Exp \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = pdf_operator.Ln \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 14 subgoals...
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
op_type oper t = Some t'
oper = Less
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
op_type oper t = Some t'
oper = Less
goal (1 subgoal):
1. op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
[PROOF STEP]
by (auto split: pdf_type.splits)
[PROOF STATE]
proof (state)
this:
op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
goal (13 subgoals):
1. oper = Add \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
2. oper = Mult \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
3. oper = Minus \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
4. oper = And \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
5. oper = pdf_operator.Not \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
6. oper = Or \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
7. oper = Sqrt \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
8. oper = Exp \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
9. oper = pdf_operator.Ln \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
10. oper = Fact \<Longrightarrow> op_sem oper \<in> stock_measure t \<rightarrow>\<^sub>M stock_measure t'
A total of 13 subgoals...
[PROOF STEP]
qed (insert assms, auto split: pdf_type.split_asm intro!: val_case_stock_measurable) |
[STATEMENT]
lemma is_lub_upper:
"P \<turnstile> lub(U, V) = T \<Longrightarrow> P \<turnstile> U \<le> T \<and> P \<turnstile> V \<le> T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> lub(U, V) = T \<Longrightarrow> P \<turnstile> U \<le> T \<and> P \<turnstile> V \<le> T
[PROOF STEP]
by(auto elim: is_lub.cases) |
lemma LIM_offset_zero: "f \<midarrow>a\<rightarrow> L \<Longrightarrow> (\<lambda>h. f (a + h)) \<midarrow>0\<rightarrow> L" for a :: "'a::real_normed_vector" |
/-
Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import measure_theory.measure.measure_space_def
/-!
# Almost everywhere disjoint sets
We say that sets `s` and `t` are `μ`-a.e. disjoint (see `measure_theory.ae_disjoint`) if their
intersection has measure zero. This assumption can be used instead of `disjoint` in most theorems in
measure theory.
-/
open set function
namespace measure_theory
variables {ι α : Type*} {m : measurable_space α} (μ : measure α)
/-- Two sets are said to be `μ`-a.e. disjoint if their intersection has measure zero. -/
def ae_disjoint (s t : set α) := μ (s ∩ t) = 0
variables {μ} {s t u v : set α}
/-- If `s : ι → set α` is a countable family of pairwise a.e. disjoint sets, then there exists a
family of measurable null sets `t i` such that `s i \ t i` are pairwise disjoint. -/
lemma exists_null_pairwise_disjoint_diff [encodable ι] {s : ι → set α}
(hd : pairwise (ae_disjoint μ on s)) :
∃ t : ι → set α, (∀ i, measurable_set (t i)) ∧ (∀ i, μ (t i) = 0) ∧
pairwise (disjoint on (λ i, s i \ t i)) :=
begin
refine ⟨λ i, to_measurable μ (s i ∩ ⋃ j ∈ ({i}ᶜ : set ι), s j),
λ i, measurable_set_to_measurable _ _, λ i, _, _⟩,
{ simp only [measure_to_measurable, inter_Union, measure_bUnion_null_iff (countable_encodable _)],
exact λ j hj, hd _ _ (ne.symm hj) },
{ simp only [pairwise, disjoint_left, on_fun, mem_diff, not_and, and_imp, not_not],
intros i j hne x hi hU hj,
replace hU : x ∉ s i ∩ ⋃ j ≠ i, s j := λ h, hU (subset_to_measurable _ _ h),
simp only [mem_inter_eq, mem_Union, not_and, not_exists] at hU,
exact (hU hi j hne.symm hj).elim }
end
namespace ae_disjoint
protected lemma eq (h : ae_disjoint μ s t) : μ (s ∩ t) = 0 := h
@[symm] protected lemma symm (h : ae_disjoint μ s t) : ae_disjoint μ t s :=
by rwa [ae_disjoint, inter_comm]
protected lemma symmetric : symmetric (ae_disjoint μ) := λ s t h, h.symm
protected lemma comm : ae_disjoint μ s t ↔ ae_disjoint μ t s := ⟨λ h, h.symm, λ h, h.symm⟩
lemma _root_.disjoint.ae_disjoint (h : disjoint s t) : ae_disjoint μ s t :=
by rw [ae_disjoint, disjoint_iff_inter_eq_empty.1 h, measure_empty]
lemma mono_ae (h : ae_disjoint μ s t) (hu : u ≤ᵐ[μ] s) (hv : v ≤ᵐ[μ] t) : ae_disjoint μ u v :=
measure_mono_null_ae (hu.inter hv) h
lemma mono (h : ae_disjoint μ s t) (hu : u ⊆ s) (hv : v ⊆ t) : ae_disjoint μ u v :=
h.mono_ae hu.eventually_le hv.eventually_le
@[simp] lemma Union_left_iff [encodable ι] {s : ι → set α} :
ae_disjoint μ (⋃ i, s i) t ↔ ∀ i, ae_disjoint μ (s i) t :=
by simp only [ae_disjoint, Union_inter, measure_Union_null_iff]
@[simp] lemma Union_right_iff [encodable ι] {t : ι → set α} :
ae_disjoint μ s (⋃ i, t i) ↔ ∀ i, ae_disjoint μ s (t i) :=
by simp only [ae_disjoint, inter_Union, measure_Union_null_iff]
@[simp] lemma union_left_iff : ae_disjoint μ (s ∪ t) u ↔ ae_disjoint μ s u ∧ ae_disjoint μ t u :=
by simp [union_eq_Union, and.comm]
@[simp] lemma union_right_iff : ae_disjoint μ s (t ∪ u) ↔ ae_disjoint μ s t ∧ ae_disjoint μ s u :=
by simp [union_eq_Union, and.comm]
lemma union_left (hs : ae_disjoint μ s u) (ht : ae_disjoint μ t u) : ae_disjoint μ (s ∪ t) u :=
union_left_iff.mpr ⟨hs, ht⟩
lemma union_right (ht : ae_disjoint μ s t) (hu : ae_disjoint μ s u) : ae_disjoint μ s (t ∪ u) :=
union_right_iff.2 ⟨ht, hu⟩
lemma diff_ae_eq_left (h : ae_disjoint μ s t) : (s \ t : set α) =ᵐ[μ] s :=
@diff_self_inter _ s t ▸ diff_null_ae_eq_self h
lemma diff_ae_eq_right (h : ae_disjoint μ s t) : (t \ s : set α) =ᵐ[μ] t := h.symm.diff_ae_eq_left
lemma measure_diff_left (h : ae_disjoint μ s t) : μ (s \ t) = μ s := measure_congr h.diff_ae_eq_left
lemma measure_diff_right (h : ae_disjoint μ s t) : μ (t \ s) = μ t :=
measure_congr h.diff_ae_eq_right
/-- If `s` and `t` are `μ`-a.e. disjoint, then `s \ u` and `t` are disjoint for some measurable null
set `u`. -/
lemma exists_disjoint_diff (h : ae_disjoint μ s t) :
∃ u, measurable_set u ∧ μ u = 0 ∧ disjoint (s \ u) t :=
⟨to_measurable μ (s ∩ t), measurable_set_to_measurable _ _, (measure_to_measurable _).trans h,
disjoint_diff.symm.mono_left (λ x hx, ⟨hx.1, λ hxt, hx.2 $ subset_to_measurable _ _ ⟨hx.1, hxt⟩⟩)⟩
lemma of_null_right (h : μ t = 0) : ae_disjoint μ s t :=
measure_mono_null (inter_subset_right _ _) h
lemma of_null_left (h : μ s = 0) : ae_disjoint μ s t := (of_null_right h).symm
end ae_disjoint
lemma ae_disjoint_compl_left : ae_disjoint μ sᶜ s := (@disjoint_compl_left _ s _).ae_disjoint
lemma ae_disjoint_compl_right : ae_disjoint μ s sᶜ := (@disjoint_compl_right _ s _).ae_disjoint
end measure_theory
|
[STATEMENT]
lemma (in ptrace_top) adh_lemma:
assumes xpoint: "x \<in> A\<^sup>\<spadesuit>"
and P_subset_A: "P \<subseteq> A\<^sup>\<spadesuit>"
shows "x adh P = (\<forall> r \<in> pfinpref A x. \<exists> s \<in> A\<^sup>\<infinity>. r @@ s \<in> P)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x adh P) = (\<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. x adh P \<Longrightarrow> \<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
2. \<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P \<Longrightarrow> x adh P
[PROOF STEP]
assume adh_x: "x adh P"
[PROOF STATE]
proof (state)
this:
x adh P
goal (2 subgoals):
1. x adh P \<Longrightarrow> \<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
2. \<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P \<Longrightarrow> x adh P
[PROOF STEP]
show "\<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
fix r
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
let ?u = "suff A r"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
assume r_pfinpref_x: "r \<in> pfinpref A x"
[PROOF STATE]
proof (state)
this:
r \<in> pfinpref A x
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
hence r_pos: "r \<in> A\<^sup>\<clubsuit>"
[PROOF STATE]
proof (prove)
using this:
r \<in> pfinpref A x
goal (1 subgoal):
1. r \<in> A\<^sup>\<clubsuit>
[PROOF STEP]
by (auto dest: finpref_fin)
[PROOF STATE]
proof (state)
this:
r \<in> A\<^sup>\<clubsuit>
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
hence "?u open"
[PROOF STATE]
proof (prove)
using this:
r \<in> A\<^sup>\<clubsuit>
goal (1 subgoal):
1. suff A r open
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
suff A r open
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
hence "?u \<in> nhds x"
[PROOF STATE]
proof (prove)
using this:
suff A r open
goal (1 subgoal):
1. suff A r \<in> nhds x
[PROOF STEP]
using xpoint r_pfinpref_x
[PROOF STATE]
proof (prove)
using this:
suff A r open
x \<in> A\<^sup>\<spadesuit>
r \<in> pfinpref A x
goal (1 subgoal):
1. suff A r \<in> nhds x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
suff A r \<in> nhds x
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
with adh_x
[PROOF STATE]
proof (chain)
picking this:
x adh P
suff A r \<in> nhds x
[PROOF STEP]
have "?u \<inter> P \<noteq> {}"
[PROOF STATE]
proof (prove)
using this:
x adh P
suff A r \<in> nhds x
goal (1 subgoal):
1. suff A r \<inter> P \<noteq> {}
[PROOF STEP]
by (auto elim!:adhCE)
[PROOF STATE]
proof (state)
this:
suff A r \<inter> P \<noteq> {}
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
suff A r \<inter> P \<noteq> {}
[PROOF STEP]
obtain t where tu: "t \<in> ?u" and tP: "t \<in> P"
[PROOF STATE]
proof (prove)
using this:
suff A r \<inter> P \<noteq> {}
goal (1 subgoal):
1. (\<And>t. \<lbrakk>t \<in> suff A r; t \<in> P\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t \<in> suff A r
t \<in> P
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
from tu
[PROOF STATE]
proof (chain)
picking this:
t \<in> suff A r
[PROOF STEP]
obtain s where "t = r @@ s"
[PROOF STATE]
proof (prove)
using this:
t \<in> suff A r
goal (1 subgoal):
1. (\<And>s. t = r @@ s \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using r_pos
[PROOF STATE]
proof (prove)
using this:
t \<in> suff A r
r \<in> A\<^sup>\<clubsuit>
goal (1 subgoal):
1. (\<And>s. t = r @@ s \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto elim!: suff_appE)
[PROOF STATE]
proof (state)
this:
t = r @@ s
goal (1 subgoal):
1. \<And>r. r \<in> pfinpref A x \<Longrightarrow> \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
with tP
[PROOF STATE]
proof (chain)
picking this:
t \<in> P
t = r @@ s
[PROOF STEP]
show "\<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P"
[PROOF STATE]
proof (prove)
using this:
t \<in> P
t = r @@ s
goal (1 subgoal):
1. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
using P_subset_A r_pos
[PROOF STATE]
proof (prove)
using this:
t \<in> P
t = r @@ s
P \<subseteq> A\<^sup>\<spadesuit>
r \<in> A\<^sup>\<clubsuit>
goal (1 subgoal):
1. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
[PROOF STEP]
by (auto iff: lapp_allT_iff)
[PROOF STATE]
proof (state)
this:
\<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
goal (1 subgoal):
1. \<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P \<Longrightarrow> x adh P
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P \<Longrightarrow> x adh P
[PROOF STEP]
assume H: "\<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P"
[PROOF STATE]
proof (state)
this:
\<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
goal (1 subgoal):
1. \<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P \<Longrightarrow> x adh P
[PROOF STEP]
show "x adh P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x adh P
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>U. U \<in> nhds x \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
fix U
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>U. U \<in> nhds x \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
assume unhd: "U \<in> nhds x"
[PROOF STATE]
proof (state)
this:
U \<in> nhds x
goal (1 subgoal):
1. \<And>U. U \<in> nhds x \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
U \<in> nhds x
[PROOF STEP]
obtain r where r_pfinpref_x: "r \<in> pfinpref A x" and
suff_subset_U: "suff A r \<subseteq> U"
[PROOF STATE]
proof (prove)
using this:
U \<in> nhds x
goal (1 subgoal):
1. (\<And>r. \<lbrakk>r \<in> pfinpref A x; suff A r \<subseteq> U\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (elim suff_ptop_nhd_base)
[PROOF STATE]
proof (state)
this:
r \<in> pfinpref A x
suff A r \<subseteq> U
goal (1 subgoal):
1. \<And>U. U \<in> nhds x \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
from r_pfinpref_x
[PROOF STATE]
proof (chain)
picking this:
r \<in> pfinpref A x
[PROOF STEP]
have rpos: "r \<in> A\<^sup>\<clubsuit>"
[PROOF STATE]
proof (prove)
using this:
r \<in> pfinpref A x
goal (1 subgoal):
1. r \<in> A\<^sup>\<clubsuit>
[PROOF STEP]
by (auto intro: finpref_fin)
[PROOF STATE]
proof (state)
this:
r \<in> A\<^sup>\<clubsuit>
goal (1 subgoal):
1. \<And>U. U \<in> nhds x \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
show "U \<inter> P \<noteq> {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. U \<inter> P \<noteq> {}
[PROOF STEP]
using rpos
[PROOF STATE]
proof (prove)
using this:
r \<in> A\<^sup>\<clubsuit>
goal (1 subgoal):
1. U \<inter> P \<noteq> {}
[PROOF STEP]
proof (cases r)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a rs. \<lbrakk>r = a ## rs; a \<in> A; rs \<in> A\<^sup>\<star>\<rbrakk> \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
case (LCons a l)
[PROOF STATE]
proof (state)
this:
r = a ## l
a \<in> A
l \<in> A\<^sup>\<star>
goal (1 subgoal):
1. \<And>a rs. \<lbrakk>r = a ## rs; a \<in> A; rs \<in> A\<^sup>\<star>\<rbrakk> \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
hence r_pfinpref_x: "r \<in> pfinpref A x"
[PROOF STATE]
proof (prove)
using this:
r = a ## l
a \<in> A
l \<in> A\<^sup>\<star>
goal (1 subgoal):
1. r \<in> pfinpref A x
[PROOF STEP]
using r_pfinpref_x
[PROOF STATE]
proof (prove)
using this:
r = a ## l
a \<in> A
l \<in> A\<^sup>\<star>
r \<in> pfinpref A x
goal (1 subgoal):
1. r \<in> pfinpref A x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
r \<in> pfinpref A x
goal (1 subgoal):
1. \<And>a rs. \<lbrakk>r = a ## rs; a \<in> A; rs \<in> A\<^sup>\<star>\<rbrakk> \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
with H
[PROOF STATE]
proof (chain)
picking this:
\<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
r \<in> pfinpref A x
[PROOF STEP]
obtain s where sA: "s \<in> A\<^sup>\<infinity>" and asP: "r@@s \<in> P"
[PROOF STATE]
proof (prove)
using this:
\<forall>r\<in>pfinpref A x. \<exists>s\<in>A\<^sup>\<infinity>. r @@ s \<in> P
r \<in> pfinpref A x
goal (1 subgoal):
1. (\<And>s. \<lbrakk>s \<in> A\<^sup>\<infinity>; r @@ s \<in> P\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
s \<in> A\<^sup>\<infinity>
r @@ s \<in> P
goal (1 subgoal):
1. \<And>a rs. \<lbrakk>r = a ## rs; a \<in> A; rs \<in> A\<^sup>\<star>\<rbrakk> \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
s \<in> A\<^sup>\<infinity>
r @@ s \<in> P
goal (1 subgoal):
1. \<And>a rs. \<lbrakk>r = a ## rs; a \<in> A; rs \<in> A\<^sup>\<star>\<rbrakk> \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
have "r @@ s \<in> suff A r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r @@ s \<in> suff A r
[PROOF STEP]
using sA rpos
[PROOF STATE]
proof (prove)
using this:
s \<in> A\<^sup>\<infinity>
r \<in> A\<^sup>\<clubsuit>
goal (1 subgoal):
1. r @@ s \<in> suff A r
[PROOF STEP]
by (auto simp: suff_def iff: lapp_allT_iff)
[PROOF STATE]
proof (state)
this:
r @@ s \<in> suff A r
goal (1 subgoal):
1. \<And>a rs. \<lbrakk>r = a ## rs; a \<in> A; rs \<in> A\<^sup>\<star>\<rbrakk> \<Longrightarrow> U \<inter> P \<noteq> {}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
s \<in> A\<^sup>\<infinity>
r @@ s \<in> P
r @@ s \<in> suff A r
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
s \<in> A\<^sup>\<infinity>
r @@ s \<in> P
r @@ s \<in> suff A r
goal (1 subgoal):
1. U \<inter> P \<noteq> {}
[PROOF STEP]
using suff_subset_U
[PROOF STATE]
proof (prove)
using this:
s \<in> A\<^sup>\<infinity>
r @@ s \<in> P
r @@ s \<in> suff A r
suff A r \<subseteq> U
goal (1 subgoal):
1. U \<inter> P \<noteq> {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
U \<inter> P \<noteq> {}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
U \<inter> P \<noteq> {}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x adh P
goal:
No subgoals!
[PROOF STEP]
qed |
function xyz = skel2xyz(skel, channels)
% SKEL2XYZ Compute XYZ values given skeleton structure and channels.
% FORMAT
% DESC Computes X, Y, Z coordinates given a BVH or acclaim skeleton
% structure and an associated set of channels.
% ARG skel : a skeleton for the bvh file.
% ARG channels : the channels for the bvh file.
% RETURN xyz : the point cloud positions for the skeleton.
%
% COPYRIGHT : Neil D. Lawrence, 2006
%
% SEEALSO : acclaim2xyz, bvh2xyz
% MOCAP
fname = str2func([skel.type '2xyz']);
xyz = fname(skel, channels);
|
module SQLite3.Bindings
import CUtils
libsqlite3 : String -> String
libsqlite3 x = "C:sqlite3_" <+> x <+> ",libsqlite3"
%foreign libsqlite3 "libversion"
sqlite3_libversion : PrimIO String
export data PrimHandle : Type where
export data PrimStmt : Type where
export %foreign libsqlite3 "errmsg"
sqlite3_errmsg : Ptr PrimHandle -> PrimIO String
export %foreign libsqlite3 "errcode"
sqlite3_errcode : Ptr PrimHandle -> PrimIO Int
export %foreign libsqlite3 "open"
sqlite3_open : (filename_utf8 : String) -> Ptr (Ptr PrimHandle) -> PrimIO Int
export %foreign libsqlite3 "close"
sqlite3_close : Ptr PrimHandle -> PrimIO Int
export %foreign libsqlite3 "prepare_v2"
sqlite3_prepare_v2 : Ptr PrimHandle -> (zsql : String) -> (zsql_max_len : Int) -> Ptr (Ptr PrimStmt) -> Ptr (Ptr String) -> PrimIO Int
export %foreign libsqlite3 "step"
sqlite3_step : Ptr PrimStmt -> PrimIO Int
export %foreign libsqlite3 "reset"
sqlite3_reset : Ptr PrimStmt -> PrimIO Int
export %foreign libsqlite3 "finalize"
sqlite3_finalize : Ptr PrimStmt -> PrimIO Int
export %foreign libsqlite3 "column_count"
sqlite3_column_count : Ptr PrimStmt -> PrimIO Int
export %foreign libsqlite3 "column_blob"
sqlite3_column_blob : Ptr PrimStmt -> (icol : Int) -> PrimIO AnyPtr
export %foreign libsqlite3 "column_text"
sqlite3_column_text : Ptr PrimStmt -> (icol : Int) -> PrimIO String
export %foreign libsqlite3 "column_double"
sqlite3_column_double : Ptr PrimStmt -> (icol : Int) -> PrimIO Double
||| sqlite3_column_int -> 32-bit INTEGER result
||| -- from: https://sqlite.org/c3ref/column_blob.html
||| though the signature is `int sqlite3_column_int(sqlite3_stmt*, int iCol);`
export %foreign libsqlite3 "column_int"
sqlite3_column_int : Ptr PrimStmt -> (icol : Int) -> PrimIO Int32
export %foreign libsqlite3 "column_int64"
sqlite3_column_int64 : Ptr PrimStmt -> (icol : Int) -> PrimIO Int64
export %foreign libsqlite3 "column_bytes"
sqlite3_column_bytes : Ptr PrimStmt -> (icol : Int) -> PrimIO Int
export %foreign libsqlite3 "column_type"
sqlite3_column_type : Ptr PrimStmt -> (icol : Int) -> PrimIO Int
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory Sep_Tactic_Helpers
imports Separation_Algebra
begin
lemmas sep_curry = sep_conj_sep_impl[rotated]
lemma sep_mp: "((Q \<longrightarrow>* R) \<and>* Q) s \<Longrightarrow> R s"
by (rule sep_conj_sep_impl2)
lemma sep_mp_frame: "((Q \<longrightarrow>* R) \<and>* Q \<and>* R') s \<Longrightarrow> (R \<and>* R') s"
apply (clarsimp simp: sep_conj_assoc[symmetric])
apply (erule sep_conj_impl)
apply (erule (1) sep_mp)
done
lemma sep_empty_conj: "P s \<Longrightarrow> (\<box> \<and>* P) s"
by clarsimp
lemma sep_conj_empty: "(\<box> \<and>* P) s \<Longrightarrow> P s"
by clarsimp
lemma sep_empty_imp: "(\<box> \<longrightarrow>* P) s \<Longrightarrow> P s"
apply (clarsimp simp: sep_impl_def)
apply (erule_tac x=0 in allE)
apply (clarsimp)
done
lemma sep_empty_imp': "(\<box> \<longrightarrow>* P) s \<Longrightarrow> (\<And>s. P s \<Longrightarrow> Q s) \<Longrightarrow> Q s"
apply (clarsimp simp: sep_impl_def)
apply (erule_tac x=0 in allE)
apply (clarsimp)
done
lemma sep_imp_empty: " P s \<Longrightarrow> (\<And>s. P s \<Longrightarrow> Q s) \<Longrightarrow> (\<box> \<longrightarrow>* Q) s"
by (erule sep_conj_sep_impl, clarsimp)
end
|
Formal statement is: lemma analytic_on_open: "open S \<Longrightarrow> f analytic_on S \<longleftrightarrow> f holomorphic_on S" Informal statement is: If $S$ is an open set, then $f$ is analytic on $S$ if and only if $f$ is holomorphic on $S$. |
= = Offensive strategy and order of battle = =
|
lemma homotopic_with_equal: assumes "P f" "P g" and contf: "continuous_map X Y f" and fg: "\<And>x. x \<in> topspace X \<Longrightarrow> f x = g x" shows "homotopic_with P X Y f g" |
||| A version of `Dec` that returns a meaningful error message as well as proof of void.
|||
||| When dealing with decidable properties for type-level computations the existing `Dec` data type is useful.
||| However, when using decidable properties interactively one cannot easily tell why a property failed.
||| One can always encode failing cases within the property itself but that is not necessarily advantageous.
|||
||| `DecInfo` provides a data structure to capture decidable properties together with an informative error message for when the property does not hold.
module Commons.Data.DecInfo
%default total
%access public export
data DecInfo : (errType : Type) -> (prop : Type) -> Type where
  Yes : (prfWhy : prop) -> DecInfo errType prop
  No : (msgWhyNot : errType) -> (prfWhyNot : prop -> Void) -> DecInfo errType prop
-- --------------------------------------------------------------------- [ EOF ]
|
section \<open>cpo fixpoint\<close>
theory CPOFix
imports Prelude
begin
default_sort type
(* general fixpoint over a cpo, for the case x \<sqsubseteq> F\<cdot>x *)
definition fixg::"('a::cpo) \<Rightarrow> ('a \<rightarrow> 'a) \<rightarrow> 'a" where
"fixg = (\<lambda> x. \<Lambda> F. if x \<sqsubseteq> F\<cdot>x then \<Squnion>i. iterate i\<cdot>F\<cdot>x else x)"
lemma iter_fixg_mono2: assumes "x \<sqsubseteq> y" and "F1 \<sqsubseteq> F2"
shows "\<forall>i . (iterate i\<cdot>F1\<cdot>x) \<sqsubseteq> (iterate i\<cdot>F2\<cdot>y)"
by (simp add: assms(1) assms(2) monofun_cfun)
lemma iter_fixg_chain: assumes "x \<sqsubseteq> F\<cdot>x"
shows "chain (\<lambda>i. iterate i\<cdot>F\<cdot>x)"
apply (rule chainI)
by (metis assms cont_pref_eq1I iterate_Suc2)
lemma lub_iter_fixg_mono_req: assumes "F1 \<sqsubseteq> F2" and "x \<sqsubseteq> F1\<cdot>x" and "x\<sqsubseteq>F2\<cdot>x"
shows "(\<Squnion>i. iterate i\<cdot>F1\<cdot>x) \<sqsubseteq> (\<Squnion>i. iterate i\<cdot>F2\<cdot>x)"
proof -
have "\<forall>i. (iterate i\<cdot>F1\<cdot>x) \<sqsubseteq> (iterate i\<cdot>F2\<cdot>x)"
by (simp add: iter_fixg_mono2 assms(1) assms(2))
then show ?thesis
by (simp add: lub_mono assms iter_fixg_mono2 iter_fixg_chain)
qed
(*cont (\<lambda> F. fixg x F)*)
lemma fixg_pre:"x \<sqsubseteq> (if x \<sqsubseteq> F\<cdot>x then \<Squnion>i. iterate i\<cdot>F\<cdot>x else x)"
proof(cases "x\<sqsubseteq>F\<cdot>x")
case True
then show ?thesis
proof -
have "\<And>n. iterate n\<cdot>F\<cdot>x \<sqsubseteq> (\<Squnion>n. iterate n\<cdot>F\<cdot>x)"
using True is_ub_thelub iter_fixg_chain by blast
then have "x \<sqsubseteq> (\<Squnion>n. iterate n\<cdot>F\<cdot>x)"
by (metis iterate_0)
then show ?thesis
using True by presburger
qed
next
case False
then show ?thesis
by simp
qed
lemma fixg_mono[simp]:"monofun (\<lambda>F. if x \<sqsubseteq> F\<cdot>x then \<Squnion>i. iterate i\<cdot>F\<cdot>x else x)"
proof(rule monofunI)
fix xa::"'a \<rightarrow> 'a" and y::"'a \<rightarrow> 'a"
assume a1:"xa \<sqsubseteq> y"
show "(if x \<sqsubseteq> xa\<cdot>x then \<Squnion>i. iterate i\<cdot>xa\<cdot>x else x) \<sqsubseteq> (if x \<sqsubseteq> y\<cdot>x then \<Squnion>i. iterate i\<cdot>y\<cdot>x else x)"
proof(cases "x \<sqsubseteq> xa \<cdot>x")
case True
then have "x \<sqsubseteq> y\<cdot>x"
using a1 cfun_below_iff rev_below_trans by blast
then show ?thesis
by (simp add: True a1 lub_iter_fixg_mono_req)
next
case False
then show ?thesis
by(simp add: fixg_pre)
qed
qed
lemma fixg_cont[simp]:assumes "\<And> y z. x\<sqsubseteq>z \<and> y\<sqsubseteq>z \<longrightarrow> x\<sqsubseteq>y" shows "cont (\<lambda>F. if x \<sqsubseteq> F\<cdot>x then \<Squnion>i. iterate i\<cdot>F\<cdot>x else x)"
proof(rule Cont.contI2, simp)
fix Y:: "nat \<Rightarrow> ('a \<rightarrow> 'a)"
assume a1:"chain Y"
assume a2:"chain (\<lambda>i. if x \<sqsubseteq> Y i\<cdot>x then \<Squnion>ia. iterate ia\<cdot>(Y i)\<cdot>x else x)"
show "(if x \<sqsubseteq> (\<Squnion>i. Y i)\<cdot>x then \<Squnion>i. iterate i\<cdot>(\<Squnion>i. Y i)\<cdot>x else x) \<sqsubseteq> (\<Squnion>i. if x \<sqsubseteq> Y i\<cdot>x then \<Squnion>ia. iterate ia\<cdot>(Y i)\<cdot>x else x)"
proof(cases "x \<sqsubseteq> (\<Squnion>i. Y i)\<cdot>x")
case True
then show ?thesis
proof(cases "\<exists>i. x \<sqsubseteq> (Y i)\<cdot>x")
case True
then have h1:"\<forall>i. x \<sqsubseteq> Y i \<cdot>x"
by (meson a1 assms cfun_below_iff is_ub_thelub rev_below_trans)
then have h2:"(\<Squnion>i. if x \<sqsubseteq> Y i\<cdot>x then \<Squnion>ia. iterate ia\<cdot>(Y i)\<cdot>x else x) = (\<Squnion>i.\<Squnion>ia. iterate ia\<cdot>(Y i)\<cdot>x)"
by simp
have h3:"(if x \<sqsubseteq> (\<Squnion>i. Y i)\<cdot>x then \<Squnion>i. iterate i\<cdot>(\<Squnion>i. Y i)\<cdot>x else x) = (\<Squnion>i. iterate i\<cdot>(\<Squnion>ia. Y ia)\<cdot>x)"
by (meson True a1 below_trans cfun_below_iff is_ub_thelub)
have h4:"(\<Squnion>i. iterate i\<cdot>(\<Squnion>ia. Y ia)\<cdot>x) = (\<Squnion>i.\<Squnion>ia. iterate i\<cdot>( Y ia)\<cdot>x)"
by(simp add: a1 contlub_cfun_fun contlub_cfun_arg)
show ?thesis
proof-
show "(if x \<sqsubseteq> (\<Squnion>i. Y i)\<cdot>x then \<Squnion>i. iterate i\<cdot>(\<Squnion>i. Y i)\<cdot>x else x) \<sqsubseteq> (\<Squnion>i. if x \<sqsubseteq> Y i\<cdot>x then \<Squnion>ia. iterate ia\<cdot>(Y i)\<cdot>x else x)"
by(simp_all add: h2 h3 h4 diag_lub a1 h1 iter_fixg_chain)
qed
next
case False
have h1:"(\<Squnion>i. Y i)\<cdot>x = x"
proof-
have "x \<sqsubseteq> (\<Squnion>i. Y i)\<cdot>x"
by(simp add: True)
have "\<forall>i. Y i\<cdot>x \<sqsubseteq> x"
using False True a1 assms cfun_below_iff is_ub_thelub by blast
then show "(\<Squnion>i. Y i)\<cdot>x = x"
by (metis True a1 below_antisym ch2ch_Rep_cfunL contlub_cfun_fun lub_below_iff)
qed
have "\<forall>i. iterate i\<cdot>(\<Squnion>i. Y i)\<cdot>x = x"
proof(auto)
fix i::nat
show "iterate i\<cdot>(\<Squnion>i. Y i)\<cdot>x = x"
proof(induction i)
case 0
then show ?case
by simp
next
case (Suc i)
then show ?case
by (simp add: h1)
qed
qed
then have "(\<Squnion>i. iterate i\<cdot>(\<Squnion>i. Y i)\<cdot>x) = x"
by auto
then show ?thesis
using False by auto
qed
next
case False
then show ?thesis
using a2 below_lub fixg_pre by fastforce
qed
qed
lemma fixg_apply: assumes "\<And> y z. x\<sqsubseteq>z \<and> y\<sqsubseteq>z \<longrightarrow> x\<sqsubseteq>y"
shows "fixg x\<cdot>F = (if x \<sqsubseteq> F\<cdot>x then \<Squnion>i. iterate i\<cdot>F\<cdot>x else x)"
by (simp add: assms fixg_def)
(*fixg gives the least fixpoint, if x \<sqsubseteq> F\<cdot>x*)
lemma fixg_fix:assumes" x \<sqsubseteq> F\<cdot>x " and "\<And>y z. x \<sqsubseteq> z \<and> y \<sqsubseteq> z \<longrightarrow> x \<sqsubseteq> y"
shows "fixg x\<cdot> F = F\<cdot>(fixg x\<cdot>F)"
apply (simp add: fixg_def assms)
apply (subst lub_range_shift [of _ 1, symmetric])
apply(rule chainI)
apply(subst iterate_Suc2)
apply(rule Cfun.monofun_cfun_arg, simp add: assms)
apply (subst contlub_cfun_arg)
apply(rule chainI)
apply(subst iterate_Suc2)
apply(rule Cfun.monofun_cfun_arg, simp add: assms)
by simp
lemma fixg_least_below:assumes" x \<sqsubseteq> F\<cdot>x " and "\<And>y z. x \<sqsubseteq> z \<and> y \<sqsubseteq> z \<longrightarrow> x \<sqsubseteq> y" and "x \<sqsubseteq> y"
shows "F\<cdot>y \<sqsubseteq> y \<Longrightarrow> (fixg x\<cdot> F) \<sqsubseteq> y"
apply (simp add: fixg_def assms)
apply (rule lub_below)
apply(rule chainI)
apply(subst iterate_Suc2)
apply(rule Cfun.monofun_cfun_arg, simp add: assms)
apply (induct_tac i)
apply (simp add: assms)
apply (simp add: assms(1))
apply (erule rev_below_trans)
by (erule monofun_cfun_arg)
lemma fixg_least_fix:assumes"F\<cdot>y = y" and "x \<sqsubseteq> y" and "x \<sqsubseteq> F\<cdot>x" and "\<And>y z. x \<sqsubseteq> z \<and> y \<sqsubseteq> z \<longrightarrow> x \<sqsubseteq> y"
shows "fixg x\<cdot> F \<sqsubseteq> y"
by(subst fixg_least_below, simp_all add: assms)
end
|
# one sided test
sigma=2.4                # assumed standard deviation
delta=1.5                # difference to be detected
z_alpha=qnorm(0.05)      # standard normal quantile for significance level 0.05
z_beta=qnorm(0.10)       # standard normal quantile for power 1 - 0.10 = 0.90
sample_size=2*(sigma^2)*((z_alpha+z_beta)^2)/(delta^2)   # required sample size
print(sample_size) |
module MaybeFin
import Data.Fin
%default total
%access public
data MaybeFin : Nat -> Type where
  NoFin : MaybeFin Z
  SomeFin : Fin (S k) -> MaybeFin (S k)
instance Cast (MaybeFin n) (Maybe (Fin n)) where
  cast NoFin = Nothing
  cast (SomeFin x) = Just x
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
Declare(DWTper);
#F DWTper(<n>, <j>, [<h(z)>,<g(z)>])
#F DWTper(<n>, <j>, <L>, <V>)
#F returns a 2-channel <j>-stage discrete wavelet transform with periodic extensions
#F (circulant transforms) where the
#F low pass and the high pass analysis filters given by polynomials
#F [<h(z)>,<g(z)>]
#F
#F <L> = [<Lh>,<Lg>] - coefficient lists for h and g
#F <V> = [<vh>,<vg>] - valuation for h and g
#F
#F <n> - size of the output of the reconstructed sequence
#F <j> - number of filter bank stages
Class(DWTper, NonTerminal, rec(
abbrevs := [
function(n,j,M)
local S,L,V, j;
if IsList(M[1]) then
S:=List([1,2], j->FillZeros([M[1][j],M[2][j]]));
V:=List(S, j->j[2]);
L:=List(S, j->j[1]);
return [n,j,L,V];
else
Checked(IsPosInt(n), Checked(ForAll(M,i->IsPolynomial(i)),true));
S:=List(M, i-> FillZeros(i));
V:=List(S, i->i[2]);
L:=List(S, i->i[1]);
return [n,j,L,V];
fi;
end,
function(n,j,L,V)
local S,V,L,i,j;
S:=List([1,2], j->FillZeros([L[j],V[j]]));
V:=List(S, j->j[2]);
L:=List(S, j->j[1]);
return [n,j,L,V];
end
],
dims := self >> [self.params[1], self.params[1]],
half1 := self >> let(
n := self.params[1],
l1 := self.params[3][1],
v1 := self.params[4][1],
DownSample(n,2,0) * Circulant(n,l1,v1).terminate() ),
half2 := self >> let(
n := self.params[1],
l2 := self.params[3][2],
v2 := self.params[4][2],
DownSample(n,2,0) * Circulant(n,l2,v2).terminate() ),
terminate := self >> let(
n := self.params[1],
j := self.params[2],
L := self.params[3],
V := self.params[4],
# DWT as filter bank stage + downsampling
res := Cond(j = 1, I(n),
DirectSum(DWTper(n/2,j-1,L,V).terminate(), I(n/2))) *
VStack(self.half1(), self.half2()),
When(self.transposed, res.transpose(), res)
),
LiftingScheme := self >> self.lifting(),
lifting := meth(self)
local n,j,LS,h,g,he,ho,ge,go;
n := self.params[1];
j := self.params[2];
h := DownsampleTwo([self.params[3][1],self.params[4][1]]);
g := DownsampleTwo([self.params[3][2],self.params[4][2]]);
# Lifting scheme does not converge for wavelets longer than 9
# (needs to be fixed)
if (Maximum(List(h, i-> Length(i[1])))>9 or
Maximum(List(g, i-> Length(i[1])))>9) then return [[]];fi;
he := Poly(h[1][1],h[1][2]);
ho := Poly(h[2][1],h[2][2]);
ge := Poly(g[1][1],g[1][2]);
go := Poly(g[2][1],g[2][2]);
LS := LiftingScheme([[he,ho],[ge,go]]);
return LS;
end,
isReal := True,
));
#F RuleFilter_DWT: (base case) DWT -> Mat,
#F Computes filter by definition
#F
RulesFor(DWTper, rec(
#F RuleFilt_Mallat_2:
#F
#F DWTper(n,j,[h,g])->DWTper(n/2,j-1,[h,g]),DWTper(n,1,[h,g])
#F
#F Mallat rule recursive (single stage DWT)
#F The I(n/2) is inefficient because it copies the output
#F However, this rule allows application of Lifting, Polyphase, etc.
#F on DWTper(n,1,[h,g])
#F
DWTper_Mallat_2 := rec(
info := "DWTper(n,j,[h,g])->DWTper(n,j-1,[h,g])",
forTransposition := false,
isApplicable := P -> P[1] > 2 and P[1] mod 2 =0 and P[2] > 1,
allChildren := P -> [[ DWTper(P[1]/2,P[2]-1,P[3],P[4]), DWTper(P[1],1,P[3],P[4]) ]],
rule := (P, C) -> DirectSum(C[1],I(P[1]/2))*C[2]
),
#F DWTper_Mallat:
#F
#F DWTper(n,j,[h,g])->DWTper(n/2,j-1,[h,g]), DSCirculant
#F
DWTper_Mallat := rec(
info := "DWTper(n,j,[h,g])->DWTper(n/2,j-1,[h,g])",
forTransposition := false,
isApplicable := P -> P[1]>2 and P[1] mod 2 = 0,
allChildren := P -> let(
n := P[1],
dcirc1 := DSCirculant(n, P[3][1], P[4][1], 2, 0),
dcirc2 := DSCirculant(n, P[3][2], P[4][2], 2, 0),
When(P[2]=1, [[ dcirc1, dcirc2 ]],
[[ dcirc1, dcirc2, DWTper(P[1]/2, P[2]-1, P[3], P[4]) ]])),
rule := (P, C) -> When(P[2]=1, VStack(C[1], C[2]),
VStack(C[3]*C[1], C[2]))
),
#F DWTper_Polyphase:
#F
#F DWTper(n,1,[h,g])->[ [Circulant(he), Circulant(ho)],
#F [Circulant(ge), Circulant(go)] ]
#F
#F Single-stage periodic DWT into a matrix of circulants of downsampled filters
#F
DWTper_Polyphase := rec(
info := "DWTper(n,1,[h,g]) -> [[Circ(he), Circ(ho)], [Circ(ge), Circ(go)]]",
forTransposition := false,
isApplicable := P -> P[1]>2 and P[1] mod 2 =0 and P[2]=1,
allChildren := function(P)
local n,h,g,he,ho,ge,go;
n := P[1];
h := DownsampleTwo([P[3][1], P[4][1]]);
g := DownsampleTwo([P[3][2], P[4][2]]);
he := Circulant(n/2, h[1][1], h[1][2]);
ho := Circulant(n/2, h[2][1], h[2][2]);
ge := Circulant(n/2, g[1][1], g[1][2]);
go := Circulant(n/2, g[2][1], g[2][2]);
return [[ he, ho, ge, go ]];
end,
rule := (P, C) -> BlockMat( [[ C[1], C[2] ],
[ C[3], C[4] ]] ) * L(P[1],2)
),
#F RuleDWTper_Lifting:
#F
DWTper_Lifting := rec(
info := "DWTper(n,1,[h,g]) -> Lifting steps",
forTransposition := false,
isApplicable := ( L ) -> L[1]>2 and L[1] mod 2 =0 and L[2]=1,
allChildren := function ( P )
local n,j,LS,Lc,scheme,step,pol;
n := P[1];
LS := Copy(HashLookupWav(HashTableWavelets, [P[3],P[4]]));
Lc := List(LS, scheme->
List(scheme{[2..Length(scheme)]}, step->
let(pol := FillZeros(ListPoly(step)),
Circulant(n/2,pol[1],pol[2]))));
for i in [1..Length(LS)] do
# attach the indicator of the type of the first liftings step
Lc[i][1].lift :=LS[i][1];
# fuse in the constants/shifts in the last lifting step
#if (LS[i][1]=0 or LS[i][1]=-2) then LS[i][4]:=LS[i][4]*LS[i][3];
#else LS[i][4]:=LS[i][4]*LS[i][2];
#fi;
pol := FillZeros(ListPoly(LS[i][4]));
Lc[i][3] := Circulant(n/2,pol[1],pol[2]);
od;
return Lc;
end,
rule := function ( P, C, Nonterms )
local n, i, ind, b, l, M, first, last, ind0, ind1, lstep, last_ls;
n := P[1];
b := Nonterms[1].lift;
l:=Length(C);
ind0 := fTensor(fBase(2,0), fId(Rows(C[3])));
ind1 := fTensor(fBase(2,1), fId(Rows(C[3])));
lstep := (f,b) -> When(b=0,
LStep(Scat(ind0) * f * Gath(ind1)),
LStep(Scat(ind1) * f * Gath(ind0)));
M := When(b < 0,
SUM(Scat(ind0)*C[2]*Gath(ind1), Scat(ind1)*C[1]*Gath(ind0)),
SUM(Scat(ind0)*C[2]*Gath(ind0), Scat(ind1)*C[1]*Gath(ind1)));
b := (b+2) mod 2; # make b positive
for i in [1 .. l-2] do
M := M * lstep(C[i+2], b);
b := (b+1) mod 2;
od;
first := M.child(1);
M := Inplace(Compose(Drop(M.children(),1)));
return first * M * L(n,2);
end
)
));
|
<!-- dom:TITLE: PHY321: Conservative Forces, Examples and Theory -->
# PHY321: Conservative Forces, Examples and Theory
<!-- dom:AUTHOR: [Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/) at Department of Physics and Astronomy and Facility for Rare Ion Beams (FRIB), Michigan State University, USA & Department of Physics, University of Oslo, Norway -->
<!-- Author: -->
**[Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/)**, Department of Physics and Astronomy and Facility for Rare Ion Beams (FRIB), Michigan State University, USA and Department of Physics, University of Oslo, Norway
Date: **Mar 3, 2021**
Copyright 1999-2021, [Morten Hjorth-Jensen](http://mhjgit.github.io/info/doc/web/). Released under CC Attribution-NonCommercial 4.0 license
## Aims and Overarching Motivation
### Monday
Short repetition from last week about conservative forces. Discussion
of conditions for conservative forces and the Earth-Sun gravitational
force example. **Reading suggestion**: Taylor sections 4.3, 4.4 and 4.8.
### Wednesday
Potential curves and discussion of the Earth-Sun example, analytical and numerical considerations.
**Reading suggestions**: Taylor section 4.6, 4.8 and 4.9.
### Friday
Earth-Sun, conservative forces and potential energy.
**Reading suggestion**: Taylor sections 4.8 and 4.9.
If we get time, we start with harmonic oscillations and Hooke's law. **Reading suggestion**: Taylor section 5.1.
## One Figure to Rule All Forces (thx to Julie)
<!-- dom:FIGURE: [figslides/ClassicalMechanicsJulie.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p style="font-size: 0.9em"><i>Figure 1: </i></p><!-- end figure -->
## Repetition from last week: Work, Energy, Momentum and Conservation laws
Energy conservation is most convenient as a strategy for addressing
problems where time does not appear. For example, a particle goes
from position $x_0$ with speed $v_0$, to position $x_f$; what is its
new speed? However, it can also be applied to problems where time
does appear, such as in solving for the trajectory $x(t)$, or
equivalently $t(x)$.
## Energy Conservation
Energy is conserved in the case where the potential energy, $V(\boldsymbol{r})$, depends only on position, and not on time. The force is determined by $V$,
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
\boldsymbol{F}(\boldsymbol{r})=-\boldsymbol{\nabla} V(\boldsymbol{r}).
\label{_auto1} \tag{1}
\end{equation}
$$
## Conservative forces
We say a force is conservative if it satisfies the following conditions:
1. The force $\boldsymbol{F}$ acting on an object only depends on the position $\boldsymbol{r}$, that is $\boldsymbol{F}=\boldsymbol{F}(\boldsymbol{r})$.
2. For any two points $\boldsymbol{r}_1$ and $\boldsymbol{r}_2$, the work done by the force $\boldsymbol{F}$ on the displacement between these two points is independent of the path taken.
3. Finally, the **curl** of the force is zero, $\boldsymbol{\nabla}\times\boldsymbol{F}=0$ (a quick symbolic check of this condition for a gravity-like force is sketched right after this list).
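As a small aside (an addition to these notes), the curl condition is easy to check symbolically. The sketch below uses sympy and a gravity-like central force with the constant prefactor set to one; these choices are ours, made only for illustration.
```
import sympy as sp

x, y, z = sp.symbols('x y z', real=True)
r = sp.sqrt(x**2 + y**2 + z**2)
# gravity-like central force with the constant GM set to 1 (illustrative choice)
F = [-x/r**3, -y/r**3, -z/r**3]
# curl components: (dFz/dy - dFy/dz, dFx/dz - dFz/dx, dFy/dx - dFx/dy)
curl = [sp.diff(F[2], y) - sp.diff(F[1], z),
        sp.diff(F[0], z) - sp.diff(F[2], x),
        sp.diff(F[1], x) - sp.diff(F[0], y)]
print([sp.simplify(c) for c in curl])   # expected output: [0, 0, 0]
```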
## Forces and Potentials
The energy $E$ of a given system is defined as the sum of kinetic and potential energies,
$$
E=K+V(\boldsymbol{r}).
$$
We define the potential energy at a point $\boldsymbol{r}$ as the negative work done from a starting point $\boldsymbol{r}_0$ to a final point $\boldsymbol{r}$
$$
V(\boldsymbol{r})=-W(\boldsymbol{r}_0\rightarrow\boldsymbol{r})= -\int_{\boldsymbol{r}_0}^{\boldsymbol{r}}d\boldsymbol{r}'\boldsymbol{F}(\boldsymbol{r}').
$$
If the potential depends on the path taken between these two points there is no unique potential.
## Example (relevant for homework 5)
We study a classical electron which moves in the $x$-direction along a surface. The force from the surface is
$$
\boldsymbol{F}(x)=-F_0\sin{(\frac{2\pi x}{b})}\boldsymbol{e}_1.
$$
The constant $b$ represents the distance between atoms at the surface of the material, $F_0$ is a constant and $x$ is the position of the electron.
This is indeed a conservative force since it depends only on position
and its **curl** is zero, that is $\boldsymbol{\nabla}\times \boldsymbol{F}=0$. This means that energy is conserved and the
integral over the work done by the force is independent of the path
taken.
## Example Continues
Using the work-energy theorem we can find the work $W$ done when
moving an electron from a position $x_0$ to a final position $x$
through the integral
$$
W=\int_{x_0}^x \boldsymbol{F}(x')dx' = -\int_{x_0}^x F_0\sin{(\frac{2\pi x'}{b})} dx',
$$
which results in
$$
W=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right].
$$
Since this is related to the change in kinetic energy we have, with $v_0$ being the initial velocity at a time $t_0$,
$$
v = \pm\sqrt{\frac{2}{m}\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+v_0^2}.
$$
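To make this concrete, here is a small numerical sketch (an addition to these notes) that evaluates the speed from the expression above; the values of $F_0$, $b$, $m$, $x_0$ and $v_0$ are arbitrary illustrative choices, not physical constants.
```
import numpy as np

# arbitrary illustrative parameters (assumptions for this sketch)
F0, b, m = 1.0, 2.0, 1.0
x0, v0 = 0.0, 1.0

def v_of_x(x):
    # speed from energy conservation; valid where the radicand is non-negative
    work = (F0*b/(2*np.pi))*(np.cos(2*np.pi*x/b) - np.cos(2*np.pi*x0/b))
    return np.sqrt(2.0*work/m + v0**2)

print(v_of_x(0.5))
```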
## The potential energy from this example
The potential energy, due to energy conservation is
$$
V(x)=V(x_0)+\frac{1}{2}mv_0^2-\frac{1}{2}mv^2,
$$
with $v$ given by the velocity from above.
We can now, in order to find a more explicit expression for the
potential energy at a given value $x$, define a zero level value for
the potential. The potential is defined, using the work-energy
theorem, as
$$
V(x)=V(x_0)+\int_{x_0}^x (-F(x'))dx',
$$
and if you recall the definition of the indefinite integral, we can rewrite this as
$$
V(x)=\int (-F(x'))dx'+C,
$$
where $C$ is an undefined constant. The force is defined as the
negative gradient of the potential, and the undefined constant then
drops out: it does not affect the force we derive from the
potential.
We have then
$$
V(x)=V(x_0)-\int_{x_0}^x \boldsymbol{F}(x')dx',
$$
which results in
$$
V(x)=-\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+V(x_0).
$$
We can now define
$$
-\frac{F_0b}{2\pi}\cos{(\frac{2\pi x_0}{b})}=V(x_0),
$$
which gives
$$
V(x)=-\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}\right].
$$
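A quick numerical sanity check (again an addition to the notes, with arbitrary illustrative values for $F_0$ and $b$) confirms that the force is recovered as minus the derivative of this potential.
```
import numpy as np

# arbitrary illustrative parameters
F0, b = 1.0, 2.0

def V(x):
    return -(F0*b/(2*np.pi))*np.cos(2*np.pi*x/b)

def F(x):
    return -F0*np.sin(2*np.pi*x/b)

x = np.linspace(-2, 2, 401)
h = 1e-5
F_from_V = -(V(x + h) - V(x - h))/(2*h)   # central-difference estimate of -dV/dx
print(np.max(np.abs(F_from_V - F(x))))    # a tiny number, confirming F = -dV/dx
```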
## Force and Potential
We have defined work as the energy resulting from a net force acting
on an object (or several objects), that is
$$
W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})= \boldsymbol{F}(\boldsymbol{r})d\boldsymbol{r}.
$$
If we write out this for each component we have
$$
W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=\boldsymbol{F}(\boldsymbol{r})d\boldsymbol{r}=F_xdx+F_ydy+F_zdz.
$$
The work done from an initial position to a final one defines also the difference in potential energies
$$
W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=-\left[V(\boldsymbol{r}+d\boldsymbol{r})-V(\boldsymbol{r})\right].
$$
## Getting to $\boldsymbol{F}(\boldsymbol{r})=-\boldsymbol{\nabla} V(\boldsymbol{r})$
We can write out the differences in potential energies as
$$
V(\boldsymbol{r}+d\boldsymbol{r})-V(\boldsymbol{r})=V(x+dx,y+dy,z+dz)-V(x,y,z)=dV,
$$
and using the expression the differential of a multi-variable function $f(x,y,z)$
$$
df=\frac{\partial f}{\partial x}dx+\frac{\partial f}{\partial y}dy+\frac{\partial f}{\partial z}dz,
$$
we can write the expression for the work done as
$$
W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=-dV=-\left[\frac{\partial V}{\partial x}dx+\frac{\partial V}{\partial y}dy+\frac{\partial V}{\partial z}dz \right].
$$
## Final expression
Comparing the last equation with
$$
W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=F_xdx+F_ydy+F_zdz,
$$
we have
$$
F_xdx+F_ydy+F_zdz=-\left[\frac{\partial V}{\partial x}dx+\frac{\partial V}{\partial y}dy+\frac{\partial V}{\partial z}dz \right],
$$
leading to
$$
F_x=-\frac{\partial V}{\partial x},
$$
and
$$
F_y=-\frac{\partial V}{\partial y},
$$
and
$$
F_z=-\frac{\partial V}{\partial z},
$$
or just
$$
\boldsymbol{F}=-\frac{\partial V}{\partial x}\boldsymbol{e}_1-\frac{\partial V}{\partial y}\boldsymbol{e}_2-\frac{\partial V}{\partial z}\boldsymbol{e}_3=-\boldsymbol{\nabla}V(\boldsymbol{r}).
$$
And this connection is the one we wanted to show.
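If you want to see this relation in action, the following sympy sketch (an addition to the notes, using an arbitrary sample potential of our own choosing) computes $\boldsymbol{F}=-\boldsymbol{\nabla}V$ component by component.
```
import sympy as sp

x, y, z = sp.symbols('x y z', real=True)
# any differentiable potential will do; this one is an arbitrary illustration
V = x**2*y + sp.sin(z)
F = [-sp.diff(V, var) for var in (x, y, z)]   # F = -grad V, component by component
print(F)   # [-2*x*y, -x**2, -cos(z)]
```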
## Net Energy
The net energy, $E=V+K$ where $K$ is the kinetic energy, is then conserved,
$$
\begin{eqnarray}
\frac{d}{dt}(K+V)&=&\frac{d}{dt}\left(\frac{m}{2}(v_x^2+v_y^2+v_z^2)+V(\boldsymbol{r})\right)\\
\nonumber
&=&m\left(v_x\frac{dv_x}{dt}+v_y\frac{dv_y}{dt}+v_z\frac{dv_z}{dt}\right)
+\partial_xV\frac{dx}{dt}+\partial_yV\frac{dy}{dt}+\partial_zV\frac{dz}{dt}\\
\nonumber
&=&v_xF_x+v_yF_y+v_zF_z-F_xv_x-F_yv_y-F_zv_z=0.
\end{eqnarray}
$$
## In Vector Notation
The same proof can be written more compactly with vector notation,
$$
\begin{eqnarray}
\frac{d}{dt}\left(\frac{m}{2}v^2+V(\boldsymbol{r})\right)
&=&m\boldsymbol{v}\cdot\dot{\boldsymbol{v}}+\boldsymbol{\nabla} V(\boldsymbol{r})\cdot\dot{\boldsymbol{r}}\\
\nonumber
&=&\boldsymbol{v}\cdot\boldsymbol{F}-\boldsymbol{F}\cdot\boldsymbol{v}=0.
\end{eqnarray}
$$
Inverting the expression for kinetic energy,
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
v=\sqrt{2K/m}=\sqrt{2(E-V)/m},
\label{_auto2} \tag{2}
\end{equation}
$$
allows one to solve for the one-dimensional trajectory $x(t)$, by finding $t(x)$,
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
t=\int_{x_0}^x \frac{dx'}{v(x')}=\int_{x_0}^x\frac{dx'}{\sqrt{2(E-V(x'))/m}}.
\label{_auto3} \tag{3}
\end{equation}
$$
Note this would be much more difficult in higher dimensions, because
you would have to determine which points, $x,y,z$, the particles might
reach in the trajectory, whereas in one dimension you can typically
tell by simply seeing whether the kinetic energy is positive at every
point between the old position and the new position.
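As a one-dimensional illustration (an addition to the notes), the integral for $t(x)$ can be evaluated numerically. The harmonic potential and parameter values below are arbitrary choices, picked only because the analytical answer $t(x)=\arcsin(x)$ is known for comparison.
```
import numpy as np
from scipy.integrate import quad

# assumed illustrative problem: V(x) = 0.5*k*x^2 with m = k = 1
m, k = 1.0, 1.0
E = 0.5          # total energy; turning points at x = -1 and x = +1
x0 = 0.0         # starting position

def integrand(x):
    return 1.0/np.sqrt(2.0*(E - 0.5*k*x**2)/m)

# time to go from x0 to x = 0.99 (staying away from the turning point where v -> 0)
t, err = quad(integrand, x0, 0.99)
print(t)   # close to arcsin(0.99), roughly 1.429, for these parameters
```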
## The Earth-Sun system
We will now venture into a study of a system which is energy
conserving. The aim is to see whether (since it is not possible to solve
the general equations analytically) we can develop stable numerical
algorithms whose results we can trust!
We solve the equations of motion numerically. We will also compute
quantities like the energy numerically.
We start with a simpler case first, the Earth-Sun system in two dimensions only. The gravitational force $F_G$ on the earth from the sun is
$$
\boldsymbol{F}_G=-\frac{GM_{\odot}M_E}{r^3}\boldsymbol{r},
$$
where $G$ is the gravitational constant,
$$
M_E=6\times 10^{24}\,\mathrm{kg},
$$
the mass of Earth,
$$
M_{\odot}=2\times 10^{30}\,\mathrm{kg},
$$
the mass of the Sun and
$$
r=1.5\times 10^{11}\mathrm{m},
$$
is the distance between Earth and the Sun. The latter defines what we call an astronomical unit **AU**.
## The Earth-Sun system, Newton's Laws
From Newton's second law we have then for the $x$ direction
$$
\frac{d^2x}{dt^2}=\frac{F_{x}}{M_E},
$$
and
$$
\frac{d^2y}{dt^2}=\frac{F_{y}}{M_E},
$$
for the $y$ direction.
Here we will use that $x=r\cos{(\theta)}$, $y=r\sin{(\theta)}$ and
$$
r = \sqrt{x^2+y^2}.
$$
We can rewrite
$$
F_{x}=-\frac{GM_{\odot}M_E}{r^2}\cos{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}x,
$$
and
$$
F_{y}=-\frac{GM_{\odot}M_E}{r^2}\sin{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}y,
$$
for the $y$ direction.
## The Earth-Sun system, rewriting the Equations
We can rewrite these two equations
$$
F_{x}=-\frac{GM_{\odot}M_E}{r^2}\cos{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}x,
$$
and
$$
F_{y}=-\frac{GM_{\odot}M_E}{r^2}\sin{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}y,
$$
as four first-order coupled differential equations
$$
\frac{dv_x}{dt}=-\frac{GM_{\odot}}{r^3}x,
$$
and
$$
\frac{dv_y}{dt}=-\frac{GM_{\odot}}{r^3}y,
$$
and
$$
\frac{dx}{dt}=v_x,
$$
and
$$
\frac{dy}{dt}=v_y.
$$
## Building a code for the solar system, final coupled equations
The four coupled differential equations
$$
\frac{dv_x}{dt}=-\frac{GM_{\odot}}{r^3}x,
$$
and
$$
\frac{dv_y}{dt}=-\frac{GM_{\odot}}{r^3}y,
$$
and
$$
\frac{dx}{dt}=v_x,
$$
and
$$
\frac{dy}{dt}=v_y,
$$
can be turned into dimensionless equations or we can introduce astronomical units with $1$ AU = $1.5\times 10^{11}$ m.
Using the equations from circular motion (with $r =1\mathrm{AU}$)
$$
\frac{M_E v^2}{r} = F = \frac{GM_{\odot}M_E}{r^2},
$$
we have
$$
GM_{\odot}=v^2r,
$$
and using that the velocity of Earth (assuming circular motion) is
$v = 2\pi r/\mathrm{yr}=2\pi\mathrm{AU}/\mathrm{yr}$, we have
$$
GM_{\odot}= v^2r = 4\pi^2 \frac{(\mathrm{AU})^3}{\mathrm{yr}^2}.
$$
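A quick check (an addition to the notes, using approximate values for $G$, $M_{\odot}$, the astronomical unit and the year) shows that the two ways of writing $GM_{\odot}$ agree numerically.
```
import numpy as np

# approximate SI values (assumptions for this check only)
G = 6.674e-11          # m^3 kg^-1 s^-2
M_sun = 2.0e30         # kg
AU = 1.5e11            # m
yr = 3.156e7           # s

print(G*M_sun)                      # about 1.33e20 m^3/s^2
print(4*np.pi**2 * AU**3 / yr**2)   # about 1.34e20 m^3/s^2, consistent with GM_sun
```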
## Building a code for the solar system, discretized equations
The four coupled differential equations can then be discretized using Euler's method as (with step length $h$)
$$
v_{x,i+1}=v_{x,i}-h\frac{4\pi^2}{r_i^3}x_i,
$$
and
$$
v_{y,i+1}=v_{y,i}-h\frac{4\pi^2}{r_i^3}y_i,
$$
and
$$
x_{i+1}=x_i+hv_{x,i},
$$
and
$$
y_{i+1}=y_i+hv_{y,i},
$$
## Code Example with Euler's Method
The code here implements Euler's method for the Earth-Sun system using a more compact way of representing the vectors. Alternatively, you could have spelled out all the variables $v_x$, $v_y$, $x$ and $y$ as one-dimensional arrays.
```
%matplotlib inline
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
    os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
    os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
    os.makedirs(DATA_ID)
def image_path(fig_id):
    return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
    return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
    plt.savefig(image_path(fig_id) + ".png", format='png')
DeltaT = 0.001
#set up arrays
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using Euler's method
for i in range(n-1):
    # Set up the acceleration
    # Here you could have defined your own function for this
    rabs = sqrt(sum(r[i]*r[i]))
    a = -Fourpi2*r[i]/(rabs**3)
    # update velocity, time and position using Euler's forward method
    v[i+1] = v[i] + DeltaT*a
    r[i+1] = r[i] + DeltaT*v[i]
    t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_ylabel('y[AU]')
ax.set_xlabel('x[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunEuler")
plt.show()
```
## Problems with Euler's Method
We notice here that Euler's method doesn't give a stable orbit. It
means that we cannot trust Euler's method. In a deeper way, as we will
see in homework 5, Euler's method does not conserve energy. It is an
example of an integrator which is not
[symplectic](https://en.wikipedia.org/wiki/Symplectic_integrator).
Here we present thus two methods, which with simple changes allow us to avoid these pitfalls. The simplest possible extension is the so-called Euler-Cromer method.
The changes we need to make to our code are indeed marginal here.
We need simply to replace
```
r[i+1] = r[i] + DeltaT*v[i]
```
in the above code with the velocity at the new time $t_{i+1}$
```
r[i+1] = r[i] + DeltaT*v[i+1]
```
With this simple change we get stable orbits.
Below we derive the Euler-Cromer method as well as one of the most widely used algorithms for solving the above type of problems, the so-called Velocity-Verlet method.
## Deriving the Euler-Cromer Method
Let us repeat Euler's method.
We have a differential equation
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
y'(t_i)=f(t_i,y_i)
\label{_auto4} \tag{4}
\end{equation}
$$
and if we truncate at the first derivative, we have from the Taylor expansion
<!-- Equation labels as ordinary links -->
<div id="eq:euler"></div>
$$
\begin{equation}
y_{i+1}=y(t_i) + (\Delta t) f(t_i,y_i) + O(\Delta t^2), \label{eq:euler} \tag{5}
\end{equation}
$$
which when complemented with $t_{i+1}=t_i+\Delta t$ forms
the algorithm for the well-known Euler method.
Note that at every step we make an approximation error
of the order of $O(\Delta t^2)$, however the total error is the sum over all
steps $N=(b-a)/(\Delta t)$ for $t\in [a,b]$, yielding thus a global error which goes like
$NO(\Delta t^2)\approx O(\Delta t)$.
To make Euler's method more precise we can obviously
decrease $\Delta t$ (increase $N$), but this can lead to loss of numerical precision.
Euler's method is not recommended for precision calculation,
although it is handy to use in order to get a first
impression of what a solution may look like.
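To illustrate the $O(\Delta t)$ global error estimate above (this check is an addition to the notes), one can run Euler's method on the simple test equation $y'=-y$ with known solution $e^{-t}$ and watch the error scale roughly linearly with the step size.
```
import numpy as np

# global-error check for Euler's method on y' = -y, y(0) = 1, exact solution exp(-t)
def euler_error(dt, tfinal=1.0):
    n = int(round(tfinal/dt))
    y = 1.0
    for _ in range(n):
        y += dt*(-y)
    return abs(y - np.exp(-tfinal))

for dt in [0.1, 0.05, 0.025, 0.0125]:
    print(dt, euler_error(dt))   # the error roughly halves with dt: global error ~ O(dt)
```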
Euler's method is asymmetric in time, since it uses information about the derivative at the beginning
of the time interval. This means that we evaluate the position $y_1$ using the velocity
$v_0$. A simple variation is to determine $y_{n+1}$ using the velocity
$v_{n+1}$, that is (in a slightly more generalized form)
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
y_{n+1}=y_{n}+(\Delta t) v_{n+1}+O(\Delta t^2)
\label{_auto5} \tag{6}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
v_{n+1}=v_{n}+(\Delta t) a_{n}+O(\Delta t^2).
\label{_auto6} \tag{7}
\end{equation}
$$
The acceleration $a_n$ is a function of $a_n(y_n, v_n, t_n)$ and needs to be evaluated
as well. This is the Euler-Cromer method.
**Exercise**: go back to the above code with Euler's method and add the Euler-Cromer method.
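One possible solution to the exercise (a sketch added here, reusing the same parameters and initial conditions as the Euler code above) only requires updating the velocity first and then using the new velocity in the position update:
```
import numpy as np
from math import ceil, pi, sqrt

DeltaT = 0.001
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
r[0] = np.array([1.0,0.0])
v[0] = np.array([0.0,2*pi])
Fourpi2 = 4*pi*pi
# Euler-Cromer: update the velocity first, then use the *new* velocity for the position
for i in range(n-1):
    rabs = sqrt(sum(r[i]*r[i]))
    a = -Fourpi2*r[i]/(rabs**3)
    v[i+1] = v[i] + DeltaT*a
    r[i+1] = r[i] + DeltaT*v[i+1]   # v[i+1] instead of v[i] is the only change
    t[i+1] = t[i] + DeltaT
print(sqrt(sum(r[n-1]*r[n-1])))   # stays close to 1 AU, indicating a stable orbit
```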
## Deriving the Velocity-Verlet Method
Let us stay with $x$ (position) and $v$ (velocity) as the quantities we are interested in.
We have the Taylor expansion for the position given by
$$
x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_i+O((\Delta t)^3).
$$
The corresponding expansion for the velocity is
$$
v_{i+1} = v_i+(\Delta t)a_i+\frac{(\Delta t)^2}{2}v^{(2)}_i+O((\Delta t)^3).
$$
Via Newton's second law we have normally an analytical expression for the derivative of the velocity, namely
$$
a_i= \frac{d^2 x}{dt^2}\vert_{i}=\frac{d v}{dt}\vert_{i}= \frac{F(x_i,v_i,t_i)}{m}.
$$
If we add to this the corresponding expansion for the derivative of the velocity
$$
v^{(1)}_{i+1} = a_{i+1}= a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2),
$$
and retain only terms up to the second derivative of the velocity since our error goes as $O(h^3)$, we have
$$
(\Delta t)v^{(2)}_i\approx a_{i+1}-a_i.
$$
We can then rewrite the Taylor expansion for the velocity as
$$
v_{i+1} = v_i+\frac{(\Delta t)}{2}\left( a_{i+1}+a_{i}\right)+O((\Delta t)^3).
$$
## The velocity Verlet method
Our final equations for the position and the velocity become then
$$
x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_{i}+O((\Delta t)^3),
$$
and
$$
v_{i+1} = v_i+\frac{(\Delta t)}{2}\left(a_{i+1}+a_{i}\right)+O((\Delta t)^3).
$$
Note well that the term $a_{i+1}$ depends on the position at $x_{i+1}$. This means that you need to calculate
the position at the updated time $t_{i+1}$ before computing the next velocity. Note also that the derivative of the velocity at the time
$t_i$ used in the updating of the position can be reused in the calculation of the velocity update as well.
## Adding the Velocity-Verlet Method
We can now easily add the Verlet method to our original code as
```
DeltaT = 0.01
#set up arrays
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
    # Set up the acceleration; note that we need the norm of the position vector
    # Here you could have defined your own function for this
    rabs = sqrt(sum(r[i]*r[i]))
    a = -Fourpi2*r[i]/(rabs**3)
    # update position, then acceleration at the new position, then velocity (Velocity-Verlet)
    r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
    rabs = sqrt(sum(r[i+1]*r[i+1]))
    anew = -4*(pi**2)*r[i+1]/(rabs**3)
    v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
    t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('y[AU]')
ax.set_xlabel('x[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunVV")
plt.show()
```
You can easily generalize the calculation of the forces by defining a function
which takes in as input the various variables. We leave this as a challenge to you.
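A minimal sketch of such a function (an addition to the notes; the name `acceleration` and its signature are our own choices) could look as follows.
```
import numpy as np

def acceleration(r, fourpi2=4*np.pi*np.pi):
    # gravitational acceleration on Earth at position r (in AU), in AU/yr^2
    rabs = np.sqrt(np.sum(r*r))
    return -fourpi2*r/rabs**3

# inside the Velocity-Verlet loop one would then call, for instance,
# a = acceleration(r[i]) and anew = acceleration(r[i+1])
print(acceleration(np.array([1.0, 0.0])))   # [-4*pi^2, 0] at 1 AU on the x-axis
```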
## Additional Material: Link between Line Integrals and Conservative forces
The concept of line integrals plays an important role in our discussion of energy conservation,
our definition of potentials and conservative forces.
Let us remind ourselves of some of the basic elements (most of you may
have seen this in a calculus course under the general topic of vector
fields).
We define an integration path $C$, that is we integrate
from a point $\boldsymbol{r}_1$ to a point $\boldsymbol{r}_2$.
Let us assume that the path $C$ is represented by an arc length $s$. In three dimension we have the following representation of $C$
$$
\boldsymbol{r}(s)=x(s)\boldsymbol{e}_1+y(s)\boldsymbol{e}_2+z(s)\boldsymbol{e}_3,
$$
then our integral of a function $f(x,y,z)$ along the path $C$ is defined as
$$
\int_Cf(x,y,z)ds=\int_a^bf\left(x(s),y(s),z(s)\right)ds,
$$
where the initial and final points are $a$ and $b$, respectively.
## Exactness and Independence of Path
With the definition of a line integral, we can in turn set up the
theorem of independence of integration path.
Let us define
$f(x,y,z)$, $g(x,y,z)$ and $h(x,y,z)$ to be functions which are
defined and continuous in a domain $D$ in space. Then a line integral
like the above is said to be independent of path in $D$, if for every
pair of endpoints $a$ and $b$ in $D$ the value of the integral is the
same for all paths $C$ in $D$ starting from a point $a$ and ending in
a point $b$. The integral depends thus only on the integration limits
and not on the path.
## Differential Forms
An expression of the form
$$
fdx+gdy+hdz,
$$
where $f$, $g$ and $h$ are functions defined in $D$, is called a first-order differential form
in three variables.
The form is said to be exact if it is the differential
$$
du= \frac{\partial u}{\partial x}dx+\frac{\partial u}{\partial y}dy+\frac{\partial u}{\partial z}dz,
$$
of a differentiable function $u(x,y,z)$ everywhere in $D$, that is
$$
du=fdx+gdy+hdz.
$$
Equivalently, the form is exact if and only if we can set
$$
f=\frac{\partial u}{\partial x},
$$
and
$$
g=\frac{\partial u}{\partial y},
$$
and
$$
h=\frac{\partial u}{\partial z},
$$
everywhere in the domain $D$.
## In Vector Language
In vector language the above means that the differential form
$$
fdx+gdy+hdz,
$$
is exact in $D$ if and only if the vector function (it could be a force, or velocity, acceleration or other vectors we encounter in this course)
$$
\boldsymbol{F}=f\boldsymbol{e}_1+g\boldsymbol{e}_2+h\boldsymbol{e}_3,
$$
is the gradient of a function $u(x,y,z)$
$$
\boldsymbol{F}=\boldsymbol{\nabla}u=\frac{\partial u}{\partial x}\boldsymbol{e}_1+\frac{\partial u}{\partial y}\boldsymbol{e}_2+\frac{\partial u}{\partial z}\boldsymbol{e}_3.
$$
## Path Independence Theorem
If this is the case, we can state the path independence theorem: with functions
$f(x,y,z)$, $g(x,y,z)$ and $h(x,y,z)$ that fulfill the above
exactness conditions, the line integral
$$
\int_C\left(fdx+gdy+hdz\right),
$$
is independent of path in $D$ if and only if the differential form under the integral sign is exact in $D$.
This is the path independence theorem.
We will not give a proof of the theorem. You can find this in any vector analysis chapter in a mathematics textbook.
We note however that the path integral from a point $p$ to a final point $q$ is given by
$$
\int_p^q\left(fdx+gdy+hdz\right)=\int_p^q\left(\frac{\partial u}{\partial x}dx+\frac{\partial u}{\partial y}dy+\frac{\partial u}{\partial z}dz\right)=\int_p^qdu.
$$
Assume now that we have a dependence on a variable $s$ for $x$, $y$ and $z$. We have then
$$
\int_p^qdu=\int_{s_1}^{s_2}\frac{du}{ds}ds = u(x(s),y(s),z(s))\vert_{s=s_1}^{s=s_2}=u(q)-u(p).
$$
This last equation
$$
\int_p^q\left(fdx+gdy+hdz\right)=u(q)-u(p),
$$
is the analogue of the usual formula
$$
\int_a^bf(x)dx=F(x)\vert_a^b=F(b)-F(a),
$$
with $F'(x)=f(x)$.
## Work-Energy Theorem again
We recall that the work done by a force
$\boldsymbol{F}=f\boldsymbol{e}_1+g\boldsymbol{e}_2+h\boldsymbol{e}_3$ over a displacement $d\boldsymbol{r}$
is
$$
W=\int_C\boldsymbol{F}d\boldsymbol{r}=\int_C(fdx+gdy+hdz).
$$
From the path independence theorem, we know that this has to depend on
the two endpoints only. This is the case if and only if the force $\boldsymbol{F}$ is the gradient of a scalar
function $u$. We call this scalar function, which depends only on the
positions $x,y,z$, the potential energy $V(x,y,z)=V(\boldsymbol{r})$.
We have thus
$$
\boldsymbol{F}(\boldsymbol{r})\propto \boldsymbol{\nabla}V(\boldsymbol{r}),
$$
and we define this as
$$
\boldsymbol{F}(\boldsymbol{r})= -\boldsymbol{\nabla}V(\boldsymbol{r}).
$$
Such a force is called **a conservative force**. The above expression can be used to demonstrate
energy conservation.
## Additional Theorem
Finally we can define the criterion for exactness and independence of
path. This theorem states that if $f(x,y,z)$, $g(x,y,z)$ and
$h(x,y,z)$ are continuous functions with continuous first partial derivatives in the domain $D$,
then the line integral
$$
\int_C\left(fdx+gdy+hdz\right),
$$
is independent of path in $D$ when
$$
\frac{\partial h}{\partial y}=\frac{\partial g}{\partial z},
$$
and
$$
\frac{\partial f}{\partial z}=\frac{\partial h}{\partial x},
$$
and
$$
\frac{\partial g}{\partial x}=\frac{\partial f}{\partial y}.
$$
This leads to the **curl** of $\boldsymbol{F}$ being zero
$$
\boldsymbol{\nabla}\times\boldsymbol{F}=\boldsymbol{\nabla}\times\left(-\boldsymbol{\nabla}V(\boldsymbol{r})\right)=0!
$$
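For the potentials we encounter in this course it is straightforward to verify these conditions symbolically. The following is a small illustrative check (not part of the derivation above) that the curl of $-\boldsymbol{\nabla}V$ vanishes; the specific form of $V$ below is just an arbitrary example.
```
# small symbolic check that curl(-grad V) = 0 for a sample potential V
import sympy as sp

x, y, z = sp.symbols('x y z')
V = x**2*y + sp.sin(y)*z + z**3                 # arbitrary sample potential (illustration only)
f, g, h = (-sp.diff(V, s) for s in (x, y, z))   # components of F = -grad V

curl = (sp.diff(h, y) - sp.diff(g, z),
        sp.diff(f, z) - sp.diff(h, x),
        sp.diff(g, x) - sp.diff(f, y))
print([sp.simplify(c) for c in curl])           # prints [0, 0, 0]
```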
## Summarizing
A conservative force $\boldsymbol{F}$ is defined as the negative gradient of a scalar potential which depends only on the position,
$$
\boldsymbol{F}(\boldsymbol{r})= -\boldsymbol{\nabla}V(\boldsymbol{r}).
$$
This leads to conservation of energy and a path independent line integral as long as the curl of the force is zero, that is
$$
\boldsymbol{\nabla}\times\boldsymbol{F}=\boldsymbol{\nabla}\times\left(-\boldsymbol{\nabla}V(\boldsymbol{r})\right)=0.
$$
|
\documentclass[letterpaper,10pt,english]{hitec}
%\usepackage{cmap}
%\usepackage[T1]{fontenc}
\usepackage{amsmath,amssymb,amstext}
\usepackage{babel}
%\usepackage[utf8]{inputenc}
\usepackage{multirow}
\usepackage[table,xcdraw]{xcolor}
\usepackage{graphicx}
\usepackage{hyperref}
\title{stDAQ Reference Manual - r.1.1 (Antille)}
\date{July 4th, 2021}
%\release{Antille - 1.0}
\author{S.Furlan}
%\makeindex
\begin{document}
\maketitle
\vspace{1cm}
%\pagestyle{empty}
\begin{figure*}[ht]
%\centering
\includegraphics[scale=0.6]{../img/stDAQ_logo.png}
%\caption{}
\label{fig:stDAQ_logo}
\end{figure*}
\vspace{6cm}
\hrulefill
\small
\textbf{Note from the Author:} this project is NOT supported by STMicroelectronics, herein referred to as ST.
The Author contributes independently to the software development for the NUCLEO platform publicly provided by ST, and is committed to providing the general public with the highest performance achievable from the hardware, so that the DAQ system can be considered extremely reliable, as reported in the technical specifications of this reference manual. Specifications are subject to change without notice.
Any usage of the stDAQ software falls under the MIT license agreement.
\normalsize
\newpage
%\pagestyle{plain}
\tableofcontents
%\pagestyle{normal}
\newpage
\section{stDAQ - Data Acquisition System with ST-NUCLEO}
\begin{flushright}
\textit{"Let's be quantitative"} - Ray.E.G. (DAMTP Prof., Univ. of Cambridge, UK)
\end{flushright}
\vskip 0.5cm
The only way to recognize progress is by measuring it. Whether it is a research project, a new experimental design for an industrial production system or your latest DIY hobby, there is always the need for a handy tool that can collect, process and save data logs on your PC. This tool is a data acquisition system, also referred to as a DAQ. DAQ suppliers like National Instruments recognized this need and built LabView, a specialized interface to virtualize and program their proprietary mixed-signal acquisition boards. However, both the interface and the board come with a license cost that may not be justified by your initial startup budget. Luckily, the recent introduction of mixed-signal computing platforms powered by ARM, like the Raspberry PI and many high-performance microcontrollers, has made a DAQ more affordable and customizable. Yet the assumption remains that you are a skilled programmer mastering all the details involved in setting the many registers of an ARM processor.
\begin{figure}[ht!]
%\centering
\includegraphics[scale=0.47]{../img/stDAQ_system.png}
\caption{stDAQ data acquisition system setup.}
\label{fig:stDAQ_system}
\end{figure}
In this scenario the stDAQ project sparked from the personal quest of the author to provide to the general public a data acquisition system that is:
\begin{itemize}
\item[i.] affordable and scalable; just less than 20\$ for a ST-NUCLEO board, and all the PC environments are freeware. Different situations may benefit from this low-cost DAQ alternative: while providing educators a means to teach students about electronic front-ends, data collection, processing and control mechanisms, it also offers a versatile tool to industry, where fast prototyping and quick design cycles find an easy-to-set-up data acquisition platform to validate their innovation processes.
\item[ii.] easy to set up and program (using well-known data processing environments like Scilab and Python); all that is required is to plug in a USB cable and open a data processing environment. The environment can be used to program the commands of the stDAQ either in REPL mode, which allows stepping through the cause-effect mechanism of each command in a pedagogical way, or through the execution of a .sci or .py script, which may contain the source of a more complicated program. In order to keep the programming effort as small as possible without limiting versatility, stDAQ is supported by a compact set of programming instructions, which are described in the next sections.
\item[iii.] performant and reliable, i.e. able to implement a control loop with a feedback frequency up to 333-500 Hz (you need to trust the author and the many tests performed and reported in this manual).
\end{itemize}
The stDAQ data acquisition system runs on a ST-NUCLEO board, connected to a laptop PC via USB; see a high-level diagram in figure \ref{fig:stDAQ_system}. The stDAQ firmware sets up the ST-NUCLEO board as a mixed-signal DAQ, while the laptop PC is used as a programming interface to the stDAQ, running either a Scilab or a Python environment. Setup and installation details are explained later. On the other side of the ST-NUCLEO, you can plug in any output of your dedicated signal conditioning circuit or sensor front-end. Particular attention must be paid to the connection of the latter to the ST-NUCLEO and the PC, so as to avoid ground loops; see the Power Supply \& USB Isolation section for more details about how to guarantee a low-noise acquisition.
%Scope of the project:
%14AD 1DA 4DI 8DO
%not to reinvent the wheel, unless it can be built cheaper
\newpage
\subsection{Specifications}
stDAQ is designed to support a selected number of peripherals among the ones available on the microcontroller mounted on the board. Currently, only the NUCLEO-F413ZH can support stDAQ. We plan to extend the project soon to the whole ST-NUCLEO F4 family through compatibility. Additional features and peripherals will also be addressed in new releases.
\begin{table}[h!]
\caption{stDAQ support and compatibility.}
\centering
\begin{tabular}{|ll|}
\textbf{ST-NUCLEO} & NUCLEO-F413ZH \\
\textbf{Operating Systems} & Windows 8.1 and newer \\
\textbf{Environments} & Scilab 6.0.1/6.0.2, Python 3.4
\end{tabular}
\end{table}
The next table summarizes the main features of each peripheral.
\begin{table}[h!]
\begin{tabular}{l|l|lll}
\rowcolor[HTML]{9B9B9B}
\textbf{Peripherals} & \textbf{Feature} & \textbf{Min.} & \textbf{Typ.} & \textbf{Max.} \\ \hline
\cellcolor[HTML]{C0C0C0} & resolution & & 12 bits & \\
\cellcolor[HTML]{C0C0C0} & MUX channels & & & 14 \\
\cellcolor[HTML]{C0C0C0} & ADC update latency & & 1 msec. & \\
\cellcolor[HTML]{C0C0C0} & ADC voltage input & 0V & & +3.3V \\
\cellcolor[HTML]{C0C0C0} & channel sampling frequency & & & 1 MHz \\
\multirow{-6}{*}{\cellcolor[HTML]{C0C0C0}\textbf{ADC}} & inter-sequence sampling frequency & & & 1 KHz \\ \hline
\cellcolor[HTML]{C0C0C0} & resolution & & 12 bits & \\
\cellcolor[HTML]{C0C0C0} & MUX channels & & & 1 \\
\multirow{-3}{*}{\cellcolor[HTML]{C0C0C0}\textbf{DAC}} & DAC update latency & & 1 msec. & \\ \hline
\cellcolor[HTML]{C0C0C0} & input pins & & & 4 \\
\cellcolor[HTML]{C0C0C0} & output pins & & & 8 \\
\cellcolor[HTML]{C0C0C0} & IO update latency & & 2 msec. & \\
\multirow{-4}{*}{\cellcolor[HTML]{C0C0C0}\textbf{GPIO}} & IO voltage (TTL/CMOS) & 0V & & +3.3V \\ \hline
\cellcolor[HTML]{C0C0C0} & PWM resolution & & & 16 bits \\
\cellcolor[HTML]{C0C0C0} & PWM output frequency & 1 Hz & & 350 kHz \\
\multirow{-3}{*}{\cellcolor[HTML]{C0C0C0}\textbf{PWM}} & & & & \\ \hline
\cellcolor[HTML]{C0C0C0} & I2C address size & & 7 bits & \\
\multirow{-2}{*}{\cellcolor[HTML]{C0C0C0}\textbf{I2C}} & Standard mode clock & & 100 kHz &
\end{tabular}
\end{table}
\newpage
\subsection{Pinout}
ST-NUCLEO boards come with mounted connectors CN7, CN8, CN9, CN10, called ST-Zio, which are female on the top side and male on the bottom side. Those connectors are designed to also support ARDUINO shields.
See figure \ref{fig:pinout} for a reference of the stDAQ peripherals available from the ST-Zio connectors. \\
ST-NUCLEO boards also have a via for each pin of the microcontroller, so you can solder a strip header and customize the board for your application.
\begin{figure}[ht!]
%\centering
\includegraphics[scale=0.7]{../img/nucleo_f413zh_zio_ex.png}
\caption{Pinout for the Arduino and Zio extension of the NUCLEO-F413ZH.}
\label{fig:pinout}
\end{figure}
\newpage
\section{Programming interface}
stDAQ provides access to a selection of peripherals of the STM32F413ZH microcontroller through a set of programming instructions. The scope of stDAQ is to keep the programming interface as simple as possible, without sacrificing performance, so as to provide the user with a flexible programming environment to quickly set up the next data acquisition experiment. The supported programming environments are either the Scilab console or Python. In both cases, the peripheral programming interfaces are linked to the environments with a dedicated library, as explained in the Setup \& Installation section. \\
Access to the stDAQ system is obtained by opening the USB communication on a virtual COM port, executing a sequence of setup commands, running the acquisition and closing the communication. \\
In the following sections, the programming functions related to each peripheral are highlighted in red, and usage examples are described along with their timing performance.
\subsection{DEVICE}
The following functions are used to open, close the communication with the stDAQ and return the current firmware version running on the ST-NUCLEO board.
\subsubsection{\textcolor{red}{stdaq\_open}}
The stdaq\_open() function opens the communication on the selected COM port. \\
The prototype for the function call is:
\begin{verbatim}
res = stdaq_open(port)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[port (IN)]}] is a string that specifies the input COM port, e.g.: "COM1"
\item [\textbf{[res (OUT)]}] returns the status of the opening: 0 if successful, -1 if not successful.
\end{itemize}
A function call example in Scilab:
\begin{verbatim}
--> res = stdaq_open("COM0");
\end{verbatim}
%
In Python, the device communication is opened with the instantiation of a class \\ $STDAQ(port,[verbose=True])$. A function call example in Python:
\begin{verbatim}
>>> import stdaq
>>> daq = stdaq.STDAQ('COM0') # open the communication
\end{verbatim}
If the status of the opening is not successful, the class is not instantiated and an error message appears.
\subsubsection{\textcolor{red}{stdaq\_close}}
The stdaq\_close() function closes the communication on the selected COM port. \\
The prototype for the function call is:
\begin{verbatim}
stdaq_close()
\end{verbatim}
Currently this function does not require any entry and does not have returns. It just closes the active COM port.
In Python, the device communication is closed by the deletion of the class instantiation. A function call example to close the communication previously opened in Python:
\begin{verbatim}
>>> del daq # close the communication
\end{verbatim}
\subsubsection{\textcolor{red}{stdaq\_version}}
The stdaq\_version() function gets the current version of the stDAQ firmware programmed on the ST-NUCLEO. \\
The prototype for the function call is:
\begin{verbatim}
[package,release,subrelease] = stdaq_version()
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[none (IN)]}]
\item [\textbf{[res (OUT)]}] returns a list with both the package name and the release number of the stDAQ firmware.
\end{itemize}
Examples of function calls:
\begin{table}[h!]
\begin{tabular}{|l|l|}
\hline
\cellcolor[HTML]{C0C0C0} \textbf{Scilab} &
\begin{minipage}{4.5in}
\begin{verbatim}
if (stdaq_open("COM0")>=0) then
[pkg,rel,sub] = stdaq_version();
mprintf("\n stDAQ version: (%s) r.%d.%d",pkg,rel,sub);
stdaq_close();
end
\end{verbatim}
\end{minipage}
\\ \hline
\cellcolor[HTML]{C0C0C0} \textbf{Python} &
\begin{minipage}{4.5in}
\begin{verbatim}
import stdaq
daq = stdaq.STDAQ('COM0') # open the communication
[pkg,rel,sub] = daq.version()
print("\n stDAQ version: ({}) r.{}.{}\n".format(pkg,rel,sub))
del daq # close the communication
\end{verbatim}
\end{minipage}
\\ \hline
\end{tabular}
\end{table}
%\hrulefill
\newpage
\subsection{DAC}
The stDAQ supports a single 12 bits DAC, with output on pin PA4. \\
The programming of the DAC is obtained with the following functions:
\subsubsection{\textcolor{red}{stdaq\_set\_dac}}
The stdaq\_set\_dac() function sets the value in the DAC register used to set the analog voltage output on the pin PA4. To physically set the analog output voltage on the pin PA4 the DAC must be enabled (see stdaq\_dac\_enable()). \\
The prototype for the function call is:
\begin{verbatim}
stdaq_set_dac(value)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[value (IN)]}] is an integer value in the range of [0-4095]. The output voltage on the pin corresponds to the scaling of the value by $Vout = value/4096*3.3V$.
\item [\textbf{[none (OUT)]}]
\end{itemize}
A function call example in Scilab:
\begin{verbatim}
--> stdaq_set_dac(2048);
\end{verbatim}
This will set the output to about 1.65V, once the DAC is enabled.
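As a small convenience on the PC side (this helper is not part of the stDAQ library; its name and the clamping choice are our own), the inverse mapping from a desired output voltage to a register value follows directly from the scaling above:
\begin{verbatim}
# hypothetical helper, not part of the stdaq library
def volts_to_dac(vout, vref=3.3, full_scale=4096):
    value = int(round(vout / vref * full_scale))
    return max(0, min(4095, value))      # clamp to the valid register range

daq.set_dac(volts_to_dac(1.65))          # roughly mid-scale, i.e. about 1.65 V
\end{verbatim}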
\subsubsection{\textcolor{red}{stdaq\_enable\_dac}}
The stdaq\_enable\_dac() function enables the analog output voltage on the pin PA4. If DAC is enabled before setting its value in the register, the default output is 0V. \\
The prototype for the function call is:
\begin{verbatim}
stdaq_enable_dac()
\end{verbatim}
Currently this function does not require any entry and does not have returns. It just enables the output analog voltage of the DAC.
\subsubsection{\textcolor{red}{stdaq\_disable\_dac}}
The stdaq\_disable\_dac() function disables the analog output voltage on the pin PA4. \\
The prototype for the function call is:
\begin{verbatim}
stdaq_disable_dac()
\end{verbatim}
Currently this function does not require any entry and does not have returns.
\subsubsection{Examples}
As an example we will generate a periodic function, command the DAC through increments and analyze its performance.
First we generate a triangular wave spanning from 0 to 3.3V with an incremental step of 12, equivalent to a step of about 10 mV. A delay of 1 msec. is added after the new DAC value has been set. This is recommended since the USB communication has a delay of about 1 msec. Longer sleeps can be used, but shorter ones result in an unpredictable update.
%
\begin{table}[ht!]
\begin{tabular}{|l|l|}
\hline
\cellcolor[HTML]{C0C0C0} \textbf{Scilab} &
\begin{minipage}{4.5in}
\begin{verbatim}
stdaq_open("COM0");
stdaq_set_dac(4095);
stdaq_enable_dac();
step = 12; taps = 341; periods = 10;
for i=1:(2*taps*periods)
value = abs(step*(pmodulo(i,2*taps)-taps));
stdaq_set_dac(value);
sleep(1);
end
stdaq_close();
\end{verbatim}
\end{minipage}
\\ \hline
\cellcolor[HTML]{C0C0C0} \textbf{Python} &
\begin{minipage}{4.5in}
\begin{verbatim}
import stdaq
from time import sleep
daq = stdaq.STDAQ('COM0') # open the communication
daq.set_dac(4095)
daq.enable_dac()
step = 12; taps = 341; periods = 10
for i in range(2*taps*periods):
    value = abs(step*((i%(2*taps))-taps))
    daq.set_dac(value)
    sleep(0.001)
del daq # close the communication
\end{verbatim}
\end{minipage}
\\ \hline
\end{tabular}
\end{table}
\begin{figure}[ht!]
%\centering
\includegraphics[scale=0.8]{../img/triangular_wave2.png}
\caption{Triangular wave measured on PA4.}
\label{fig:triangular_wave}
\end{figure}
%\clearpage
%\hrulefill
\newpage
\subsection{ADC}
The stDAQ supports a single 12 bits ADC, muxed over 14 input channels and a temperature sensor.
In the following table there is the correspondence between channel and pin on the ST NUCLEO-F413ZH.
Channel 16 corresponds to the temperature sensor. Note that channel 4 and channel 8 are missing because the pins are dedicated to other functions.
\begin{table}[h]
\caption{ST NUCLEO-F413ZH ADC channels pinout.}
\centering
\begin{tabular}{|ll|ll|}
\textbf{PIN} & \textbf{CHANNEL} & \textbf{PIN} & \textbf{CHANNEL} \\ \hline
PA0 & channel 0 & PB1 & channel 9 \\
PA1 & channel 1 & PC0 & channel 10 \\
PA2 & channel 2 & PC1 & channel 11 \\
PA3 & channel 3 & PC2 & channel 12 \\
PA5 & channel 5 & PC3 & channel 13 \\
PA6 & channel 6 & PC4 & channel 14 \\
PA7 & channel 7 & PC5 & channel 15
\end{tabular}
\end{table}
The programming of the ADC is obtained with the following functions:
\subsubsection{\textcolor{red}{stdaq\_set\_adc}}
stdaq\_set\_adc() sets the ADC channels required for the acquisition. \\
The prototype for the function call is:
\begin{verbatim}
stdaq_set_adc(channelsequence, clockdivision)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[channelsequence (IN)]}] is an array of max. 16 entries with values ranging from [0-16] with the exclusion of 4 and 8, which are channels not available. Repetition of the same value is possible inside the channelsequence.
\item [\textbf{[clockdivision (IN)]}] is a value in range [0-9] corresponding to a specific clock frequency, which trigges the acquisition of the next channel inside the channelsequence. Refers to the following table for the corresponding frequencies.
\item [\textbf{[none (OUT)]}]
\end{itemize}
\begin{table}[h]
\caption{ST NUCLEO-F413ZH clockdivision frequencies used to trigger the acquisition.}
\centering
\begin{tabular}{|ll|ll|}
clockdivision & frequency & clockdivision & frequency \\ \hline
0 & 1 MHz & 5 & 31.25 KHz \\
1 & 500 KHz & 6 & 15.625 KHz \\
2 & 250 KHz & 7 & 7.8125 KHz \\
3 & 125 KHz & 8 & 3.9062 KHz \\
4 & 62.5 KHz & 9 & 1.9531 KHz \\
\end{tabular}
\end{table}
A function call example in Scilab:
\begin{verbatim}
--> chseq = [0,5,6,6,16]; // [ch0, ch5, ch6, ch6, temp_sensor]
--> clkdiv = 5; // 31.25 KHz
--> stdaq_set_adc(chseq,clkdiv);
\end{verbatim}
\subsubsection{\textcolor{red}{stdaq\_get\_adc}}
The prototype for the function call is:
\begin{verbatim}
samples = stdaq_get_adc(channelsequence, numsamplesperchannel)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[channelsequence (IN)]}] is an array of max. 16 entries with values ranging from [0-16] with the exclusion of 4 and 8, which are channels not available. Repetition of the same value is possible inside the channelsequence.
\item [\textbf{[numsamplesperchannel (IN)]}] is the number of samples per channel returned by the acquisition;
\item [\textbf{[samples (OUT)]}] returns a matrix of [numchannels x numsamplesperchannel] with the 12 bits ADC values scaled to 0-3.3 V. For the temperature sensor, the value is already returned scaled in Celsius.
\end{itemize}
\begin{figure}[ht!]
%\centering
\includegraphics[scale=0.4]{../img/adc_timing_a.png}
\caption{Acquisition timing measured on PE9.}
\label{fig:adc_timing_a}
\end{figure}
A function call example in Scilab:
\begin{verbatim}
--> chseq = [0,1,2,2,5,16]; // [ch0, ch1, ch2, ch2, ch5, temp_sensor]
--> clkdiv = 2; // 250 KHz
--> stdaq_set_adc(chseq,clkdiv);
--> samples = stdaq_get_adc(chseq,30);
\end{verbatim}
%
Similarly in Python, the ADC channels are set and read with the functions:
\begin{verbatim}
>>> chseq = [0,1,2,2,5,16]
>>> clkdiv = 2
>>> daq.set_adc(chseq,clkdiv)
>>> samples = daq.get_adc(chseq,30)
\end{verbatim}
As seen in figure \ref{fig:adc_timing_a} top, the inter-sequence sampling frequency can reach values up to 1 KHz, where the limit is mainly due to the USB communication required to request new data.
In the bottom of the same figure, the channel sampling frequency is measured on PE9 as the 250 KHz set by the clockdivision entry. Note that pin PE9 is the trigger for the acquisition of a new channel in the selected sequence list, where the acquisition trigger is set as rising edge.
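On the PC side, the returned matrix can be post-processed with ordinary NumPy calls (this is not part of the stDAQ API; the snippet below only assumes the [numchannels x numsamplesperchannel] layout described above and the daq object from the previous example):
\begin{verbatim}
import numpy as np

samples = np.asarray(daq.get_adc(chseq, 30))   # shape: (len(chseq), 30)
channel_means = samples.mean(axis=1)           # average value per channel
for ch, m in zip(chseq, channel_means):
    # channels 0-15 are in Volts; channel 16 is already in Celsius
    print("channel {}: {:.3f}".format(ch, m))
\end{verbatim}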
\subsubsection{Examples}
As an example we will use the DAC to generate a sinusoid function, command the DAC output, record the voltage with the ADC channel 0 and display the result in Scilab, see figure \ref{fig:adc_example}.
It is required to connect the output of the DAC (pin PA4) with the input of the channel 0 of the ADC (pin PA0) with a jumper wire.
%\begin{table}[ht!]
\begin{table}[!]
\begin{tabular}{|l|l|}
\hline
\cellcolor[HTML]{C0C0C0} \textbf{Scilab} &
\begin{minipage}{4.5in}
\begin{verbatim}
stdaq_open("COM0");
chseq = [0]; // [ch0]
clkdiv = 0; // 1 MHz
stdaq_set_adc(chseq,clkdiv);
stdaq_set_dac(0);
stdaq_enable_dac();
tpp = 20; // taps per period
periods = 10; // number of periods
n = tpp*periods; out = [];
value = floor(2047.5*(1 + sin(2*%pi*(1:n)/tpp)));
for i=1:n
stdaq_set_dac(value(i));
sleep(1);
samples = stdaq_get_adc(chseq,1);
out = [out, samples];
end
figure; plot(1:length(out),out);
stdaq_close();
\end{verbatim}
\end{minipage}
\\ \hline
\cellcolor[HTML]{C0C0C0} \textbf{Python} &
\begin{minipage}{4.5in}
\begin{verbatim}
import stdaq
from time import sleep
import math as m
import matplotlib.pyplot as plt
daq = stdaq.STDAQ('COM0') # open the communication
chseq = [0]
clkdiv = 0
daq.set_adc(chseq,clkdiv)
daq.set_dac(0)
daq.enable_dac()
tpp = 20 # taps per period
periods = 10 # number of periods
out = []
n = tpp*periods
value = [int(2047.5*(1+m.sin(2*m.pi*(i+1)/tpp))) for i in range(n)]
for i in range(n):
    daq.set_dac(value[i])
    sleep(0.001)
    samples = daq.get_adc(chseq,1)
    out.append(samples[0][0])  # channel 0, single sample per iteration
plt.plot(range(1,len(out)+1),out)
plt.show()
del daq # close the communication
\end{verbatim}
\end{minipage}
\\ \hline
\end{tabular}
\end{table}
Here, sleep(1) corresponds to a 1 msec. sleep, allowing the DAC to settle before taking the measurement. Without it, we noticed that the measured value may not correspond to the one actually set.
Considering that each stdaq command in the for-loop takes about 1 msec., the total loop period amounts to about 3 msec., i.e. a 333 Hz loop rate. This structure may be useful to implement, for example, a control loop where a PID controller, implemented in Scilab, is used to set the next DAC value.
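As a sketch of such a loop (in Python, reusing the daq object from the examples above; the PA4-PA0 wiring assumption, the setpoint and the gains are purely illustrative and not validated values), a minimal proportional-integral controller could look like this:
\begin{verbatim}
# minimal PI control-loop sketch; setpoint, gains and wiring are illustrative only
import stdaq
from time import sleep

daq = stdaq.STDAQ('COM0')
daq.set_adc([0], 0)                  # measure on ADC channel 0 (PA0)
daq.set_dac(0)
daq.enable_dac()

setpoint = 1.0                       # desired voltage on channel 0, in Volts
kp, ki = 0.5, 0.05                   # illustrative controller gains
integral = 0.0
for _ in range(1000):                # about 3 s at the ~333 Hz loop rate
    measured = daq.get_adc([0], 1)[0][0]
    error = setpoint - measured
    integral += error
    u = kp*error + ki*integral       # controller output, in Volts
    daq.set_dac(int(max(0, min(4095, u/3.3*4096))))
    sleep(0.001)
del daq                              # close the communication
\end{verbatim}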
\begin{figure}[ht!]
%\centering
\includegraphics[scale=0.8]{../img/adc_example.png}
\caption{Top: DAC voltage output measured on pin PA4. Bottom: plot of the samples returned by stdaq\_get\_adc().}
\label{fig:adc_example}
\end{figure}
\hrulefill
\newpage
\subsection{GPIO}
The GPIOs of the ST-NUCLEO are configured both as digital output (8 pins) and as digital input (4 pins). A list of pins are referenced in the following tables.
The digital outputs are set in push-pull mode.
\begin{table}[h]
\caption{ST NUCLEO-F413ZH GPIO output.}
\centering
\begin{tabular}{|ll|ll|}
\textbf{PIN} & \textbf{OUTPUT} & \textbf{PIN} & \textbf{OUTPUT} \\ \hline
PD0 & 0 & PD4 & 4 \\
PD1 & 1 & PD5 & 5 \\
PD2 & 2 & PD6 & 6 \\
PD3 & 3 & PD7 & 7
\end{tabular}
\end{table}
\begin{table}[h]
\caption{ST NUCLEO-F413ZH GPIO input.}
\centering
\begin{tabular}{|ll|}
\textbf{PIN} & \textbf{INPUT} \\ \hline
PE0 & 0 \\
PE1 & 1 \\
PE2 & 2 \\
PE3 & 3
\end{tabular}
\end{table}
\subsubsection{\textcolor{red}{stdaq\_get\_gpio}}
This function reads the specified input port pin and returns its value to be either set (3.3V) or reset (0V).
The prototype for the function call is:
\begin{verbatim}
value = stdaq_get_gpio(pin)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[pin (IN)]}] output number corresponding to the pin in the GPIO output table;
\item [\textbf{[value (OUT)]}] returns binary value defined as set = 1 or reset = 0;
\end{itemize}
A function call example in Scilab:
\begin{verbatim}
--> pin = 1; // PE1
--> value = stdaq_get_gpio(pin);
\end{verbatim}
\subsubsection{\textcolor{red}{stdaq\_set\_gpio}}
This function sets or clears the selected port bit.
The prototype for the function call is:
\begin{verbatim}
stdaq_set_gpio(pin, value)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[pin (IN)]}] output number corresponding to the pin in the GPIO output table;
\item [\textbf{[value (IN)]}] binary value to define if the output is set = 1 or reset = 0;
\item [\textbf{[none (OUT)]}]
\end{itemize}
A function call example in Scilab:
\begin{verbatim}
--> pin = 0; // PD0
--> value = 1; // set to 3.3V
--> stdaq_set_gpio(pin,value);
\end{verbatim}
\subsubsection{\textcolor{red}{stdaq\_toggle\_gpio}}
The prototype for the function call is:
\begin{verbatim}
stdaq_toggle_gpio(pin)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[pin (IN)]}] output number corresponding to the pin in the GPIO output table;
\item [\textbf{[none (OUT)]}]
\end{itemize}
\subsubsection{Examples}
As an example we will toggle the pin PD0 at each iteration of a loop cycle, so to evaluate the frequency response and the average latency of the digital output.
%
\begin{table}[ht!]
\begin{tabular}{|l|l|}
\hline
\cellcolor[HTML]{C0C0C0} \textbf{Scilab} &
\begin{minipage}{4.5in}
\begin{verbatim}
stdaq_open('COM0');
tags = 100;
for i=1:tags
stdaq_toggle_gpio(0);
sleep(1);
end
stdaq_close();
\end{verbatim}
\end{minipage}
\\ \hline
\cellcolor[HTML]{C0C0C0} \textbf{Python} &
\begin{minipage}{4.5in}
\begin{verbatim}
import stdaq
from time import sleep
daq = stdaq.STDAQ('COM0') # open the communication
tags = 100
for i in range(tags):
    daq.toggle_gpio(0)
    sleep(0.001)
del daq # close the communication
\end{verbatim}
\end{minipage}
\\ \hline
\end{tabular}
\end{table}
%
The $sleep(1)$ routine is added in the loop to delay the PC loop by 1 msec.
The toggling of the GPIO is in fact limited by the USB refresh rate, which occurs about every 1 msec.
The max. total toggling rate is measured to be 250 Hz, see figure \ref{fig:gpio_example}. It is observed that slips of $\pm 1$ msec happen at a rate of 2\% of the toggling count.
For sleep values lower than 1 msec., the toggling is no longer reliable and toggling losses may occur.
\begin{figure}[ht!]
%\centering
\includegraphics[scale=0.8]{../img/gpio_example.png}
\caption{Digital pin toggling measured on PD0.}
\label{fig:gpio_example}
\end{figure}
\hrulefill
\subsection{LED}
The ST-NUCLEO board mounts three color LEDs, typically connected to the pins PB0 (green), PB7 (blue), PB14 (red).
\subsubsection{\textcolor{red}{stdaq\_toggle\_led}}
This function toggles one of the three LEDs mounted on the ST-NUCLEO board.
The prototype for the function call is:
\begin{verbatim}
stdaq_toggle_led(color)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[color (IN)]}] character corresponding to the color of the LED to be toggled, which can be either 'r' (red), 'g' (green), or 'b' (blue).
\item [\textbf{[none (OUT)]}]
\end{itemize}
An example of a function call in Scilab:
\begin{verbatim}
--> for i=1:10
... stdaq_toggle_led('r');
... sleep(500);
end
\end{verbatim}
This example toggles the red LED each 0.5 seconds for 10 times. \\
The same example of a call in Python:
\begin{verbatim}
>>> import stdaq
>>> from time import sleep
>>> daq = stdaq.STDAQ('COM0')
>>> for i in range(10):
...     daq.toggle_led('r')
...     sleep(0.5)
\end{verbatim}
\hrulefill
\subsection{PWM}
Three independent PWM output pins are available on PE5 (TIM9), PF7 (TIM11) and PF8 (TIM13).
Those are obtained from general-purpose timers with a 16-bit auto-reload counter and prescaler. The PWM outputs have positive polarity and are edge aligned.
The timing of the PWM outputs can be synchronized.
\subsubsection{\textcolor{red}{stdaq\_set\_pwm}}
The stdaq\_set\_pwm() function sets the PWM parameters. \\
The prototype for the function call is:
\begin{verbatim}
stdaq_set_pwm(pwm,duty,rate,params)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[pwm (IN)]}] is the number [1-3] of the PWM required to be turned on.
\item [\textbf{[duty (IN)]}] is the duty-cycle expressed in percentage in the range [0-100].
\item [\textbf{[rate (IN)]}] is the frequency of the PWM expressed in Hertz.
\item [\textbf{[params (IN)]}] is a two-entry array [tim\_presc,tim\_period] used to bypass the automatic period guess, with both entries in [1-65536].
\item [\textbf{[none (OUT)]}]
\end{itemize}
Here we make some examples of function call in Scilab:
\begin{verbatim}
--> stdaq_set_pwm(1,50,160);
\end{verbatim}
This command sets PWM 1 to a 50\% duty-cycle and a frequency of 160Hz. In particular, this call makes use of an automatic period guess that optimizes the prescaler and period parameters of the timer so as to maximize the PWM resolution.
\begin{verbatim}
--> stdaq_set_pwm(3,20,params=[6400,200]);
\end{verbatim}
This command sets PWM 3 to a 20\% duty-cycle and a frequency of 75Hz. This call bypasses the automatic period guess by feeding the prescaler and period parameters of the timer directly. The output frequency of the PWM is calculated as $96 MHz/(6400*200) = 75 Hz$.
\begin{verbatim}
--> stdaq_set_pwm(1,70);
\end{verbatim}
This command is used to update the duty-cycle of the PWM 1 to 70\%.
\subsubsection{\textcolor{red}{stdaq\_enable\_pwm}}
The stdaq\_enable\_pwm() function enables the PWM output. \\
The prototype for the function call is:
\begin{verbatim}
stdaq_enable_pwm(pwm)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[pwm (IN)]}] is the number [1-3] of the PWM required to be turned on.
\item [\textbf{[none (OUT)]}]
\end{itemize}
A function call example in Scilab:
\begin{verbatim}
--> stdaq_enable_pwm(1);
\end{verbatim}
This example turns on the output of the PWM 1.
\subsubsection{\textcolor{red}{stdaq\_disable\_pwm}}
The stdaq\_disable\_pwm() function disables the PWM output. \\
The prototype for the function call is:
\begin{verbatim}
stdaq_disable_pwm(pwm)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[pwm (IN)]}] is the number [1-3] of the PWM required to be turned off.
\item [\textbf{[none (OUT)]}]
\end{itemize}
A function call example in Scilab:
\begin{verbatim}
--> stdaq_disable_pwm(1);
\end{verbatim}
This example turns off the output of the PWM 1.
\subsubsection{\textcolor{red}{stdaq\_sync\_pwm}}
\subsubsection{Examples}
In the following example PWM 1 will be activated starting with a 50 Hz frequency and a 20\% duty-cycle. After 100 msec. the duty-cycle is updated online to 50\%. After another 100 msec., the output of PWM 1 is turned off.
%
\begin{table}[ht!]
\begin{tabular}{|l|l|}
\hline
\cellcolor[HTML]{C0C0C0} \textbf{Scilab} &
\begin{minipage}{4.5in}
\begin{verbatim}
stdaq_open("COM0");
pwm = 1;
duty = 20; // 20% duty cycle
rate = 50; // 50 Hz
stdaq_set_pwm(pwm,duty,rate);
stdaq_enable_pwm(pwm);
sleep(100); // wait 100 msec.
duty = 50; // 50% duty cycle
stdaq_set_pwm(pwm,duty);
sleep(100); // wait 100 msec.
stdaq_disable_pwm(pwm);
stdaq_close();
\end{verbatim}
\end{minipage}
\\ \hline
\cellcolor[HTML]{C0C0C0} \textbf{Python} &
\begin{minipage}{4.5in}
\begin{verbatim}
import stdaq
from time import sleep
daq = stdaq.STDAQ('COM0') # open the communication
pwm = 1; duty = 20; rate = 50
daq.set_pwm(pwm,duty,rate) # set 50Hz @ 20% duty-cycle
daq.enable_pwm(pwm)
sleep(0.1)
duty = 50
daq.set_pwm(pwm,duty) # change to 50% duty-cycle
sleep(0.1)
daq.disable_pwm(pwm)
del daq # close the communication
\end{verbatim}
\end{minipage}
\\ \hline
\end{tabular}
\end{table}
\subsection{I2C}
An I2C interface with 7-bit addressing, standard mode, no-stretch, 100 KHz clock is available on the pins PB6 (SCL - clock) and PB9 (SDA - data).
When connected to a slave, pull-up resistors with typical values ranging from 2.2k$\Omega$ to 4.7k$\Omega$ are required on both the pins, since they are not present on the ST-NUCLEO.
\subsubsection{\textcolor{red}{stdaq\_read\_i2c}}
This function reads the register at the specified I2C address. \\
The prototype for the function call is:
\begin{verbatim}
[rx,len] = stdaq_read_i2c(i2c_address,reg_address,num_bytes)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[i2c\_address (IN)]}] is a 7-bit address [0-127];
\item [\textbf{[reg\_address (IN)]}] is the address of the register for a memory address size of 8 bits;
\item [\textbf{[num\_bytes (IN)]}] number of bytes to be read from the register, maximum read of 32 bytes;
\item [\textbf{[rx (OUT)]}] an array with the values returned by the I2C read command;
\item [\textbf{[len (OUT)]}] returns the length of the rx array;
\end{itemize}
A function call example in Scilab:
\begin{verbatim}
--> i2c_addr = 52;
--> reg_addr = 10;
--> num_bytes = 1;
--> [rx,len] = stdaq_read_i2c(i2c_addr,reg_addr,num_bytes);
\end{verbatim}
\subsubsection{\textcolor{red}{stdaq\_write\_i2c}}
This function writes the register at the specified I2C address. \\
The prototype for the function call is:
\begin{verbatim}
stdaq_write_i2c(i2c_address,reg_address,data)
\end{verbatim}
The entries to the functions are:
\begin{itemize}
\item [\textbf{[i2c\_address (IN)]}] is a 7-bit address [0-127];
\item [\textbf{[reg\_address (IN)]}] is the address of the register for a memory address size of 8 bits;
\item [\textbf{[data (IN)]}] array with data, where each entry must be 8 bits size and maximum length of 32 entries;
\item [\textbf{[none (OUT)]}]
\end{itemize}
A function call example in Scilab:
\begin{verbatim}
--> i2c_addr = 52;
--> reg_addr = 10;
--> data = [2,58,103];
--> stdaq_write_i2c(i2c_addr,reg_addr,data);
\end{verbatim}
\newpage
\section{Setup \& Installation}
To operate the ST-NUCLEO as a stDAQ and communicate with your Scilab environment running on your laptop, we recommend the following installation steps:
\begin{enumerate}
\item Download and install the latest Scilab freeware from the \href{https://www.scilab.org/}{website}.
\item Download and install the ST-Link Utility from the \href{https://www.st.com/en/development-tools/stsw-link004.html}{ST website}.
\item Plug in the USB PWR (also known as STLink USB) of the ST-NUCLEO board to your laptop and open the ST-Link Utility. Go to the Target menu and select Connect. If successful, the device name is displayed and the memory of the microcontroller is shown in a table.
\item In the ST-Link Utility, open Program... in the Target menu, upload the stdaq\_antille\_r1s0.bin binary file available in the /nucleo folder of the project, and program the microcontroller.
\item Connect the User USB of the ST-NUCLEO board to your laptop and locate the corresponding COM port.
\item Open Scilab. In the file browser, navigate to the /scilab folder in the stDAQ project and execute runme.sci with the command
\begin{verbatim}
--> exec('runme.sci',-1)
\end{verbatim}
Wait until ``Link done'' appears on the console. Now your system is ready to operate.
\item On Scilab console write the command
\begin{verbatim}
--> stdaq_open('COM0')
\end{verbatim}
to start the communication between the stDAQ on the ST-NUCLEO and your Scilab environment.
\end{enumerate}
Note that every time you restart a new Scilab session, you will need to execute runme.sci, unless you set it up in the Scilab environment variables to auto-load and link the stDAQ dynamic library. \\
After a power-down or reboot, there is no need to reflash the stDAQ firmware on the ST-NUCLEO.
A new reprogramming with the ST-Link Utility is required only if a new firmware release needs to be installed on the ST-NUCLEO.
Similarly to Scilab, the Python stDAQ library in the /python folder needs to be used to communicate with the ST-NUCLEO from your .py programming environment.
\newpage
\section{Power Supply \& USB Isolation}
The ST-NUCLEO can be powered in four different ways (see section 6.4 in UM1974 from STMicroelectronics, also available in the docs/ folder):
\begin{itemize}
\item plugging in the USB PWR (STLink USB) which supplies a nominal 5 Volts;
\item supplying an external 5 Volts into the E5V pin;
\item supplying an external 3.3 Volts into the 3.3 pin;
\item supplying an external 7 to 12 Volts source into the VIN pin; this voltage is stepped down to a nominal 5 Volts through the on-board linear regulator;
\end{itemize}
One of the problems that arise when supplying power to the ST-NUCLEO from an external source while simultaneously connecting the User USB to the laptop or PC for data transmission is the presence of a ground loop. The latter may cause an unstable reference ground, which results in uncorrelated noise on the ADC measurements. \\
Simply adding a USB isolator on the User USB may be sufficient to break the ground loop and its effects on the measurements.
Examples of USB isolators available on the market are the HiLetgo USB isolator, the EZSync USB isolator and the SMAKN USB isolator. Those isolators support USB-FS communication and can be also used to power a connected device.
%\subsection{STM32F4xx}
%We tested the lwHAL on the STM NUCLEO-F429ZI development board. The test consists
%in turning the red LED on and then toggling at every second the green LED on the board.
%We initialized a new project for the NUCLEO-F429ZI from the STM IDE, setting
%with default configuration. Then we added the lwHAL folder inside the $/Core$, as
%shown in figure \ref{fig:stm_ide}.
%The $\#define$ $STM32F4xx$ must be uncommented in the $src/config.h$.
%Make sure to include the path to the lwHAL folder inside the IDE.
%Copy the code snippets from the
%$ports/stm32f4/stm32f4\_test.c$
%into the $main.c$.
%Compile and run the binary on the board. You will see the red LED turning on and
%the green LED blinking at a frequency of 1 Hz.
%\begin{figure*}[ht]
%\centering
%\includegraphics[scale=0.4]{stm_ide.png}
%%\caption{}
%\label{fig:stm_ide}
%\end{figure*}
\newpage
\section{stDAQ license information}
The MIT License (MIT)
\\
\\
Copyright (c) 2021 Silvano Furlan,
\\
\\
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
\\
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
\\
\\
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
\end{document}
|
[STATEMENT]
lemma f_geom_series:
assumes "b = 2^(Suc c)"
shows "(F q c b = f) \<longleftrightarrow> ( (b-1) * f = 2^c * (b^(Suc q) - 1) )"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (F q c b = f) = ((b - 1) * f = 2 ^ c * (b ^ Suc q - 1))
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (F q c b = f) = ((b - 1) * f = 2 ^ c * (b ^ Suc q - 1))
[PROOF STEP]
have "F q c b = 2^c * E q b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. F q c b = 2 ^ c * E q b
[PROOF STEP]
by (auto simp: E_def F_def sum_distrib_left sum_distrib_right)
[PROOF STATE]
proof (state)
this:
F q c b = 2 ^ c * E q b
goal (1 subgoal):
1. (F q c b = f) = ((b - 1) * f = 2 ^ c * (b ^ Suc q - 1))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
F q c b = 2 ^ c * E q b
goal (1 subgoal):
1. (F q c b = f) = ((b - 1) * f = 2 ^ c * (b ^ Suc q - 1))
[PROOF STEP]
have "b \<ge> 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 \<le> b
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
b = 2 ^ Suc c
goal (1 subgoal):
1. 2 \<le> b
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
2 \<le> b
goal (1 subgoal):
1. (F q c b = f) = ((b - 1) * f = 2 ^ c * (b ^ Suc q - 1))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
F q c b = 2 ^ c * E q b
2 \<le> b
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
F q c b = 2 ^ c * E q b
2 \<le> b
goal (1 subgoal):
1. (F q c b = f) = ((b - 1) * f = 2 ^ c * (b ^ Suc q - 1))
[PROOF STEP]
by (smt e_geom_series mult.left_commute mult_cancel_left)
[PROOF STATE]
proof (state)
this:
(F q c b = f) = ((b - 1) * f = 2 ^ c * (b ^ Suc q - 1))
goal:
No subgoals!
[PROOF STEP]
qed |
module WhipRest
using HttpServer
include("beans.jl")
export ServerConfig, ResponseConfig, WhipRestServer, RestMessage, GET, POST, PUT, PATCH, DELETE, createServer, startServer
function createServer(config::ServerConfig)
restServer = WhipRestServer(config.port, Dict{String,Any}(), Dict{String,Any}(), Dict{String,Any}(), Dict{String,Any}(), Dict{String,Any}())
#Iterate over all the request response mappings
for i = 1:length(config.responseConfigurations)
responseConfiguration = config.responseConfigurations[i]
#get the correct mappings set based on HTTP method
mappings = getMappings(restServer, responseConfiguration.method)
#Put the response handler into map for the given path
mappings[responseConfiguration.path] = responseConfiguration.responseHandler
end
return restServer
end
function startServer(whipServer::WhipRestServer)
#Create a new HTTP handler
http = HttpHandler() do req::Request, res::Response
#Get the mappings of path to action based on HTTP method
mappings = getMappings(whipServer, getHTTP_METHOD(req.method))
try
#Extract the path from the request resource
path = getPath(req.resource)
#Try to find the function for the mapping
responseHandler = mappings[String(path)]
#Call the handler for this path with the content after the path
params = getParams(req.resource)
Response(responseHandler(RestMessage(params, req.data)))
catch error
if isa(error, KeyError)
#No mapping was found for this path
Response("404")
else
#Unexpected error: rethrow instead of silently returning nothing
rethrow(error)
end
end
end
server = Server(http)
run(server, whipServer.port)
end
function getPath(resource::String)
#Get the entire resource from the first / to the ? or the end of string
paramIndex = search(resource, '?')
if paramIndex==0
#Trim the / off the end if it exists
if resource[end] == '/'
path = resource[1:end-1]
else
path = resource[1:end]
end
else
path = resource[1:paramIndex-1]
end
return path
end
function getParams(resource::String)
paramIndex = search(resource, '?')
paramsMap = Dict{String,String}()
if paramIndex==0
return paramsMap
else
params = split(resource[paramIndex+1:end], '&')
for i = 1:length(params)
pair = split(params[i], '=')
paramsMap[pair[1]] = String(pair[2])
end
return paramsMap
end
end
function getHTTP_METHOD(method::String)
if method == "GET"
return GET
elseif method== "POST"
return POST
elseif method == "PUT"
return PUT
elseif method == "PATCH"
return PATCH
elseif method == "DELETE"
return DELETE
end
end
function getMappings(whipServer::WhipRestServer, method::HTTP_METHOD)
if GET == method
return whipServer.getMappings
elseif POST == method
return whipServer.postMappings
elseif PUT == method
return whipServer.putMappings
elseif PATCH == method
return whipServer.patchMappings
elseif DELETE == method
return whipServer.deleteMappings
end
end
end #end module WhipRest
|
This page lists verbs that end with R. Several types of verbs may be included such as linking verbs. These verbs ending in R might be particularly helpful to college students taking classes toward a degree, teachers, people writing reports, and those looking for an online reference site about verbs.
administer, alter, answer, appear, bar, bear, better, blister, blur, bolster, bother, cater, center, char, chatter, cheer, cipher, clamor, clatter, clear, clobber, cluster, collar, color, concur, confer, conquer, consider, counter, cover, defer, deliver, despair, dessier, deter, devour, differ, disappear, discover, empower, encounter, endanger, endeavor, engender, enter, err, falter, father, favor, fear, feather, figger, filter, flatter, flicker, flounder, flower, foster, further, garner, gather, hammer, hamper, harbor, hear, honor, hover, huckster, humor, impair, incur, infer, labor, linger, lower, major, maneuver, mar, master, matter, mirror, miter, mother, motor, murder, murmur, muster, mutter, near, number, occur, offer, order, outnumber, pamper, paper, peer, pester, pioneer, plunder, ponder, pour, power, prefer, prosper, reappear, rear, reconsider, recover, recur, rediscover, refer, register, remember, render, reorder, repair, roar, scour, sear, sever, shatter, shear, shelter, shiver, shoulder, shower, shudder, simmer, solder, sour, spear, splinter, sponsor, spur, sputter, stagger, steer, stir, suffer, sunder, surrender, swear, tamper, taper, tear, thunder, titter.
Verbs1.com has many examples of verbs which end in various letters. Hope you enjoy this page of verbs ending with r and the rest of this verb list site. |
lemma small_refl_iff: "f \<in> l F (f) \<longleftrightarrow> eventually (\<lambda>x. f x = 0) F" |
module Trade.Algorithm.Bollinger where
import qualified Data.Vector as Vec
import Data.Vector (Vector)
import Statistics.Sample (mean, stdDev)
-- import Trade.Render.Svg.Plot
import Debug.Trace
-- number stddev
newtype BolK = BolK Int deriving (Show)
-- rolling window
newtype BolWin = BolWin Int deriving (Show)
meanStDev :: BolWin -> Vector Double -> (Vector (Double, Double), Vector (Double, Double))
meanStDev (BolWin l) v
| l > Vec.length v = error $ "meanStDev: input vector to short: " ++ show l ++ "/" ++ show (Vec.length v)
meanStDev (BolWin l) v =
let len = Vec.length v
f a =
let u = Vec.slice a l v
in (mean u, stdDev u)
idx = Vec.generate (len-l) (fromIntegral . (l+))
(ms, ss) = Vec.unzip $ Vec.generate (len-l) f
in (Vec.zip idx ms, Vec.zip idx ss)
data Bollinger = Bollinger {
n :: BolWin
, k :: BolK
, bMean :: Vector (Double, Double)
, bStDev :: Vector (Double, Double)
} deriving (Show)
bollinger :: BolK -> BolWin -> Vector Double -> Bollinger
bollinger k n = uncurry (Bollinger n k) . meanStDev n
{-
bollinger2Lines :: String -> BolK -> BolWin -> Vector Double -> [PlotItem Vector Double]
bollinger2Lines str k@(BolK j) n v =
let Bollinger _ _ ms ss = bollinger k n v
k' = fromIntegral j
f j (i, m) (_, s) = (i, m + j*s)
in [ Line (str ++ " μ") ms
, Line (str ++ " " ++ show j ++ "σ") (Vec.zipWith (f k') ms ss)
, Line (str ++ " " ++ show j ++ "σ") (Vec.zipWith (f (-k')) ms ss) ]
-}
|
-- {-# OPTIONS -v tc.inj:100 -v tc.reduce:100 #-}
module Issue801 where
data ℕ : Set where
zero : ℕ
suc : ℕ → ℕ
data _≡_ {A : Set}(x : A) : A → Set where
refl : x ≡ x
cong : ∀ {A : Set} {B : Set}
(f : A → B) {x y} → x ≡ y → f x ≡ f y
cong f refl = refl
lem : (n : ℕ) → n ≡ n
lem zero = refl
lem (suc n) = cong (λ x → x) (lem (suc n))
-- Andreas: this made the injectivity checker loop.
-- Now it should just report a termination error.
|
[STATEMENT]
lemma vsv_vimageI2:
assumes "a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> r"
shows "r\<lparr>a\<rparr> \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r\<lparr>a\<rparr> \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> r
goal (1 subgoal):
1. r\<lparr>a\<rparr> \<in>\<^sub>\<circ> \<R>\<^sub>\<circ> r
[PROOF STEP]
by (blast dest: vsv_ex1_app1) |
## 1. Prediction models based on systems of linear equations
Solving a system of linear equations is equivalent to finding the weight vector of a linear prediction model.
$$
\begin{align}
\begin{matrix}
x_{11} w_1 & + \;& x_{12} w_2 &\; + \cdots + \;& x_{1N} w_N &\; = \;& y_1 \\
x_{21} w_1 & + \;& x_{22} w_2 &\; + \cdots + \;& x_{2N} w_N &\; = \;& y_2 \\
\vdots\;\;\; & & \vdots\;\;\; & & \vdots\;\;\; & & \;\vdots \\
x_{N1} w_1 & + \;& x_{N2} w_2 &\; + \cdots + \;& x_{NN} w_N &\; = \;& y_N \\
\end{matrix}
\end{align}
$$
The equations above form a system of linear equations. They can also be viewed as a collection of functions that produce the N target values (y).
Here, x denotes the data used to predict the target values. What we need is a function that takes the data x as input and produces a reasonable target value y.
In the end, this function can be expressed as the following operation between a matrix and a vector.
$$
\begin{align}
Xw = y
\end{align}
$$
We have to find a model that describes the relationship between the input (x, the data) and the output (y, the prediction). What we need to find for this is the optimal $w$.
$w$ can be obtained from the inverse of the feature matrix (X) as follows.
$$
\begin{align}
w = X^{-1} y
\end{align}
$$
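Before moving to the Boston example, here is a tiny numerical illustration of the formula above with made-up numbers (the 2x2 system is arbitrary and only serves to show the mechanics):
```python
# tiny illustration of w = X^{-1} y with made-up numbers
import numpy as np

X = np.array([[1.0, 2.0],
              [3.0, 4.0]])
y = np.array([5.0, 6.0])

w = np.linalg.inv(X) @ y
print(w)                       # [-4. ,  4.5]
print(np.allclose(X @ w, y))   # True: the weights reproduce the targets
```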
## 2. Prediction models based on systems of linear equations: example (building a Boston house-price model)
scikit-learn is one of the Python packages that anyone studying machine learning has heard of at least once, and it is as useful as it is famous. scikit-learn also provides a variety of datasets, among them a dataset that bundles Boston house prices together with a number of features.
Using the systems of linear equations briefly reviewed above, we will build a model that predicts Boston house prices.
We cast the Boston house-price problem as the linear prediction model $Ax=\hat{b}$, find the weight vector $x$, and thereby complete the prediction model.
To keep the problem simple, we restrict the input data to four features: crime rate (CRIM), air pollution (NOX), number of rooms (RM), and age of the house (AGE).
```python
# numpy and pandas are used throughout the rest of this notebook
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston

boston = load_boston()
X = boston.data
y = boston.target
print(boston.DESCR)
```
.. _boston_dataset:
Boston house prices dataset
---------------------------
**Data Set Characteristics:**
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
.. topic:: References
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
This is a short description of the Boston house-price data. The dataset comes from the StatLib library maintained at Carnegie Mellon University.
The data mixes real-valued and categorical features and contains 506 samples. There are no missing values. :)
Now let us actually print the data and have a look.
```python
X.shape
```
(506, 13)
As expected, the data contains 506 samples over 13 features. Since we only want to briefly examine 4 of these features, we need to extract them separately.
We extract "crime rate (CRIM): 0, air pollution (NOX): 4, number of rooms (RM): 5, age (AGE): 6" and build the feature matrix $A$ from them.
```python
A = X[:,[0,4,5,6]]
```
```python
# To visualize the feature matrix A, we create a separate object A_ and display it as a DataFrame.
A_ = pd.DataFrame(A,columns = ['crim','nox','rm','age'])
A_.describe()
```
|       | crim       | nox        | rm         | age        |
|-------|------------|------------|------------|------------|
| count | 506.000000 | 506.000000 | 506.000000 | 506.000000 |
| mean  | 3.613524   | 0.554695   | 6.284634   | 68.574901  |
| std   | 8.601545   | 0.115878   | 0.702617   | 28.148861  |
| min   | 0.006320   | 0.385000   | 3.561000   | 2.900000   |
| 25%   | 0.082045   | 0.449000   | 5.885500   | 45.025000  |
| 50%   | 0.256510   | 0.538000   | 6.208500   | 77.500000  |
| 75%   | 3.677083   | 0.624000   | 6.623500   | 94.075000  |
| max   | 88.976200  | 0.871000   | 8.780000   | 100.000000 |
Next, let's build the vector b from our target, the house prices.
```python
b = boston.target
b.shape
```
(506,)
Now that we have $A$ and $b$, let's solve the linear system to find x (the weight vector) in $Ax=\hat{b}$.
The catch is that A is not a square matrix, so **we cannot compute the inverse of A.**
An inverse is only exactly defined for a square matrix! (There is an approximate route, the pseudo-inverse, which we will revisit later.)
So, even though we have not covered it yet, we will first solve this as a least squares problem, and afterwards practice finding the weight vector ($x$) with an inverse matrix.
The least squares problem is the standard approach when, as here, no exact solution (weight vector $x$) exists: the idea is to find the solution that comes **closest** to satisfying the system. We will look at it in more detail later! : )
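As a quick aside before using the library routine: under the hood, least squares solves the normal equations $A^TAx = A^Tb$. A minimal sketch of that idea, assuming `A` (506×4) and `b` (506,) are the arrays built above:

```python
import numpy as np

# Normal equations: (A^T A) x = A^T b.
# A^T A is only 4x4 here, so solving it directly is cheap and should
# match what np.linalg.lstsq returns on this data.
x_normal = np.linalg.solve(A.T @ A, A.T @ b)
print(x_normal)
```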
### 2_1. Least square problem
We will solve the least squares problem with the routine provided by NumPy.
The `np.linalg.lstsq()` function solves the least squares problem.
```python
x, resid, rank, s = np.linalg.lstsq(A, b, rcond=None)  # rcond=None silences the FutureWarning in recent NumPy
```
Now let's look at the solution (x, the weight vector) we were after.
```python
for i in range(4):
    print("{} weight : {}".format(A_.columns[i], x[i]))
```
crim weight : -0.1839836056341421
nox weight : -19.396230746735444
rm weight : 5.673593152217298
age weight : -0.022788798368235375
Hmm... to interpret this briefly:
higher crime rate, air pollution, and building age are each associated with lower house prices (an inverse relationship), while more rooms means a higher price!
That matches common sense! :) It looks like the least squares method solved the problem reasonably well.
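As a quick sanity check on that fit, we can measure how far $A\hat{x}$ is from $b$. A minimal sketch, assuming `A`, `b`, and `x` are still the full 506-row arrays from above:

```python
import numpy as np

# Residual of the least squares fit: how close A @ x gets to b.
b_hat = A @ x
residual = np.linalg.norm(b_hat - b)
relative_error = residual / np.linalg.norm(b)
print("residual norm  :", residual)
print("relative error :", relative_error)
```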
### 2_2. Solve the systems of linear equations using inverse matrix
This time let's find the solution (x, the weight vector) using an inverse matrix!
The problem we defined was $Ax=\hat{b}$.
There is a problem, though: an inverse only exists for a square matrix, so we have to turn A into a square matrix.
When we solved with least squares, A was a skinny matrix with far more rows than columns!
```python
A.shape
```
(506, 4)
Now let's convert A into a square matrix!
```python
A = A[:4]
```
```python
A.shape
```
(4, 4)
Likewise, we keep only 4 components of b, so that $(4\times4)(4\times1) = (4\times1)$ works out.
```python
b = b[:4]
b.shape
```
(4,)
Now, to solve $x = A^{-1}b$, let's compute the inverse of A and then x.
```python
A_inv = np.linalg.inv(A)
```
```python
x = A_inv@b
for i in range(4):
    print("{} weight : {}".format(A_.columns[i], x[i]))
```
crim weight : -312.710043270391
nox weight : -115.19394234554954
rm weight : 14.4996465318047
age weight : -0.1132593173503273
Hmm... these differ somewhat from the weights found earlier using all 506 data points.
The signs (direct vs. inverse relationships) are the same, but the magnitudes are much larger. A model fit to only 4 points is bound to be less accurate and far more rigid than the one fit to all 506 points!
We have now found the weight vector with both the least squares method and an inverse matrix. Machine learning and deep learning are, at heart, collections of different methods for finding the weights of such systems of equations. Hopefully we can explore more of those methods together in the future! :)
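One last aside on the pseudo-inverse mentioned earlier: applied to the full, non-square $A$, it solves the same least squares problem, so it should reproduce the `lstsq` weights. A minimal sketch, assuming we first rebuild the original 506×4 matrix and the full target vector (they were truncated to 4 rows above):

```python
import numpy as np

A_full = X[:, [0, 4, 5, 6]]   # rebuild the 506x4 feature matrix
b_full = boston.target        # full target vector

# Moore-Penrose pseudo-inverse: x = pinv(A) @ b minimizes ||Ax - b||_2,
# so it should agree with np.linalg.lstsq on the full data.
x_pinv = np.linalg.pinv(A_full) @ b_full
print(x_pinv)
```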
|
Formal statement is: lemma poly_cancel_eq_conv: fixes x :: "'a::field" shows "x = 0 \<Longrightarrow> a \<noteq> 0 \<Longrightarrow> y = 0 \<longleftrightarrow> a * y - b * x = 0" Informal statement is: If $x = 0$ and $a \neq 0$, then $y = 0$ if and only if $a y - b x = 0$. |
We spend a lot of time sitting, whether for playing an instrument, working at the computer, or any number of daily activities. This very familiar position is often the source of many pains that slowly settle in over time: tension in the back, the neck, the eyes… Our ability to sit comfortably has a direct effect on what we do while sitting, and we often wonder how to sit “better”. What is a “good” position, one in which you no longer hurt? But what does it really mean to be “sitting well”? Rather than searching for a single ideal, fixed position, we can also have fun exploring the many variations this position allows and thus find, according to each individual’s needs, more comfort and flexibility for whatever we need to do.
library(sf)
library(stars)
library(glue)
library(tictoc)
library(devtools)
ville <- "La Rochelle"
localdata <- "~/files/la rochelle/r5"
DVFdata <- "~/files/DVFdata"
# pick a small test area: the core rather than the commuting zone
FUA <- st_read("{DVFdata}/sources/FUA/FRA core/FRA_core.shp" |> glue(),
stringsAsFactors=FALSE) |> st_transform(3035)
fua <- FUA |> dplyr::filter(fuaname == ville) |> dplyr::pull(geometry)
# choose the grid resolution
resol <- 200
jour_du_transit <- as.POSIXct("2022-03-09 08:00:00 UTC")
jMem <- "8G"
options(java.parameters = '-Xmx12G')
#moteur_r5 <- r5r::setup_r5(localdata, verbose = TRUE, overwrite = FALSE)
larochelle_c200 <- qs::qread("~/files/la rochelle/la_rochelle_c200.rda", nthreads = 4)
larochelle_emp <- qs::qread(file = "~/files/la rochelle/emp_LaRochelle.rda", nthreads = 4)
# For now, test on the core only, not the commuting zone
larochelle_emp <- larochelle_emp[fua]
st_dimensions(larochelle_c200) <- st_dimensions(larochelle_emp)
opportunites_stars <- c(larochelle_emp, dplyr::select(larochelle_c200, Ind))
rm(larochelle_c200, larochelle_emp)
opportunites <- st_as_sf(opportunites_stars, as_points = TRUE) |>
dplyr::transmute(emplois = ifelse(is.na(emplois_total), 0, emplois_total)) |>
dplyr::filter(emplois != 0)
rJava::.jinit()
# load_all()
moteur_r5_wparam <- routing_setup_r5(path = localdata, date = jour_du_transit, jMem = jMem, n_threads = 4)
tic()
iso_transit_r5 <- iso_accessibilite(quoi = opportunites,
ou = fua |> st_zm(drop=TRUE),
resolution = resol,
tmax = 30,
pdt = 5,
dir = "r5_temp_larochelle" |> glue(),
routing = moteur_r5_wparam,
future=TRUE)
toc()
|
Formal statement is: lemma path_approx_polynomial_function: fixes g :: "real \<Rightarrow> 'b::euclidean_space" assumes "path g" "0 < e" obtains p where "polynomial_function p" "pathstart p = pathstart g" "pathfinish p = pathfinish g" "\<And>t. t \<in> {0..1} \<Longrightarrow> norm(p t - g t) < e" Informal statement is: Suppose $g$ is a path in $\mathbb{R}^n$. For every $\epsilon > 0$, there exists a polynomial $p$ such that $p(0) = g(0)$, $p(1) = g(1)$, and for all $t \in [0,1]$, we have $|p(t) - g(t)| < \epsilon$. |
/-
Copyright (c) 2022 Jun Yoshida. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
-/
import Mathlib.CategoryTheory.Category.Basic
import Mathlib.CategoryTheory.Functor.Basic
import Mathlib.CategoryTheory.Functor.Category
import Mathlib.CategoryTheory.NatTrans
import Moncalc.Data.DVect2.Basic
import Moncalc.CategoryTheory.List.Basic
import Moncalc.CategoryTheory.List.Functor
/-!
# The 2-monad structure on `(List,List.mapF)`
As shown in the file `CategoryTheory.List.Functor`, the pair `(List,List.mapF)` admits a structure of a 2-endofunctor on the 2-category Cat of (small) categories.
In this file, we further show that it also admits a 2-monad structure consisting of `List.join` on objects and `DVect2.join` on morphisms.
-/
namespace CategoryTheory.List
universe u v w
/-!
## Compatibility of `(List,List.mapF)` and `append`
-/
namespace appendF
variable {α : Type u} [Category α] {β : Type v} [Category β] (F : Functor α β)
/-
@[simp]
protected
theorem naturality_obj {β : Type v} [Category β] (F : Functor α β) : ∀ {as₁ as₂ : List α}, (mapF.obj F).obj (as₁ ++ as₂) = (mapF.obj F).obj as₁ ++ (mapF.obj F).obj as₂ :=
List.map_append F.obj _ _
-/
protected
def naturatorLax : (as₁ as₂ : List α) → ((mapF.obj F).obj as₁ ++ (mapF.obj F).obj as₂ ⟶ (mapF.obj F).obj (as₁ ++ as₂))
| [], as₂ => 𝟙 ((mapF.obj F).obj as₂)
| (_::as₁), as₂ => DVect2.cons (𝟙 _) (appendF.naturatorLax as₁ as₂)
protected
lemma naturatorLax_naturality {as₁ as₂ bs₁ bs₂ : List α} {fs : as₁ ⟶ bs₁} {gs : as₂ ⟶ bs₂} : ((mapF.obj F).map fs ++ (mapF.obj F).map gs) ≫ appendF.naturatorLax F bs₁ bs₂ = appendF.naturatorLax F as₁ as₂ ≫ (mapF.obj F).map (fs ++ gs) := by
induction fs
case nil =>
dsimp [appendF.naturatorLax]
rw [Category.comp_id (Y:=(mapF.obj F).obj bs₂)]
rw [Category.id_comp (X:=(mapF.obj F).obj as₂)]
rfl
case cons f fs h_ind =>
dsimp [appendF.naturatorLax]
conv =>
lhs
rw [DVect2.cons_append, comp_cons]
rw [Category.comp_id]
conv =>
rhs
rw [DVect2.cons_append]
dsimp
rw [Category.id_comp]
rw [h_ind]
protected
def naturatorOplax : (as₁ as₂ : List α) → ((mapF.obj F).obj (as₁ ++ as₂) ⟶ (mapF.obj F).obj as₁ ++ (mapF.obj F).obj as₂)
| [], as₂ => 𝟙 ((mapF.obj F).obj as₂)
| (_::as₁), as₂ => DVect2.cons (𝟙 _) (appendF.naturatorOplax as₁ as₂)
protected
lemma naturatorOplax_naturality {as₁ as₂ bs₁ bs₂ : List α} {fs : as₁ ⟶ bs₁} {gs : as₂ ⟶ bs₂} : ((mapF.obj F).map (fs ++ gs)) ≫ appendF.naturatorOplax F bs₁ bs₂ = appendF.naturatorOplax F as₁ as₂ ≫ ((mapF.obj F).map fs ++ (mapF.obj F).map gs) := by
induction fs
case nil =>
dsimp [appendF.naturatorOplax]
rw [Category.comp_id, Category.id_comp]
rfl
case cons f fs h_ind =>
dsimp [appendF.naturatorOplax]
conv =>
lhs
rw [DVect2.cons_append]
dsimp
rw [Category.comp_id]
conv =>
rhs
rw [DVect2.cons_append]
dsimp
rw [Category.id_comp]
rw [h_ind]
@[simp]
protected
theorem naturator_lax_oplax (as₁ as₂ : List α) : appendF.naturatorLax F as₁ as₂ ≫ appendF.naturatorOplax F as₁ as₂ = 𝟙 ((mapF.obj F).obj as₁ ++ (mapF.obj F).obj as₂) := by
induction as₁
case nil =>
dsimp [appendF.naturatorLax, appendF.naturatorOplax]
rw [Category.id_comp (X:=(mapF.obj F).obj as₂)]
rfl
case cons a as₁ h_ind =>
dsimp [appendF.naturatorLax, appendF.naturatorOplax]
rw [Category.id_comp]
rw [h_ind]
@[simp]
protected
theorem naturator_oplax_lax (as₁ as₂ : List α) : appendF.naturatorOplax F as₁ as₂ ≫ appendF.naturatorLax F as₁ as₂ = 𝟙 ((mapF.obj F).obj (as₁ ++ as₂)) := by
induction as₁
case nil =>
dsimp [appendF.naturatorLax, appendF.naturatorOplax]
rw [Category.id_comp]
case cons a as₁ h_ind =>
dsimp [appendF.naturatorLax, appendF.naturatorOplax]
rw [Category.id_comp]
rw [h_ind]
end appendF
namespace joinF
/-!
### Naturality of `List.joinF`
-/
section naturality
variable {α : Type u} [Category α] {β : Type v} [Category β] (F : Functor α β)
--- Naturality as an equality
protected
theorem naturality : mapF.obj (mapF.obj F) ⋙ joinF (α:=β) = joinF (α:=α) ⋙ mapF.obj F := by
apply eqF_List
. intro ass
induction ass
case a.nil => rfl
case a.cons as ass h_ind =>
dsimp at *
have : ∀ (as bs : List α), (mapF.obj F).obj (as++bs) = (mapF.obj F).obj as ++ (mapF.obj F).obj bs :=
λ as bs => List.map_append F.obj _ _
rw [this, h_ind]
. intros as₁ as₂ fs
induction fs
case a.nil => exact DVect2.Eq.nil_rfl
case a.cons f fs h_ind =>
dsimp
have : ∀ {as₁ as₂ bs₁ bs₂ : List α} (fs₁ : as₁ ⟶ bs₁) (fs₂ : as₂ ⟶ bs₂), DVect2.Eq ((mapF.obj F).map (fs₁ ++ fs₂)) ((mapF.obj F).map fs₁ ++ (mapF.obj F).map fs₂) :=
λ fs₁ fs₂ => DVect2.map_append F.obj F.obj F.map
apply DVect2.Eq.trans _ (this f (joinF.map fs)).symm
apply DVect2.eq_of_eq_append_eq (DVect2.Eq.of rfl) h_ind
set_option autoImplicit false
protected
def naturatorLax : (ass : List (List α)) → ((joinF (α:=α) ⋙ mapF.obj F).obj ass ⟶ (mapF.obj (mapF.obj F) ⋙ joinF (α:=β)).obj ass)
| [] => DVect2.nil
| (as::ass) =>
appendF.naturatorOplax F as ((joinF (α:=α)).obj ass) ≫ ((mapF.obj F).map (𝟙 as) ++ joinF.naturatorLax ass)
protected
def naturatorOplax : (ass : List (List α)) → ((mapF.obj (mapF.obj F) ⋙ joinF (α:=β)).obj ass ⟶ (joinF (α:=α) ⋙ mapF.obj F).obj ass)
| [] => DVect2.nil
| (as::ass) => by
dsimp
exact ((mapF.obj F).map (𝟙 as) ++ joinF.naturatorOplax ass) ≫ appendF.naturatorLax F as ((joinF (α:=α)).obj ass)
--- Naturality as a natural isomorphism
protected
def naturator : joinF (α:=α) ⋙ mapF.obj F ≅ mapF.obj (mapF.obj F) ⋙ joinF (α:=β) where
hom := {
app := joinF.naturatorLax F
naturality := by
intro _ _ fss
induction fss
case nil => rfl
case cons fs fss h_ind =>
dsimp only [joinF.naturatorLax]
conv =>
lhs; rw [←Category.assoc]
change ((mapF.obj F).map (fs ++ joinF.map fss) ≫ appendF.naturatorOplax F _ ((joinF (α:=α)).obj _)) ≫ (Prefunctor.map (Prefunctor.obj mapF.toPrefunctor F).toPrefunctor (𝟙 _) ++ joinF.naturatorLax F _)
rw [appendF.naturatorOplax_naturality F (fs:=fs)]
rw [Category.assoc]
rhs
rw [appendF.map_comp, (mapF.obj F).map_id, Category.comp_id]
rhs
change (joinF ⋙ mapF.obj F).map fss ≫ joinF.naturatorLax F _
rw [h_ind]
conv =>
rhs; rw [Category.assoc]; rhs
dsimp
rw [appendF.map_comp, (mapF.obj F).map_id, Category.id_comp]
}
inv := {
app := joinF.naturatorOplax F
naturality := by
intro _ _ fss
induction fss
case nil => rfl
case cons fs fss h_ind =>
dsimp [joinF.naturatorOplax]
conv =>
lhs; rw [←Category.assoc, appendF.map_comp, (mapF.obj F).map_id]
rw [Category.comp_id]
lhs; rhs
change (mapF.obj (mapF.obj F) ⋙ joinF).map fss ≫ joinF.naturatorOplax F _
rw [h_ind]
conv =>
rhs; rw [Category.assoc]
rw [←appendF.naturatorLax_naturality F (fs:=fs)]
rw [←Category.assoc]
rw [appendF.map_comp, (mapF.obj F).map_id, Category.id_comp]
}
hom_inv_id := by
ext ass; dsimp
induction ass
case nil => rfl
case cons as ass h_ind =>
dsimp [joinF.naturatorLax, joinF.naturatorOplax]
conv =>
lhs; rw [Category.assoc]; rhs; rw [←Category.assoc]; lhs
rw [appendF.map_comp, (mapF.obj F).map_id, Category.id_comp]
rw [h_ind, appendF.map_id]
rw [Category.id_comp, appendF.naturator_oplax_lax]
inv_hom_id := by
ext ass; dsimp
induction ass
case nil => rfl
case cons as ass h_ind =>
dsimp [joinF.naturatorLax, joinF.naturatorOplax]
conv =>
lhs; rw [Category.assoc]; rhs; rw [←Category.assoc]; lhs
rw [appendF.naturator_lax_oplax]
rw [Category.id_comp]
rw [appendF.map_comp, (mapF.obj F).map_id, h_ind, Category.id_comp]
exact appendF.map_id
end naturality
end joinF
/-!
## `List.singletonF` the embedding of a category `α` into `List α`
-/
--- The functorial embedding of `α` into `List α`.
def singletonF {α : Type u} [Category α] : Functor α (List α) where
obj := λ a => [a]
map := λ f => DVect2.cons f DVect2.nil
namespace singletonF
variable {α : Type u} [Category α] {β : Type v} [Category β] (F : Functor α β)
--- 2-naturality as equality
protected
theorem naturality : singletonF ⋙ mapF.obj F = F ⋙ singletonF := rfl
protected
def naturator : singletonF ⋙ mapF.obj F ≅ F ⋙ singletonF :=
Iso.refl _
end singletonF
/-!
## 2-monad structure
-/
namespace joinF
variable {α : Type u} [Category α]
/-!
### Monad laws as equations
-/
--- Left unitality of `List.joinF` with respect to `List.singletonF` as an equality.
@[simp]
protected
theorem unit_left : singletonF (α:=List α) ⋙ joinF = 𝟭 (List α) := by
dsimp [Functor.comp, Functor.id]
dsimp [singletonF, joinF, DVect2.join]
apply eqF_List
. intro as
dsimp
exact List.append_nil as
. intro as bs fs
dsimp
exact DVect2.append_nil _
--- Right unitality of `List.joinF` with respect to `List.singletonF` as an equality.
@[simp]
protected
theorem unit_right : mapF.obj singletonF ⋙ joinF = Functor.id (List α) := by
dsimp [Functor.comp, Functor.id]
dsimp [singletonF, joinF, mapF, DVect2.join]
apply eqF_List <;> dsimp
. intro as
induction as
case a.nil => rfl
case a.cons a as h_ind => dsimp; rw [h_ind]
. intro as bs fs
induction fs
case a.nil =>
dsimp [DVect2.map, DVect2.join]
exact DVect2.Eq.of rfl
case a.cons a as b bs f fs h_ind =>
dsimp [DVect2.map, DVect2.join]
rw [DVect2.cons_append, DVect2.nil_append]
exact DVect2.Eq.descend rfl h_ind
--- Two different ways to flatten `List (List (List α))` with the `joinF` functor result in (strictly) the same functor.
protected
theorem assoc : joinF (α:=List α) ⋙ joinF = mapF.obj joinF ⋙ joinF := by
dsimp [Functor.comp, joinF, mapF]
apply eqF_List <;> dsimp
. intros; exact List.join_join _
. intros; exact DVect2.join_join _
/-!
### Monad laws as natural isomorphisms
-/
protected
def unitorLeftLax : (as : List α) → (as ⟶ ((singletonF (α:=List α) ⋙ joinF).obj as))
| [] => 𝟙 []
| (a::as) => DVect2.cons (𝟙 a) (joinF.unitorLeftLax as)
protected
def unitorLeftOplax : (as : List α) → (((singletonF (α:=List α) ⋙ joinF).obj as) ⟶ as)
| [] => 𝟙 []
| (a::as) => DVect2.cons (𝟙 a) (joinF.unitorLeftOplax as)
@[simp]
protected
lemma unitorLeft_lax_oplax (as : List α) : joinF.unitorLeftLax as ≫ joinF.unitorLeftOplax as = 𝟙 as := by
induction as
case nil => rfl
case cons a as h_ind =>
dsimp [joinF.unitorLeftLax, joinF.unitorLeftOplax]
rw [h_ind, Category.id_comp]
@[simp]
protected
lemma unitorLeft_oplax_lax (as : List α) : joinF.unitorLeftOplax as ≫ joinF.unitorLeftLax as = 𝟙 _ := by
induction as
case nil => rfl
case cons a as h_ind =>
dsimp [joinF.unitorLeftLax, joinF.unitorLeftOplax]
rw [h_ind, Category.id_comp]
rfl
--- Left unitality of `List.joinF` with respect to `List.singletonF` as a natural isomorphism
protected
def unitorLeft : 𝟭 (List α) ≅ singletonF (α:=List α) ⋙ joinF where
hom := {
app := joinF.unitorLeftLax
naturality := by
intro _ _ fs
dsimp [Functor.id]
induction fs
case nil => rfl
case cons f fs h_ind =>
dsimp [joinF.unitorLeftLax, joinF.unitorLeftOplax]
rw [Category.comp_id, h_ind]
dsimp [singletonF, joinF, DVect2.join]
rw [DVect2.cons_append, comp_cons, Category.id_comp]
}
inv := {
app := joinF.unitorLeftOplax
naturality := by
intro _ _ fs
dsimp [Functor.id]
induction fs
case nil => rfl
case cons f fs h_ind =>
dsimp [joinF.unitorLeftLax, joinF.unitorLeftOplax]
dsimp [singletonF, joinF, DVect2.join] at *
rw [DVect2.cons_append, comp_cons, Category.id_comp, Category.comp_id]
rw [h_ind]
}
hom_inv_id := by ext; intros; simp
inv_hom_id := by ext; intros; simp
protected
def unitorRightLax : (as : List α) → (as ⟶ (mapF.obj (singletonF (α:=α)) ⋙ joinF).obj as)
| [] => 𝟙 []
| (a::as) => DVect2.cons (𝟙 a) (joinF.unitorRightLax as)
protected
def unitorRightOplax : (as : List α) → ((mapF.obj (singletonF (α:=α)) ⋙ joinF).obj as ⟶ as)
| [] => 𝟙 []
| (a::as) => DVect2.cons (𝟙 a) (joinF.unitorRightOplax as)
@[simp]
protected
lemma unitorRight_lax_oplax (as : List α) : joinF.unitorRightLax as ≫ joinF.unitorRightOplax as = 𝟙 as := by
induction as
case nil => rfl
case cons a as h_ind =>
dsimp [joinF.unitorRightLax, joinF.unitorRightOplax]
rw [Category.id_comp, h_ind]
@[simp]
protected
lemma unitorRight_oplax_lax (as : List α) : joinF.unitorRightOplax as ≫ joinF.unitorRightLax as = 𝟙 _ := by
induction as
case nil => rfl
case cons a as h_ind =>
dsimp [joinF.unitorRightLax, joinF.unitorRightOplax]
rw [Category.id_comp, h_ind]
rfl
protected
def unitorRight : 𝟭 (List α) ≅ mapF.obj (singletonF (α:=α)) ⋙ joinF where
hom := {
app := joinF.unitorRightLax
naturality := by
intro _ _ fs; dsimp
induction fs
case nil => rfl
case cons f fs h_ind =>
dsimp [joinF.unitorRightLax]
rw [Category.comp_id, h_ind]
dsimp [singletonF, HAppend.hAppend, DVect2.append]
rw [Category.id_comp]
}
inv := {
app := joinF.unitorRightOplax
naturality := by
intro _ _ fs; dsimp
induction fs
case nil => rfl
case cons f fs h_ind =>
dsimp [joinF.unitorRightOplax]
rw [←h_ind]
dsimp [singletonF, HAppend.hAppend, DVect2.append]
rw [Category.comp_id, Category.id_comp]
}
hom_inv_id := by ext; intros; simp
inv_hom_id := by ext; intros; simp
@[irreducible]
protected
def associatorLeft : (asss : List (List (List α))) → ((mapF.obj (joinF (α:=α)) ⋙ joinF).obj asss ⟶ (joinF (α:=List α) ⋙ joinF).obj asss) :=
let rec aux : (as : List α) → (ass : List (List α)) → (asss : List (List (List α))) → ((mapF.obj (joinF (α:=α)) ⋙ joinF).obj ((as::ass)::asss) ⟶ (joinF (α:=List α) ⋙ joinF).obj ((as::ass)::asss))
| [], [], [] => 𝟙 []
| [], [], (ass::asss) => aux [] ass asss
| [], (as::ass), asss => aux as ass asss
| (a::as), ass, asss => DVect2.cons (𝟙 a) (aux as ass asss)
aux [] []
termination_by aux as ass asss => (asss.length, ass.length, as.length)
@[simp]
protected
lemma associatorLeft_nil : joinF.associatorLeft (α:=α) [] = 𝟙 [] :=
by unfold joinF.associatorLeft; rfl
@[simp]
protected
lemma associatorLeft_cons_nil {asss : List (List (List α))} : joinF.associatorLeft ([]::asss) = joinF.associatorLeft asss :=
by unfold joinF.associatorLeft; rfl
@[simp]
protected
lemma associatorLeft_cons_cons_nil {ass : List (List α)} {asss : List (List (List α))} : joinF.associatorLeft (([]::ass)::asss) = joinF.associatorLeft (ass::asss) :=
by
unfold joinF.associatorLeft
conv => lhs; unfold associatorLeft.aux; unfold associatorLeft.aux
@[simp]
protected
lemma associatorLeft_cons_cons_cons {a : α} {as : List α} {ass : List (List α)} {asss : List (List (List α))} : joinF.associatorLeft (((a::as)::ass)::asss) = DVect2.cons (𝟙 a) (joinF.associatorLeft ((as::ass)::asss)) := by
unfold joinF.associatorLeft
unfold joinF.associatorLeft.aux
unfold joinF.associatorLeft.aux
conv => lhs; unfold joinF.associatorLeft.aux
@[irreducible]
protected
def associatorRight : (asss : List (List (List α))) → ((joinF (α:=List α) ⋙ joinF).obj asss ⟶ (mapF.obj (joinF (α:=α)) ⋙ joinF).obj asss) :=
let rec aux : (as : List α) → (ass : List (List α)) → (asss : List (List (List α))) → ((joinF (α:=List α) ⋙ joinF).obj ((as::ass)::asss) ⟶ (mapF.obj (joinF (α:=α)) ⋙ joinF).obj ((as::ass)::asss))
| [], [], [] => 𝟙 []
| [], [], (ass::asss) => aux [] ass asss
| [], (as::ass), asss => aux as ass asss
| (a::as), ass, asss => DVect2.cons (𝟙 a) (aux as ass asss)
aux [] []
termination_by aux as ass asss => (asss.length, ass.length, as.length)
@[simp]
protected
lemma associatorRight_nil : joinF.associatorRight (α:=α) [] = 𝟙 [] :=
by unfold joinF.associatorRight; rfl
@[simp]
protected
lemma associatorRight_cons_nil {asss : List (List (List α))} : joinF.associatorRight ([]::asss) = joinF.associatorRight asss :=
by unfold joinF.associatorRight; rfl
@[simp]
protected
lemma associatorRight_cons_cons_nil {ass : List (List α)} {asss : List (List (List α))} : joinF.associatorRight (([]::ass)::asss) = joinF.associatorRight (ass::asss) :=
by
unfold joinF.associatorRight
conv => lhs; unfold associatorRight.aux; unfold associatorRight.aux
@[simp]
protected
lemma associatorRight_cons_cons_cons {a : α} {as : List α} {ass : List (List α)} {asss : List (List (List α))} : joinF.associatorRight (((a::as)::ass)::asss) = DVect2.cons (𝟙 a) (joinF.associatorRight ((as::ass)::asss)) := by
unfold joinF.associatorRight
unfold joinF.associatorRight.aux
unfold joinF.associatorRight.aux
conv => lhs; unfold joinF.associatorRight.aux
@[simp]
protected
lemma associator_left_right (asss : List (List (List α))) : joinF.associatorLeft asss ≫ joinF.associatorRight asss = 𝟙 _ := by
induction asss
case nil => rfl
case cons ass asss h_indsss =>
induction ass
case nil => simp; exact h_indsss
case cons as ass h_indss =>
induction as
case nil => simp; exact h_indss
case cons a as h_inds => simp; rw [h_inds]; rfl
@[simp]
protected
lemma associator_right_left (asss : List (List (List α))) : joinF.associatorRight asss ≫ joinF.associatorLeft asss = 𝟙 _ := by
induction asss
case nil => rfl
case cons ass asss h_indsss =>
induction ass
case nil => simp; exact h_indsss
case cons as ass h_indss =>
induction as
case nil => simp; exact h_indss
case cons a as h_inds => simp; rw [h_inds]; rfl
protected
def associator : (mapF.obj joinF ⋙ joinF) ≅ (joinF (α:=List α) ⋙ joinF) where
hom := {
app := joinF.associatorLeft
naturality := by
intro _ _ fsss
induction fsss
case nil => rfl
case cons fss fsss h_indss =>
induction fss
case nil =>
conv => lhs; lhs; simp; change (mapF.obj joinF ⋙ joinF).map fsss
simp only [joinF.associatorLeft_cons_nil]
rw [h_indss]
rfl
case cons fs fss h_inds =>
induction fs
case nil =>
conv =>
lhs; lhs; simp; rw [DVect2.nil_append]; simp
change (mapF.obj joinF ⋙ joinF).map (DVect2.cons fss fsss)
simp only [joinF.associatorLeft_cons_cons_nil]
rw [h_inds]
rfl
case cons f fs h_ind =>
conv =>
lhs; lhs; simp; rw [DVect2.cons_append]; simp
rw [DVect2.cons_append]
change DVect2.cons f ((mapF.obj joinF ⋙ joinF).map (DVect2.cons (DVect2.cons fs fss) fsss))
conv =>
rhs; rhs; change DVect2.cons f ((joinF ⋙ joinF).map (DVect2.cons (DVect2.cons fs fss) fsss))
simp only [joinF.associatorLeft_cons_cons_cons]
rw [comp_cons, comp_cons]
rw [h_ind, Category.comp_id, Category.id_comp]
}
inv := {
app := joinF.associatorRight
naturality := by
intro _ _ fsss
induction fsss
case nil => rfl
case cons fss fsss h_indss =>
induction fss
case nil =>
conv => lhs; lhs; simp; change (joinF ⋙ joinF).map fsss
simp only [joinF.associatorRight_cons_nil]
rw [h_indss]
rfl
case cons fs fss h_inds =>
induction fs
case nil =>
conv =>
lhs; lhs; simp; rw [DVect2.cons_append]; simp
change (joinF ⋙ joinF).map (DVect2.cons fss fsss)
simp only [joinF.associatorRight_cons_cons_nil]
rw [h_inds]
rfl
case cons f fs h_ind =>
conv =>
lhs; lhs; simp; rw [DVect2.cons_append]; simp
rw [DVect2.cons_append]
change DVect2.cons f ((joinF ⋙ joinF).map (DVect2.cons (DVect2.cons fs fss) fsss))
conv =>
rhs; rhs; change DVect2.cons f ((mapF.obj joinF ⋙ joinF).map (DVect2.cons (DVect2.cons fs fss) fsss))
simp only [joinF.associatorRight_cons_cons_cons]
rw [comp_cons, comp_cons]
rw [h_ind, Category.comp_id, Category.id_comp]
}
hom_inv_id := by ext; intros; simp
inv_hom_id := by ext; intros; simp
end joinF
end CategoryTheory.List
|
module Utils
include("MapViews.jl")
include("WfnUtils.jl")
using .MapViews
export MapView
using .WfnUtils
export applygate!
end
|
open import Level using (Level; suc; zero; _⊔_)
open import Function using (const)
open import Algebra
open import Algebra.Structures
open import Algebra.OrderedMonoid
open import Algebra.FunctionProperties as FunctionProperties using (Op₁; Op₂)
open import Data.Product using (_×_; _,_; proj₁; proj₂)
open import Relation.Binary
open import Relation.Binary.PartialOrderReasoning as ≤-Reasoning using ()
module Algebra.Pregroup where
-- Definition of the properties of left and right contraction and
-- expansion for usage in the below definition of pregroups.
module AdjointProperties {a ℓ} {A : Set a} (_≤_ : Rel A ℓ) (_∙_ : Op₂ A) (ε : A) where
LeftContract : Op₁ A → Set _
LeftContract _ˡ = ∀ x → (x ˡ ∙ x) ≤ ε
LeftExpand : Op₁ A → Set _
LeftExpand _ˡ = ∀ x → ε ≤ (x ∙ x ˡ)
RightContract : Op₁ A → Set _
RightContract _ʳ = ∀ x → (x ∙ x ʳ) ≤ ε
RightExpand : Op₁ A → Set _
RightExpand _ʳ = ∀ x → ε ≤ (x ʳ ∙ x)
-- define shorthand notation for a term being the left/right adjoint
-- of another term, which can be proven to be unique.
_LeftAdjointOf_ : ∀ y x → Set _
_LeftAdjointOf_ y x = (y ∙ x) ≤ ε × ε ≤ (x ∙ y)
_RightAdjointOf_ : ∀ y x → Set _
_RightAdjointOf_ y x = (x ∙ y) ≤ ε × ε ≤ (y ∙ x)
-- Definition of a pregroup, which adds a left and a right adjoint to
-- an ordered monoid.
record IsPregroup {a ℓ₁ ℓ₂} {A : Set a} (≈ : Rel A ℓ₁) (≤ : Rel A ℓ₂)
(∙ : Op₂ A) (ε : A) (ˡ : Op₁ A) (ʳ : Op₁ A) : Set (a ⊔ ℓ₁ ⊔ ℓ₂) where
open AdjointProperties ≤ ∙ ε
field
isOrderedMonoid : IsOrderedMonoid ≈ ≤ ∙ ε
ˡ-cong : ˡ Preserves ≈ ⟶ ≈
ʳ-cong : ʳ Preserves ≈ ⟶ ≈
ˡ-contract : LeftContract ˡ
ˡ-expand : LeftExpand ˡ
ʳ-contract : RightContract ʳ
ʳ-expand : RightExpand ʳ
open IsOrderedMonoid isOrderedMonoid public
open AdjointProperties ≤ ∙ ε public
record Pregroup c ℓ₁ ℓ₂ : Set (suc (c ⊔ ℓ₁ ⊔ ℓ₂)) where
infixl 7 _∙_
infix 4 _≈_
infix 4 _≤_
field
Carrier : Set c
_≈_ : Rel Carrier ℓ₁
_≤_ : Rel Carrier ℓ₂
_∙_ : Op₂ Carrier
_ˡ : Op₁ Carrier
_ʳ : Op₁ Carrier
ε : Carrier
isPregroup : IsPregroup _≈_ _≤_ _∙_ ε _ˡ _ʳ
open IsPregroup isPregroup public
private
-- for usage with ≤-Reasoning
poset : Poset c ℓ₁ ℓ₂
poset = record { isPartialOrder = isPartialOrder }
open ≤-Reasoning poset
-- for usage of substitutivity (which is not defined in IsPregroup)
orderedMonoid : OrderedMonoid _ _ _
orderedMonoid = record { isOrderedMonoid = isOrderedMonoid }
open OrderedMonoid orderedMonoid public using (substitutivity)
ˡ-unique : ∀ {x y} → y LeftAdjointOf x → y ≈ x ˡ
ˡ-unique {x} {y} (y-contract , y-expand) = antisym y≤xˡ xˡ≤y
where
y≤xˡ : y ≤ x ˡ
y≤xˡ =
begin
y ≈⟨ sym (proj₂ identity y) ⟩
y ∙ ε ≤⟨ compatibility ≤-refl (ˡ-expand x) ⟩
y ∙ (x ∙ x ˡ) ≈⟨ sym (assoc y x (x ˡ)) ⟩
(y ∙ x) ∙ x ˡ ≤⟨ compatibility y-contract ≤-refl ⟩
ε ∙ x ˡ ≈⟨ proj₁ identity (x ˡ) ⟩
x ˡ
∎
xˡ≤y : x ˡ ≤ y
xˡ≤y =
begin
x ˡ ≈⟨ sym (proj₂ identity (x ˡ)) ⟩
x ˡ ∙ ε ≤⟨ compatibility ≤-refl y-expand ⟩
x ˡ ∙ (x ∙ y) ≈⟨ sym (assoc (x ˡ) x y) ⟩
(x ˡ ∙ x) ∙ y ≤⟨ compatibility (ˡ-contract x) ≤-refl ⟩
ε ∙ y ≈⟨ proj₁ identity y ⟩
y
∎
ʳ-unique : ∀ {x y} → y RightAdjointOf x → y ≈ x ʳ
ʳ-unique {x} {y} (y-contract , y-expand) = antisym y≤xʳ xʳ≤y
where
xʳ≤y : x ʳ ≤ y
xʳ≤y =
begin
x ʳ ≈⟨ sym (proj₁ identity (x ʳ)) ⟩
ε ∙ x ʳ ≤⟨ compatibility y-expand ≤-refl ⟩
(y ∙ x) ∙ x ʳ ≈⟨ assoc y x (x ʳ) ⟩
y ∙ (x ∙ x ʳ) ≤⟨ compatibility ≤-refl (ʳ-contract x) ⟩
y ∙ ε ≈⟨ proj₂ identity y ⟩
y
∎
y≤xʳ : y ≤ x ʳ
y≤xʳ =
begin
y ≈⟨ sym (proj₁ identity y) ⟩
ε ∙ y ≤⟨ compatibility (ʳ-expand x) ≤-refl ⟩
(x ʳ ∙ x) ∙ y ≈⟨ assoc (x ʳ) x y ⟩
x ʳ ∙ (x ∙ y) ≤⟨ compatibility ≤-refl y-contract ⟩
x ʳ ∙ ε ≈⟨ proj₂ identity (x ʳ) ⟩
x ʳ
∎
ˡ-identity : ε ˡ ≈ ε
ˡ-identity = antisym εˡ≤ε ε≤εˡ
where
εˡ≤ε : ε ˡ ≤ ε
εˡ≤ε =
begin
ε ˡ ≈⟨ sym (proj₂ identity (ε ˡ)) ⟩
ε ˡ ∙ ε ≤⟨ ˡ-contract ε ⟩
ε
∎
ε≤εˡ : ε ≤ ε ˡ
ε≤εˡ =
begin
ε ≤⟨ ˡ-expand ε ⟩
ε ∙ ε ˡ ≈⟨ proj₁ identity (ε ˡ) ⟩
ε ˡ
∎
ʳ-identity : ε ʳ ≈ ε
ʳ-identity = antisym εʳ≤ε ε≤εʳ
where
εʳ≤ε : ε ʳ ≤ ε
εʳ≤ε =
begin
ε ʳ ≈⟨ sym (proj₁ identity (ε ʳ)) ⟩
ε ∙ ε ʳ ≤⟨ ʳ-contract ε ⟩
ε
∎
ε≤εʳ : ε ≤ ε ʳ
ε≤εʳ =
begin
ε ≤⟨ ʳ-expand ε ⟩
ε ʳ ∙ ε ≈⟨ proj₂ identity (ε ʳ) ⟩
ε ʳ
∎
ˡ-distrib : ∀ x y → y ˡ ∙ x ˡ ≈ (x ∙ y) ˡ
ˡ-distrib x y = ˡ-unique ([yˡxˡ][xy]≤ε , ε≤[xy][yˡxˡ])
where
[yˡxˡ][xy]≤ε : (y ˡ ∙ x ˡ) ∙ (x ∙ y) ≤ ε
[yˡxˡ][xy]≤ε =
begin
(y ˡ ∙ x ˡ) ∙ (x ∙ y) ≈⟨ sym (assoc (y ˡ ∙ x ˡ) x y) ⟩
((y ˡ ∙ x ˡ) ∙ x) ∙ y ≈⟨ ∙-cong (assoc (y ˡ) (x ˡ) x) refl ⟩
(y ˡ ∙ (x ˡ ∙ x)) ∙ y ≤⟨ compatibility (compatibility ≤-refl (ˡ-contract x)) ≤-refl ⟩
y ˡ ∙ ε ∙ y ≈⟨ ∙-cong (proj₂ identity (y ˡ)) refl ⟩
y ˡ ∙ y ≤⟨ ˡ-contract y ⟩
ε
∎
ε≤[xy][yˡxˡ] : ε ≤ (x ∙ y) ∙ (y ˡ ∙ x ˡ)
ε≤[xy][yˡxˡ] =
begin
ε ≤⟨ ˡ-expand x ⟩
x ∙ x ˡ ≈⟨ ∙-cong (sym (proj₂ identity x)) refl ⟩
x ∙ ε ∙ x ˡ ≤⟨ compatibility (compatibility ≤-refl (ˡ-expand y)) ≤-refl ⟩
(x ∙ (y ∙ y ˡ) ∙ x ˡ) ≈⟨ ∙-cong (sym (assoc x y (y ˡ))) refl ⟩
((x ∙ y) ∙ y ˡ) ∙ x ˡ ≈⟨ assoc (x ∙ y) (y ˡ) (x ˡ) ⟩
(x ∙ y) ∙ (y ˡ ∙ x ˡ)
∎
ʳ-distrib : ∀ x y → y ʳ ∙ x ʳ ≈ (x ∙ y) ʳ
ʳ-distrib x y = ʳ-unique ([xy][yʳxʳ]≤ε , ε≤[xy][yʳxʳ])
where
[xy][yʳxʳ]≤ε : (x ∙ y) ∙ (y ʳ ∙ x ʳ) ≤ ε
[xy][yʳxʳ]≤ε =
begin
(x ∙ y) ∙ (y ʳ ∙ x ʳ) ≈⟨ sym (assoc (x ∙ y) (y ʳ) (x ʳ)) ⟩
((x ∙ y) ∙ y ʳ) ∙ x ʳ ≈⟨ ∙-cong (assoc x y (y ʳ)) refl ⟩
(x ∙ (y ∙ y ʳ) ∙ x ʳ) ≤⟨ compatibility (compatibility ≤-refl (ʳ-contract y)) ≤-refl ⟩
x ∙ ε ∙ x ʳ ≈⟨ ∙-cong (proj₂ identity x) refl ⟩
x ∙ x ʳ ≤⟨ ʳ-contract x ⟩
ε
∎
ε≤[xy][yʳxʳ] : ε ≤ (y ʳ ∙ x ʳ) ∙ (x ∙ y)
ε≤[xy][yʳxʳ] =
begin
ε ≤⟨ ʳ-expand y ⟩
y ʳ ∙ y ≈⟨ ∙-cong refl (sym (proj₁ identity y)) ⟩
y ʳ ∙ (ε ∙ y) ≤⟨ compatibility ≤-refl (compatibility (ʳ-expand x) ≤-refl) ⟩
y ʳ ∙ ((x ʳ ∙ x) ∙ y) ≈⟨ ∙-cong refl (assoc (x ʳ) x y) ⟩
y ʳ ∙ (x ʳ ∙ (x ∙ y)) ≈⟨ sym (assoc (y ʳ) (x ʳ) (x ∙ y)) ⟩
(y ʳ ∙ x ʳ) ∙ (x ∙ y)
∎
ˡ-contra : ∀ {x y} → x ≤ y → y ˡ ≤ x ˡ
ˡ-contra {x} {y} x≤y =
begin
y ˡ ≈⟨ sym (proj₂ identity (y ˡ)) ⟩
y ˡ ∙ ε ≤⟨ compatibility ≤-refl (ˡ-expand x) ⟩
y ˡ ∙ (x ∙ x ˡ) ≈⟨ sym (assoc (y ˡ) x (x ˡ)) ⟩
(y ˡ ∙ x) ∙ x ˡ ≤⟨ substitutivity x≤y ⟩
(y ˡ ∙ y) ∙ x ˡ ≤⟨ compatibility (ˡ-contract y) ≤-refl ⟩
ε ∙ x ˡ ≈⟨ proj₁ identity (x ˡ) ⟩
x ˡ
∎
ʳ-contra : ∀ {x y} → x ≤ y → y ʳ ≤ x ʳ
ʳ-contra {x} {y} x≤y =
begin
y ʳ ≈⟨ sym (proj₁ identity (y ʳ)) ⟩
ε ∙ y ʳ ≤⟨ compatibility (ʳ-expand x) ≤-refl ⟩
(x ʳ ∙ x) ∙ y ʳ ≤⟨ substitutivity x≤y ⟩
(x ʳ ∙ y) ∙ y ʳ ≈⟨ assoc (x ʳ) y (y ʳ) ⟩
x ʳ ∙ (y ∙ y ʳ) ≤⟨ compatibility ≤-refl (ʳ-contract y) ⟩
x ʳ ∙ ε ≈⟨ proj₂ identity (x ʳ) ⟩
x ʳ
∎
ʳˡ-cancel : ∀ x → x ʳ ˡ ≈ x
ʳˡ-cancel x = sym (ˡ-unique (ʳ-contract x , ʳ-expand x))
ˡʳ-cancel : ∀ x → x ˡ ʳ ≈ x
ˡʳ-cancel x = sym (ʳ-unique (ˡ-contract x , ˡ-expand x))
|
##############################################
# Creating a word cloud from Twitter         #
##############################################
# Load libraries
library(twitteR)
library(tm)
library(SnowballC)
library(wordcloud)
######################
# Data Collection    #
######################
# Note: Remember to create the app access at https://apps.twitter.com
# Create the Twitter connection
setup_twitter_oauth("rbPjVvWsiiKl4KiFZg4FZ1Htm", "gEmHkQ8fMlnsSFffY3jTqbTVmbr7CaifsmU63xpSq1Eq0bBDxy", "137520613-4SKdEg2c7hz5gzi4oje5YyJvyUlM1WaIz6JQWtqn", "iGXyjCkiHFs2k10pWrxd2LDzNx9fr85Ga7nff6xJcHwu9")
# Collect tweets from the @unicyt account
tweets = userTimeline("unicyt", 2000)
# Put the list of tweets into a data frame
tweets_df = twListToDF(tweets)
# Extract the raw text of the tweets
tweets_txt_raw = tweets_df$text
#####################
# Data Cleaning     #
#####################
# Remove retweets and quoted tweets
tweets_txt = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", tweets_txt_raw)
# Remove mentions of other accounts
tweets_txt = gsub("@\\w+", "", tweets_txt)
# Remove punctuation
tweets_txt = gsub("[[:punct:]]", "", tweets_txt)
# Remove numbers
tweets_txt = gsub("[[:digit:]]", "", tweets_txt)
# Remove links
tweets_txt = gsub("http\\w+", "", tweets_txt)
##############################
# Data Pre-processing        #
##############################
# Build a corpus
corpus = Corpus(VectorSource(tweets_txt))
# Convert to lowercase
corpus = tm_map(corpus, content_transformer(tolower))
# Remove Spanish stopwords and the account name
corpus = tm_map(corpus, content_transformer(removeWords), c(stopwords("spanish"), "unicyt"))
# Load a custom stopword file and convert it to ASCII
pv_es <- readLines("stopwords_es.txt", encoding="UTF-8") # Remember to change the path to your stopword file
pv_es = iconv(pv_es, to="ASCII//TRANSLIT")
# Remove custom stopwords
corpus = tm_map(corpus, content_transformer(removeWords), pv_es)
# Strip extra whitespace
corpus = tm_map(corpus, content_transformer(stripWhitespace))
# Build a term-document matrix
tdm <- TermDocumentMatrix(corpus)
# Convert to a plain matrix
m = as.matrix(tdm)
# Word counts in decreasing order
wf <- sort(rowSums(m),decreasing=TRUE)
# Create a data frame with the words and their frequencies
dm <- data.frame(word = names(wf), freq=wf)
###############################
# Visualization of Results   #
###############################
# Plot the word cloud
wordcloud(dm$word, dm$freq, random.order=FALSE, colors=brewer.pal(8, "Dark2")) |
(* Property from Case-Analysis for Rippling and Inductive Proof,
Moa Johansson, Lucas Dixon and Alan Bundy, ITP 2010.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_prop_40
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype Nat = Z | S "Nat"
fun take :: "Nat => 'a list => 'a list" where
"take (Z) y = nil2"
| "take (S z) (nil2) = nil2"
| "take (S z) (cons2 x2 x3) = cons2 x2 (take z x3)"
theorem property0 :
"((take Z xs) = (nil2))"
find_proof DInd
apply (induct xs)
apply auto
done
end |
State Before: R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
⊢ ∃! b, (a, b) ∈ g State After: case refine'_1
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
⊢ ∃ x, (a, x) ∈ g
case refine'_2
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
⊢ ∀ (y₁ y₂ : F), (a, y₁) ∈ g → (a, y₂) ∈ g → y₁ = y₂ Tactic: refine' exists_unique_of_exists_of_unique _ _ State Before: case refine'_2
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
⊢ ∀ (y₁ y₂ : F), (a, y₁) ∈ g → (a, y₂) ∈ g → y₁ = y₂ State After: case refine'_2
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
y₁ y₂ : F
hy₁ : (a, y₁) ∈ g
hy₂ : (a, y₂) ∈ g
⊢ y₁ = y₂ Tactic: intro y₁ y₂ hy₁ hy₂ State Before: case refine'_2
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
y₁ y₂ : F
hy₁ : (a, y₁) ∈ g
hy₂ : (a, y₂) ∈ g
⊢ y₁ = y₂ State After: case refine'_2
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
y₁ y₂ : F
hy₁ : (a, y₁) ∈ g
hy₂ : (a, y₂) ∈ g
hy : (0, y₁ - y₂) ∈ g
⊢ y₁ = y₂ Tactic: have hy : ((0 : E), y₁ - y₂) ∈ g := by
convert g.sub_mem hy₁ hy₂
exact (sub_self _).symm State Before: case refine'_2
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
y₁ y₂ : F
hy₁ : (a, y₁) ∈ g
hy₂ : (a, y₂) ∈ g
hy : (0, y₁ - y₂) ∈ g
⊢ y₁ = y₂ State After: no goals Tactic: exact sub_eq_zero.mp (hg hy (by simp)) State Before: case refine'_1
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
⊢ ∃ x, (a, x) ∈ g State After: case a
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
⊢ (∃ x, (a, x) ∈ g) ↔ a ∈ map (LinearMap.fst R E F) g Tactic: convert ha State Before: case a
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
⊢ (∃ x, (a, x) ∈ g) ↔ a ∈ map (LinearMap.fst R E F) g State After: no goals Tactic: simp State Before: R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
y₁ y₂ : F
hy₁ : (a, y₁) ∈ g
hy₂ : (a, y₂) ∈ g
⊢ (0, y₁ - y₂) ∈ g State After: case h.e'_4.h.e'_3
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
y₁ y₂ : F
hy₁ : (a, y₁) ∈ g
hy₂ : (a, y₂) ∈ g
⊢ 0 = ((a, y₁) - (a, y₂)).fst Tactic: convert g.sub_mem hy₁ hy₂ State Before: case h.e'_4.h.e'_3
R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
y₁ y₂ : F
hy₁ : (a, y₁) ∈ g
hy₂ : (a, y₂) ∈ g
⊢ 0 = ((a, y₁) - (a, y₂)).fst State After: no goals Tactic: exact (sub_self _).symm State Before: R : Type u_1
inst✝⁶ : Ring R
E : Type u_3
inst✝⁵ : AddCommGroup E
inst✝⁴ : Module R E
F : Type u_2
inst✝³ : AddCommGroup F
inst✝² : Module R F
G : Type ?u.618707
inst✝¹ : AddCommGroup G
inst✝ : Module R G
g : Submodule R (E × F)
hg : ∀ {x : E × F}, x ∈ g → x.fst = 0 → x.snd = 0
a : E
ha : a ∈ map (LinearMap.fst R E F) g
y₁ y₂ : F
hy₁ : (a, y₁) ∈ g
hy₂ : (a, y₂) ∈ g
hy : (0, y₁ - y₂) ∈ g
⊢ (0, y₁ - y₂).fst = 0 State After: no goals Tactic: simp |
function id = getlmiid(F)
id = F.LMIid;
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <gsl/gsl_blas.h>
double epsl = 0.01;
typedef struct {
double x, y, z;
} position;
typedef struct _vertex {
position pos;
gsl_matrix_view *C;
struct _vertex *p,*l,*r;
} vertex;
void usage(char *exec) {
printf("%s -s <input_file> <count_vertex>\n", exec);
printf("%s -c <input_file>\n", exec);
printf("%s -show <input_file>\n", exec);
}
int* ddd(char *fileName, int len){
FILE *file = NULL;
if ((file = fopen(fileName, "r")) == NULL) {
/* error opening the file */
perror("fopen: ");
return NULL;
}
int n, i, j = 0, k = 0;
char buff[6], buffFloat[17];
memset(buff, 0, sizeof(buff));
memset(buffFloat, 0, sizeof(buffFloat));
if (fread(buff, sizeof(char), 5, file) < 5 || strcmp(buff, "E = [")) {
perror("file out of params: ");
return NULL;
}
double **mat = (double **)calloc(len, sizeof(double *));
for (i = 0; i < len; i++) {
mat[i] = (double *)calloc(3, sizeof(double *));
}
i = 0;
while ((n = fread(buff, sizeof(char), 1, file)) == 1) {
if (buff[0] == ' ') {
if (k > 0) {
mat[i][j] = atof(buffFloat);
memset(buffFloat, 0, sizeof(buffFloat));
k = 0;
j++;
}
continue;
}
else if (buff[0] == ';') {
if (k) {
mat[i][j] = atof(buffFloat);
memset(buffFloat, 0, sizeof(buffFloat));
k = 0;
}
j = 0;
i++;
}
else {
buffFloat[k++] = buff[0];
}
}
fclose(file);
int current = 0;
for (i = 0; i < len; i++) {
for (j = 0; j < len; j++) {
printf(" %f ", sqrt(pow(mat[i][0] - mat[j][0], 2) + pow(mat[i][1] - mat[j][1], 2) + pow(mat[i][2] - mat[j][2], 2)));
fflush(stdout);
}
printf("\n");
}
    /* debug helper: nothing meaningful to return */
    return NULL;
}
void printMat(double **mat, int len) {
int i, j;
for (i = 0; i < len; i++) {
for (j = 0; j < len; j++) {
//if (mat[i][j] != 0) {
printf(" %f ", mat[i][j]);
fflush(stdout);
//}
}
printf("\n");
}
}
double **matrixConstruct(char *fileName, int len){
FILE *file = NULL;
if ((file = fopen(fileName, "r")) == NULL) {
/* error openning the file */
perror("fopen: ");
return NULL;
}
int n, i, j = 0, k = 0;
char buff[6], buffFloat[17];
memset(buff, 0, sizeof(buff));
memset(buffFloat, 0, sizeof(buffFloat));
if (fread(buff, sizeof(char), 5, file) < 5 || strcmp(buff, "E = [")) {
perror("file out of params: ");
return NULL;
}
double **mat = (double **)calloc(len, sizeof(double *));
for (i = 0; i < len; i++) {
mat[i] = (double *)calloc(len, sizeof(double *));
}
i = 0;
while ((n = fread(buff, sizeof(char), 1, file)) == 1) {
if (buff[0] == ' ') {
if (k > 0) {
mat[i][j] = atof(buffFloat);
memset(buffFloat, 0, sizeof(buffFloat));
k = 0;
j++;
}
continue;
}
else if (buff[0] == ';') {
if (k) {
mat[i][j] = atof(buffFloat);
memset(buffFloat, 0, sizeof(buffFloat));
k = 0;
}
j = 0;
i++;
}
else {
buffFloat[k++] = buff[0];
}
}
fclose(file);
for (i = 0; i < len; i++)
{
for (j = 0; j < len; j++)
{
if(pow(mat[i][j] - mat[j][i],2) > 0.0000001) return NULL;
}
}
return mat;
}
double dis(double **m, int i){
if (i-1 < 0) return -1;
return m[i][i-1];
}
double theta(double **m, int i){
if (i-1 < 0) return -1000;
return acos((pow(m[i][i+1],2) + pow(m[i][i-1],2) - pow(m[i+1][i-1],2))/(2*m[i][i+1]*m[i][i-1]));
}
double cosO(double **m, int i){
//printf("%f %f\n", pow(m[i][i+1], 2) + pow(m[i+1][i+3],2) - 2*m[i][i+1]*m[i+1][i+3]*cos(theta(m, i+1))*cos(theta(m, i+2)) - pow(m[i][i+3],2), 2*m[i][i+1]*m[i+1][i+3]*sin(theta(m, i+1))*sin(theta(m, i+2)) );
double theta2 = acos((pow(m[i+1][i+2],2) + pow(m[i+1][i+3],2) - pow(m[i+2][i+3],2))/(2*m[i+1][i+3]*m[i+1][i+2]));
double res = (pow(m[i][i+1], 2) + pow(m[i+1][i+3],2) - 2*m[i][i+1]*m[i+1][i+3]*cos(theta(m, i+1))*cos(theta2) - pow(m[i][i+3],2))/
(2*m[i][i+1]*m[i+1][i+3]*sin(theta(m, i+1))*sin(theta2));
if (res > 1) return 1;
if (res < -1) return -1;
return res;
}
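/*
 * cosOmega(): cosine of the torsion (dihedral) angle defined by vertices
 * i-3, i-2, i-1, i, computed purely from the pairwise distances stored in
 * m (this appears to be the standard closed-form used by branch-and-prune
 * solvers for distance geometry).  The result is clamped to [-1, 1] to
 * guard against floating-point round-off.
 */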
double cosOmega(double **m, int i){
if (i-3 < 0) return 1000;
double Ai = (2*pow(m[i-2][i-1],2))*(pow(m[i-3][i-2],2) + pow(m[i-2][i] ,2) - pow(m[i-3][i] ,2));
double Bi = pow(m[i-3][i-2],2) + pow(m[i-2][i-1],2) - pow(m[i-3][i-1],2);
double Ci = pow(m[i-2][i-1],2) + pow(m[i-2][i],2) - pow(m[i-1][i],2);
double Di = sqrt(
4*pow(m[i-3][i-2],2)
*pow(m[i-2][i-1],2)
-pow(Bi,2) );
double Ei = sqrt(4*pow(m[i-2][i-1],2)*pow(m[i-2][i],2)-pow(Ci,2) );
double res = (Ai - Bi*Ci)/(Di*Ei);
if (res > 1) return 1;
if (res < -1) return -1;
return res;
}
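/*
 * isFeasible(): checks whether the newly placed vertex v (index i) respects
 * every known distance mat[i][j] to the already-fixed vertices j < i-3
 * (the three immediate predecessors are satisfied by construction).  A
 * constraint counts as violated when the deviation exceeds the tolerance
 * epsl, in which case the search branch is pruned.
 */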
int isFeasible(double **mat, int len, vertex *v, int i){
if (!v) return 0;
int j = i-1;
double pos[i][3];
vertex *aux = v->p;
while (aux)
{
//printf("\n\tj = %d {%f, %f, %f}\n", j, aux->pos.x, aux->pos.y, aux->pos.z);
pos[j][0] = aux->pos.x;
pos[j][1] = aux->pos.y;
pos[j--][2] = aux->pos.z;
aux = aux->p;
}
for (j = 0; j < i-3; j++)
{
if (mat[i][j] > 0)
{
double dij = pow(v->pos.x - pos[j][0],2)+pow(v->pos.y - pos[j][1],2)+pow(v->pos.z - pos[j][2],2);
double diff = pow(mat[i][j],2)- dij;
if (diff < 0) diff = diff*(-1);
double dist = sqrt(diff);//pow(diff,2);
//printf("{%d,%d} {%f, %f, %f}-{%f, %f, %f} \n\t%.20f- %.20f = %.20f \n %.20f \n", i, j, v->pos.x, v->pos.y, v->pos.z, pos[j][0], pos[j][1], pos[j][2], mat[i][j], dij, diff, dist);
if (dist > epsl)
{
return 0;
}
}
}
return 1;
}
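/*
 * LDE(): mean distance error of a candidate embedding.  For every pair
 * (i, j) with a given distance mat[i][j] > 0 it accumulates
 * (mat[i][j]^2 - ||m_i - m_j||^2)^2 / mat[i][j] and returns the average,
 * i.e. a single score for how well the realisation m matches the input
 * distances (printed for each solution found).
 */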
double LDE(double **mat, double **m, int len){
int i, j, count = 0;
double dij = 0;
for (i = 0; i < len; i++)
{
for (j = i+1; j < len; j++)
{
if (mat[i][j]>0){
dij = dij + pow((pow(mat[i][j],2) - (pow(m[i][0] - m[j][0], 2) + pow(m[i][1] - m[j][1], 2) + pow(m[i][2] - m[j][2], 2))),2)/mat[i][j];
count++;
}
}
}
return dij/count;
}
int qtd = 0;
int BranchAndPrune(double **mat, int len, vertex *v, int i){
if(i < len){
double thetai = theta(mat,i-1);
double cti = cos(thetai);
double sti = sin(thetai);
double cwi = cosOmega(mat, i);
//double cwi2 = cosO(mat, i-3);
//printf("%f %f \n", cwi, cwi2 );
//printf("%f %f \n", acos(cwi)*57.295779513, acos(cwi2)*57.295779513 );
if (cwi*cwi > 1){
printf("\n %f Error cwi", cwi);
getchar();
}
double swi = sqrt(1-pow(cwi, 2));
double di = dis(mat,i);
double bi1[] = {0-cti,0-sti,0,0-di*cti,
sti*cwi,0-cti*cwi,0-swi,di*sti*cwi,
sti*swi,0-cti*swi,cwi,di*sti*swi,
0,0,0,1};
int j;
//for (j = 0; j < 16; j++)
//{
// printf("%f ", bi1[j]);
//}
// printf("\n");
gsl_matrix_view Bi1 = gsl_matrix_view_array(bi1, 4, 4);
swi = 0-swi;
double bi2[] = {0-cti,0-sti,0,0-di*cti,
sti*cwi,0-cti*cwi,0-swi,di*sti*cwi,
sti*swi,0-cti*swi,cwi,di*sti*swi,
0,0,0,1};
//for (j = 0; j < 16; j++)
//{
// printf("%f ", bi2[j]);
//}
//printf("\n");
gsl_matrix_view Bi2 = gsl_matrix_view_array(bi2, 4, 4);
vertex *rv = (vertex*)calloc(1, sizeof(vertex));
rv->p = v;
rv->C = (gsl_matrix_view*)calloc(1, sizeof(gsl_matrix_view));
*(rv->C) = gsl_matrix_view_array((double*)calloc(16,sizeof(double)), 4, 4);
gsl_blas_dgemm(CblasNoTrans, CblasNoTrans,
1.0, &(v->C->matrix), &Bi1.matrix,
0.0, &(rv->C->matrix));
vertex *lv = (vertex*)calloc(1, sizeof(vertex));
lv->p = v;
lv->C = (gsl_matrix_view*)calloc(1, sizeof(gsl_matrix_view));
*(lv->C) = gsl_matrix_view_array((double*)calloc(16,sizeof(double)), 4, 4);
gsl_blas_dgemm(CblasNoTrans, CblasNoTrans,
1.0, &(v->C->matrix), &Bi2.matrix,
0.0, &(lv->C->matrix));
gsl_matrix_view *Xi = (gsl_matrix_view*)calloc(1, sizeof(gsl_matrix_view));
*Xi = gsl_matrix_view_array((double*)calloc(4,sizeof(double)), 4, 1);
double *y = (double*)calloc(4,sizeof(double));
y[3] = 1;
gsl_matrix_view *Y = (gsl_matrix_view*)calloc(1, sizeof(gsl_matrix_view));
*Y = gsl_matrix_view_array(y, 4, 1);
gsl_blas_dgemm(CblasNoTrans, CblasNoTrans,
1.0, &(rv->C->matrix), &(Y->matrix),
0.0, &(Xi->matrix));
rv->pos.x = Xi->matrix.data[0];
rv->pos.y = Xi->matrix.data[1];
rv->pos.z = Xi->matrix.data[2];
gsl_blas_dgemm(CblasNoTrans, CblasNoTrans,
1.0, &(lv->C->matrix), &(Y->matrix),
0.0, &(Xi->matrix));
lv->pos.x = Xi->matrix.data[0];
lv->pos.y = Xi->matrix.data[1];
lv->pos.z = Xi->matrix.data[2];
free(y);
free(Y);
free(Xi->matrix.data);
free(Xi);
//printf("[ %f, %f", rv->pos.x, rv->pos.y);
//printf(" %f]\n", rv->pos.z);
//printf("[ %f, %f", lv->pos.x, lv->pos.y);
//printf(" %f ]\n", lv->pos.z);
//getchar();
int s;
//printf("\n");
if (isFeasible(mat, len, rv, i)){
v->r = rv;
//for (s = 0; s < i; s++)
//{
// printf(">");
//}
//printf("1\n");
fflush(stdout);
BranchAndPrune(mat, len, rv, i+1);
}else {
//for (s = 0; s < i; s++)
//{
// printf("<");
//}
//
//printf("2\n");
free(rv->C->matrix.data);
free(rv->C);
free(rv);
v->r = NULL;
}
if (isFeasible(mat, len, lv, i)){
v->l = lv;
//for (s = 0; s < i; s++)
//{
// printf(">");
//}
//
//printf("3\n");
fflush(stdout);
BranchAndPrune(mat, len, lv, i+1);
}else {
//for (s = 0; s < i; s++)
//{
// printf("<");
//}
//
//printf("4\n");
free(lv->C->matrix.data);
free(lv->C);
free(lv);
v->l = NULL;
}
}else{
//printf(" ---------------- ue\n");
//fflush(stdout);
qtd++;
double **mat2 = calloc(len, sizeof(double*));
int i;
for (i = 0; i < len; i++)
{
mat2[i] = calloc(3, sizeof(double));
}
i = len-1;
printf("\n[");
while (v)
{
mat2[i][0] = v->pos.x;
mat2[i][1] = v->pos.y;
mat2[i--][2] = v->pos.z;
//printf("%f %f %f; ", v->pos.x, v->pos.y, v->pos.z);
v = v->p;
}
double lde = LDE(mat, mat2, len);
printf("\n \t LDE = %.20f\n", lde);
for (i = 0; i < len; i++)
{
printf("%f %f %f; ", mat[i][0], mat[i][1], mat[i][2]);
}
printf("]\n");
return 0;
}
    return 0;
}
int printGrafo(int len, vertex *v, int i){
int j = -1;
if (v->l){
printf("(%d, %d),", i, i+1);
j = printGrafo(len, v->l, i+1);
}
if (v->r){
if (j != -1){
printf("(%d, %d),", i, j);
j = printGrafo(len, v->r, j);
}else
{
printf("(%d, %d),", i, i+1);
return printGrafo(len, v->r, i+1);
}
}
if (j == -1) return i+1;
return j;
}
vertex *resul[10];
int solve(char *fileName, int len) {
double **mat = matrixConstruct(fileName, len);
if(mat == NULL){
printf("Error when construct matrix\n");
fflush(stdout);
return 0;
}
double b1[] = {1,0,0,0,
0,1,0,0,
0,0,1,0,
0,0,0,1};
gsl_matrix_view B1 = gsl_matrix_view_array(b1, 4, 4);
double b2[] = {-1,0,0,0-dis(mat,1),
0,1,0,0,
0,0,-1,0,
0,0,0,1};
gsl_matrix_view B2 = gsl_matrix_view_array(b2, 4, 4);
double theta2 = theta(mat,1);
//printf("[ cos %f\n", theta2);
double ct2 = cos(theta2);
double st2 = sin(theta2);
double b3[] = {0-ct2,0-st2,0,0-dis(mat,2)*ct2,
st2,0-ct2,0,dis(mat,2)*st2,
0,0,1,0,
0,0,0,1};
gsl_matrix_view B3 = gsl_matrix_view_array(b3, 4, 4);
vertex *v = (vertex*)calloc(1, sizeof(vertex));
v->C = &B1;
//printf("[ %f, %f %f\n", v->pos.x, v->pos.y, v->pos.z);
vertex *v2 = (vertex*)calloc(1, sizeof(vertex));
v->r = v2;
v2->C = &B2;
v2->p = v;
v2->pos.x = 0-dis(mat,1);
// printf("[ %f, %f %f\n", v2->pos.x, v2->pos.y, v2->pos.z);
vertex *v3 = (vertex*)calloc(1, sizeof(vertex));
v2->r = v3;
v3->C = (gsl_matrix_view*)calloc(1, sizeof(gsl_matrix_view));
*(v3->C) = gsl_matrix_view_array((double*)calloc(16,sizeof(double)), 4, 4);
gsl_blas_dgemm(CblasNoTrans, CblasNoTrans,
1.0, &B2.matrix, &(B3.matrix),
0.0, &(v3->C->matrix));
v3->p = v2;
v3->pos.x = dis(mat,2)*ct2 -dis(mat,1);
v3->pos.y = dis(mat,2)*st2;
//printf("[ %f, %f", v3->pos.x, v3->pos.y);
// printf(" %f]\n", v3->pos.z);
BranchAndPrune(mat, len, v3, 3);
printf("%d", qtd);
//printf("[");
//printGrafo(len, v, 0);
//printf("]");
return 1;
}
int countCols(char *fileName) {
FILE *file = NULL;
if ((file = fopen(fileName, "r")) == NULL) {
/* error opening the file */
perror("fopen: ");
return 1;
}
int n, j = 0, k = 0;
char buff[6];
memset(buff, 0, sizeof(buff));
if (fread(buff, sizeof(char), 5, file) < 5 || strcmp(buff, "E = [")) {
fclose(file);
perror("file out of params: ");
return 1;
}
while ((n = fread(buff, sizeof(char), 1, file)) == 1) {
if (buff[0] == ' ') {
if (k > 0) {
k = 0;
j++;
}
continue;
}
else if (buff[0] == ';') {
if (k) j++;
break;
}
else {
k++;
}
}
fclose(file);
return j;
}
int main(int argc, char **argv) {
double time_spent = 0.0;
clock_t begin = clock();
if (argc < 3) {
usage(argv[0]);
}
else {
if (!strcmp(argv[1], "-s")) {
int len;
if (argc < 4) {
len = countCols(argv[2]);
}else {
if ((len = atoi(argv[3])) == 0){
/* invalid count_vertex argument */
perror("count_vertex param error: ");
return 1;
}
}
solve(argv[2], len);
} else if (!strcmp(argv[1], "-c")) {
printf("%d\n",countCols(argv[2]));
} else if (!strcmp(argv[1], "-show")) {
printMat(matrixConstruct(argv[2], countCols(argv[2])), countCols(argv[2]));
} else if (!strcmp(argv[1], "-d")) {
ddd(argv[2],10);
} else {
usage(argv[0]);
}
}
//getchar();
clock_t end = clock();
// calculate elapsed time by finding difference (end - begin) and
// divide by CLOCKS_PER_SEC to convert to seconds
time_spent += (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nTime elpased is %.20f seconds", time_spent);
fflush(stdout);
} |
State Before: ι : Type ?u.86870
α : Type u_1
β : Type ?u.86876
π : ι → Type ?u.86881
inst✝ : BooleanAlgebra α
a b c d : α
⊢ a ∆ b = (a ⊔ b) ⊓ (aᶜ ⊔ bᶜ) State After: no goals Tactic: rw [symmDiff_eq_sup_sdiff_inf, sdiff_eq, compl_inf] |
Axiom iter2 : bool -> bool -> bool.
Fixpoint do2 (n : nat) (A : bool) :=
match n with
| 0 => A
| S n' => do2 n' (iter2 A A)
end.
Ltac display2 := match goal with |- let n := ?k in _ => idtac "[2][" k "] :=" end; hnf; unfold do2; cbv beta.
Axiom x : bool.
Notation hidden := (_ = _).
Goal let n := 20 in do2 n x = do2 n x. display2. Time (match goal with |- ?f ?x = ?g ?y => idtac end). (* 2.6 s *) Admitted.
|
program main
use netcdf
implicit none
! new data
character(len=200) :: iname, oname
character(len=10) :: strnx, strny
integer :: newnx ! x-dimension of raw array
integer :: newny ! y-dimension of raw array
integer :: nlen, ierror
real,allocatable,dimension(:,:) :: newlats ! lats of raw data
real,allocatable,dimension(:,:) :: newlons ! lons of raw data
real,allocatable,dimension(:,:) :: newdata ! raw data
integer :: ncid, varid
! newnx = 19200
! newny = 12000
if (command_argument_count() /= 4) then
write(*,*) ' Convert NetCDF to Binary'
write(*,*) ' Usage: convert iname oname nx ny '
stop
end if
call get_command_argument(1, iname, nlen, ierror)
call get_command_argument(2, oname, nlen, ierror)
call get_command_argument(3, strnx, nlen, ierror)
call get_command_argument(4, strny, nlen, ierror)
read(strnx, '(I10)') newnx
read(strny, '(I10)') newny
allocate(newlats(newnx, newny), newlons(newnx, newny), newdata(newnx, newny))
! Open the file. NF90_NOWRITE tells netCDF we want read-only access to
! the file.
call check( nf90_open(trim(iname), NF90_NOWRITE, ncid) )
! Get the varid of the data variable, based on its name.
call check( nf90_inq_varid(ncid, "lats", varid) )
! Read the data.
call check( nf90_get_var(ncid, varid, newlats) )
! Get the varid of the data variable, based on its name.
call check( nf90_inq_varid(ncid, "lons", varid) )
! Read the data.
call check( nf90_get_var(ncid, varid, newlons) )
! Get the varid of the data variable, based on its name.
call check( nf90_inq_varid(ncid, "landuse", varid) )
! Read the data.
call check( nf90_get_var(ncid, varid, newdata) )
call check( nf90_close(ncid) )
call writenew(oname, newnx, newny, transpose(newlats), transpose(newlons), transpose(newdata))
end program main
subroutine writenew(fname, nx, ny, lats, lons, array)
implicit none
! calling
character(len=*),intent(in) :: fname
integer, intent(in) :: nx
integer, intent(in) :: ny
real, intent(in) ,dimension(nx, ny) :: lats
real, intent(in) ,dimension(nx, ny) :: lons
real, intent(in) ,dimension(nx, ny) :: array
! local
character(200) :: errormg ! the message of fail opening
integer :: ierror ! Flag for allocate
open(100,file=trim(fname), form='unformatted', iostat=ierror, iomsg=errormg)
write(100) lats
write(100) lons
write(100) array
! Check that opening the file succeeded (ierror is set by the open statement above).
if(ierror /= 0) then
write(*,*) errormg
stop
end if
close(100)
end subroutine writenew
subroutine check(status)
use netcdf
integer, intent ( in) :: status
if(status /= nf90_noerr) then
write(*,*) trim(nf90_strerror(status))
write(*,*) "error"
stop 2
end if
end subroutine check
|
State Before: α : Type u_1
E : Type u_2
F : Type ?u.1496551
𝕜 : Type ?u.1496554
inst✝¹³ : NormedAddCommGroup E
inst✝¹² : NormedSpace ℝ E
inst✝¹¹ : CompleteSpace E
inst✝¹⁰ : NontriviallyNormedField 𝕜
inst✝⁹ : NormedSpace 𝕜 E
inst✝⁸ : SMulCommClass ℝ 𝕜 E
inst✝⁷ : NormedAddCommGroup F
inst✝⁶ : NormedSpace ℝ F
inst✝⁵ : CompleteSpace F
f✝ g : α → E
m : MeasurableSpace α
μ : Measure α
X : Type ?u.1499245
inst✝⁴ : TopologicalSpace X
inst✝³ : FirstCountableTopology X
ν : Measure α
inst✝² : MeasurableSpace α
inst✝¹ : MeasurableSingletonClass α
f : α → E
a : α
s : Set α
inst✝ : Decidable (a ∈ s)
⊢ (∫ (x : α) in s, f x ∂Measure.dirac a) = if a ∈ s then f a else 0 State After: α : Type u_1
E : Type u_2
F : Type ?u.1496551
𝕜 : Type ?u.1496554
inst✝¹³ : NormedAddCommGroup E
inst✝¹² : NormedSpace ℝ E
inst✝¹¹ : CompleteSpace E
inst✝¹⁰ : NontriviallyNormedField 𝕜
inst✝⁹ : NormedSpace 𝕜 E
inst✝⁸ : SMulCommClass ℝ 𝕜 E
inst✝⁷ : NormedAddCommGroup F
inst✝⁶ : NormedSpace ℝ F
inst✝⁵ : CompleteSpace F
f✝ g : α → E
m : MeasurableSpace α
μ : Measure α
X : Type ?u.1499245
inst✝⁴ : TopologicalSpace X
inst✝³ : FirstCountableTopology X
ν : Measure α
inst✝² : MeasurableSpace α
inst✝¹ : MeasurableSingletonClass α
f : α → E
a : α
s : Set α
inst✝ : Decidable (a ∈ s)
⊢ (∫ (x : α), f x ∂if a ∈ s then Measure.dirac a else 0) = if a ∈ s then f a else 0 Tactic: rw [restrict_dirac] State Before: α : Type u_1
E : Type u_2
F : Type ?u.1496551
𝕜 : Type ?u.1496554
inst✝¹³ : NormedAddCommGroup E
inst✝¹² : NormedSpace ℝ E
inst✝¹¹ : CompleteSpace E
inst✝¹⁰ : NontriviallyNormedField 𝕜
inst✝⁹ : NormedSpace 𝕜 E
inst✝⁸ : SMulCommClass ℝ 𝕜 E
inst✝⁷ : NormedAddCommGroup F
inst✝⁶ : NormedSpace ℝ F
inst✝⁵ : CompleteSpace F
f✝ g : α → E
m : MeasurableSpace α
μ : Measure α
X : Type ?u.1499245
inst✝⁴ : TopologicalSpace X
inst✝³ : FirstCountableTopology X
ν : Measure α
inst✝² : MeasurableSpace α
inst✝¹ : MeasurableSingletonClass α
f : α → E
a : α
s : Set α
inst✝ : Decidable (a ∈ s)
⊢ (∫ (x : α), f x ∂if a ∈ s then Measure.dirac a else 0) = if a ∈ s then f a else 0 State After: case inl
α : Type u_1
E : Type u_2
F : Type ?u.1496551
𝕜 : Type ?u.1496554
inst✝¹³ : NormedAddCommGroup E
inst✝¹² : NormedSpace ℝ E
inst✝¹¹ : CompleteSpace E
inst✝¹⁰ : NontriviallyNormedField 𝕜
inst✝⁹ : NormedSpace 𝕜 E
inst✝⁸ : SMulCommClass ℝ 𝕜 E
inst✝⁷ : NormedAddCommGroup F
inst✝⁶ : NormedSpace ℝ F
inst✝⁵ : CompleteSpace F
f✝ g : α → E
m : MeasurableSpace α
μ : Measure α
X : Type ?u.1499245
inst✝⁴ : TopologicalSpace X
inst✝³ : FirstCountableTopology X
ν : Measure α
inst✝² : MeasurableSpace α
inst✝¹ : MeasurableSingletonClass α
f : α → E
a : α
s : Set α
inst✝ : Decidable (a ∈ s)
h✝ : a ∈ s
⊢ (∫ (x : α), f x ∂Measure.dirac a) = f a
case inr
α : Type u_1
E : Type u_2
F : Type ?u.1496551
𝕜 : Type ?u.1496554
inst✝¹³ : NormedAddCommGroup E
inst✝¹² : NormedSpace ℝ E
inst✝¹¹ : CompleteSpace E
inst✝¹⁰ : NontriviallyNormedField 𝕜
inst✝⁹ : NormedSpace 𝕜 E
inst✝⁸ : SMulCommClass ℝ 𝕜 E
inst✝⁷ : NormedAddCommGroup F
inst✝⁶ : NormedSpace ℝ F
inst✝⁵ : CompleteSpace F
f✝ g : α → E
m : MeasurableSpace α
μ : Measure α
X : Type ?u.1499245
inst✝⁴ : TopologicalSpace X
inst✝³ : FirstCountableTopology X
ν : Measure α
inst✝² : MeasurableSpace α
inst✝¹ : MeasurableSingletonClass α
f : α → E
a : α
s : Set α
inst✝ : Decidable (a ∈ s)
h✝ : ¬a ∈ s
⊢ (∫ (x : α), f x ∂0) = 0 Tactic: split_ifs State Before: case inl
α : Type u_1
E : Type u_2
F : Type ?u.1496551
𝕜 : Type ?u.1496554
inst✝¹³ : NormedAddCommGroup E
inst✝¹² : NormedSpace ℝ E
inst✝¹¹ : CompleteSpace E
inst✝¹⁰ : NontriviallyNormedField 𝕜
inst✝⁹ : NormedSpace 𝕜 E
inst✝⁸ : SMulCommClass ℝ 𝕜 E
inst✝⁷ : NormedAddCommGroup F
inst✝⁶ : NormedSpace ℝ F
inst✝⁵ : CompleteSpace F
f✝ g : α → E
m : MeasurableSpace α
μ : Measure α
X : Type ?u.1499245
inst✝⁴ : TopologicalSpace X
inst✝³ : FirstCountableTopology X
ν : Measure α
inst✝² : MeasurableSpace α
inst✝¹ : MeasurableSingletonClass α
f : α → E
a : α
s : Set α
inst✝ : Decidable (a ∈ s)
h✝ : a ∈ s
⊢ (∫ (x : α), f x ∂Measure.dirac a) = f a State After: no goals Tactic: exact integral_dirac _ _ State Before: case inr
α : Type u_1
E : Type u_2
F : Type ?u.1496551
𝕜 : Type ?u.1496554
inst✝¹³ : NormedAddCommGroup E
inst✝¹² : NormedSpace ℝ E
inst✝¹¹ : CompleteSpace E
inst✝¹⁰ : NontriviallyNormedField 𝕜
inst✝⁹ : NormedSpace 𝕜 E
inst✝⁸ : SMulCommClass ℝ 𝕜 E
inst✝⁷ : NormedAddCommGroup F
inst✝⁶ : NormedSpace ℝ F
inst✝⁵ : CompleteSpace F
f✝ g : α → E
m : MeasurableSpace α
μ : Measure α
X : Type ?u.1499245
inst✝⁴ : TopologicalSpace X
inst✝³ : FirstCountableTopology X
ν : Measure α
inst✝² : MeasurableSpace α
inst✝¹ : MeasurableSingletonClass α
f : α → E
a : α
s : Set α
inst✝ : Decidable (a ∈ s)
h✝ : ¬a ∈ s
⊢ (∫ (x : α), f x ∂0) = 0 State After: no goals Tactic: exact integral_zero_measure _ |
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_sort_nat_TSortSorts
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype Nat = Z | S "Nat"
datatype Tree = TNode "Tree" "Nat" "Tree" | TNil
fun le :: "Nat => Nat => bool" where
"le (Z) y = True"
| "le (S z) (Z) = False"
| "le (S z) (S x2) = le z x2"
fun ordered :: "Nat list => bool" where
"ordered (nil2) = True"
| "ordered (cons2 y (nil2)) = True"
| "ordered (cons2 y (cons2 y2 xs)) =
((le y y2) & (ordered (cons2 y2 xs)))"
fun flatten :: "Tree => Nat list => Nat list" where
"flatten (TNode q z r) y = flatten q (cons2 z (flatten r y))"
| "flatten (TNil) y = y"
fun add :: "Nat => Tree => Tree" where
"add x (TNode q z r) =
(if le x z then TNode (add x q) z r else TNode q z (add x r))"
| "add x (TNil) = TNode TNil x TNil"
fun toTree :: "Nat list => Tree" where
"toTree (nil2) = TNil"
| "toTree (cons2 y xs) = add y (toTree xs)"
fun tsort :: "Nat list => Nat list" where
"tsort x = flatten (toTree x) (nil2)"
theorem property0 :
"ordered (tsort xs)"
oops
end
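For readers less familiar with the Isabelle notation above, here is a small Python sketch of the same tree sort (insert every element into a binary search tree, then flatten it in order). The names and the list encoding are illustrative only and are not part of the benchmark.

def add(x, tree):
    # tree is None (TNil) or a triple (left, value, right) (TNode)
    if tree is None:
        return (None, x, None)
    left, v, right = tree
    if x <= v:
        return (add(x, left), v, right)
    return (left, v, add(x, right))

def to_tree(xs):
    tree = None
    for x in reversed(xs):   # mirrors toTree, which inserts the last element first
        tree = add(x, tree)
    return tree

def flatten(tree, acc):
    if tree is None:
        return acc
    left, v, right = tree
    return flatten(left, [v] + flatten(right, acc))

def tsort(xs):
    return flatten(to_tree(xs), [])

assert tsort([3, 1, 2, 1]) == [1, 1, 2, 3]   # the flattened result is sorted (cf. property0)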
|
function R = functionRlocalscattering3D(M_H, M_V, d_H, d_V, varphi, stdphi, theta, stdtheta, dist)
%Generate the spatial correlation matrix for the 3D local scattering model,
%defined in (7.17) for different angular distributions.
%
%INPUT:
% M_H = Number of antennas per horizontal row, i.e., # of columns
% M_V = Number of horizontal rows
% d_H = Horizontal antenna spacing in multiples of wavelength
% d_V = Vertical antenna spacing in multiples of wavelength
% varphi = Azimuth angle in radians
% stdphi = Azimuth angular spread (standard deviation or limit)
% theta = Elevation angle in radians
% stdtheta = Elevation angular spread (standard deviation or limit)
% dist = 'Gaussian' or 'Laplace' or 'Uniform'
%
%OUTPUT:
% R = M x M channel covariance matrix, where M = M_H x M_V
%
%
%This Matlab function was developed to generate simulation results to:
%
%Emil Bjornson, Jakob Hoydis and Luca Sanguinetti (2017),
%"Massive MIMO Networks: Spectral, Energy, and Hardware Efficiency",
%Foundations and Trends in Signal Processing: Vol. 11, No. 3-4,
%pp. 154-655. DOI: 10.1561/2000000093.
%
%For further information, visit: https://www.massivemimobook.com
%
%This is version 1.0 (Last edited: 2017-11-04)
%
%License: This code is licensed under the GPLv2 license. If you in any way
%use this code for research that results in publications, please cite our
%monograph as described above.
% Index mapping functions
i = @(m) mod(m-1, M_H);
j = @(m) floor((m-1)/M_H);
%Create the correlation matrix
M = M_H*M_V;
R = zeros(M);
% Define angle distributions as functions
if (strcmp(dist,'Laplace'))
f1 = @(x) exp(-sqrt(2)*abs(x-varphi)/stdphi)/(sqrt(2)*stdphi);
f2 = @(x) exp(-sqrt(2)*abs(x-theta)/stdtheta)/(sqrt(2)*stdtheta);
elseif (strcmp(dist,'Gaussian'))
f1 = @(x) exp(-(x-varphi).^2/(2*stdphi^2))/(sqrt(2*pi)*stdphi);
f2 = @(x) exp(-(x-theta).^2/(2*stdtheta^2))/(sqrt(2*pi)*stdtheta);
elseif (strcmp(dist,'Uniform'))
f1 = @(x) 1/(2*stdphi);
f2 = @(x) 1/(2*stdtheta);
else
error('Please provide a valid angle distribution')
end
%% Compute correlation matrix
LOOKUP = zeros(2*M_H-1,M_V); %Define a lookup table to speed up the computation
for m = 1:M
for l = 1:m
indi = i(l)-i(m) + M_H;
indj = j(l)-j(m) + M_V;
if (LOOKUP(indi,indj)==0)
integrand = @(p,t) exp(1j*2*pi*d_V*(j(l)-j(m)).*sin(t)) .* exp(1j*2*pi*d_H*(i(l)-i(m)).*cos(t).*sin(p)) .* f1(p) .* f2(t);
if (strcmp(dist,'Uniform'))
LOOKUP(indi,indj) = integral2(integrand, varphi-stdphi, varphi+stdphi, theta-stdtheta, theta+stdtheta);
else
LOOKUP(indi,indj) = integral2(integrand, varphi-20*stdphi, varphi+20*stdphi, theta-20*stdtheta, theta+20*stdtheta);
end
end
R(m,l) = LOOKUP(indi,indj);
R(l,m) = R(m,l)';
end
end
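As an illustration of the integral tabulated above, here is a rough NumPy/SciPy sketch (an editor's sketch, not part of the original code) that evaluates a single correlation entry for the Gaussian case; di and dj stand for the index offsets i(l)-i(m) and j(l)-j(m), and the real and imaginary parts are integrated separately because dblquad only handles real-valued integrands.

import numpy as np
from scipy.integrate import dblquad

def corr_entry_gaussian(di, dj, d_H, d_V, varphi, stdphi, theta, stdtheta):
    # Gaussian azimuth/elevation densities, as in the MATLAB code above
    f1 = lambda p: np.exp(-(p - varphi)**2 / (2*stdphi**2)) / (np.sqrt(2*np.pi)*stdphi)
    f2 = lambda t: np.exp(-(t - theta)**2 / (2*stdtheta**2)) / (np.sqrt(2*np.pi)*stdtheta)

    def integrand(t, p, part):
        phase = 2*np.pi * (d_V*dj*np.sin(t) + d_H*di*np.cos(t)*np.sin(p))
        val = np.exp(1j*phase) * f1(p) * f2(t)
        return val.real if part == "re" else val.imag

    p_lo, p_hi = varphi - 20*stdphi, varphi + 20*stdphi
    t_lo, t_hi = theta - 20*stdtheta, theta + 20*stdtheta
    re, _ = dblquad(integrand, p_lo, p_hi, lambda p: t_lo, lambda p: t_hi, args=("re",))
    im, _ = dblquad(integrand, p_lo, p_hi, lambda p: t_lo, lambda p: t_hi, args=("im",))
    return re + 1j*im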
|
# Part 2 scratch - NumPy
import csv
import numpy as np
# import nyc_taxi.csv as a list of lists
f = open("nyc_taxis.csv", "r")
taxi_list = list(csv.reader(f))
# remove the header row
taxi_list = taxi_list[1:]
# convert all values to floats
converted_taxi_list = []
for row in taxi_list:
converted_row = []
for item in row:
converted_row.append(float(item))
converted_taxi_list.append(converted_row)
taxi = np.array(converted_taxi_list) #create a numpy array from the list of lists
taxi_shape = taxi.shape # show number of columns & rows
print(taxi_shape)
row_0 = taxi[0] # select single row
rows_391_to_500 = taxi[391:501] # select range of rows
row_21_column_5 = taxi[21, 5] # select single element
# Select every row for the columns at indexes 1, 4, and 7
cols = [1,4,7]
columns_1_4_7 = taxi[:,cols]
# Select the columns at indexes 5 to 8 inclusive for the row at index 99
row_99_columns_5_to_8 = taxi[99, 5:9]
# Select the rows at indexes 100 to 200 inclusive for the column at index 14
rows_100_to_200_column_14 = taxi[100:201, 14]
# perform addition on two columns - can do directly without variables too
fare_amount = taxi[:,9]
fees_amount = taxi[:,10]
fare_and_fees = fare_amount + fees_amount
# calculate avg speed of each trip
# mph = miles traveled / time in hours
trip_distance_miles = taxi[:,7]
trip_length_seconds = taxi[:,8]
trip_length_hours = trip_length_seconds / 3600 # 3600 seconds is one hour
trip_mph = trip_distance_miles / trip_length_hours
mph_min = trip_mph.min()
mph_max = trip_mph.max() # very large value here
mph_mean = trip_mph.mean()
# we'll compare against the first 5 rows only
taxi_first_five = taxi[:5]
# select these columns: fare_amount, fees_amount, tolls_amount, tip_amount
fare_components = taxi_first_five[:,9:13]
# using ndarray.sum() method with axis to return the sum of each row
fare_sums = fare_components.sum(axis=1)
fare_totals = taxi_first_five[:,13]
print(fare_sums)
print(fare_totals)
|
[STATEMENT]
lemma elts_subset_mono:
assumes "S \<subseteq>\<^sub>\<circ> T"
shows "elts S \<subseteq> elts T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. elts S \<subseteq> elts T
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
S \<subseteq>\<^sub>\<circ> T
goal (1 subgoal):
1. elts S \<subseteq> elts T
[PROOF STEP]
by auto |
= = = Out on loan = = =
|
main()inline{
HideTraceLog();
ClearTraceLog();
once do(MapHeight()){
do(MapWidth())
SetTerrain(i, j, (((sin((i+j)/10.0)+1)/2.0)*5.0 * ((cos((i-j)/10.0)+1.0)/2.0)*5.0 )/25.0 * 9+1);
delay(1);
}
}
|
import os
import torch
import numpy
import random
from copy import deepcopy
from src.systems.recognition import CTC_System
from src.systems.setup import process_config
from src.utils import load_json
from quantization.utils import quantize_ctc_system
import pytorch_lightning as pl
import wandb
def run(config_path, gpu_device=-1):
config = process_config(config_path)
if gpu_device >= 0:
config.gpu_device = gpu_device
seed_everything(config.seed)
SystemClass = globals()[config.system]
system = SystemClass(config)
if config.quant_params.noise_rate > 0:
quantize_ctc_system(system, config)
ckpt_callback = pl.callbacks.ModelCheckpoint(
os.path.join(config.exp_dir, 'checkpoints'),
save_top_k=-1,
period=1,
)
wandb.init(
project='speech',
entity='lyronctk',
name=config.exp_name,
config=config,
sync_tensorboard=True,
)
trainer = pl.Trainer(
default_root_dir=config.exp_dir,
gpus=([config.gpu_device] if config.cuda else None),
max_epochs=config.num_epochs,
min_epochs=config.num_epochs,
checkpoint_callback=ckpt_callback,
resume_from_checkpoint=config.continue_from_checkpoint
)
trainer.fit(system)
def seed_everything(seed):
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
numpy.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str, default='path to config file')
parser.add_argument('--gpu-device', type=int, default=-1)
args = parser.parse_args()
run(args.config, gpu_device=args.gpu_device)
|
! This file is part of mctc-lib.
!
! Licensed under the Apache License, Version 2.0 (the "License");
! you may not use this file except in compliance with the License.
! You may obtain a copy of the License at
!
! http://www.apache.org/licenses/LICENSE-2.0
!
! Unless required by applicable law or agreed to in writing, software
! distributed under the License is distributed on an "AS IS" BASIS,
! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
! See the License for the specific language governing permissions and
! limitations under the License.
!> Numerical constants
module mctc_io_constants
use mctc_env_accuracy, only : wp
implicit none
private
public :: pi, codata
!> Ratio between a circle's circumference and its diameter
real(wp), parameter :: pi = 3.1415926535897932384626433832795029_wp
!> Natural constants defining the SI unit base
type :: enum_codata
!> Planck's constant
real(wp) :: h = 6.62607015e-34_wp ! J·s = kg·m²·s⁻¹
!> Speed of light in vacuum
real(wp) :: c = 299792458.0_wp ! m·s⁻¹
!> Boltzmann's constant
real(wp) :: kb = 1.380649e-23_wp ! J·K⁻¹ = kg·m²·s⁻²·K⁻¹
!> Avogadro's number
real(wp) :: NA = 6.02214076e23_wp ! mol⁻¹
!> Elementary charge
real(wp) :: e = 1.602176634e-19_wp ! C
!> fine structure constant (CODATA2018)
real(wp) :: alpha = 1.0_wp/137.035999046_wp ! dimensionless
!> electron rest mass
real(wp) :: me = 9.10938356e-31_wp ! kg
end type enum_codata
!> Actual collection of natural constants
type(enum_codata), parameter :: codata = enum_codata()
end module mctc_io_constants
|
Formal statement is: lemma not_dvd_pderiv: fixes p :: "'a::{comm_semiring_1,semiring_no_zero_divisors,semiring_char_0} poly" assumes "degree p \<noteq> 0" shows "\<not> p dvd pderiv p" Informal statement is: If $p$ is a non-constant polynomial, then $p$ does not divide its derivative. |
Select from dozens of sold-out tickets you can purchase; our company is your number one way to shop for tickets online! Don't miss your chance to catch this unbelievable performance as the artist stops in California to play live at Pacific Amphitheatre. Purchase tickets below to see Justin Moore live at Pacific Amphitheatre on July 14, 2019! |
If $g$ is a summable sequence of non-negative real numbers, and $f$ is a sequence of complex numbers such that $|f_n| \leq g_n$ for all $n \geq N$, then $f$ is summable. |
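A possible formal rendering of the preceding informal statement (editor's paraphrase, not the original formal statement): if $g_n \ge 0$ for all $n$, $\sum_n g_n < \infty$, and there is an $N$ with $|f_n| \le g_n$ for every $n \ge N$, then $\sum_n f_n$ converges (indeed absolutely).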
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e191m19_8limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
subroutine smoothedgetriple(an1,an2,bn1,bn2,coord,axcycle,aycycle,
* azcycle,bxcycle,bycycle,bzcycle,nx0,nx3,
* weight,ierr2)
c
c #####################################################################
c
c PURPOSE -
c
c This subroutine does volume-conserving smoothing of a triple
c line using edge relaxations.
c
c INPUT ARGUMENTS -
c
c an1 (bn1) - number of nodes on the surface of material a (b)
c that surround x1
c
c an2 (bn2) - number of nodes on the surface of material a (b)
c that surround x2
c
c coord - array of size 6 containing the x-coordinate of x1,
c y-coordinate of x1, z-coordinate of x1, x-coordinate of x2,
c y-coordinate of x2, z-coordinate of x2
c
c axcycle (bxcycle) - array of size an1+an2-4 (bn1+bn2-4)
c containing the x-coordinates of the counterclockwise cycle of
c nodes on the surface of material a (b) that surround the edge
c x1-x2, starting with the first node counterclockwise of x2 when
c rotating around x1
c
c aycycle (bycycle) - same as axcycle (bxcycle) except contains
c y-coordinates
c
c azcycle (bzcycle) - same as axcycle (bxcycle) except contains
c z-coordinates
c
c nx0 (nx3) - index of x0 (x3) in axcycle, aycycle, azcycle
c
c weight - number between 0 and 1, inclusive, which controls the
c amount of smoothing
c
c ierr2 - error flag
c
c OUTPUT ARGUMENTS -
c
c coord, ierr2
c
c CHANGE HISTORY -
c
c $Log: smooth_vconserve_triple.f,v $
c Revision 2.00 2007/11/09 20:04:03 spchu
c Import to CVS
c
CPVCS
CPVCS Rev 1.2 18 Feb 2001 16:29:58 nnc
CPVCS Trivial constant change to satisfy Absoft compiler.
CPVCS
CPVCS Rev 1.1 16 Aug 2000 08:48:20 dcg
CPVCS change subroutine name to match call from cycle_lg
CPVCS
CPVCS Rev 1.0 10 Aug 2000 11:58:32 dcg
CPVCS Initial revision.
c
c #####################################################################
c
implicit none
integer an1,an2,bn1,bn2
real*8 coord(6)
real*8 axcycle(an1+an2-4),aycycle(an1+an2-4),azcycle(an1+an2-4)
real*8 bxcycle(bn1+bn2-4),bycycle(bn1+bn2-4),bzcycle(bn1+bn2-4)
integer nx0,nx3
real*8 weight
integer ierr2
c
real*8 small
integer it
real*8 ae(3,204),be(3,204)
c The first an1 (bn1) columns of ae (be) are vectors from x1 to
c surrounding nodes, starting with x2, rotating counterclockwise on
c the surface of material a (b). The next an2 (bn2) columns of ae
c (be) are vectors from x2 to surrounding nodes, starting with x1,
c rotating counterclockwise on the surface of material a (b).
real*8 aA1(3),aA2(3),av(3)
real*8 bA1(3),bA2(3),bv(3)
real*8 temp1,dx1s(3),dx2s(3)
real*8 diff(3),aA(3),bA(3)
real*8 normaAsquared,normbAsquared,aAdotbA,det,ag,bg,h,k
real*8 n(3)
c
ierr2=0
c small should actually be computed
small=1.0e-13
c
c compute ae
do it=1,3
ae(it,1)=coord(3+it)-coord(it)
ae(it,an1+1)=-ae(it,1)
enddo
do it=2,an1
ae(1,it)=axcycle(it-1)-coord(1)
ae(2,it)=aycycle(it-1)-coord(2)
ae(3,it)=azcycle(it-1)-coord(3)
enddo
do it=an1+2,an1+an2-1
ae(1,it)=axcycle(it-3)-coord(4)
ae(2,it)=aycycle(it-3)-coord(5)
ae(3,it)=azcycle(it-3)-coord(6)
enddo
ae(1,an1+an2)=axcycle(1)-coord(4)
ae(2,an1+an2)=aycycle(1)-coord(5)
ae(3,an1+an2)=azcycle(1)-coord(6)
c
c compute be
do it=1,3
be(it,1)=coord(3+it)-coord(it)
be(it,bn1+1)=-be(it,1)
enddo
do it=2,bn1
be(1,it)=bxcycle(it-1)-coord(1)
be(2,it)=bycycle(it-1)-coord(2)
be(3,it)=bzcycle(it-1)-coord(3)
enddo
do it=bn1+2,bn1+bn2-1
be(1,it)=bxcycle(it-3)-coord(4)
be(2,it)=bycycle(it-3)-coord(5)
be(3,it)=bzcycle(it-3)-coord(6)
enddo
be(1,bn1+bn2)=bxcycle(1)-coord(4)
be(2,bn1+bn2)=bycycle(1)-coord(5)
be(3,bn1+bn2)=bzcycle(1)-coord(6)
c
c compute aA1 and aA2
do it=1,3
aA1(it)=0
aA2(it)=0
enddo
do it=1,an1-1
aA1(1)=aA1(1)+ae(2,it)*ae(3,it+1)-ae(3,it)*ae(2,it+1)
aA1(2)=aA1(2)+ae(3,it)*ae(1,it+1)-ae(1,it)*ae(3,it+1)
aA1(3)=aA1(3)+ae(1,it)*ae(2,it+1)-ae(2,it)*ae(1,it+1)
enddo
aA1(1)=aA1(1)+ae(2,an1)*ae(3,1)-ae(3,an1)*ae(2,1)
aA1(2)=aA1(2)+ae(3,an1)*ae(1,1)-ae(1,an1)*ae(3,1)
aA1(3)=aA1(3)+ae(1,an1)*ae(2,1)-ae(2,an1)*ae(1,1)
do it=an1+1,an1+an2-1
aA2(1)=aA2(1)+ae(2,it)*ae(3,it+1)-ae(3,it)*ae(2,it+1)
aA2(2)=aA2(2)+ae(3,it)*ae(1,it+1)-ae(1,it)*ae(3,it+1)
aA2(3)=aA2(3)+ae(1,it)*ae(2,it+1)-ae(2,it)*ae(1,it+1)
enddo
aA2(1)=aA2(1)+ae(2,an1+an2)*ae(3,an1+1)-ae(3,an1+an2)*ae(2,an1+1)
aA2(2)=aA2(2)+ae(3,an1+an2)*ae(1,an1+1)-ae(1,an1+an2)*ae(3,an1+1)
aA2(3)=aA2(3)+ae(1,an1+an2)*ae(2,an1+1)-ae(2,an1+an2)*ae(1,an1+1)
c
c compute av
do it=1,3
av(it)=ae(it,an1+an2)-ae(it,an1+2)
enddo
c
c compute bA1 and bA2
do it=1,3
bA1(it)=0
bA2(it)=0
enddo
do it=1,bn1-1
bA1(1)=bA1(1)+be(2,it)*be(3,it+1)-be(3,it)*be(2,it+1)
bA1(2)=bA1(2)+be(3,it)*be(1,it+1)-be(1,it)*be(3,it+1)
bA1(3)=bA1(3)+be(1,it)*be(2,it+1)-be(2,it)*be(1,it+1)
enddo
bA1(1)=bA1(1)+be(2,bn1)*be(3,1)-be(3,bn1)*be(2,1)
bA1(2)=bA1(2)+be(3,bn1)*be(1,1)-be(1,bn1)*be(3,1)
bA1(3)=bA1(3)+be(1,bn1)*be(2,1)-be(2,bn1)*be(1,1)
do it=bn1+1,bn1+bn2-1
bA2(1)=bA2(1)+be(2,it)*be(3,it+1)-be(3,it)*be(2,it+1)
bA2(2)=bA2(2)+be(3,it)*be(1,it+1)-be(1,it)*be(3,it+1)
bA2(3)=bA2(3)+be(1,it)*be(2,it+1)-be(2,it)*be(1,it+1)
enddo
bA2(1)=bA2(1)+be(2,bn1+bn2)*be(3,bn1+1)-be(3,bn1+bn2)*be(2,bn1+1)
bA2(2)=bA2(2)+be(3,bn1+bn2)*be(1,bn1+1)-be(1,bn1+bn2)*be(3,bn1+1)
bA2(3)=bA2(3)+be(1,bn1+bn2)*be(2,bn1+1)-be(2,bn1+bn2)*be(1,bn1+1)
c
c compute bv
do it=1,3
bv(it)=be(it,bn1+bn2)-be(it,bn1+2)
enddo
c
c compute dx1s and dx2s
temp1=2.d0/3.d0
dx1s(1)=axcycle(nx3)/3.d0+temp1*axcycle(nx0)-coord(1)
dx1s(2)=aycycle(nx3)/3.d0+temp1*aycycle(nx0)-coord(2)
dx1s(3)=azcycle(nx3)/3.d0+temp1*azcycle(nx0)-coord(3)
dx2s(1)=temp1*axcycle(nx3)+axcycle(nx0)/3.d0-coord(4)
dx2s(2)=temp1*aycycle(nx3)+aycycle(nx0)/3.d0-coord(5)
dx2s(3)=temp1*azcycle(nx3)+azcycle(nx0)/3.d0-coord(6)
do it=1,3
dx1s(it)=weight*dx1s(it)
dx2s(it)=weight*dx2s(it)
enddo
c
c compute aA
do it=1,3
diff(it)=dx1s(it)-dx2s(it)
enddo
aA(1)=aA1(1)+aA2(1)+av(2)*diff(3)-av(3)*diff(2)
aA(2)=aA1(2)+aA2(2)+av(3)*diff(1)-av(1)*diff(3)
aA(3)=aA1(3)+aA2(3)+av(1)*diff(2)-av(2)*diff(1)
c
c compute bA
bA(1)=bA1(1)+bA2(1)+bv(2)*diff(3)-bv(3)*diff(2)
bA(2)=bA1(2)+bA2(2)+bv(3)*diff(1)-bv(1)*diff(3)
bA(3)=bA1(3)+bA2(3)+bv(1)*diff(2)-bv(2)*diff(1)
c
normaAsquared=aA(1)**2+aA(2)**2+aA(3)**2
normbAsquared=bA(1)**2+bA(2)**2+bA(3)**2
aAdotbA=aA(1)*bA(1)+aA(2)*bA(2)+aA(3)*bA(3)
det=normaAsquared*normbAsquared-aAdotbA**2
if (det.le.small) then
print *, 'det <= small, nodes not moved'
elseif (det/(normaAsquared*normbAsquared).le.small) then
print *, 'sin2theta <= small, nodes not moved'
else
ag=dx2s(1)*(av(2)*dx1s(3)-av(3)*dx1s(2))+
* dx2s(2)*(av(3)*dx1s(1)-av(1)*dx1s(3))+
* dx2s(3)*(av(1)*dx1s(2)-av(2)*dx1s(1))
do it=1,3
ag=ag+dx1s(it)*aA1(it)+dx2s(it)*aA2(it)
enddo
ag=-ag
bg=dx2s(1)*(bv(2)*dx1s(3)-bv(3)*dx1s(2))+
* dx2s(2)*(bv(3)*dx1s(1)-bv(1)*dx1s(3))+
* dx2s(3)*(bv(1)*dx1s(2)-bv(2)*dx1s(1))
do it=1,3
bg=bg+dx1s(it)*bA1(it)+dx2s(it)*bA2(it)
enddo
bg=-bg
h=(normbAsquared*ag-aAdotbA*bg)/det
k=(normaAsquared*bg-aAdotbA*ag)/det
do it=1,3
n(it)=h*aA(it)+k*bA(it)
coord(it)=coord(it)+dx1s(it)+n(it)
coord(it+3)=coord(it+3)+dx2s(it)+n(it)
enddo
endif
return
end
|
#
# Combinator-based embedded query language.
#
module DataKnots
export
@query,
Collect,
Count,
DataKnot,
Drop,
Each,
Exists,
Filter,
First,
Get,
Given,
Group,
Is,
Is0to1,
Is0toN,
Is1to1,
Is1toN,
It,
Join,
Keep,
Label,
Last,
Let,
Lift,
Max,
Min,
Mix,
Nth,
Record,
Sort,
Sum,
Tag,
Take,
Unique,
unitknot
include("layouts.jl")
include("vectors.jl")
include("shapes.jl")
include("knots.jl")
include("pipelines.jl")
include("rewrites.jl")
include("queries.jl")
end
|
require(lattice)
grid=list();
grid$arr_mod=vector();
grid$arr_tx=vector();
grid$arr_thr=vector();
filename="trace-ma-eb"
data = read.table(filename);
len = length(data[[1]])
for( i in 1:len )
{
grid$arr_mod=c(grid$arr_mod, data[[1]][[i]])
grid$arr_tx=c(grid$arr_tx, data[[2]][[i]])
grid$arr_thr=c(grid$arr_thr, data[[3]][[i]])
}
wireframe( arr_thr~arr_mod * arr_tx, grid, drape=TRUE, aspect = c(1, 0.4), colorkey=TRUE, default.scales = list(distance = c(1, 1, 1), arrows=FALSE), screen = list(z = -40, x = -60) )
|
lemma box_midpoint: fixes a :: "'a::euclidean_space" assumes "box a b \<noteq> {}" shows "((1/2) *\<^sub>R (a + b)) \<in> box a b" |
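Informal statement (editor's paraphrase): if the open box $\mathrm{box}\ a\ b$ in a Euclidean space is nonempty, then its midpoint $\tfrac{1}{2}(a + b)$ lies in the box.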
module Common.Abbrev
%default total
%access public export
Binop : Type -> Type
Binop s = s -> s -> s
Binrel : Type -> Type
Binrel s = s -> s -> Type
OuterBinop : {index : Type} -> (f : index -> Type) -> (a,b,c : index) -> Type
OuterBinop f a b c = f a -> f b -> f c
infixl 5 ===
infixl 5 @==
||| associative infix syntax for `trans`
(===) : a = b -> b = c -> a = c
(===) Refl Refl = Refl
(@==) : a = b -> a = c -> b = c
(@==) Refl Refl = Refl
|
State Before: α : Type u_2
β : Type ?u.86729
β₂ : Type ?u.86732
γ : Type ?u.86735
ι : Sort u_1
ι' : Sort ?u.86741
κ : ι → Sort ?u.86746
κ' : ι' → Sort ?u.86751
inst✝¹ : CompleteLattice α
f g s t : ι → α
a b : α
inst✝ : Nonempty ι
⊢ (⨆ (x : ι), a) = a State After: no goals Tactic: rw [iSup, range_const, sSup_singleton] |
%Program for Fusing 2 images
%Author : Athi Narayanan S
%M.E, Embedded Systems,
%K.S.R College of Engineering
%Erode, Tamil Nadu, India.
%http://sites.google.com/site/athisnarayanan/
%[email protected]
%Program Description
%This function is the core image-fusion routine of the application.
%It fuses/combines 2 images
%It supports both Gray & Color Images
%The Alpha Factor can be varied to control the proportion of mixing of each image.
%With Alpha Factor = 0.5, the two images are mixed equally.
%With Alpha Factor < 0.5, the contribution of the background image is larger.
%With Alpha Factor > 0.5, the contribution of the foreground image is larger.
function fusedImg = FuseImages(bgImg, fgImg, alphaFactor)
bgImg = double(bgImg);
fgImg = double(fgImg);
fgImgAlpha = alphaFactor .* fgImg;
bgImgAlpha = (1 - alphaFactor) .* bgImg;
fusedImg = fgImgAlpha + bgImgAlpha;
|
datahg19 <- read.table(file = './hg19AllMethods.tsv', sep = '\t', header = TRUE)
# read gened.gtf file
genes <- read.table('./hg19_genes.gtf', header = FALSE, sep = '\t')[,c(2,13)]
colnames(genes) <- c("transcript_id", "gene_id")
# rename transctipt col
colnames(datahg19)[1] <- "transcript_id"
# merge for genes
datahg19 <- merge(x = datahg19, y = genes, by = "transcript_id", all.x = TRUE, all.y = FALSE)
datahg19 <- datahg19[!is.na(datahg19$gene_id),]
datahg19 <- datahg19[,c(length(datahg19),3:length(datahg19)-1)]
datahg19 <- datahg19[!duplicated(datahg19$gene_id),]
datahg19 <- as.data.frame(lapply(datahg19, unlist))
colnames(datahg19)[1] <- "Id"
#simData <- read.csv(file = "../simdata.csv", sep = ",", header = TRUE, stringsAsFactors = FALSE)
#dataDM6 <- merge(x = dataDM6, y = simData, by = "Id", all.x = TRUE, all.y = FALSE)
datahg19$class <- ifelse(test = (datahg19$class == 0),yes = FALSE,no = TRUE)
#write to csv
write.csv(x = datahg19, file = "./hg19Final.csv", row.names = FALSE) |
sizeof ('c') : 4
sizeof (10 * 'c') : 4
sizeof (100 + 12345) : 4
sizeof (1234567890) : 4
sizeof (1234512345 * 1234512345 * 12345678 * 1ULL) : 8
sizeof (-129) : 4
sizeof ({ptr}/{ptr}) : 4
sizeof (3 > 2 ? 3 : 2) : 4
|
Require Import Raft.
Require Import RaftRefinementInterface.
Require Import CommonDefinitions.
Require Import RefinementCommonDefinitions.
Section EveryEntryWasCreated.
Context {orig_base_params : BaseParams}.
Context {one_node_params : OneNodeParams orig_base_params}.
Context {raft_params : RaftParams orig_base_params}.
Definition every_entry_was_created (net : network) : Prop :=
forall e t h l,
In (t, l) (leaderLogs (fst (nwState net h))) ->
In e l ->
term_was_created net (eTerm e).
Inductive in_any_log (net : network) (e : entry) : Prop :=
| in_log : forall h, In e (log (snd (nwState net h))) ->
in_any_log net e
| in_aer : forall p es, In p (nwPackets net) ->
mEntries (pBody p) = Some es ->
In e es ->
in_any_log net e
| in_ll : forall h t ll, In (t, ll) (leaderLogs (fst (nwState net h))) ->
In e ll ->
in_any_log net e.
Class every_entry_was_created_interface : Prop :=
{
every_entry_was_created_invariant :
forall net,
refined_raft_intermediate_reachable net ->
every_entry_was_created net ;
every_entry_was_created_in_any_log_invariant :
forall net e,
refined_raft_intermediate_reachable net ->
in_any_log net e ->
term_was_created net (eTerm e)
}.
End EveryEntryWasCreated. |
[STATEMENT]
lemma suminf_emeasure:
"range A \<subseteq> sets M \<Longrightarrow> disjoint_family A \<Longrightarrow> (\<Sum>i. emeasure M (A i)) = emeasure M (\<Union>i. A i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>range A \<subseteq> sets M; disjoint_family A\<rbrakk> \<Longrightarrow> (\<Sum>i. emeasure M (A i)) = emeasure M (\<Union> (range A))
[PROOF STEP]
using sets.countable_UN[of A UNIV M] emeasure_countably_additive[of M]
[PROOF STATE]
proof (prove)
using this:
range A \<subseteq> sets M \<Longrightarrow> \<Union> (range A) \<in> sets M
countably_additive (sets M) (emeasure M)
goal (1 subgoal):
1. \<lbrakk>range A \<subseteq> sets M; disjoint_family A\<rbrakk> \<Longrightarrow> (\<Sum>i. emeasure M (A i)) = emeasure M (\<Union> (range A))
[PROOF STEP]
by (simp add: countably_additive_def) |
lemma small_big_trans': "f \<in> l F (g) \<Longrightarrow> g \<in> L F (h) \<Longrightarrow> f \<in> L F (h)" |
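Informal statement (editor's paraphrase): for a Landau symbol pair such as $(O, o)$ along a filter $F$: if $f$ lies in the small symbol of $g$ and $g$ lies in the big symbol of $h$, then $f$ lies in the big symbol of $h$; for instance, $f \in o(g)$ and $g \in O(h)$ imply $f \in O(h)$.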
# 1-D Diffusion equation
$$\frac{\partial u}{\partial t}= \nu \frac{\partial^2 u}{\partial x^2}$$
```python
# needed imports
from numpy import zeros, ones, linspace, zeros_like
from matplotlib.pyplot import plot, show
%matplotlib inline
```
```python
# Initial condition
import numpy as np
u0 = lambda x: np.exp(-(x-.5)**2/.05**2)
grid = linspace(0., 1., 401)
u = u0(grid)
plot(grid, u) ; show()
```
### Time scheme
$$\frac{u^{n+1}-u^n}{\Delta t} - \nu \partial_{xx} u^{n+1} = 0 $$
$$ \left(I - \nu \Delta t \partial_{xx} \right) u^{n+1} = u^n $$
### Weak formulation
$$
\langle v, u^{n+1} \rangle + \nu \Delta t ~ \langle \partial_x v, \partial_x u^{n+1} \rangle = \langle v, u^n \rangle
$$
expending $u^n$ over the fem basis, we get the linear system
$$A U^{n+1} = M U^n$$
where
$$
M_{ij} = \langle b_i, b_j \rangle
$$
$$
A_{ij} = \langle b_i, b_j \rangle + \nu \Delta t ~ \langle \partial_x b_i, \partial_x b_j \rangle
$$
## Abstract Model using SymPDE
```python
from sympde.core import Constant
from sympde.expr import BilinearForm, LinearForm, integral
from sympde.topology import ScalarFunctionSpace, Line, element_of, dx
from sympde.topology import dx1 # TODO: this is a bug right now
```
```python
# ... abstract model
domain = Line()
V = ScalarFunctionSpace('V', domain)
x = domain.coordinates
u,v = [element_of(V, name=i) for i in ['u', 'v']]
nu = Constant('nu')
dt = Constant('dt')
# bilinear form
# expr = v*u - c*dt*dx(v)*u # TODO BUG not working
expr = v*u + nu*dt*dx1(v)*dx1(u)
a = BilinearForm((u,v), integral(domain , expr))
# bilinear form for the mass matrix
expr = u*v
m = BilinearForm((u,v), integral(domain , expr))
# linear form for initial condition
from sympy import exp
expr = exp(-(x-.5)**2/.05**2)*v
l = LinearForm(v, integral(domain, expr))
```
## Discretization using Psydac
```python
from psydac.api.discretization import discretize
```
```python
nu = 0.3 # viscosity
T = 0.02 # T final time
dt = 0.001
niter = int(T / dt)
degree = [3] # spline degree
ncells = [64] # number of elements
```
```python
# Create computational domain from topological domain
domain_h = discretize(domain, ncells=ncells, comm=None)
# Discrete spaces
Vh = discretize(V, domain_h, degree=degree)
# Discretize the bilinear forms
ah = discretize(a, domain_h, [Vh, Vh])
mh = discretize(m, domain_h, [Vh, Vh])
# Discretize the linear form for the initial condition
lh = discretize(l, domain_h, Vh)
```
```python
# assemble matrices and convert them to scipy
M = mh.assemble().tosparse()
A = ah.assemble(nu=nu, dt=dt).tosparse()
# assemble the rhs and convert it to numpy array
rhs = lh.assemble().toarray()
```
```python
from scipy.sparse.linalg import cg, gmres
```
```python
# L2 projection of the initial condition
un, status = cg(M, rhs, tol=1.e-8, maxiter=5000)
```
```python
from simplines import plot_field_1d
plot_field_1d(Vh.knots[0], Vh.degree[0], un, nx=401)
```
```python
for i in range(0, niter):
b = M.dot(un)
un, status = gmres(A, b, tol=1.e-8, maxiter=5000)
```
```python
plot_field_1d(Vh.knots[0], Vh.degree[0], un, nx=401)
```
```python
```
|
//
// Created by june on 19. 12. 5..
//
// from ros-control meta packages
#include <controller_interface/controller.h>
#include <hardware_interface/joint_command_interface.h>
#include <pluginlib/class_list_macros.h>
#include <urdf/model.h>
#include <realtime_tools/realtime_buffer.h>
#include <realtime_tools/realtime_publisher.h>
#include <geometry_msgs/WrenchStamped.h>
#include "utils.h"
#include "dualarm_controller/TaskCurrentState.h"
#include "dualarm_controller/TaskDesiredState.h"
//manipulability
#include <ellipsoid.h>
#include <similarity.h>
// from kdl packages
#include <kdl/tree.hpp>
#include <kdl/chain.hpp>
#include <kdl_parser/kdl_parser.hpp>
#include <kdl/chaindynparam.hpp> // inverse dynamics
#include <kdl/chainjnttojacsolver.hpp> // jacobian
#include <kdl/chainfksolverpos_recursive.hpp> // forward kinematics
#include <boost/scoped_ptr.hpp>
#define _USE_MATH_DEFINES
#include <cmath>
#include <SerialManipulator.h>
#include <Controller.h>
#include <Motion.h>
#define D2R M_PI/180.0
#define R2D 180.0/M_PI
#define A 0.12
#define b1 0.55
#define b2 -0.43
#define b3 0.45
#define f 0.2
#define l_p1 0.45
#define l_p2 0.43
#define l_p3 0.39
#define Deg_A 70
#define Deg_f 0.5
namespace dualarm_controller
{
class ClosedLoopIK_Control : public controller_interface::Controller<hardware_interface::EffortJointInterface>
{
public:
bool init(hardware_interface::EffortJointInterface *hw, ros::NodeHandle &n)
{
// ********* 1. Get joint name / gain from the parameter server *********
// 1.0 Control objective & Inverse Kinematics mode
if (!n.getParam("ctr_obj", ctr_obj_))
{
ROS_ERROR("Could not find control objective");
return false;
}
if (!n.getParam("ik_mode", ik_mode_))
{
ROS_ERROR("Could not find control objective");
return false;
}
if( ctr_obj_ == 7 && ik_mode_ != 4 )
{
ROS_ERROR("!! If the ctr_obj is 8, ik_mode_ should be 4 !!");
return false;
}
// 1.1 Joint Name
if (!n.getParam("joints", joint_names_))
{
ROS_ERROR("Could not find joint name");
return false;
}
n_joints_ = joint_names_.size();
if (n_joints_ == 0)
{
ROS_ERROR("List of joint names is empty.");
return false;
}
else
{
ROS_INFO("Found %d joint names", n_joints_);
for (int i = 0; i < n_joints_; i++)
{
ROS_INFO("%s", joint_names_[i].c_str());
}
}
// 1.2 Gain
// 1.2.1 Joint Controller
Kp_.data.setZero(n_joints_);
Kd_.data.setZero(n_joints_);
Ki_.data.setZero(n_joints_);
K_inf_.data.setZero(n_joints_);
for (size_t i = 0; i < n_joints_; i++)
{
std::string si = std::to_string(i + 1);
if (!n.getParam("/dualarm/closedloopik_control/gains/dualarm_joint" + si + "/pid/p", Kp_.data(i)))
{
ROS_ERROR("Cannot find pid/p gain");
return false;
}
if (!n.getParam("/dualarm/closedloopik_control/gains/dualarm_joint" + si + "/pid/i", Ki_.data(i)))
{
ROS_ERROR("Cannot find pid/i gain");
return false;
}
if (!n.getParam("/dualarm/closedloopik_control/gains/dualarm_joint" + si + "/pid/d", Kd_.data(i)))
{
ROS_ERROR("Cannot find pid/d gain");
return false;
}
if (!n.getParam("/dualarm/closedloopik_control/gains/dualarm_joint" + si + "/pid/h", K_inf_.data(i)))
{
ROS_ERROR("Cannot find pid/h gain");
return false;
}
}
// 1.2.2 Closed-loop Inverse Kinematics Controller
if (!n.getParam("/dualarm/closedloopik_control/clik_gain/K_pos", K_trans))
{
ROS_ERROR("Cannot find clik translation gain");
return false;
}
if (!n.getParam("/dualarm/closedloopik_control/clik_gain/K_ori", K_rot))
{
ROS_ERROR("Cannot find clik rotation gain");
return false;
}
// 2. ********* urdf *********
urdf::Model urdf;
if (!urdf.initParam("robot_description"))
{
ROS_ERROR("Failed to parse urdf file");
return false;
}
else
{
ROS_INFO_STREAM("Found robot_description");
}
// 3. ********* Get the joint object to use in the realtime loop [Joint Handle, URDF] *********
for (int i = 0; i < n_joints_; i++)
{
try
{
joints_.push_back(hw->getHandle(joint_names_[i]));
}
catch (const hardware_interface::HardwareInterfaceException &e)
{
ROS_ERROR_STREAM("Exception thrown: " << e.what());
return false;
}
urdf::JointConstSharedPtr joint_urdf = urdf.getJoint(joint_names_[i]);
if (!joint_urdf)
{
ROS_ERROR("Could not find joint '%s' in urdf", joint_names_[i].c_str());
return false;
}
joint_urdfs_.push_back(joint_urdf);
}
// 4. ********* KDL *********
// 4.1 kdl parser
if (!kdl_parser::treeFromUrdfModel(urdf, kdl_tree_))
{
ROS_ERROR("Failed to construct kdl tree");
return false;
}
else
{
ROS_INFO_STREAM("Constructed kdl tree");
}
// 4.2 kdl chain
std::string root_name, tip_name1, tip_name2;
if (!n.getParam("root_link", root_name))
{
ROS_ERROR("Could not find root link name");
return false;
}
if (!n.getParam("tip_link1", tip_name1))
{
ROS_ERROR("Could not find tip link name");
return false;
}
if (!n.getParam("tip_link2", tip_name2))
{
ROS_ERROR("Could not find tip link name");
return false;
}
if (!kdl_tree_.getChain(root_name, tip_name1, kdl_chain_))
{
ROS_ERROR_STREAM("Failed to get KDL chain from tree: ");
ROS_ERROR_STREAM(" " << root_name << " --> " << tip_name1);
ROS_ERROR_STREAM(" Chain has " << kdl_chain_.getNrOfJoints() << " joints");
ROS_ERROR_STREAM(" Chain has " << kdl_chain_.getNrOfSegments() << " segments");
ROS_ERROR_STREAM(" The segments are:");
KDL::SegmentMap segment_map = kdl_tree_.getSegments();
KDL::SegmentMap::iterator it;
for (it = segment_map.begin(); it != segment_map.end(); it++)
ROS_ERROR_STREAM(" " << (*it).first);
return false;
}
else
{
ROS_INFO_STREAM("Got kdl first chain");
ROS_INFO_STREAM(" " << root_name << " --> " << tip_name1);
ROS_INFO_STREAM(" Chain has " << kdl_chain_.getNrOfJoints() << " joints");
ROS_INFO_STREAM(" Chain has " << kdl_chain_.getNrOfSegments() << " segments");
}
if(!kdl_tree_.getChain(root_name, tip_name2, kdl_chain2_))
{
ROS_ERROR_STREAM("Failed to get KDL chain from tree: ");
ROS_ERROR_STREAM(" " << root_name << " --> " << tip_name2);
ROS_ERROR_STREAM(" Chain has " << kdl_chain2_.getNrOfJoints() << " joints");
ROS_ERROR_STREAM(" Chain has " << kdl_chain2_.getNrOfSegments() << " segments");
ROS_ERROR_STREAM(" The segments are:");
KDL::SegmentMap segment_map = kdl_tree_.getSegments();
KDL::SegmentMap::iterator it;
for (it = segment_map.begin(); it != segment_map.end(); it++)
ROS_ERROR_STREAM(" " << (*it).first);
return false;
}
else
{
ROS_INFO_STREAM("Got kdl second chain");
ROS_INFO_STREAM(" " << root_name << " --> " << tip_name2);
ROS_INFO_STREAM(" Chain has " << kdl_chain2_.getNrOfJoints() << " joints");
ROS_INFO_STREAM(" Chain has " << kdl_chain2_.getNrOfSegments() << " segments");
}
// 4.3 Initialize the inverse dynamics solvers
g_kdl_ = KDL::Vector::Zero();
g_kdl_(2) = -9.81; // 0: x-axis 1: y-axis 2: z-axis
id_solver_.reset(new KDL::ChainDynParam(kdl_chain_, g_kdl_));
jnt_to_jac_solver_.reset(new KDL::ChainJntToJacSolver(kdl_chain_));
fk_pos_solver_.reset(new KDL::ChainFkSolverPos_recursive(kdl_chain_));
J1_kdl_.resize(kdl_chain_.getNrOfJoints());
M_kdl_.resize(kdl_chain_.getNrOfJoints());
C_kdl_.resize(kdl_chain_.getNrOfJoints());
G_kdl_.resize(kdl_chain_.getNrOfJoints());
id_solver1_.reset(new KDL::ChainDynParam(kdl_chain2_, g_kdl_));
jnt_to_jac_solver1_.reset(new KDL::ChainJntToJacSolver(kdl_chain2_));
fk_pos_solver1_.reset(new KDL::ChainFkSolverPos_recursive(kdl_chain2_));
J2_kdl_.resize(kdl_chain2_.getNrOfJoints());
M1_kdl_.resize(kdl_chain2_.getNrOfJoints());
C1_kdl_.resize(kdl_chain2_.getNrOfJoints());
G1_kdl_.resize(kdl_chain2_.getNrOfJoints());
// ********* 5. Initialize variables *********
// 5.1 Initialize KDL vectors (define sizes, set values to 0)
x_cmd_.data = Eigen::VectorXd::Zero(12);
targetpos.setZero(12);
ex_.setZero(12);
ex_dot_.setZero(12);
dx.setZero(12);
dxdot.setZero(12);
qd_.data = Eigen::VectorXd::Zero(n_joints_);
qd_dot_.data = Eigen::VectorXd::Zero(n_joints_);
qd_ddot_.data = Eigen::VectorXd::Zero(n_joints_);
qd_old_.data = Eigen::VectorXd::Zero(n_joints_);
q_.data = Eigen::VectorXd::Zero(n_joints_);
qdot_.data = Eigen::VectorXd::Zero(n_joints_);
torque.setZero(n_joints_);
ft_sensor.setZero(12);
wpInv_lambda[0].setZero(3);
wpInv_lambda[1].setZero(3);
// ********* 6. ROS commands *********
// 6.1 publisher
state_pub_.reset(new realtime_tools::RealtimePublisher<dualarm_controller::TaskCurrentState>(n, "states", 10));
state_pub_->msg_.header.stamp = ros::Time::now();
state_pub_->msg_.header.frame_id = "dualarm";
state_pub_->msg_.header.seq=0;
for(int i=0; i<n_joints_; i++)
{
state_pub_->msg_.q.push_back(q_.data(i));
state_pub_->msg_.qdot.push_back(qdot_.data(i));
state_pub_->msg_.dq.push_back(qd_.data(i));
state_pub_->msg_.dqdot.push_back(qd_dot_.data(i));
state_pub_->msg_.torque.push_back(torque(i));
}
for(int j=0; j<2; j++) {
state_pub_->msg_.InverseConditionNum.push_back(InverseConditionNumber[j]);
state_pub_->msg_.SingleMM.push_back(SingleMM[j]);
}
state_pub_->msg_.x.resize(2);
state_pub_->msg_.dx.resize(2);
state_pub_->msg_.MM = MM;
state_pub_->msg_.DAMM = DAMM;
state_pub_->msg_.TODAMM = TODAMM2;
state_pub_->msg_.lambda1.resize(3);
state_pub_->msg_.lambda2.resize(3);
state_pub_->msg_.Kp_R = K_rot;
state_pub_->msg_.Kp_T = K_trans;
pub_buffer_.writeFromNonRT(std::vector<double>(n_joints_, 0.0));
// 6.2 subscriber
const auto joint_state_cb = utils::makeCallback<dualarm_controller::TaskDesiredState>([&](const auto& msg){
ControlMode = msg.Index1;
ControlSubMode = msg.Index2;
ControlMotion = msg.SubIndex;
targetpos(0) = msg.dx[0].orientation.x*DEGtoRAD;
targetpos(1) = msg.dx[0].orientation.y*DEGtoRAD;
targetpos(2) = msg.dx[0].orientation.z*DEGtoRAD;
targetpos(3) = msg.dx[0].position.x;
targetpos(4) = msg.dx[0].position.y;
targetpos(5) = msg.dx[0].position.z;
targetpos(6) = msg.dx[1].orientation.x*DEGtoRAD;
targetpos(7) = msg.dx[1].orientation.y*DEGtoRAD;
targetpos(8) = msg.dx[1].orientation.z*DEGtoRAD;
targetpos(9) = msg.dx[1].position.x;
targetpos(10) = msg.dx[1].position.y;
targetpos(11) = msg.dx[1].position.z;
});
sub_x_cmd_ = n.subscribe<dualarm_controller::TaskDesiredState>( "command", 5, joint_state_cb);
sub_ft_sensor_R = n.subscribe<geometry_msgs::WrenchStamped>("/ft_sensor_topic_R", 5, &ClosedLoopIK_Control::UpdateFTsensorR, this);
sub_ft_sensor_L = n.subscribe<geometry_msgs::WrenchStamped>("/ft_sensor_topic_L", 5, &ClosedLoopIK_Control::UpdateFTsensorL, this);
return true;
}
void UpdateFTsensorR(const geometry_msgs::WrenchStamped::ConstPtr &msg)
{
geometry_msgs::Wrench ft_measure = msg->wrench;
ft_sensor(0) = ft_measure.torque.x;
ft_sensor(1) = ft_measure.torque.y;
ft_sensor(2) = ft_measure.torque.z;
ft_sensor(3) = ft_measure.force.x;
ft_sensor(4) = ft_measure.force.y;
ft_sensor(5) = ft_measure.force.z;
}
void UpdateFTsensorL(const geometry_msgs::WrenchStamped::ConstPtr &msg)
{
geometry_msgs::Wrench ft_measure = msg->wrench;
ft_sensor(6) = ft_measure.torque.x;
ft_sensor(7) = ft_measure.torque.y;
ft_sensor(8) = ft_measure.torque.z;
ft_sensor(9) = ft_measure.force.x;
ft_sensor(10) = ft_measure.force.y;
ft_sensor(11) = ft_measure.force.z;
}
void starting(const ros::Time &time) override
{
t = 0.0;
InitTime=1.0;
ROS_INFO("Starting Closed-loop Inverse Dynamics Controller");
cManipulator = std::make_shared<SerialManipulator>();
Control = std::make_unique<HYUControl::Controller>(cManipulator);
motion = std::make_unique<HYUControl::Motion>(cManipulator);
cManipulator->UpdateManipulatorParam();
CLIK_GAIN_DUMY.setZero(12);
CLIK_GAIN.setZero(12);
CLIK_GAIN(0) = K_rot;
CLIK_GAIN(1) = K_rot;
CLIK_GAIN(2) = K_rot;
CLIK_GAIN(3) = K_trans;
CLIK_GAIN(4) = K_trans;
CLIK_GAIN(5) = K_trans;
CLIK_GAIN(6) = K_rot;
CLIK_GAIN(7) = K_rot;
CLIK_GAIN(8) = K_rot;
CLIK_GAIN(9) = K_trans;
CLIK_GAIN(10) = K_trans;
CLIK_GAIN(11) = K_trans;
Control->SetPIDGain(Kp_.data, Kd_.data, Ki_.data, K_inf_.data);
Control->SetTaskspaceGain(CLIK_GAIN, CLIK_GAIN_DUMY);
alpha = 5.0;
ControlMode = CTRLMODE_IDY_JOINT;
ControlSubMode = SYSTEM_BEGIN;
ControlMotion = MOVE_ZERO;
}
void update(const ros::Time &time, const ros::Duration &period) override
{
std::vector<double> &commands = *pub_buffer_.readFromRT();
// ********* 0. Get states from gazebo *********
// 0.1 sampling time
double dt = period.toSec();
clock_gettime(CLOCK_MONOTONIC, &begin);
// 0.2 joint state
for (int i = 0; i < n_joints_; i++)
{
q_(i) = joints_[i].getPosition();
qdot_(i) = joints_[i].getVelocity();
//torque_(i) = joints_[i].getEffort();
}
//----------------------
// dynamics calculation
//----------------------
cManipulator->pKin->PrepareJacobian(q_.data);
cManipulator->pDyn->PrepareDynamics(q_.data, qdot_.data);
cManipulator->pKin->GetAnalyticJacobian(AJac);
cManipulator->pKin->GetpinvJacobian(pInvJac);
cManipulator->pKin->GetForwardKinematics(ForwardPos, ForwardOri, NumChain);
cManipulator->pKin->GetAngleAxis(ForwardAxis, ForwardAngle, NumChain);
cManipulator->pKin->GetInverseConditionNumber(InverseConditionNumber);
q1_.data = q_.data.head(9);
q1dot_.data = qdot_.data.head(9);
q2_.resize(9);
q2dot_.resize(9);
q2_.data.tail(7) = q_.data.tail(7);
q2_.data.head(2) = q_.data.head(2);
q2dot_.data = qdot_.data.tail(7);
q2dot_.data.head(2) = qdot_.data.head(2);
fk_pos_solver_->JntToCart(q1_, x_[0]);
fk_pos_solver1_->JntToCart(q2_, x_[1]);
jnt_to_jac_solver_->JntToJac(q1_, J1_kdl_);
jnt_to_jac_solver1_->JntToJac(q2_, J2_kdl_);
MM = cManipulator->pKin->GetManipulabilityMeasure();
manipulability_data();
//cManipulator->pDyn->MG_Mat_Joint(M, G);
//id_solver_->JntToMass(q1_, M_kdl_);
//id_solver_->JntToCoriolis(q1_, q1dot_, C_kdl_);
//id_solver_->JntToGravity(q1_, G_kdl_);
//id_solver1_->JntToMass(q2_, M1_kdl_);
//id_solver1_->JntToCoriolis(q2_, q2dot_, C1_kdl_);
//id_solver1_->JntToGravity(q2_, G1_kdl_);
ctr_obj_ = ControlSubMode;
ik_mode_ = ControlMotion;
if( ControlMode == CTRLMODE_CLIK )
{
//motion->TaskMotion(dx, dxdot, dxddot, targetpos, q_.data, qdot_.data, t, JointState, ControlMotion);
//Control->CLIKTaskController(q_.data, qdot_.data, dx, dxdot, ft_sensor, torque, dt, ControlSubMode);
Control->GetControllerStates(qd_.data, qd_dot_.data, ex_);
if(ctr_obj_ == 6)
cManipulator->pKin->GetWDampedpInvLambda(wpInv_lambda);
}
else if( ControlMode == CTRLMODE_IDY_JOINT )
{
qd_dot_.data.setZero(16);
motion->JointMotion(qd_.data, qd_dot_.data, qd_ddot_.data, targetpos, q_.data, qdot_.data, t, JointState, ControlMotion);
Control->InvDynController(q_.data, qdot_.data, qd_.data, qd_dot_.data, qd_ddot_.data, torque, dt);
}
for (int i = 0; i < n_joints_; i++)
{
joints_[i].setCommand(torque(i));
}
clock_gettime(CLOCK_MONOTONIC, &end);
// ********* 4. Save data *********
if(t > InitTime)
publish_data();
// ********* 5. Print state *********
print_state();
t = t + dt;
}
void stopping(const ros::Time &time) override
{
ROS_INFO("Stop Closed-loop Inverse Dynamics Controller");
}
void manipulability_data()
{
const auto desired_Dualmanipulability =
manipulability_metrics::Ellipsoid{ { { (Eigen::Matrix<double, 6, 1>{} << 1, 0, 0, 0, 0, 0).finished(), 1.0 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 1, 0, 0, 0, 0).finished(), 1.0 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 0, 1, 0, 0, 0).finished(), 1.0 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 0, 0, 1, 0, 0).finished(), 1.0 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 0, 0, 0, 1, 0).finished(), 1.0 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 0, 0, 0, 0, 1).finished(), 1.0 } } };
const auto desired_Singelmanipulability =
manipulability_metrics::Ellipsoid{ { { (Eigen::Matrix<double, 6, 1>{} << 1, 0, 0, 0, 0, 0).finished(), 0.2 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 1, 0, 0, 0, 0).finished(), 0.8 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 0, 1, 0, 0, 0).finished(), 0.8 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 0, 0, 1, 0, 0).finished(), 0.9 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 0, 0, 0, 1, 0).finished(), 1.7 },
{ (Eigen::Matrix<double, 6, 1>{} << 0, 0, 0, 0, 0, 1).finished(), 1.7 } } };
SingleMM[0] = sqrt((J1_kdl_.data * J1_kdl_.data.transpose()).determinant());
SingleMM[1] = sqrt((J2_kdl_.data * J2_kdl_.data.transpose()).determinant());
SingleMM_1[0] = sqrt((AJac.block(0,0,6,16)*AJac.block(0,0,6,16).transpose()).determinant());
SingleMM_1[1] = sqrt((AJac.block(6,0,6,16)*AJac.block(6,0,6,16).transpose()).determinant());
TOMM[0] = manipulability_metrics::inverseShapeDiscrepancy(desired_Singelmanipulability, J1_kdl_.data);
TOMM[1] = manipulability_metrics::inverseShapeDiscrepancy(desired_Singelmanipulability, J2_kdl_.data);
TOMM_1[0] = manipulability_metrics::inverseShapeDiscrepancy(desired_Singelmanipulability, AJac.block(0,0,6,16));
TOMM_1[1] = manipulability_metrics::inverseShapeDiscrepancy(desired_Singelmanipulability, AJac.block(6,0,6,16));
auto left_ellipsoid = manipulability_metrics::ellipsoidFromJacobian(J2_kdl_.data);
auto right_ellipsoid = manipulability_metrics::ellipsoidFromJacobian(J1_kdl_.data);
DAMM = std::max(manipulability_metrics::volumeIntersection(left_ellipsoid, J1_kdl_.data),
manipulability_metrics::volumeIntersection(right_ellipsoid, J2_kdl_.data));
TODAMM2 = manipulability_metrics::dualInverseShapeDiscrepancy(desired_Dualmanipulability, J2_kdl_.data, J1_kdl_.data);
auto left_ellipsoid_1 = manipulability_metrics::ellipsoidFromJacobian(AJac.block(6,0,6,16));
auto right_ellipsoid_1 = manipulability_metrics::ellipsoidFromJacobian(AJac.block(0,0,6,16));
DAMM_1 = std::max(manipulability_metrics::volumeIntersection(left_ellipsoid_1, AJac.block(0,0,6,16)),
manipulability_metrics::volumeIntersection(right_ellipsoid_1, AJac.block(6,0,6,16)));
TODAMM2_1 = manipulability_metrics::dualInverseShapeDiscrepancy(desired_Dualmanipulability, AJac.block(6,0,6,16), AJac.block(0,0,6,16));
}
void publish_data()
{
static int loop_count_ = 0;
if(loop_count_ > 2)
{
if(state_pub_->trylock())
{
state_pub_->msg_.header.stamp = ros::Time::now();
state_pub_->msg_.header.seq++;
for(size_t i=0; i<n_joints_; i++)
{
state_pub_->msg_.q[i] = q_.data(i);
state_pub_->msg_.qdot[i] = qdot_.data(i);
state_pub_->msg_.dq[i] = qd_.data(i);
state_pub_->msg_.dqdot[i] = qd_dot_.data(i);
state_pub_->msg_.torque[i] = torque(i);
}
for(int j=0; j<2; j++)
{
state_pub_->msg_.dx[j].orientation.x = dx(6*j);
state_pub_->msg_.dx[j].orientation.y = dx(6*j+1);
state_pub_->msg_.dx[j].orientation.z = dx(6*j+2);
state_pub_->msg_.dx[j].position.x = dx(6*j+3);
state_pub_->msg_.dx[j].position.y = dx(6*j+4);
state_pub_->msg_.dx[j].position.z = dx(6*j+5);
state_pub_->msg_.x[j].orientation.x = ForwardOri[j](0);
state_pub_->msg_.x[j].orientation.y = ForwardOri[j](1);
state_pub_->msg_.x[j].orientation.z = ForwardOri[j](2);
state_pub_->msg_.x[j].position.x = ForwardPos[j](0);
state_pub_->msg_.x[j].position.y = ForwardPos[j](1);
state_pub_->msg_.x[j].position.z = ForwardPos[j](2);
state_pub_->msg_.InverseConditionNum[j] = InverseConditionNumber[j];
state_pub_->msg_.SingleMM[j] = SingleMM[j];
}
state_pub_->msg_.MM = MM;
state_pub_->msg_.DAMM = DAMM;
state_pub_->msg_.TODAMM = TODAMM2;
state_pub_->msg_.Kp_T = K_trans;
state_pub_->msg_.Kp_R = K_rot;
state_pub_->msg_.lambda1[0] = wpInv_lambda[0](0);
state_pub_->msg_.lambda1[1] = wpInv_lambda[0](1);
state_pub_->msg_.lambda1[2] = wpInv_lambda[0](2);
state_pub_->msg_.lambda2[0] = wpInv_lambda[1](0);
state_pub_->msg_.lambda2[1] = wpInv_lambda[1](1);
state_pub_->msg_.lambda2[2] = wpInv_lambda[1](2);
state_pub_->unlockAndPublish();
}
loop_count_=0;
}
loop_count_++;
}
void print_state()
{
static int count = 0;
if (count > 499)
{
printf("*********************************************************\n\n");
printf("*** Calcutaion Time (unit: sec) ***\n");
printf("t_cal = %0.9lf\n", static_cast<double>(end.tv_sec - begin.tv_sec) + static_cast<double>(end.tv_nsec - begin.tv_nsec) / 1000000000.0);
printf("*** Simulation Time (unit: sec) ***\n");
printf("t = %0.3lf\n", t);
printf("IK mode:%d, CTRL_OBJ:%d\n", ik_mode_, ctr_obj_);
printf("\n");
printf("*** Command from Subscriber in Task Space (unit: m) ***\n");
printf("*** States in Joint Space (unit: deg) ***\n");
Control->GetPIDGain(aKp_, aKd_, aKi_);
for(int i=0; i < n_joints_; i++)
{
printf("[%s]: \t", joint_names_[i].c_str());
printf("Kp:%0.3lf, Kd:%0.3lf,\t", aKp_(i), aKd_(i));
printf("q: %0.3lf,\t", q_.data(i) * R2D);
printf("dq: %0.3lf,\t", qd_.data(i) * R2D);
printf("qdot: %0.3lf,\t", qdot_.data(i) * R2D);
printf("dqdot: %0.3lf,\t", qd_dot_.data(i) * R2D);
printf("tau: %0.3f\n", torque(i));
}
printf("\nForward Kinematics:\n");
for(int j=0; j<NumChain; j++)
{
printf("no.%d, PoE: x:%0.3lf, y:%0.3lf, z:%0.3lf, u:%0.2lf, v:%0.2lf, w:%0.2lf\n", j,
ForwardPos[j](0), ForwardPos[j](1),ForwardPos[j](2),
ForwardOri[j](0), ForwardOri[j](1), ForwardOri[j](2));
printf("no.%d, PoE_desired: x:%0.3lf, y:%0.3lf, z:%0.3lf, u:%0.2lf, v:%0.2lf, w:%0.2lf\n", j,
dx(6*j+3),dx(6*j+4),dx(6*j+5),dx(6*j), dx(6*j+1), dx(6*j+2));
double a, b, g;
x_[j].M.GetEulerZYX(a, b, g);
printf("no.%d, DH: x:%0.3lf, y:%0.3lf, z:%0.3lf, u:%0.2lf, v:%0.2lf, w:%0.2lf\n",
j, x_[j].p(0), x_[j].p(1),x_[j].p(2), g, b, a);
printf("no.%d, AngleAxis x: %0.2lf, y: %0.2lf, z: %0.2lf, Angle: %0.3lf\n",
j, ForwardAxis[j](0), ForwardAxis[j](1), ForwardAxis[j](2), ForwardAngle[j]);
printf("\n");
}
printf("FT Sensor(Right): torque_u:%0.3lf, torque_v:%0.3lf, torque_w:%0.3lf, force_x:%0.3lf, force_y:%0.3lf, force_z:%0.3lf\n",
ft_sensor(0), ft_sensor(1), ft_sensor(2),ft_sensor(3),ft_sensor(4),ft_sensor(5));
printf("FT Sensor(Left): torque_u:%0.3lf, torque_v:%0.3lf, torque_w:%0.3lf, force_x:%0.3lf, force_y:%0.3lf, force_z:%0.3lf\n\n",
ft_sensor(6), ft_sensor(7), ft_sensor(8),ft_sensor(9),ft_sensor(10),ft_sensor(11));
printf("Inverse Condition Number: Right:%0.5lf, Left:%0.5f\n", InverseConditionNumber[0], InverseConditionNumber[1]);
printf("SingleMM: Right:%0.5lf, Left:%0.5lf :: Right:%0.5lf, Left:%0.5lf,\n", SingleMM[0], SingleMM[1], SingleMM_1[0], SingleMM_1[1]);
printf("TOMM: Right:%0.5lf, Left:%0.5lf :: Right:%0.5lf, Left:%0.5lf\n", TOMM[0], TOMM[1], TOMM_1[0], TOMM_1[1]);
printf("MM: %0.5lf\n", MM);
printf("DAMM: %0.5lf :: DAMM: %0.5lf\n", DAMM, DAMM_1);
printf("TODAMM: %0.5lf :: TODAMM: %0.5lf\n\n", TODAMM2, TODAMM2_1);
printf("Right e(u):%0.3lf, e(v):%0.3lf, e(w):%0.3lf, e(x):%0.3lf, e(y):%0.3lf, e(z):%0.3lf\n",
ex_(0)*RADtoDEG, ex_(1)*RADtoDEG, ex_(2)*RADtoDEG, ex_(3), ex_(4), ex_(5));
printf("Left e(u):%0.3lf, e(v):%0.3lf, e(w):%0.3lf, e(x):%0.3lf, e(y):%0.3lf, e(z):%0.3lf\n",
ex_(6)*RADtoDEG, ex_(7)*RADtoDEG, ex_(8)*RADtoDEG, ex_(9), ex_(10), ex_(11));
printf("\n*********************************************************\n");
count = 0;
if(ctr_obj_ == 6)
{
std::cout << "lambda[right]:" << std::endl;
std::cout << wpInv_lambda[0] << std::endl;
std::cout << "lambda[left]:" << std::endl;
std::cout << wpInv_lambda[1] << std::endl;
}
//std::cout << "J1:" <<std::endl;
//std::cout << J1_kdl_.data << "\n"<< std::endl;
//std::cout << "J2:" <<std::endl;
//std::cout << J2_kdl_.data << "\n"<< std::endl;
//std::cout << "Analytic Jacobian:" <<std::endl;
//std::cout << AJac << "\n"<< std::endl;
}
count++;
}
private:
// others
double t=0.0;
int ctr_obj_=0;
int ik_mode_=0;
double InitTime=0.0;
unsigned char ControlMode;
unsigned char ControlSubMode;
unsigned char ControlMotion;
unsigned char JointState;
struct timespec begin, end;
//Joint handles
unsigned int n_joints_;
std::vector<std::string> joint_names_;
std::vector<hardware_interface::JointHandle> joints_;
std::vector<urdf::JointConstSharedPtr> joint_urdfs_;
// kdl
KDL::Tree kdl_tree_;
KDL::Chain kdl_chain_;
KDL::Chain kdl_chain2_;
KDL::JntSpaceInertiaMatrix M_kdl_, M1_kdl_;
KDL::JntArray C_kdl_, C1_kdl_;
KDL::JntArray G_kdl_, G1_kdl_;
KDL::Vector g_kdl_;
Eigen::VectorXd g_vec_collect;
Eigen::MatrixXd g_mat_collect;
Eigen::MatrixXd M_mat_collect;
KDL::Jacobian J1_kdl_, J2_kdl_;
// kdl solver
boost::scoped_ptr<KDL::ChainFkSolverPos_recursive> fk_pos_solver_, fk_pos_solver1_; //Solver to compute the forward kinematics (position)
boost::scoped_ptr<KDL::ChainJntToJacSolver> jnt_to_jac_solver_, jnt_to_jac_solver1_; //Solver to compute the jacobian
boost::scoped_ptr<KDL::ChainDynParam> id_solver_, id_solver1_; // Solver To compute the inverse dynamics
MatrixXd M;
VectorXd G;
VectorXd C;
Vector3d ForwardPos[2];
Vector3d ForwardOri[2];
int NumChain=2;
Vector3d ForwardAxis[2];
double ForwardAngle[2];
double InverseConditionNumber[2];
double SingleMM[2], SingleMM_1[2];
double TOMM[2], TOMM_1[2];
double MM, MM_1;
double DAMM, DAMM_1;
double TODAMM2, TODAMM2_1;
// kdl and Eigen Jacobian
Eigen::MatrixXd pInvJac;
Eigen::MatrixXd AJac;
Eigen::MatrixXd BlockpInvJac;
Eigen::MatrixXd ScaledJac;
Eigen::MatrixXd dampedpInvJac;
Eigen::MatrixXd WdampedpInvJac;
VectorXd wpInv_lambda[2];
// Joint Space State
KDL::JntArray qd_;
KDL::JntArray qd_dot_;
KDL::JntArray qd_ddot_;
KDL::JntArray qd_old_;
KDL::JntArray q_, q1_, q2_;
KDL::JntArray qdot_, q1dot_, q2dot_;
Eigen::VectorXd q0dot;
double alpha;
Eigen::VectorXd ft_sensor;
Eigen::VectorXd torque;
Eigen::VectorXd targetpos;
// Task Space State
// ver. 01
KDL::Frame xd_[2];
KDL::Frame x_[2];
KDL::Twist ex_temp_;
// KDL::Twist xd_dot_, xd_ddot_;
Eigen::VectorXd ex_;
Eigen::VectorXd ex_dot_;
Eigen::VectorXd dx;
Eigen::VectorXd dxdot;
Eigen::VectorXd dxddot;
// Input
KDL::JntArray x_cmd_;
// gains
KDL::JntArray Kp_, Ki_, Kd_, K_inf_;
Eigen::VectorXd aKp_, aKi_, aKd_, aK_inf_;
double K_trans, K_rot;
Eigen::VectorXd CLIK_GAIN, CLIK_GAIN_DUMY;
// publisher
realtime_tools::RealtimeBuffer<std::vector<double>> pub_buffer_;
boost::scoped_ptr<realtime_tools::RealtimePublisher<dualarm_controller::TaskCurrentState>> state_pub_;
// subscriber
ros::Subscriber sub_x_cmd_;
ros::Subscriber sub_ft_sensor_R, sub_ft_sensor_L;
std::shared_ptr<SerialManipulator> cManipulator;
std::unique_ptr<HYUControl::Controller> Control;
std::unique_ptr<HYUControl::Motion> motion;
};
}
PLUGINLIB_EXPORT_CLASS(dualarm_controller::ClosedLoopIK_Control,controller_interface::ControllerBase) |
module app
use iso_fortran_env, only: int64
use mpi
use wuming2d
use wuming_utils
use boundary_shock, &
& bc__init => boundary_shock__init, &
& bc__dfield => boundary_shock__dfield, &
& bc__particle_x => boundary_shock__particle_x, &
& bc__particle_y => boundary_shock__particle_y, &
& bc__injection => boundary_shock__injection, &
& bc__curre => boundary_shock__curre, &
& bc__phi => boundary_shock__phi, &
& bc__mom => boundary_shock__mom
implicit none
private
! main simulation loop
public:: app__main
! configuration file and initial parameter files
character(len=*), parameter :: config_default = 'config.json'
character(len=:), allocatable :: config_string
character(len=:), allocatable :: config
character(len=*), parameter :: param = 'init_param'
! read from "config" section
logical :: restart
character(len=128) :: restart_file
character(len=:), allocatable :: datadir
real(8) :: max_elapsed
integer :: max_it
integer :: intvl_ptcl
integer :: intvl_mom
integer :: intvl_orb
integer :: intvl_expand
integer :: verbose
! read from "parameter" section
integer :: num_process
integer :: n_ppc
integer :: n_x
integer :: n_x_ini
integer :: n_y
real(8) :: u_inject
real(8) :: mass_ratio
real(8) :: sigma_e
real(8) :: omega_pe
real(8) :: v_the
real(8) :: v_thi
real(8) :: theta_bn
real(8) :: phi_bn
real(8) :: l_damp_ini
integer :: nproc
integer :: it0
integer :: np
integer :: n0
integer :: nx, nxgs, nxge, nxs, nxe
integer :: ny, nygs, nyge !, nys, nye
integer :: mpierr
integer, parameter :: ndim = 6
integer, parameter :: nsp = 2
integer, parameter :: nroot = 0
! OTHER CONSTANTS
real(8), parameter :: c = 1.0D0 !SPEED OF LIGHT
real(8), parameter :: gfac = 0.501D0 !IMPLICITNESS FACTOR 0.501-0.505
real(8), parameter :: cfl = 1.0D0 !CFL CONDITION FOR LIGHT WAVE
real(8), parameter :: delx = 1.0D0 !CELL WIDTH
real(8), parameter :: pi = 4.0D0*atan(1.0D0)
! TRACKING PARTICLES INITIALLY XRS <= X <= XRE ONLY WHEN NDIM=6
real(8), parameter :: xrs = 450.0D0
real(8), parameter :: xre = 500.0D0
!
! main variables
!
integer, allocatable, public :: np2(:,:), cumcnt(:,:,:)
real(8), allocatable, public :: uf(:,:,:)
real(8), allocatable, public :: up(:,:,:,:)
real(8), allocatable, public :: gp(:,:,:,:)
real(8), allocatable, public :: mom(:,:,:,:)
real(8) :: r(nsp)
real(8) :: q(nsp)
real(8) :: delt
real(8) :: b0
real(8) :: u0, v0, gam0
contains
!
! main simulation loop
!
subroutine app__main()
implicit none
integer :: it
real(8) :: etime, etime0
! initialization
call load_config()
call init()
! current clock
etime0 = get_etime()
! main loop
do it = it0+1, max_it
! update
call particle__solv(gp, up, uf, cumcnt, nxs, nxe)
call bc__injection(gp, np2, nxs, nxe, u0)
call field__fdtd_i(uf, up, gp, cumcnt, nxs, nxe, &
& bc__dfield, bc__curre, bc__phi)
call bc__particle_y(gp, np2)
call sort__bucket(up, gp, cumcnt, np2, nxs, nxe)
! injection
call inject()
! expand box
if( mod(it, intvl_expand) == 0 ) then
call relocate()
end if
! output entire particles
if ( mod(it, intvl_ptcl) == 0 ) then
call io__ptcl(up, uf, np2, it)
end if
! output tracer particles
if ( mod(it, intvl_orb) == 0 ) then
call io__orb(up, uf, np2, it)
end if
! output moments and electromagnetic fields
if ( mod(it, intvl_mom) == 0 ) then
call mom_calc__accl(gp, up, uf, cumcnt, nxs, nxe)
call mom_calc__nvt(mom, gp, np2)
call bc__mom(mom)
call io__mom(mom, uf, it)
endif
! check elapsed time
etime = get_etime() - etime0
if ( etime >= max_elapsed ) then
! save snapshot for restart
write(restart_file, '(i7.7, "_restart")') it
call save_restart(up, uf, np2, nxs, nxe, it, restart_file)
if ( nrank == nroot ) then
write(0,'("*** Elapsed time limit exceeded ")')
write(0,'("*** A snapshot ", a, " has been saved")') &
& trim(restart_file)
end if
call finalize()
stop
endif
if( verbose >= 1 .and. nrank == nroot ) then
write(*,'("*** Time step: ", i7, " completed in ", e10.2, " sec.")') &
& it, etime
end if
enddo
! save final state
it = max_it + 1
write(restart_file, '(i7.7, "_restart")') it
call save_restart(up, uf, np2, nxs, nxe, it, restart_file)
call finalize()
end subroutine app__main
!
! parse command line argument and load configuration file
!
subroutine load_config()
implicit none
logical :: status, found
integer :: arg_count
character(len=:), allocatable :: filename
type(json_core) :: json
type(json_file) :: file
type(json_value), pointer :: root, p
! check ndim
if( ndim /= 6 ) then
write(0,*) 'Error: ndim must be 6'
stop
end if
!
! process command line to find a configuration file
!
arg_count = command_argument_count()
if( arg_count == 0 ) then
config = config_default
else
! only the first argument is relevant
call get_command_argument(1, config)
end if
! check init file
inquire(file=trim(config), exist=status)
if( .not. status ) then
write(0, '("Error: ", a, " does not exist")') trim(config)
stop
end if
! try loading init file
call json%initialize()
call file%initialize()
call file%load(trim(config))
if( file%failed() ) then
write(0, '("Error: failed to load ", a)') trim(config)
stop
end if
! read "config" section
call file%get(root)
call json%get(root, 'config', p)
call json%get(p, 'verbose', verbose)
call json%get(p, 'datadir', datadir)
call json%get(p, 'max_elapsed', max_elapsed)
call json%get(p, 'max_it', max_it)
call json%get(p, 'intvl_ptcl', intvl_ptcl)
call json%get(p, 'intvl_mom', intvl_mom)
call json%get(p, 'intvl_orb', intvl_orb)
call json%get(p, 'intvl_expand', intvl_expand)
! make sure this is a directory
datadir = trim(datadir) // '/'
! restart file
call json%get(p, 'restart_file', filename, found)
if ( found .and. filename /= '' ) then
restart = .true.
restart_file = filename
endif
! read "parameter" section and initialize
call json%get(root, 'parameter', p)
call json%get(p, 'num_process', num_process)
call json%get(p, 'n_ppc', n_ppc)
call json%get(p, 'n_x', n_x)
call json%get(p, 'n_y', n_y)
call json%get(p, 'n_x_ini', n_x_ini)
call json%get(p, 'u_inject', u_inject)
call json%get(p, 'mass_ratio', mass_ratio)
call json%get(p, 'sigma_e', sigma_e)
call json%get(p, 'omega_pe', omega_pe)
call json%get(p, 'v_the', v_the)
call json%get(p, 'v_thi', v_thi)
call json%get(p, 'theta_bn', theta_bn)
call json%get(p, 'phi_bn', phi_bn)
call json%get(p, 'l_damp_ini', l_damp_ini)
nproc = num_process
nx = n_x
ny = n_y
np = n_ppc * nx * 5
n0 = n_ppc
nxgs = 2
nxge = nxgs + nx - 1
nygs = 2
nyge = nygs + ny - 1
nxs = nxgs
nxe = nxgs + n_x_ini
! degree to radian
theta_bn = theta_bn * pi / 180
phi_bn = phi_bn * pi / 180
call json%serialize(root, config_string)
call json%destroy()
call file%destroy()
end subroutine load_config
!
! initialize simulation
!
subroutine init()
implicit none
integer :: isp, i, j
real(8) :: wpe, wpi, wge, wgi, vte, vti
! MPI
call mpi_set__init(nygs, nyge, nproc)
! random number
call init_random_seed()
! allocate memory and initialize everything by zero
allocate(np2(nys:nye,nsp))
allocate(cumcnt(nxgs:nxge+1,nys:nye,nsp))
allocate(uf(6,nxgs-2:nxge+2,nys-2:nye+2))
allocate(up(ndim,np,nys:nye,nsp))
allocate(gp(ndim,np,nys:nye,nsp))
allocate(mom(1:7,nxgs-1:nxge+1,nys-1:nye+1,1:nsp))
np2 = 0
cumcnt = 0
uf = 0
up = 0
gp = 0
mom = 0
! set physical parameters
delt = cfl*delx/c
u0 =-abs(u_inject)
gam0 = sqrt(1 + (u0*u0)/(c*c))
v0 = u0/gam0
wpe = omega_pe
wge = omega_pe * sqrt(sigma_e)
wpi = wpe / sqrt(mass_ratio)
wgi = wge / mass_ratio
vte = v_the
vti = v_thi
r(1) = mass_ratio
r(2) = 1.0d0
q(1) =+sqrt(gam0*r(1) / (4*pi*n0)) * wpi
q(2) =-sqrt(gam0*r(2) / (4*pi*n0)) * wpe
b0 = r(1)*c / q(1) * wgi * gam0
! number of particles
np2(nys:nye,1:nsp) = n0*(nxe-nxs-1)
if ( nrank == nroot ) then
if ( n0*(nxge-nxgs) > np ) then
write(0,*) 'Error: Too large number of particles'
stop
endif
endif
! preparation of sort
do isp = 1, nsp
!$OMP PARALLEL DO PRIVATE(i,j)
do j = nys, nye
cumcnt(nxs:nxs+1,j,isp) = 0
do i = nxs+2, nxe
cumcnt(i,j,isp) = cumcnt(i-1,j,isp) + n0
enddo
if ( cumcnt(nxe,j,isp) /= np2(j,isp) ) then
write(0,*) 'Error: invalid values encountered for cumcnt'
stop
endif
enddo
!$OMP END PARALLEL DO
enddo
! initialize modules
call bc__init( &
& ndim, np, nsp, nxgs, nxge, nygs, nyge, nys, nye, &
& nup, ndown, mnpi, mnpr, ncomw, nerr, nstat, delx, delt, c)
call particle__init( &
& ndim, np, nsp, nxgs, nxge, nygs, nyge, nys, nye, &
& delx, delt, c, q, r)
call field__init( &
& ndim, np, nsp, nxgs, nxge, nygs, nyge, nys, nye, &
& mnpr, ncomw, opsum, nerr, delx, delt, c, q, r, gfac)
call sort__init( &
& ndim, np, nsp, nxgs, nxge, nygs, nyge, nys, nye)
call io__init( &
& ndim, np, nsp, nxgs, nxge, nygs, nyge, nys, nye, &
& nproc, nrank, delx, delt, c, q, r, datadir)
call mom_calc__init( &
& ndim, np, nsp, nxgs, nxge, nygs, nyge, nys, nye, &
& delx, delt, c, q, r)
if ( restart ) then
! restart
call io__input(gp, uf, np2, nxs, nxe, it0, restart_file)
call sort__bucket(up, gp, cumcnt, np2, nxs, nxe)
else
! output parameters and set initial condition
call save_param(n0, wpe, wpi, wge, wgi, vti, vte, param)
call set_initial_condition()
it0 = 0
endif
! copy
gp = up
end subroutine init
!
! finalize simulation
!
subroutine finalize()
implicit none
call io__finalize()
call MPI_Finalize(mpierr)
end subroutine finalize
!
! set initial condition for field and particles
!
subroutine set_initial_condition()
implicit none
integer :: i, j, ii, isp
real(8) :: v1, gam1, gamp, sd(nsp)
!
! electromagnetic field
!
!$OMP PARALLEL DO PRIVATE(i,j)
do j=nys-2,nye+2
do i=nxgs-2,nxge+2
uf(1,i,j) = b0*cos(theta_bn)
uf(2,i,j) = b0*sin(theta_bn)*cos(phi_bn)
uf(3,i,j) = b0*sin(theta_bn)*sin(phi_bn)
uf(4,i,j) = 0.0D0
uf(5,i,j) =+vprofile(i*delx)*uf(3,i,j)/c
uf(6,i,j) =-vprofile(i*delx)*uf(2,i,j)/c
enddo
enddo
!$OMP END PARALLEL DO
!
! particle position
!
isp = 1
!$OMP PARALLEL DO PRIVATE(ii,j)
do j=nys,nye
do ii=1,np2(j,isp)
up(1,ii,j,1) = (nxs + (nxe-nxs)*(ii - 0.5d0)/np2(j,isp)) * delx
up(2,ii,j,1) = (j + uniform_rand()) * delx
up(1,ii,j,2) = up(1,ii,j,1)
up(2,ii,j,2) = up(2,ii,j,1)
enddo
enddo
!$OMP END PARALLEL DO
!
! particle velocity
!
sd(1) = v_thi
sd(2) = v_the
do isp=1,nsp
!$OMP PARALLEL DO PRIVATE(ii,j,v1,gam1,gamp)
do j=nys,nye
do ii=1,np2(j,isp)
! Maxwellian in fluid rest frame
up(3,ii,j,isp) = sd(isp) * normal_rand()
up(4,ii,j,isp) = sd(isp) * normal_rand()
up(5,ii,j,isp) = sd(isp) * normal_rand()
! Lorentz transform to lab frame
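! (This is the x-boost of the four-velocity: ux' = gam1*(ux + v1*gamp),
!  where gamp is the particle Lorentz factor computed below and v1 is the
!  local bulk velocity returned by vprofile.)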
v1 = vprofile(up(1,ii,j,isp))
gam1 = 1/sqrt(1-(v1/c)**2)
gamp = sqrt(1 + ( &
& up(3,ii,j,isp)**2 + &
& up(4,ii,j,isp)**2 + &
& up(5,ii,j,isp)**2 )/c**2)
up(3,ii,j,isp) = gam1*(up(3,ii,j,isp) + v1*gamp)
enddo
enddo
!$OMP END PARALLEL DO
enddo
! set particle IDs
call set_particle_ids()
end subroutine set_initial_condition
!
! set initial particle IDs
!
subroutine set_particle_ids()
implicit none
integer :: isp, i, j
integer(8) :: gcumsum(nproc+1,nsp), lcumsum(nys:nye+1,nsp), pid
if( ndim /= 6 ) then
return
end if
! calculate the first particle IDs
call get_global_cumsum(np2, gcumsum)
do isp = 1, nsp
lcumsum(nys,isp) = gcumsum(nrank+1,isp)
do j = nys, nye
lcumsum(j+1,isp) = lcumsum(j,isp) + np2(j,isp)
end do
end do
! unique ID as 64bit integer (negative by default)
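! (The 64-bit integer ID is stored bit-for-bit in the real(8) slot
!  up(6,:,:,:) via transfer(); its sign acts as a flag, and the loop
!  further below flips selected IDs to positive to mark particles for
!  orbit output.)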
do isp = 1, nsp
!$OMP PARALLEL DO PRIVATE(i,j,pid)
do j = nys, nye
do i = 1, np2(j,isp)
pid = lcumsum(j,isp) + i
up(6,i,j,isp) = transfer(-pid, 1.0_8)
end do
end do
!$OMP END PARALLEL DO
end do
! make particle ID positive for output
do isp = 1, nsp
!$OMP PARALLEL DO PRIVATE(i,j,pid)
do j = nys, nye
do i = 1, np2(j,isp)
if ( up(1,i,j,isp) >= xrs .and. up(1,i,j,isp) <= xre ) then
pid = transfer(up(6,i,j,isp), 1_8)
up(6,i,j,isp) = transfer(sign(pid, +1_8), 1.0_8)
endif
enddo
enddo
!$OMP END PARALLEL DO
enddo
end subroutine set_particle_ids
!
! save everything for restart
!
subroutine save_restart(up, uf, np2, nxs, nxe, it, restart_file)
implicit none
integer, intent(in) :: np2(nys:nye,nsp), nxs, nxe
integer, intent(in) :: it
real(8), intent(in) :: up(ndim,np,nys:nye,nsp)
real(8), intent(in) :: uf(6,nxgs-2:nxge+2,nys-2:nye+2)
character(len=*), intent(in) :: restart_file
logical :: found
type(json_core) :: json
type(json_file) :: file
type(json_value), pointer :: root, p
call json%initialize()
call file%initialize()
call file%deserialize(config_string)
call file%get(root)
call json%get(root, 'config', p)
call json%update(p, 'restart_file', trim(restart_file), found)
! write data to the disk
call io__output(up, uf, np2, nxs, nxe, it, trim(restart_file))
if ( nrank == nroot ) then
call json%print(root, config)
end if
call json%destroy()
call file%destroy()
end subroutine save_restart
!
! save parameters
!
subroutine save_param(n0, wpe, wpi, wge, wgi, vti, vte, filename)
implicit none
integer, intent(in) :: n0
real(8), intent(in) :: wpe, wpi, wge, wgi, vti, vte
character(len=*), intent(in) :: filename
character(len=256) :: jsonfile, datafile
integer(int64) :: disp
integer :: fh
type(json_core) :: json
type(json_file) :: file
type(json_value), pointer :: root, p
! save default parameters
call io__param(n0, wpe, wpi, wge, wgi, vti, vte, filename)
! save additional parameters
datafile = trim(datadir) // trim(filename) // '.raw'
jsonfile = trim(datadir) // trim(filename) // '.json'
! open json file
call file%initialize()
call json%initialize()
call file%load(jsonfile)
call file%get(root)
! open data file
call mpiio_open_file(datafile, fh, disp, 'a')
! put attributes
call json%get(root, 'attribute', p)
call jsonio_put_attribute(json, p, u0, 'u0', disp, '')
call mpiio_write_atomic(fh, disp, u0)
! write json and close
if( nrank == 0 ) then
call json%print(root, jsonfile)
end if
call json%destroy()
! close data file
call mpiio_close_file(fh)
end subroutine save_param
subroutine relocate()
implicit none
integer :: isp, j, ii, ii1 ,ii2
real(8) :: v1, gam1, gamp, sd(nsp)
integer(8) :: gcumsum(nproc+1,nsp), nptotal(nsp), pid
if(nxe==nxge) return
! expand box
nxe = nxe+1
! get particle number for ID
call get_global_cumsum(np2, gcumsum)
nptotal(1:nsp) = gcumsum(nproc+1,1:nsp)
!
! position
!
!$OMP PARALLEL DO PRIVATE(ii,ii1,ii2,j)
do j=nys,nye
do ii=1,n0
ii1 = np2(j,1) + ii
ii2 = np2(j,2) + ii
up(1,ii1,j,1) = (nxe-1)*delx + (ii - 0.5d0)/n0*delx
up(2,ii1,j,1) = (j + uniform_rand()) * delx
up(1,ii2,j,2) = up(1,ii1,j,1)
up(2,ii2,j,2) = up(2,ii1,j,1)
enddo
enddo
!$OMP END PARALLEL DO
!
! velocity
!
sd(1) = v_thi
sd(2) = v_the
do isp=1,nsp
!$OMP PARALLEL DO PRIVATE(ii,j,v1,gam1,gamp,pid)
do j=nys,nye
do ii=np2(j,isp)+1,np2(j,isp)+n0
! Maxwellian in fluid rest frame
up(3,ii,j,isp) = sd(isp) * normal_rand()
up(4,ii,j,isp) = sd(isp) * normal_rand()
up(5,ii,j,isp) = sd(isp) * normal_rand()
! Lorentz transform to lab frame
v1 = vprofile(up(1,ii,j,isp))
gam1 = 1/sqrt(1-(v1/c)**2)
gamp = sqrt(1 + ( &
& up(3,ii,j,isp)**2 + &
& up(4,ii,j,isp)**2 + &
& up(5,ii,j,isp)**2 )/c**2)
up(3,ii,j,isp) = gam1*(up(3,ii,j,isp) + v1*gamp)
! particle ID
pid = ii - np2(j,isp) + (j-nygs)*n0 + nptotal(isp)
up(6,ii,j,isp) = transfer(-pid, 1.0_8)
enddo
np2(j,isp) = np2(j,isp) + n0
cumcnt(nxe,j,isp) = cumcnt(nxe-1,j,isp) + n0
enddo
!$OMP END PARALLEL DO
enddo
!$OMP PARALLEL DO PRIVATE(j)
do j = nys-2, nye+2
uf(2,nxe-1,j) = b0*sin(theta_bn)*cos(phi_bn)
uf(3,nxe-1,j) = b0*sin(theta_bn)*sin(phi_bn)
uf(5,nxe-1,j) =+v0*uf(3,nxe-1,j)/c
uf(6,nxe-1,j) =-v0*uf(2,nxe-1,j)/c
uf(2,nxe,j) = b0*sin(theta_bn)*cos(phi_bn)
uf(3,nxe,j) = b0*sin(theta_bn)*sin(phi_bn)
enddo
!$OMP END PARALLEL DO
end subroutine relocate
!
! injection from the right-hand side boundary
!
subroutine inject()
implicit none
integer :: isp, ii, ii1, ii2, i, j
real(8) :: v1, gam1, gamp, sd(nsp)
real(8) :: pflux, x0, xinj
integer :: nginj, ngmod, nginj_proc(nproc), index_proc(nproc)
integer :: nlinj, nlmod, nlinj_grid(nys:nye), index_grid(nys:nye)
integer :: ncinj_proc(nproc+1), ncinj_grid(nys:nye+1)
integer(8) :: gcumsum(nproc+1,nsp), nptotal(nsp), pid
!
! Determine the number of particles injected into each cell with
! the following steps.
!
! (1) Determine the total number of particles injected.
! (A fractional portion is taken into account by random numbers.)
! (2) Equally divide it among PEs. Remainders are added randomly.
! (3) In each PE, equally divide the number of particles for each
! cell. Remainders are added randomly. (See the illustrative
! numbers below.)
!
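! Illustrative example (hypothetical numbers, not taken from the code):
! with pflux = 10.4 and nproc = 4, step (1) injects 10 particles, or 11
! with probability 0.4; step (2) gives every PE 10/4 = 2 particles and
! adds the remaining 2 (or 3) to randomly chosen PEs; step (3) divides
! each PE's share over its nye-nys+1 rows in the same way.
!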
! * number of particles injected into the entire system
pflux = n0 * abs(v0) * delt * delx * (nyge - nygs + 1)
nginj = int(pflux)
if( uniform_rand() < pflux - int(pflux) ) then
nginj = nginj + 1
end if
! * number of particles injected into the local domain
ngmod = mod(nginj, nproc)
do i = 1, nproc
nginj_proc(i) = nginj / nproc
index_proc(i) = i
end do
call shuffle(index_proc)
do i = 1, ngmod
nginj_proc(index_proc(i)) = nginj_proc(index_proc(i)) + 1
end do
! send from root to the other processes
call MPI_Bcast(nginj_proc, nproc, MPI_INTEGER, 0, MPI_COMM_WORLD, mpierr)
! * number of particles injected into each cell
nlinj = nginj_proc(nrank+1)
nlmod = mod(nlinj, nye - nys + 1)
do j = nys, nye
nlinj_grid(j) = nlinj / (nye - nys + 1)
index_grid(j) = j
end do
call shuffle(index_grid)
do j = nys, nys + nlmod - 1
nlinj_grid(index_grid(j)) = nlinj_grid(index_grid(j)) + 1
end do
!
! The following steps are needed for assigning particle IDs.
!
! get current number of particles in the system
call get_global_cumsum(np2, gcumsum)
nptotal(1:nsp) = gcumsum(nproc+1,1:nsp)
! cumulative sum of number of injection particles
ncinj_proc(1) = 0
do i = 1, nproc
ncinj_proc(i+1) = ncinj_proc(i) + nginj_proc(i)
end do
ncinj_grid(nys) = ncinj_proc(nrank+1)
do i = nys, nye
ncinj_grid(i+1) = ncinj_grid(i) + nlinj_grid(i)
end do
!
! position
!
x0 = abs(v0) * delt
!$OMP PARALLEL DO PRIVATE(ii,ii1,ii2,j)
do j = nys, nye
do ii = 1, nlinj_grid(j)
ii1 = np2(j,1) + ii
ii2 = np2(j,2) + ii
up(1,ii1,j,1) = nxe*delx + (ii - 0.5d0)/nlinj_grid(j)*x0
up(2,ii1,j,1) = (j + uniform_rand()) * delx
up(1,ii2,j,2) = up(1,ii1,j,1)
up(2,ii2,j,2) = up(2,ii1,j,1)
end do
end do
!$OMP END PARALLEL DO
!
! velocity
!
sd(1) = v_thi
sd(2) = v_the
do isp = 1, nsp
!$OMP PARALLEL DO PRIVATE(ii,j,xinj,v1,gam1,gamp,pid)
do j = nys, nye
do ii = np2(j,isp)+1, np2(j,isp)+nlinj_grid(j)
! Maxwellian in fluid rest frame
up(3,ii,j,isp) = sd(isp) * normal_rand()
up(4,ii,j,isp) = sd(isp) * normal_rand()
up(5,ii,j,isp) = sd(isp) * normal_rand()
! injection (non-relativistic approximation)
xinj = up(1,ii,j,isp) + (v0 + up(3,ii,j,isp)) * delt
up(1,ii,j,isp) = xinj
! if( xinj <= nxe*delx ) then
! ! leave ux as is
! up(1,ii,j,isp) = xinj
! else
! ! folding (x, ux)
! up(3,ii,j,isp) =-up(3,ii,j,isp)
! up(1,ii,j,isp) =-up(1,ii,j,isp) + &
! & 2*x0 + (v0 + up(3,ii,j,isp)) * delt
! end if
! Lorentz transform to lab frame
v1 = vprofile(up(1,ii,j,isp))
gam1 = 1/sqrt(1-(v1/c)**2)
gamp = sqrt(1 + ( &
& up(3,ii,j,isp)**2 + &
& up(4,ii,j,isp)**2 + &
& up(5,ii,j,isp)**2 )/c**2)
up(3,ii,j,isp) = gam1*(up(3,ii,j,isp) + v1*gamp)
! particle ID
pid = ii - np2(j,isp) + ncinj_grid(j) + nptotal(isp)
up(6,ii,j,isp) = transfer(-pid, 1.0_8)
end do
end do
!$OMP END PARALLEL DO
end do
do isp = 1, nsp
!$OMP WORKSHARE
np2(nys:nye,isp) = np2(nys:nye,isp) + nlinj_grid(nys:nye)
cumcnt(nxe,nys:nye,isp) = cumcnt(nxe,nys:nye,isp) + nlinj_grid(nys:nye)
!$OMP END WORKSHARE
enddo
!$OMP PARALLEL DO PRIVATE(j)
do j = nys-2, nye+2
uf(2,nxe-1,j) = b0*sin(theta_bn)*cos(phi_bn)
uf(3,nxe-1,j) = b0*sin(theta_bn)*sin(phi_bn)
uf(5,nxe-1,j) =+v0*uf(3,nxe-1,j)/c
uf(6,nxe-1,j) =-v0*uf(2,nxe-1,j)/c
uf(2,nxe,j) = b0*sin(theta_bn)*cos(phi_bn)
uf(3,nxe,j) = b0*sin(theta_bn)*sin(phi_bn)
enddo
!$OMP END PARALLEL DO
end subroutine inject
!
! get global cumulative sum of particle numbers
!
subroutine get_global_cumsum(np2, cumsum)
implicit none
integer, intent(in) :: np2(nys:nye,nsp)
integer(8), intent(inout) :: cumsum(nproc+1,nsp)
integer :: i, isp, mpierr
integer(8) :: lcount(nsp), gcount(nsp, nproc)
! get the number of particles for each process
lcount(1:nsp) = sum(np2(nys:nye,1:nsp), dim=1)
call MPI_Allgather(lcount, nsp, MPI_INTEGER8, gcount, nsp, MPI_INTEGER8, &
& MPI_COMM_WORLD, mpierr)
! calculate cumulative sum
do isp = 1, nsp
cumsum(1,isp) = 0
do i = 1, nproc
cumsum(i+1,isp) = cumsum(i,isp) + gcount(isp,i)
end do
end do
end subroutine get_global_cumsum
!
! initial velocity profile
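! (a tanh ramp of width 0.1*l_damp_ini centered at l_damp_ini + nxgs*delx,
!  going from 0 near the left wall to the injection velocity v0)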
!
function vprofile(x) result(y)
implicit none
real(8), intent(in) :: x
real(8) :: y
real(8) :: x0, xs
x0 = l_damp_ini + nxgs*delx
xs = l_damp_ini * 0.1d0
y = 0.5d0 * v0 * (1 + tanh((x-x0)/xs))
end function vprofile
end module app
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Kenny Lau, Yury Kudryashov
-/
import dynamics.fixed_points.basic
import order.hom.order
/-!
# Fixed point construction on complete lattices
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file sets up the basic theory of fixed points of a monotone function in a complete lattice.
## Main definitions
* `order_hom.lfp`: The least fixed point of a bundled monotone function.
* `order_hom.gfp`: The greatest fixed point of a bundled monotone function.
* `order_hom.prev_fixed`: The greatest fixed point of a bundled monotone function smaller than or
equal to a given element.
* `order_hom.next_fixed`: The least fixed point of a bundled monotone function greater than or
equal to a given element.
* `fixed_points.complete_lattice`: The Knaster-Tarski theorem: fixed points of a monotone
self-map of a complete lattice form themselves a complete lattice.
## Tags
fixed point, complete lattice, monotone function
-/
universes u v w
variables {α : Type u} {β : Type v} {γ : Type w}
open function (fixed_points is_fixed_pt)
namespace order_hom
section basic
variables [complete_lattice α] (f : α →o α)
/-- Least fixed point of a monotone function -/
def lfp : (α →o α) →o α :=
{ to_fun := λ f, Inf {a | f a ≤ a},
monotone' := λ f g hle, Inf_le_Inf $ λ a ha, (hle a).trans ha }
/-- Greatest fixed point of a monotone function -/
def gfp : (α →o α) →o α :=
{ to_fun := λ f, Sup {a | a ≤ f a},
monotone' := λ f g hle, Sup_le_Sup $ λ a ha, le_trans ha (hle a) }
lemma lfp_le {a : α} (h : f a ≤ a) : lfp f ≤ a := Inf_le h
lemma lfp_le_fixed {a : α} (h : f a = a) : lfp f ≤ a := f.lfp_le h.le
lemma le_lfp {a : α} (h : ∀ b, f b ≤ b → a ≤ b) : a ≤ lfp f := le_Inf h
lemma map_le_lfp {a : α} (ha : a ≤ f.lfp) : f a ≤ f.lfp :=
f.le_lfp $ λ b hb, (f.mono $ le_Inf_iff.1 ha _ hb).trans hb
@[simp] lemma map_lfp : f (lfp f) = lfp f :=
have h : f (lfp f) ≤ lfp f, from f.map_le_lfp le_rfl,
h.antisymm $ f.lfp_le $ f.mono h
lemma is_fixed_pt_lfp : is_fixed_pt f f.lfp := f.map_lfp
lemma lfp_le_map {a : α} (ha : lfp f ≤ a) : lfp f ≤ f a :=
calc lfp f = f (lfp f) : f.map_lfp.symm
... ≤ f a : f.mono ha
lemma is_least_lfp_le : is_least {a | f a ≤ a} (lfp f) :=
⟨f.map_lfp.le, λ a, f.lfp_le⟩
lemma is_least_lfp : is_least (fixed_points f) (lfp f) :=
⟨f.is_fixed_pt_lfp, λ a, f.lfp_le_fixed⟩
lemma lfp_induction {p : α → Prop} (step : ∀ a, p a → a ≤ lfp f → p (f a))
(hSup : ∀ s, (∀ a ∈ s, p a) → p (Sup s)) :
p (lfp f) :=
begin
set s := {a | a ≤ lfp f ∧ p a},
specialize hSup s (λ a, and.right),
suffices : Sup s = lfp f, from this ▸ hSup,
have h : Sup s ≤ lfp f := Sup_le (λ b, and.left),
have hmem : f (Sup s) ∈ s, from ⟨f.map_le_lfp h, step _ hSup h⟩,
exact h.antisymm (f.lfp_le $ le_Sup hmem)
end
lemma le_gfp {a : α} (h : a ≤ f a) : a ≤ gfp f :=
le_Sup h
lemma gfp_le {a : α} (h : ∀ b, b ≤ f b → b ≤ a) : gfp f ≤ a :=
Sup_le h
lemma is_fixed_pt_gfp : is_fixed_pt f (gfp f) := f.dual.is_fixed_pt_lfp
@[simp] lemma map_gfp : f (gfp f) = gfp f := f.dual.map_lfp
lemma map_le_gfp {a : α} (ha : a ≤ gfp f) : f a ≤ gfp f := f.dual.lfp_le_map ha
lemma gfp_le_map {a : α} (ha : gfp f ≤ a) : gfp f ≤ f a := f.dual.map_le_lfp ha
lemma is_greatest_gfp_le : is_greatest {a | a ≤ f a} (gfp f) :=
f.dual.is_least_lfp_le
lemma is_greatest_gfp : is_greatest (fixed_points f) (gfp f) :=
f.dual.is_least_lfp
lemma gfp_induction {p : α → Prop} (step : ∀ a, p a → gfp f ≤ a → p (f a))
(hInf : ∀ s, (∀ a ∈ s, p a) → p (Inf s)) :
p (gfp f) :=
f.dual.lfp_induction step hInf
end basic
section eqn
variables [complete_lattice α] [complete_lattice β] (f : β →o α) (g : α →o β)
-- Rolling rule
lemma map_lfp_comp : f (lfp (g.comp f)) = lfp (f.comp g) :=
le_antisymm ((f.comp g).map_lfp ▸ f.mono (lfp_le_fixed _ $ congr_arg g (f.comp g).map_lfp)) $
lfp_le _ (congr_arg f (g.comp f).map_lfp).le
lemma map_gfp_comp : f ((g.comp f).gfp) = (f.comp g).gfp :=
f.dual.map_lfp_comp g.dual
-- Diagonal rule
lemma lfp_lfp (h : α →o α →o α) :
lfp (lfp.comp h) = lfp h.on_diag :=
begin
let a := lfp (lfp.comp h),
refine (lfp_le _ _).antisymm (lfp_le _ (eq.le _)),
{ exact lfp_le _ h.on_diag.map_lfp.le },
have ha : (lfp ∘ h) a = a := (lfp.comp h).map_lfp,
calc h a a = h a (lfp (h a)) : congr_arg (h a) ha.symm
... = lfp (h a) : (h a).map_lfp
... = a : ha
end
lemma gfp_gfp (h : α →o α →o α) : gfp (gfp.comp h) = gfp h.on_diag :=
@lfp_lfp αᵒᵈ _ $ (order_hom.dual_iso αᵒᵈ αᵒᵈ).symm.to_order_embedding.to_order_hom.comp h.dual
end eqn
section prev_next
variables [complete_lattice α] (f : α →o α)
lemma gfp_const_inf_le (x : α) : gfp (const α x ⊓ f) ≤ x :=
gfp_le _ $ λ b hb, hb.trans inf_le_left
/-- Previous fixed point of a monotone map. If `f` is a monotone self-map of a complete lattice and
`x` is a point such that `f x ≤ x`, then `f.prev_fixed x hx` is the greatest fixed point of `f`
that is less than or equal to `x`. -/
def prev_fixed (x : α) (hx : f x ≤ x) : fixed_points f :=
⟨gfp (const α x ⊓ f),
calc f (gfp (const α x ⊓ f)) = x ⊓ f (gfp (const α x ⊓ f)) :
eq.symm $ inf_of_le_right $ (f.mono $ f.gfp_const_inf_le x).trans hx
... = gfp (const α x ⊓ f) : (const α x ⊓ f).map_gfp ⟩
/-- Next fixed point of a monotone map. If `f` is a monotone self-map of a complete lattice and
`x` is a point such that `x ≤ f x`, then `f.next_fixed x hx` is the least fixed point of `f`
that is greater than or equal to `x`. -/
def next_fixed (x : α) (hx : x ≤ f x) : fixed_points f :=
{ val := (const α x ⊔ f).lfp,
.. f.dual.prev_fixed x hx }
lemma prev_fixed_le {x : α} (hx : f x ≤ x) : ↑(f.prev_fixed x hx) ≤ x :=
f.gfp_const_inf_le x
lemma le_next_fixed {x : α} (hx : x ≤ f x) : x ≤ f.next_fixed x hx :=
f.dual.prev_fixed_le hx
lemma next_fixed_le {x : α} (hx : x ≤ f x) {y : fixed_points f} (h : x ≤ y) :
f.next_fixed x hx ≤ y :=
subtype.coe_le_coe.1 $ lfp_le _ $ sup_le h y.2.le
@[simp] lemma next_fixed_le_iff {x : α} (hx : x ≤ f x) {y : fixed_points f} :
f.next_fixed x hx ≤ y ↔ x ≤ y :=
⟨λ h, (f.le_next_fixed hx).trans h, f.next_fixed_le hx⟩
@[simp]
lemma le_prev_fixed {x : α} (hx : f x ≤ x) {y : fixed_points f} (h : ↑y ≤ x) :
y ≤ f.prev_fixed x hx :=
(f.le_prev_fixed_iff hx).2 h
lemma le_map_sup_fixed_points (x y : fixed_points f) : (x ⊔ y : α) ≤ f (x ⊔ y) :=
calc (x ⊔ y : α) = f x ⊔ f y : congr_arg2 (⊔) x.2.symm y.2.symm
... ≤ f (x ⊔ y) : f.mono.le_map_sup x y
lemma map_inf_fixed_points_le (x y : fixed_points f) : f (x ⊓ y) ≤ x ⊓ y :=
f.dual.le_map_sup_fixed_points x y
lemma le_map_Sup_subset_fixed_points (A : set α) (hA : A ⊆ fixed_points f) : Sup A ≤ f (Sup A) :=
Sup_le $ λ x hx, hA hx ▸ (f.mono $ le_Sup hx)
lemma map_Inf_subset_fixed_points_le (A : set α) (hA : A ⊆ fixed_points f) : f (Inf A) ≤ Inf A :=
le_Inf $ λ x hx, (hA hx) ▸ (f.mono $ Inf_le hx)
end prev_next
end order_hom
namespace fixed_points
open order_hom
variables [complete_lattice α] (f : α →o α)
instance : semilattice_sup (fixed_points f) :=
{ sup := λ x y, f.next_fixed (x ⊔ y) (f.le_map_sup_fixed_points x y),
le_sup_left := λ x y, subtype.coe_le_coe.1 $ le_sup_left.trans (f.le_next_fixed _),
le_sup_right := λ x y, subtype.coe_le_coe.1 $ le_sup_right.trans (f.le_next_fixed _),
sup_le := λ x y z hxz hyz, f.next_fixed_le _ $ sup_le hxz hyz,
.. subtype.partial_order _ }
instance : semilattice_inf (fixed_points f) :=
{ inf := λ x y, f.prev_fixed (x ⊓ y) (f.map_inf_fixed_points_le x y),
.. subtype.partial_order _, .. (order_dual.semilattice_inf (fixed_points f.dual)) }
instance : complete_semilattice_Sup (fixed_points f) :=
{ Sup := λ s, f.next_fixed (Sup (coe '' s))
(f.le_map_Sup_subset_fixed_points (coe '' s) (λ z ⟨x, hx⟩, hx.2 ▸ x.2)),
le_Sup := λ s x hx, subtype.coe_le_coe.1 $ le_trans (le_Sup $ set.mem_image_of_mem _ hx)
(f.le_next_fixed _),
Sup_le := λ s x hx, f.next_fixed_le _ $ Sup_le $ set.ball_image_iff.2 hx,
.. subtype.partial_order _ }
instance : complete_semilattice_Inf (fixed_points f) :=
{ Inf := λ s, f.prev_fixed (Inf (coe '' s))
(f.map_Inf_subset_fixed_points_le (coe '' s) (λ z ⟨x, hx⟩, hx.2 ▸ x.2)),
le_Inf := λ s x hx, f.le_prev_fixed _ $ le_Inf $ set.ball_image_iff.2 hx,
Inf_le := λ s x hx, subtype.coe_le_coe.1 $ le_trans (f.prev_fixed_le _)
(Inf_le $ set.mem_image_of_mem _ hx),
.. subtype.partial_order _ }
/-- **Knaster-Tarski Theorem**: The fixed points of `f` form a complete lattice. -/
instance : complete_lattice (fixed_points f) :=
{ top := ⟨f.gfp, f.is_fixed_pt_gfp⟩,
bot := ⟨f.lfp, f.is_fixed_pt_lfp⟩,
le_top := λ x, f.le_gfp x.2.ge,
bot_le := λ x, f.lfp_le x.2.le,
.. subtype.partial_order _,
.. fixed_points.semilattice_sup f,
.. fixed_points.semilattice_inf f,
.. fixed_points.complete_semilattice_Sup f,
.. fixed_points.complete_semilattice_Inf f }
end fixed_points
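/- A minimal usage sketch (not part of the original file; it assumes
`order_hom.id`, the bundled identity map, is available through the imports
above): the least fixed point of the identity map is `⊥`, since every
element is a prefixed point of `id`. -/
example [complete_lattice α] : order_hom.lfp (order_hom.id : α →o α) = ⊥ :=
le_antisymm (order_hom.lfp_le _ le_rfl) bot_le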
|
{-# OPTIONS --experimental-irrelevance #-}
-- Andreas, 2011-04-15
-- {-# OPTIONS -v tc.data:20 #-}
module IrrelevantDataParameter where
postulate
A : Set
data K .(a : A) : Set where
c : K a
postulate
a : A
data K' .(b : A) : Set where
c : K' a
-- ok, since parameter irrelevant
-- 2011-09-09
postulate
_×_ : ..(A B : Set) -> Set
Lst : ..(A : Set) -> Set
nl : .(A : Set) -> Lst A
cns : .(A : Set) -> A × Lst A -> Lst A
-- cns' : .(A : Set) -> (a : A) -> (as : Lst A) -> Lst A -- not well-formed! |
lemma interior_eq: "interior S = S \<longleftrightarrow> open S" |
(** the concept of left action of a monoidal category on a category
written by Ralph Matthes in lockstep with the code in [UniMath.CategoryTheory.MonoidalOld.MonoidalCategoriesWhiskered]
naming is inspired from https://ncatlab.org/nlab/show/actegory:
the whole structure is an [actegory], the binary operation is the [action], the extra data are the [action_unitor] and the [actor], together with their inverses
2022
*)
Require Import UniMath.MoreFoundations.All.
Require Import UniMath.Foundations.All.
Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.CategoryTheory.Core.Functors.
Require Import UniMath.CategoryTheory.Core.NaturalTransformations.
Require Import UniMath.CategoryTheory.Core.Isos.
Require Import UniMath.CategoryTheory.Equivalences.Core.
Require Import UniMath.CategoryTheory.Equivalences.FullyFaithful.
Require Import UniMath.CategoryTheory.Monoidal.WhiskeredBifunctors.
Require Import UniMath.CategoryTheory.Monoidal.Categories.
Local Open Scope cat.
Import BifunctorNotations.
Import MonoidalNotations.
Section A.
Context {V : category} (Mon_V : monoidal V). (** given the monoidal category that acts upon categories *)
(** Data **)
Definition action_data (C : category) : UU :=
bifunctor_data V C C.
Identity Coercion actionintobifunctor : action_data >-> bifunctor_data.
(** the following widens the concept of the left unitor of a monoidal category; a right unitor is not appropriate for actions *)
Definition action_unitor_data {C : category} (A : action_data C) : UU :=
∏ (x : C), C⟦I_{Mon_V} ⊗_{A} x, x⟧.
Definition action_unitorinv_data {C : category} (A : action_data C) : UU :=
∏ (x : C), C⟦x, I_{Mon_V} ⊗_{A} x⟧.
Definition actor_data {C : category} (A : action_data C) : UU :=
∏ (v w : V) (x : C), C ⟦(v ⊗_{Mon_V} w) ⊗_{A} x, v ⊗_{A} (w ⊗_{A} x)⟧.
Definition actorinv_data {C : category} (A : action_data C) : UU :=
∏ (v w : V) (x : C), C ⟦v ⊗_{A} (w ⊗_{A} x), (v ⊗_{Mon_V} w) ⊗_{A} x⟧.
Definition actegory_data (C : category) : UU :=
∑ A : action_data C,
(action_unitor_data A) × (action_unitorinv_data A) ×
(actor_data A) × (actorinv_data A).
Definition make_actegory_data {C : category} {A : action_data C}
(au : action_unitor_data A) (auinv : action_unitorinv_data A)
(aα : actor_data A) (aαinv : actorinv_data A) : actegory_data C
:= (A,,au,,auinv,,aα,,aαinv).
Definition actegory_action_data {C : category} (AD : actegory_data C) : action_data C := pr1 AD.
Coercion actegory_action_data : actegory_data >-> action_data.
Definition actegory_unitordata {C : category} (AD : actegory_data C) : action_unitor_data AD
:= pr1 (pr2 AD).
Notation "au_{ AD }" := (actegory_unitordata AD).
Definition actegory_unitorinvdata {C : category} (AD : actegory_data C) : action_unitorinv_data AD
:= pr12 (pr2 AD).
Notation "auinv_{ AD }" := (actegory_unitorinvdata AD).
Definition actegory_actordata {C : category} (AD : actegory_data C) : actor_data AD
:= pr12 (pr2 (pr2 AD)).
Notation "aα_{ AD }" := (actegory_actordata AD).
Definition actegory_actorinvdata {C : category} (AD : actegory_data C) : actorinv_data AD
:= pr22 (pr2 (pr2 AD)).
Notation "aαinv_{ AD }" := (actegory_actorinvdata AD).
(** Axioms **)
Definition action_unitor_nat {C : category} {A : action_data C} (au : action_unitor_data A) : UU :=
∏ (x y : C), ∏ (f : C ⟦x,y⟧), I_{Mon_V} ⊗^{A}_{l} f · au y = au x · f.
Definition action_unitorinv_nat {C : category} {A : action_data C} (auinv : action_unitorinv_data A) : UU :=
∏ (x y : C), ∏ (f : C ⟦x,y⟧), auinv x · I_{Mon_V} ⊗^{A}_{l} f = f · auinv y.
Definition action_unitor_iso_law {C : category} {A : action_data C} (au : action_unitor_data A) (auinv : action_unitorinv_data A) : UU :=
∏ (x : C), is_inverse_in_precat (au x) (auinv x).
Definition action_unitor_law {C : category} {A : action_data C} (au : action_unitor_data A) (auinv : action_unitorinv_data A) : UU :=
action_unitor_nat au × action_unitor_iso_law au auinv.
Definition action_unitorlaw_nat {C : category} {A : action_data C} {au : action_unitor_data A} {auinv : action_unitorinv_data A}
(aul : action_unitor_law au auinv) : action_unitor_nat au := pr1 aul.
Definition action_unitorlaw_iso_law {C : category} {A : action_data C} {au : action_unitor_data A} {auinv : action_unitorinv_data A}
(aul : action_unitor_law au auinv) : action_unitor_iso_law au auinv := pr2 aul.
Definition actor_nat_leftwhisker {C : category} {A : action_data C} (aα : actor_data A) : UU
:= ∏ (v w : V) (z z' : C) (h : C⟦z,z'⟧),
(aα v w z) · (v ⊗^{A}_{l} (w ⊗^{A}_{l} h)) = ((v ⊗_{Mon_V} w) ⊗^{A}_{l} h) · (aα v w z').
Definition actor_nat_rightwhisker {C : category} {A : action_data C} (aα : actor_data A) : UU
:= ∏ (v v' w : V) (z : C) (f : V⟦v,v'⟧),
(aα v w z) · (f ⊗^{A}_{r} (w ⊗_{A} z)) = ((f ⊗^{Mon_V}_{r} w) ⊗^{A}_{r} z) · (aα v' w z).
Definition actor_nat_leftrightwhisker {C : category} {A : action_data C} (aα : actor_data A) : UU
:= ∏ (v w w' : V) (z : C) (g : V⟦w,w'⟧),
(aα v w z) · (v ⊗^{A}_{l} (g ⊗^{A}_{r} z)) = ((v ⊗^{Mon_V}_{l} g) ⊗^{A}_{r} z) · (aα v w' z).
Definition actor_iso_law {C : category} {A : action_data C} (aα : actor_data A) (aαinv : actorinv_data A) : UU
:= ∏ (v w : V) (z : C), is_inverse_in_precat (aα v w z) (aαinv v w z).
Definition actor_law {C : category} {A : action_data C} (aα : actor_data A) (aαinv : actorinv_data A) : UU :=
(actor_nat_leftwhisker aα) × (actor_nat_rightwhisker aα) ×
(actor_nat_leftrightwhisker aα) × (actor_iso_law aα aαinv).
Definition actorlaw_natleft {C : category} {A : action_data C} {aα : actor_data A} {aαinv : actorinv_data A}
(aαl : actor_law aα aαinv) : actor_nat_leftwhisker aα := pr1 aαl.
Definition actorlaw_natright {C : category} {A : action_data C} {aα : actor_data A} {aαinv : actorinv_data A}
(aαl : actor_law aα aαinv) : actor_nat_rightwhisker aα := pr1 (pr2 aαl).
Definition actorlaw_natleftright {C : category} {A : action_data C} {aα : actor_data A} {aαinv : actorinv_data A}
(aαl : actor_law aα aαinv) : actor_nat_leftrightwhisker aα := pr1 (pr2 (pr2 aαl)).
Definition actorlaw_iso_law {C : category} {A : action_data C} {aα : actor_data A} {aαinv : actorinv_data A}
(aαl : actor_law aα aαinv) : actor_iso_law aα aαinv := pr2 (pr2 (pr2 aαl)).
Definition actegory_triangle_identity {C : category}
{A : action_data C}
(au : action_unitor_data A)
(aα : actor_data A)
:= ∏ (v : V) (y : C), (aα v I_{Mon_V} y) · (v ⊗^{A}_{l} (au y)) = (ru_{Mon_V} v) ⊗^{A}_{r} y.
Definition actegory_triangle_identity' {C : category}
{A : action_data C}
(au : action_unitor_data A)
(aα : actor_data A)
:= ∏ (v : V) (y : C), (aα I_{Mon_V} v y) · (au (v ⊗_{A} y)) = (lu_{Mon_V} v) ⊗^{A}_{r} y.
Definition actegory_pentagon_identity {C : category} {A : action_data C} (aα : actor_data A) : UU :=
∏ (w v v' : V) (z : C),
((α_{Mon_V} w v v') ⊗^{A}_{r} z) · (aα w (v ⊗_{Mon_V} v') z) · (w ⊗^{A}_{l} (aα v v' z)) =
(aα (w⊗_{Mon_V} v) v' z) · (aα w v (v' ⊗_{A} z)).
Definition actegory_laws {C : category} (AD : actegory_data C) : UU :=
is_bifunctor AD ×
(action_unitor_law au_{AD} auinv_{AD}) × (actor_law aα_{AD} aαinv_{AD}) ×
(actegory_triangle_identity au_{AD} aα_{AD}) × (actegory_pentagon_identity aα_{AD}).
Definition actegory (C : category) : UU :=
∑ (AD : actegory_data C), (actegory_laws AD).
Definition actegory_actdata {C : category} (Act : actegory C) : actegory_data C := pr1 Act.
Coercion actegory_actdata : actegory >-> actegory_data.
Definition actegory_actlaws {C : category} (Act : actegory C) : actegory_laws Act := pr2 Act.
Definition actegory_action_is_bifunctor {C : category} (Act : actegory C) : is_bifunctor Act
:= pr12 Act.
Coercion actegory_action
{C : category}
(Act : actegory C)
: bifunctor V C C
:= _ ,, actegory_action_is_bifunctor Act.
Definition actegory_unitorlaw {C : category} (Act : actegory C) : action_unitor_law au_{Act} auinv_{Act} := pr12 (actegory_actlaws Act).
Definition actegory_unitornat {C : category} (Act : actegory C) : action_unitor_nat au_{Act} := action_unitorlaw_nat (actegory_unitorlaw Act).
Definition actegory_unitorisolaw {C : category} (Act : actegory C) : action_unitor_iso_law au_{Act} auinv_{Act} := action_unitorlaw_iso_law (actegory_unitorlaw Act).
Lemma actegory_unitorinvnat {C : category} (Act : actegory C) : action_unitorinv_nat auinv_{Act}.
Proof.
intros x y f.
apply (z_iso_inv_on_right _ _ _ (_,,_,,actegory_unitorisolaw Act x)).
cbn.
rewrite assoc.
apply (z_iso_inv_on_left _ _ _ _ (_,,_,,actegory_unitorisolaw Act y)).
apply pathsinv0, actegory_unitornat.
Qed.
Definition actegory_actorlaw {C : category} (Act : actegory C) : actor_law aα_{Act} aαinv_{Act} := pr122 (actegory_actlaws Act).
Definition actegory_actornatleft {C : category} (Act : actegory C) : actor_nat_leftwhisker aα_{Act} := actorlaw_natleft (actegory_actorlaw Act).
Definition actegory_actornatright {C : category} (Act : actegory C) : actor_nat_rightwhisker aα_{Act} := actorlaw_natright (actegory_actorlaw Act).
Definition actegory_actornatleftright {C : category} (Act : actegory C) : actor_nat_leftrightwhisker aα_{Act} := actorlaw_natleftright (actegory_actorlaw Act).
Definition actegory_actorisolaw {C : category} (Act : actegory C) : actor_iso_law aα_{Act} aαinv_{Act} := actorlaw_iso_law (actegory_actorlaw Act).
Lemma actor_nat1 {C : category} (Act : actegory C)
{v v' w w' : V} {z z' : C} (f : V⟦v,v'⟧) (g : V⟦w,w'⟧) (h : C⟦z,z'⟧) :
(actegory_actordata Act v w z) · ((f ⊗^{Act}_{r} (w ⊗_{Act} z)) · (v' ⊗^{Act}_{l} ((g ⊗^{Act}_{r} z) · (w' ⊗^{Act}_{l} h)))) =
(((f ⊗^{Mon_V}_{r} w) · (v' ⊗^{Mon_V}_{l} g)) ⊗^{Act}_{r} z) · ((v' ⊗_{Mon_V} w') ⊗^{Act}_{l} h) · (actegory_actordata Act v' w' z').
Proof.
rewrite assoc.
rewrite (actegory_actornatright Act).
rewrite assoc'.
etrans. {
apply cancel_precomposition.
rewrite (bifunctor_leftcomp Act).
rewrite assoc.
rewrite (actegory_actornatleftright Act).
apply idpath.
}
etrans. {
apply cancel_precomposition.
rewrite assoc'.
apply cancel_precomposition.
apply (actegory_actornatleft Act).
}
rewrite assoc.
rewrite assoc.
apply cancel_postcomposition.
apply pathsinv0.
rewrite (bifunctor_rightcomp Act).
apply idpath.
Qed.
Lemma actor_nat2 {C : category} (Act : actegory C)
{v v' w w' : V} {z z' : C} (f : V⟦v,v'⟧) (g : V⟦w,w'⟧) (h : C⟦z,z'⟧) :
(actegory_actordata Act v w z) · (f ⊗^{Act} (g ⊗^{Act} h)) =
((f ⊗^{Mon_V} g) ⊗^{Act} h) · (actegory_actordata Act v' w' z').
Proof.
intros.
unfold functoronmorphisms1.
exact (actor_nat1 Act f g h).
Qed.
Definition actegory_triangleidentity {C : category} (Act : actegory C) : actegory_triangle_identity au_{Act} aα_{Act} := pr1 (pr222 (actegory_actlaws Act)).
Definition actegory_pentagonidentity {C : category} (Act : actegory C) : actegory_pentagon_identity aα_{Act} := pr2 (pr222 (actegory_actlaws Act)).
Lemma isaprop_actegory_laws {C : category} (AD : actegory_data C)
: isaprop (actegory_laws AD).
Proof.
repeat (apply isapropdirprod)
; repeat (apply impred ; intro)
; repeat (try apply C)
; repeat (apply isaprop_is_inverse_in_precat).
Qed.
(** Some additional data and properties which one deduces from actegories **)
Lemma action_unitor_nat_z_iso {C : category} (Act : actegory C):
nat_z_iso (leftwhiskering_functor Act I_{Mon_V}) (functor_identity C).
Proof.
use make_nat_z_iso.
- use make_nat_trans.
+ exact (λ x, au_{Act} x).
+ exact (λ x y f, actegory_unitornat Act x y f).
- intro x. exists (auinv_{Act} x).
apply (actegory_unitorisolaw Act x).
Defined.
Definition z_iso_from_actor_iso
{C : category} (Act : actegory C) (v w : V) (x : C)
: z_iso ((v ⊗_{Mon_V} w) ⊗_{Act} x) (v ⊗_{Act} (w ⊗_{Act} x))
:= make_z_iso
(aα_{Act} v w x)
(aαinv_{Act} v w x)
(actegory_actorisolaw Act v w x).
Definition actorinv_nat_leftwhisker {C : category} (Act : actegory C) :
∏ (v w : V) (z z' : C) (h : C⟦z,z'⟧),
(v ⊗^{Act}_{l} (w ⊗^{Act}_{l} h)) · (aαinv_{Act} v w z') = (aαinv_{Act} v w z) · ((v ⊗_{Mon_V} w) ⊗^{Act}_{l} h) .
Proof.
intros v w z z' h.
apply (swap_nat_along_zisos (z_iso_from_actor_iso Act v w z) (z_iso_from_actor_iso Act v w z')).
apply actegory_actornatleft.
Qed.
Definition actorinv_nat_rightwhisker {C : category} (Act : actegory C) :
∏ (v v' w : V) (z: C) (f : V⟦v,v'⟧),
(f ⊗^{Act}_{r} (w ⊗_{Act} z)) · (aαinv_{Act} v' w z) = (aαinv_{Act} v w z) · ((f ⊗^{Mon_V}_{r} w) ⊗^{Act}_{r} z).
Proof.
intros v v' w z f.
apply (swap_nat_along_zisos (z_iso_from_actor_iso Act v w z) (z_iso_from_actor_iso Act v' w z)).
apply actegory_actornatright.
Qed.
Definition actorinv_nat_leftrightwhisker {C : category} (Act : actegory C) :
∏ (v w w' : V) (z : C) (g : V⟦w,w'⟧),
(v ⊗^{Act}_{l} (g ⊗^{Act}_{r} z)) · (aαinv_{Act} v w' z) = (aαinv_{Act} v w z) · ((v ⊗^{Mon_V}_{l} g) ⊗^{Act}_{r} z).
Proof.
intros v w w' z g.
apply (swap_nat_along_zisos (z_iso_from_actor_iso Act v w z) (z_iso_from_actor_iso Act v w' z)).
apply actegory_actornatleftright.
Qed.
Definition actorinv_nat1 {C : category} (Act : actegory C)
{v v' w w' : V} {z z' : C} (f : V⟦v,v'⟧) (g : V⟦w,w'⟧) (h : C⟦z,z'⟧) :
((f ⊗^{Act}_{r} (w ⊗_{Act} z)) · (v' ⊗^{Act}_{l} ((g ⊗^{Act}_{r} z) · (w' ⊗^{Act}_{l} h)))) · (aαinv_{Act} v' w' z') =
(aαinv_{Act} v w z) · ((((f ⊗^{Mon_V}_{r} w) · (v' ⊗^{Mon_V}_{l} g)) ⊗^{Act}_{r} z) · ((v' ⊗_{Mon_V} w') ⊗^{ Act}_{l} h)).
Proof.
apply (swap_nat_along_zisos
(z_iso_from_actor_iso Act v w z)
(z_iso_from_actor_iso Act v' w' z')
).
unfold z_iso_from_actor_iso.
unfold make_z_iso.
unfold make_is_z_isomorphism.
unfold pr1.
apply actor_nat1.
Qed.
Lemma actorinv_nat2 {C : category} (Act : actegory C)
{v v' w w' : V} {z z' : C} (f : V⟦v,v'⟧) (g : V⟦w,w'⟧) (h : C⟦z,z'⟧) :
(f ⊗^{Act} (g ⊗^{Act} h)) · (aαinv_{Act} v' w' z') = (aαinv_{Act} v w z) · ((f ⊗^{Mon_V} g) ⊗^{Act} h).
Proof.
intros.
unfold functoronmorphisms1.
apply actorinv_nat1.
Qed.
Lemma pentagon_identity_actorinv {C : category} (Act : actegory C) (w v u : V) (z : C):
w ⊗^{ Act}_{l} (aαinv_{Act} v u z)
· aαinv_{Act} w (v ⊗_{Mon_V} u) z
· αinv_{Mon_V} w v u ⊗^{Act}_{r} z =
aαinv_{Act} w v (u ⊗_{Act} z)
· aαinv_{Act} (w ⊗_{Mon_V} v) u z.
Proof.
apply pathsinv0.
apply (z_iso_inv_on_right _ _ _ (z_iso_from_actor_iso Act _ _ _)).
unfold z_iso_from_actor_iso.
unfold make_z_iso.
unfold make_is_z_isomorphism.
etrans. { apply (pathsinv0 (id_right _)). }
apply (z_iso_inv_on_right _ _ _ (z_iso_from_actor_iso Act _ _ _)).
cbn.
apply pathsinv0.
etrans. {
rewrite assoc.
apply cancel_postcomposition.
apply (pathsinv0 (actegory_pentagonidentity Act w v u z)).
}
etrans. {
rewrite assoc.
rewrite assoc.
apply cancel_postcomposition.
apply cancel_postcomposition.
rewrite assoc'.
apply cancel_precomposition.
apply (pathsinv0 (bifunctor_leftcomp Act _ _ _ _ _ _)).
}
etrans. {
apply cancel_postcomposition.
apply cancel_postcomposition.
apply cancel_precomposition.
apply maponpaths.
apply (pr2 (z_iso_from_actor_iso Act v u z)).
}
etrans. {
apply cancel_postcomposition.
apply cancel_postcomposition.
apply cancel_precomposition.
apply (bifunctor_leftid Act).
}
etrans. {
apply cancel_postcomposition.
apply cancel_postcomposition.
apply id_right.
}
etrans. {
apply cancel_postcomposition.
rewrite assoc'.
apply cancel_precomposition.
apply (pr2 (z_iso_from_actor_iso Act w (v⊗_{Mon_V}u) z)).
}
etrans. {
apply cancel_postcomposition.
apply id_right.
}
etrans. {
apply (pathsinv0 (bifunctor_rightcomp Act _ _ _ _ _ _)).
}
etrans. {
apply maponpaths.
apply (pr2 (pr2 (z_iso_from_associator_iso Mon_V w v u))).
}
apply (bifunctor_rightid Act).
Qed.
End A.
Arguments actegory_unitordata {_ _ _} _ _.
Arguments actegory_unitorinvdata {_ _ _} _ _.
Arguments actegory_actordata {_ _ _} _ _ _ _.
Arguments actegory_actorinvdata {_ _ _} _ _ _ _.
Module ActegoryNotations.
Notation "au_{ Act }" := (actegory_unitordata Act).
Notation "aα_{ Act }" := (actegory_actordata Act).
Notation "au^{ Act }_{ x }" := (actegory_unitordata Act x ).
Notation "aα^{ Act }_{ v , w , x }" := (actegory_actordata Act v w x).
Notation "auinv^{ Act }_{ x }" := (actegory_unitorinvdata Act x ).
Notation "aαinv^{ Act }_{ v , w , x }" := (actegory_actorinvdata Act v w x).
End ActegoryNotations.
Section EquivalenceFromTensorWithUnit.
Import MonoidalNotations.
Context {V : category} (Mon_V : monoidal V) {C : category} (Act : actegory Mon_V C).
Definition ladjunction_data_from_action_with_unit
: Core.adjunction_data C C.
Proof.
exists (leftwhiskering_functor (actegory_action _ Act) I_{Mon_V}).
exists (functor_identity C).
use tpair.
- apply (nat_z_iso_inv (action_unitor_nat_z_iso _ Act)).
- apply (action_unitor_nat_z_iso _ Act).
Defined.
Definition lequivalence_from_action_with_unit
: equivalence_of_cats C C.
Proof.
exists ladjunction_data_from_action_with_unit.
split.
- intro ; apply (nat_z_iso_inv (action_unitor_nat_z_iso _ Act)).
- intro ; apply (action_unitor_nat_z_iso _ Act).
Defined.
Lemma leftwhiskering_fullyfaithful_action
: fully_faithful (leftwhiskering_functor (actegory_action _ Act) I_{Mon_V}).
Proof.
apply fully_faithful_from_equivalence.
exact (adjointificiation lequivalence_from_action_with_unit).
Defined.
Lemma leftwhiskering_faithful_action
: faithful (leftwhiskering_functor (actegory_action _ Act) I_{Mon_V}).
Proof.
exact (pr2 (fully_faithful_implies_full_and_faithful _ _ _ leftwhiskering_fullyfaithful_action)).
Defined.
End EquivalenceFromTensorWithUnit.
Section SecondTriangleEquality.
Import MonoidalNotations.
Import ActegoryNotations.
Context {V : category} (Mon_V : monoidal V) {C : category} (Act : actegory Mon_V C).
Local Lemma lemma0 (v : V) (x : C) :
(α_{Mon_V} I_{Mon_V} I_{Mon_V} v) ⊗^{Act}_{r} x · (I_{ Mon_V} ⊗^{ Mon_V}_{l} lu^{ Mon_V }_{ v}) ⊗^{ Act}_{r} x =
(ru^{ Mon_V }_{ I_{ Mon_V}} ⊗^{ Mon_V}_{r} v) ⊗^{ Act}_{r} x.
Proof.
refine (! bifunctor_rightcomp Act _ _ _ _ _ _ @ _).
apply maponpaths.
apply (monoidal_triangleidentity Mon_V I_{Mon_V} v).
Qed.
Local Lemma lemma2 (v : V) (x : C) :
I_{Mon_V} ⊗^{Act}_{l} (lu_{Mon_V} v ⊗^{Act}_{r} x) = aαinv^{Act}_{ I_{Mon_V}, (I_{Mon_V} ⊗_{Mon_V} v), x} · (((I_{Mon_V} ⊗^{Mon_V}_{l} lu_{Mon_V} v) ⊗^{Act}_{r} x) · aα_{Act} I_{Mon_V} v x).
Proof.
set (aαiso := make_z_iso _ _ (actegory_actorisolaw Mon_V Act I_{Mon_V} (I_{Mon_V} ⊗_{Mon_V} v) x)).
apply pathsinv0.
apply (z_iso_inv_on_right _ _ _ aαiso).
apply pathsinv0.
apply (actegory_actornatleftright Mon_V Act).
Qed.
Local Lemma lemma2' (v : V) (x : C) :
(I_{ Mon_V} ⊗^{ Mon_V}_{l} lu^{ Mon_V }_{ v}) ⊗^{ Act}_{r} x =
αinv^{ Mon_V }_{ I_{ Mon_V}, I_{ Mon_V}, v} ⊗^{ Act}_{r} x
· (ru^{ Mon_V }_{ I_{ Mon_V}} ⊗^{ Mon_V}_{r} v) ⊗^{ Act}_{r} x.
Proof.
apply pathsinv0.
set (αiso := make_z_iso _ _ (monoidal_associatorisolaw Mon_V I_{Mon_V} I_{Mon_V} v)).
set (αisor := functor_on_z_iso (rightwhiskering_functor Act x) αiso).
apply (z_iso_inv_on_right _ _ _ αisor).
apply pathsinv0.
apply lemma0.
Qed.
Local Lemma lemma3 (v : V) (x : C) :
I_{Mon_V} ⊗^{Act}_{l} (lu_{Mon_V} v ⊗^{Act}_{r} x) =
aαinv^{Act}_{ I_{Mon_V}, (I_{Mon_V} ⊗_{Mon_V} v), x}
· ((((αinv_{Mon_V} I_{Mon_V} I_{Mon_V} v) ⊗^{Act}_{r} x)
· (ru_{Mon_V} I_{Mon_V} ⊗^{Mon_V}_{r} v) ⊗^{Act}_{r} x)
· aα_{Act} I_{Mon_V} v x).
Proof.
refine (lemma2 v x @ _).
apply maponpaths.
apply maponpaths_2.
apply lemma2'.
Qed.
Local Lemma right_whisker_with_action_unitor' (v : V) (x : C) :
I_{Mon_V} ⊗^{Act}_{l} (lu_{Mon_V} v ⊗^{Act}_{r} x) =
I_{Mon_V} ⊗^{Act}_{l} (aα_{Act} I_{Mon_V} v x · au_{Act} (v ⊗_{Act} x)).
Proof.
refine (lemma3 v x @ _).
set (aαiso := make_z_iso _ _ (actegory_actorisolaw Mon_V Act I_{Mon_V} (I_{Mon_V} ⊗_{Mon_V} v) x)).
apply (z_iso_inv_on_right _ _ _ aαiso).
set (αiso' := make_z_iso _ _ (monoidal_associatorisolaw Mon_V I_{Mon_V} I_{Mon_V} v)).
set (αisor := functor_on_z_iso (rightwhiskering_functor Act x) αiso').
etrans. { apply assoc'. }
apply (z_iso_inv_on_right _ _ _ αisor).
apply pathsinv0.
simpl.
etrans. { apply assoc. }
etrans.
{
apply maponpaths.
apply (bifunctor_leftcomp Act _ _ _ _ _ _).
}
etrans. { apply assoc. }
etrans. {
apply maponpaths_2.
apply (actegory_pentagonidentity Mon_V Act I_{Mon_V} I_{Mon_V} v x).
}
etrans.
2: {
apply (actegory_actornatright Mon_V Act).
}
etrans. { apply assoc'. }
apply maponpaths.
apply actegory_triangleidentity.
Qed.
Lemma right_whisker_with_action_unitor : actegory_triangle_identity' Mon_V au_{Act} aα_{Act}.
Proof.
intros v x.
use faithful_reflects_commutative_triangle.
3: { apply (leftwhiskering_faithful_action(V:=V)). }
apply pathsinv0.
refine (right_whisker_with_action_unitor' _ _ @ _).
apply (bifunctor_leftcomp Act).
Qed.
Definition actegory_triangleidentity' := right_whisker_with_action_unitor.
End SecondTriangleEquality.
|
method(r::OptimizationResults) = r.method
minimizer(r::OptimizationResults) = r.minimizer
minimum(r::OptimizationResults) = r.minimum
iterations(r::OptimizationResults) = r.iterations
iteration_limit_reached(r::OptimizationResults) = r.iteration_converged
trace(r::OptimizationResults) = length(r.trace) > 0 ? r.trace : error("No trace in optimization results. To get a trace, run optimize() with store_trace = true.")
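# Illustrative usage sketch (not part of this file; it assumes the surrounding
# Optim.jl API, with `optimize`, `BFGS` and `Optim.Options` taken from the
# package and a hypothetical objective):
#
#   res = optimize(x -> sum(abs2, x), randn(3), BFGS(),
#                  Optim.Options(store_trace = true, extended_trace = true))
#   f_trace(res)   # objective value at each recorded iteration
#   x_trace(res)   # iterate at each iteration (needs extended_trace = true)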
function x_trace(r::UnivariateOptimizationResults)
tr = trace(r)
!haskey(tr[1].metadata, "minimizer") && error("Trace does not contain x. To get a trace of x, run optimize() with extended_trace = true")
[ state.metadata["minimizer"] for state in tr ]
end
function x_lower_trace(r::UnivariateOptimizationResults)
tr = trace(r)
!haskey(tr[1].metadata, "x_lower") && error("Trace does not contain x_lower. To get a trace of x_lower, run optimize() with extended_trace = true")
[ state.metadata["x_lower"] for state in tr ]
end
x_lower_trace(r::MultivariateOptimizationResults) = error("x_lower_trace is not implemented for $(method(r)).")
function x_upper_trace(r::UnivariateOptimizationResults)
tr = trace(r)
!haskey(tr[1].metadata, "x_upper") && error("Trace does not contain x_upper. To get a trace of x_upper, run optimize() with extended_trace = true")
[ state.metadata["x_upper"] for state in tr ]
end
x_upper_trace(r::MultivariateOptimizationResults) = error("x_upper_trace is not implemented for $(method(r)).")
function x_trace(r::MultivariateOptimizationResults)
tr = trace(r)
!haskey(tr[1].metadata, "x") && error("Trace does not contain x. To get a trace of x, run optimize() with extended_trace = true")
[ state.metadata["x"] for state in tr ]
end
f_trace(r::OptimizationResults) = [ state.value for state in trace(r) ]
g_norm_trace(r::OptimizationResults) = error("g_norm_trace is not implemented for $(method(r)).")
g_norm_trace(r::MultivariateOptimizationResults) = [ state.g_norm for state in trace(r) ]
f_calls(r::OptimizationResults) = r.f_calls
g_calls(r::OptimizationResults) = error("g_calls is not implemented for $(method(r)).")
g_calls(r::MultivariateOptimizationResults) = r.g_calls
converged(r::UnivariateOptimizationResults) = r.converged
converged(r::MultivariateOptimizationResults) = r.x_converged || r.f_converged || r.g_converged
x_converged(r::OptimizationResults) = error("x_converged is not implemented for $(method(r)).")
x_converged(r::MultivariateOptimizationResults) = r.x_converged
f_converged(r::OptimizationResults) = error("f_converged is not implemented for $(method(r)).")
f_converged(r::MultivariateOptimizationResults) = r.f_converged
g_converged(r::OptimizationResults) = error("g_converged is not implemented for $(method(r)).")
g_converged(r::MultivariateOptimizationResults) = r.g_converged
x_tol(r::OptimizationResults) = error("x_tol is not implemented for $(method(r)).")
x_tol(r::MultivariateOptimizationResults) = r.x_tol
f_tol(r::OptimizationResults) = error("f_tol is not implemented for $(method(r)).")
f_tol(r::MultivariateOptimizationResults) = r.f_tol
g_tol(r::OptimizationResults) = error("g_tol is not implemented for $(method(r)).")
g_tol(r::MultivariateOptimizationResults) = r.g_tol
initial_state(r::OptimizationResults) = error("initial_state is not implemented for $(method(r)).")
initial_state(r::MultivariateOptimizationResults) = r.initial_x
lower_bound(r::OptimizationResults) = error("lower_bound is not implemented for $(method(r)).")
lower_bound(r::UnivariateOptimizationResults) = r.initial_lower
upper_bound(r::OptimizationResults) = error("upper_bound is not implemented for $(method(r)).")
upper_bound(r::UnivariateOptimizationResults) = r.initial_upper
rel_tol(r::OptimizationResults) = error("rel_tol is not implemented for $(method(r)).")
rel_tol(r::UnivariateOptimizationResults) = r.rel_tol
abs_tol(r::OptimizationResults) = error("abs_tol is not implemented for $(method(r)).")
abs_tol(r::UnivariateOptimizationResults) = r.abs_tol
|
/-
Copyright (c) 2019 Seul Baek. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Seul Baek
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.tactic.omega.int.preterm
import Mathlib.PostPort
universes l
namespace Mathlib
/-
Linear integer arithmetic formulas in pre-normalized form.
-/
namespace omega
namespace int
/-- Intermediate shadow syntax for LIA formulas that includes unreified exprs and non-canonical terms -/
inductive preform
where
| eq : preterm → preterm → preform
| le : preterm → preterm → preform
| not : preform → preform
| or : preform → preform → preform
| and : preform → preform → preform
namespace preform
/-- Evaluate a preform into prop using the valuation v. -/
@[simp] def holds (v : ℕ → ℤ) : preform → Prop :=
sorry
end preform
/-- univ_close p n := p closed by prepending n universal quantifiers -/
@[simp] def univ_close (p : preform) : (ℕ → ℤ) → ℕ → Prop :=
sorry
namespace preform
/-- Fresh de Bruijn index not used by any variable in argument -/
def fresh_index : preform → ℕ :=
sorry
/-- All valuations satisfy argument -/
def valid (p : preform) :=
∀ (v : ℕ → ℤ), holds v p
/-- There exists some valuation that satisfies argument -/
def sat (p : preform) :=
∃ (v : ℕ → ℤ), holds v p
/-- implies p q := under any valuation, q holds if p holds -/
def implies (p : preform) (q : preform) :=
∀ (v : ℕ → ℤ), holds v p → holds v q
/-- equiv p q := under any valuation, p holds iff q holds -/
def equiv (p : preform) (q : preform) :=
∀ (v : ℕ → ℤ), holds v p ↔ holds v q
theorem sat_of_implies_of_sat {p : preform} {q : preform} : implies p q → sat p → sat q :=
fun (h1 : implies p q) (h2 : sat p) => exists_imp_exists h1 h2
theorem sat_or {p : preform} {q : preform} : sat (or p q) ↔ sat p ∨ sat q := sorry
/-- There does not exist any valuation that satisfies argument -/
def unsat (p : preform) :=
¬sat p
def repr : preform → string :=
sorry
protected instance has_repr : has_repr preform :=
has_repr.mk repr
end preform
theorem univ_close_of_valid {p : preform} {m : ℕ} {v : ℕ → ℤ} : preform.valid p → univ_close p v m := sorry
theorem valid_of_unsat_not {p : preform} : preform.unsat (preform.not p) → preform.valid p := sorry
|
module Input
import Command
%default total
public export
data Input : Type where
Quit : Input
RunProgram : Input
AppendCommand : Command -> Input
DeleteCommand : Input
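-- getArrow reads the final byte of an ANSI arrow-key escape sequence
-- (ESC and '[' have already been consumed by getInput and getSpecial):
-- 'A' is the up arrow, 'C' is right and 'D' is left.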
getArrow : IO (Maybe Input)
getArrow = do
c <- getChar
pure $ case c of
'A' => Just $ AppendCommand Forward
'C' => Just $ AppendCommand TurnRight
'D' => Just $ AppendCommand TurnLeft
_ => Nothing
getSpecial : IO (Maybe Input)
getSpecial = do
c <- getChar
case c of
'[' => getArrow
_ => pure Nothing
export
getInput : IO (Maybe Input)
getInput = do
c <- getChar
case (ord c) of
27 => getSpecial
113 => pure $ Just Quit
104 => pure $ Just $ AppendCommand TurnLeft
108 => pure $ Just $ AppendCommand TurnRight
106 => pure $ Just $ AppendCommand Forward
13 => pure $ Just RunProgram
127 => pure $ Just DeleteCommand
_ => pure $ Nothing
|
function x = flush(x)
x.leftfactors = [];
x.rightfactors = [];
x.midfactors = [];
|
SUBROUTINE RA_CLEV ( cldtyp, cldhgt, ncld, chc1, chc2, chc3,
+ iret )
C************************************************************************
C* RA_CLEV *
C* *
C* This subroutine uses the cloud information decoded from an airways *
C* report and returns it encoded in three combined cloud height *
C* and coverage reports. If -X ( partially obscured ) is reported, *
C* 1000 is added to the first report. The combined value is the *
C* height * 10 + coverage. *
C* *
C* RA_CLEV ( CLDTYP, CLDHGT, NCLD, CHC1, CHC2, CHC3, IRET ) *
C* *
C* Input parameters: *
C* CLDTYP (NCLD) REAL GEMPAK cloud types *
C* CLDHGT (NCLD) REAL Cloud height in hundreds of feet*
C* NCLD INTEGER Number of cloud reports *
C* *
C* Output parameters: *
C* CHC1 REAL Cloud report 1 *
C* CHC2 REAL Cloud report 2 *
C* CHC3 REAL Cloud report 3 *
C* IRET INTEGER Return code *
C* 0 = normal return *
C** *
C* Log: *
C* B. Doty/RDS 11/87 *
C* I. Graffman/RDS 4/88 *
C* M. desJardins/GSFC 9/89 GEMPAK 5 *
C************************************************************************
INCLUDE 'GEMPRM.PRM'
C*
REAL cldtyp (*), cldhgt (*)
C*
INCLUDE 'ERMISS.FNC'
C-----------------------------------------------------------------------
iret = 0
C
C* Set values to missing.
C
chc1 = RMISSD
chc2 = RMISSD
chc3 = RMISSD
C
C* Return if there are no cloud reports.
C
IF ( ncld .eq. 0 ) RETURN
C
C* Move reported clouds into low, mid and high clouds.
C
knt = 0
IF ( ERMISS ( cldhgt (1) ) .and. ( cldtyp (1) .eq. 9. ) ) THEN
is = 2
IF ( ncld .eq. 1 ) chc1 = 10000.
ELSE
is = 1
END IF
DO i = is, ncld
IF ( ( .not. ERMISS ( cldhgt (i) ) ) .or.
+ ( cldtyp (i) .eq. 1. ) ) THEN
IF ( ERMISS ( cldhgt (i) ) ) THEN
ccc = cldtyp (i)
ELSE
ccc = cldhgt (i) * 10. + cldtyp (i)
END IF
knt = knt + 1
IF ( knt .eq. 1 ) THEN
IF ( is .eq. 1 ) THEN
chc1 = ccc
ELSE
chc1 = 10000. + ccc
END IF
ELSE IF ( knt .eq. 2 ) THEN
chc2 = ccc
ELSE IF ( knt .eq. 3 ) THEN
chc3 = ccc
END IF
END IF
END DO
C*
RETURN
END
|
theorem nothing_mem_nil {t: Type} {xs: list t}: (∀ (x: t), x ∉ xs) ↔ xs = [] :=
begin
  split,
  { -- Forward direction: if nothing is a member, the list is empty.
    -- Induct on xs; in the cons case, membership of the head gives a contradiction.
    intro h,
    induction xs,
    { refl },
    { exfalso,
      exact h xs_hd (or.inl rfl) } },
  { -- Reverse direction: nothing is a member of the empty list.
    intro h,
    rw h,
    exact list.not_mem_nil }
end
|
{-# OPTIONS --without-K #-}
module hott where
open import hott.level public
open import hott.equivalence public
open import hott.loop public
open import hott.univalence public
open import hott.truncation public
|
module Parameters_for_PM3_C
USE vast_kind_param, ONLY: double
!...Created by Pacific-Sierra Research 77to90 4.4G 20:31:02 03/10/06
real(double), dimension(107) :: usspm3, upppm3, uddpm3, zspm3, zppm3, &
zdpm3, betasp, betapp, alppm3, gsspm3, gsppm3, gpppm3, gp2pm3, hsppm3
real(double), dimension(107,4) :: guesp1, guesp2, guesp3
! DATA FOR ELEMENT 1 HYDROGEN
data alppm3(1)/3.3563860d0/
data betasp(1)/-5.6265120d0/
data gsspm3(1)/14.7942080d0/
data usspm3(1)/-13.0733210d0/
data zspm3(1)/0.9678070d0/
! DATA FOR ELEMENT 3 LITHIUM
data alppm3(3)/1.2550000d0/
data betapp(3)/-1.5000000d0/
data betasp(3)/-0.5500000d0/
data gp2pm3(3)/4.5000000d0/
data gpppm3(3)/5.2500000d0/
data gsppm3(3)/3.0000000d0/
data gsspm3(3)/4.5000000d0/
data hsppm3(3)/0.1500000d0/
data upppm3(3)/-3.4000000d0/
data usspm3(3)/-5.3000000d0/
data zppm3(3)/0.7500000d0/
data zspm3(3)/0.6500000d0/
! DATA FOR ELEMENT 4 BERYLLIUM
data alppm3(4)/1.5935360d0/
data betapp(4)/-2.7806840d0/
data betasp(4)/-3.9620530d0/
data gp2pm3(4)/9.0052190d0/
data gpppm3(4)/6.0571820d0/
data gsppm3(4)/6.5761990d0/
data gsspm3(4)/9.0128510d0/
data hsppm3(4)/0.5446790d0/
data upppm3(4)/-11.3042430d0/
data usspm3(4)/-17.2647520d0/
data zppm3(4)/1.5087550d0/
data zspm3(4)/0.8774390d0/
! DATA FOR ELEMENT 6 CARBON
data alppm3(6)/2.7078070d0/
data betapp(6)/-9.8027550d0/
data betasp(6)/-11.9100150d0/
data gp2pm3(6)/9.0425660d0/
data gpppm3(6)/10.7962920d0/
data gsppm3(6)/10.2650270d0/
data gsspm3(6)/11.2007080d0/
data hsppm3(6)/2.2909800d0/
data upppm3(6)/-36.2669180d0/
data usspm3(6)/-47.2703200d0/
data zppm3(6)/1.8423450d0/
data zspm3(6)/1.5650850d0/
! DATA FOR ELEMENT 7 NITROGEN
data alppm3(7)/2.8305450d0/
data betapp(7)/-20.0438480d0/
data betasp(7)/-14.0625210d0/
data gp2pm3(7)/10.8072770d0/
data gpppm3(7)/11.7546720d0/
data gsppm3(7)/7.3485650d0/
data gsspm3(7)/11.9047870d0/
data hsppm3(7)/1.1367130d0/
data upppm3(7)/-47.5097360d0/
data usspm3(7)/-49.3356720d0/
data zppm3(7)/2.3137280d0/
data zspm3(7)/2.0280940d0/
! DATA FOR ELEMENT 8 OXYGEN
data alppm3(8)/3.2171020d0/
data betapp(8)/-24.7525150d0/
data betasp(8)/-45.2026510d0/
data gp2pm3(8)/12.4060950d0/
data gpppm3(8)/13.6540160d0/
data gsppm3(8)/10.6211600d0/
data gsspm3(8)/15.7557600d0/
data hsppm3(8)/0.5938830d0/
data upppm3(8)/-71.8795800d0/
data usspm3(8)/-86.9930020d0/
data zppm3(8)/2.3894020d0/
data zspm3(8)/3.7965440d0/
! DATA FOR ELEMENT 9 FLUORINE
data alppm3(9)/3.3589210d0/
data betapp(9)/-27.7446600d0/
data betasp(9)/-48.4059390d0/
data gp2pm3(9)/14.4183930d0/
data gpppm3(9)/14.8172560d0/
data gsppm3(9)/16.0736890d0/
data gsspm3(9)/10.4966670d0/
data hsppm3(9)/0.7277630d0/
data upppm3(9)/-105.6850470d0/
data usspm3(9)/-110.4353030d0/
data zppm3(9)/2.4911780d0/
data zspm3(9)/4.7085550d0/
! DATA FOR ELEMENT 11 SODIUM
data alppm3(11)/1.681d0/
! DATA FOR ELEMENT 12 MAGNESIUM
data alppm3(12)/1.3291470d0/
data betapp(12)/-0.5695810d0/
data betasp(12)/-2.0716910d0/
data gp2pm3(12)/7.0908230d0/
data gpppm3(12)/6.9104460d0/
data gsppm3(12)/6.7939950d0/
data gsspm3(12)/6.6943000d0/
data hsppm3(12)/0.5433000d0/
data upppm3(12)/-14.1734600d0/
data usspm3(12)/-14.6236880d0/
data zppm3(12)/1.4834530d0/
data zspm3(12)/0.6985520d0/
! DATA FOR ELEMENT 13 ALUMINUM
data alppm3(13)/1.5217030d0/
data betapp(13)/-0.9565500d0/
data betasp(13)/-0.5943010d0/
data gp2pm3(13)/6.1210770d0/
data gpppm3(13)/6.3477900d0/
data gsppm3(13)/11.6598560d0/
data gsspm3(13)/5.7767370d0/
data hsppm3(13)/4.0062450d0/
data upppm3(13)/-22.2641590d0/
data usspm3(13)/-24.8454040d0/
data zdpm3(13)/1.0000000d0/
data zppm3(13)/1.0736290d0/
data zspm3(13)/1.7028880d0/
! DATA FOR ELEMENT 14 SILICON
data alppm3(14)/2.1358090d0/
data betapp(14)/-3.9331480d0/
data betasp(14)/-2.8621450d0/
data gp2pm3(14)/5.1612970d0/
data gpppm3(14)/6.7593670d0/
data gsppm3(14)/5.9490570d0/
data gsspm3(14)/5.0471960d0/
data hsppm3(14)/0.9198320d0/
data upppm3(14)/-22.8136350d0/
data usspm3(14)/-26.7634830d0/
data zdpm3(14)/1.0000000d0/
data zppm3(14)/1.3130880d0/
data zspm3(14)/1.6350750d0/
! DATA FOR ELEMENT 15 PHOSPHORUS
data alppm3(15)/1.9405340d0/
data betapp(15)/-4.1600400d0/
data betasp(15)/-12.6158790d0/
data gp2pm3(15)/6.0620020d0/
data gpppm3(15)/6.6184780d0/
data gsppm3(15)/5.1869490d0/
data gsspm3(15)/7.8016150d0/
data hsppm3(15)/1.5428090d0/
data upppm3(15)/-29.5930520d0/
data usspm3(15)/-40.4130960d0/
data zdpm3(15)/1.0000000d0/
data zppm3(15)/1.5047320d0/
data zspm3(15)/2.0175630d0/
! DATA FOR ELEMENT 16 SULFUR
data alppm3(16)/2.2697060d0/
data betapp(16)/-8.0914150d0/
data betasp(16)/-8.8274650d0/
data gp2pm3(16)/7.9702470d0/
data gpppm3(16)/9.9681640d0/
data gsppm3(16)/6.7859360d0/
data gsspm3(16)/8.9646670d0/
data hsppm3(16)/4.0418360d0/
data upppm3(16)/-44.3925830d0/
data usspm3(16)/-49.8953710d0/
data zdpm3(16)/1.0000000d0/
data zppm3(16)/1.6589720d0/
data zspm3(16)/1.8911850d0/
! DATA FOR ELEMENT 17 CHLORINE
data alppm3(17)/2.5172960d0/
data betapp(17)/-11.5939220d0/
data betasp(17)/-27.5285600d0/
data gp2pm3(17)/7.5041540d0/
data gpppm3(17)/7.5222150d0/
data gsppm3(17)/8.0481150d0/
data gsspm3(17)/16.0136010d0/
data hsppm3(17)/3.4811530d0/
data upppm3(17)/-53.6143960d0/
data usspm3(17)/-100.6267470d0/
data zdpm3(17)/1.0000000d0/
data zppm3(17)/2.1510100d0/
data zspm3(17)/2.2462100d0/
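! DATA FOR ELEMENT 19 POTASSIUM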
data alppm3(19)/1.400d0/
! DATA FOR ELEMENT 22 Titanium (Russell-Saunders only)
data alppm3(22)/3.0683070d0/
data betapp(22)/-0.1000000d0/
data betasp(22)/-0.1000000d0/
data gp2pm3(22)/3.5000000d0/
data gpppm3(22)/5.0000000d0/
data gsppm3(22)/4.1500000d0/
data gsspm3(22)/6.0000000d0/
data hsppm3(22)/1.0000000d0/
data uddpm3(22)/-30.0000000d0/
data upppm3(22)/10.0000000d0/
data usspm3(22)/10.0000000d0/
data zdpm3(22)/2.8845490d0/
data zppm3(22)/1.5000000d0/
data zspm3(22)/1.5000000d0/
! DATA FOR ELEMENT 30 ZINC
data alppm3(30)/1.3501260d0/
data betapp(30)/-6.3518640d0/
data betasp(30)/-0.7155780d0/
data gp2pm3(30)/4.6696560d0/
data gpppm3(30)/4.9801740d0/
data gsppm3(30)/7.7362040d0/
data gsspm3(30)/9.6771960d0/
data hsppm3(30)/0.6004130d0/
data upppm3(30)/-11.0474090d0/
data usspm3(30)/-18.5321980d0/
data zdpm3(30)/1.0000000d0/
data zppm3(30)/1.5069220d0/
data zspm3(30)/1.8199890d0/
! DATA FOR ELEMENT 31 GALLIUM
data alppm3(31)/1.6051150d0/
data betapp(31)/-0.4070530d0/
data betasp(31)/-4.9456180d0/
data gp2pm3(31)/4.9830450d0/
data gpppm3(31)/5.0868550d0/
data gsppm3(31)/8.9256190d0/
data gsspm3(31)/8.4585540d0/
data hsppm3(31)/2.0512600d0/
data upppm3(31)/-21.8753710d0/
data usspm3(31)/-29.8555930d0/
data zppm3(31)/0.8394110d0/
data zspm3(31)/1.8470400d0/
! DATA FOR ELEMENT 32 GERMANIUM
data alppm3(32)/1.9723370d0/
data betapp(32)/-2.2501567d0/
data betasp(32)/-5.3250024d0/
data gp2pm3(32)/6.9242663d0/
data gpppm3(32)/7.6718647d0/
data gsppm3(32)/10.2095293d0/
data gsspm3(32)/5.3769635d0/
data hsppm3(32)/1.3370204d0/
data upppm3(32)/-31.5863583d0/
data usspm3(32)/-35.4671955d0/
data zppm3(32)/1.5924319d0/
data zspm3(32)/2.2373526d0/
! DATA FOR ELEMENT 33 ARSENIC
data alppm3(33)/1.7944770d0/
data betapp(33)/-5.0173860d0/
data betasp(33)/-8.2321650d0/
data gp2pm3(33)/8.2103460d0/
data gpppm3(33)/8.2872500d0/
data gsppm3(33)/5.3979830d0/
data gsspm3(33)/8.7890010d0/
data hsppm3(33)/1.9510340d0/
data upppm3(33)/-35.1524150d0/
data usspm3(33)/-38.5074240d0/
data zppm3(33)/1.7038890d0/
data zspm3(33)/2.6361770d0/
! DATA FOR ELEMENT 34 SELENIUM
data alppm3(34)/3.0439570d0/
data betapp(34)/-5.4930390d0/
data betasp(34)/-6.1578220d0/
data gp2pm3(34)/7.7242890d0/
data gpppm3(34)/9.5683260d0/
data gsppm3(34)/10.0604610d0/
data gsspm3(34)/7.4325910d0/
data hsppm3(34)/4.0165580d0/
data upppm3(34)/-49.8230760d0/
data usspm3(34)/-55.3781350d0/
data zppm3(34)/1.7325360d0/
data zspm3(34)/2.8280510d0/
! DATA FOR ELEMENT 35 BROMINE
data alppm3(35)/2.5118420d0/
data betapp(35)/-6.8140130d0/
data betasp(35)/-31.1713420d0/
data gp2pm3(35)/7.8168490d0/
data gpppm3(35)/8.2827630d0/
data gsppm3(35)/16.0616800d0/
data gsspm3(35)/15.9434250d0/
data hsppm3(35)/0.5788690d0/
data upppm3(35)/-74.2271290d0/
data usspm3(35)/-116.6193110d0/
data zdpm3(35)/1.0000000d0/
data zppm3(35)/2.1275900d0/
data zspm3(35)/5.3484570d0/
! DATA FOR ELEMENT 48 Cadmium
data alppm3(48)/1.5253820d0/
data betapp(48)/-0.6010340d0/
data betasp(48)/-8.5819440d0/
data gp2pm3(48)/4.6696560d0/
data gpppm3(48)/4.9481040d0/
data gsppm3(48)/8.2315390d0/
data gsspm3(48)/9.2069600d0/
data hsppm3(48)/1.6562340d0/
data upppm3(48)/8.7497950d0/
data usspm3(48)/-15.8285840d0/
data zppm3(48)/2.0664120d0/
data zspm3(48)/1.6793510d0/
! DATA FOR ELEMENT 49 INDIUM
data alppm3(49)/1.4183850d0/
data betapp(49)/-1.8289080d0/
data betasp(49)/-2.9933190d0/
data gp2pm3(49)/4.9842110d0/
data gpppm3(49)/6.2992690d0/
data gsppm3(49)/8.2298730d0/
data gsspm3(49)/6.5549000d0/
data hsppm3(49)/2.6314610d0/
data upppm3(49)/-20.0058220d0/
data usspm3(49)/-26.1762050d0/
data zppm3(49)/1.4453500d0/
data zspm3(49)/2.0161160d0/
! DATA FOR ELEMENT 50 TIN
data alppm3(50)/1.6996500d0/
data betapp(50)/-2.0059990d0/
data betasp(50)/-2.7858020d0/
data gp2pm3(50)/5.1822140d0/
data gpppm3(50)/5.6738100d0/
data gsppm3(50)/7.2353270d0/
data gsspm3(50)/10.1900330d0/
data hsppm3(50)/1.0331570d0/
data upppm3(50)/-25.8944190d0/
data usspm3(50)/-34.5501920d0/
data zppm3(50)/1.6382330d0/
data zspm3(50)/2.3733280d0/
! DATA FOR ELEMENT 51 ANTIMONY
data alppm3(51)/2.0343010d0/
data betapp(51)/-2.8179480d0/
data betasp(51)/-14.7942170d0/
data gp2pm3(51)/6.2500000d0/
data gpppm3(51)/6.3500000d0/
data gsppm3(51)/5.2776800d0/
data gsspm3(51)/9.2382770d0/
data hsppm3(51)/2.4244640d0/
data upppm3(51)/-29.4349540d0/
data usspm3(51)/-56.4321960d0/
data zppm3(51)/1.8999920d0/
data zspm3(51)/2.3430390d0/
! DATA FOR ELEMENT 52 TELLURIUM
data alppm3(52)/2.4850190d0/
data betapp(52)/-3.8954300d0/
data betasp(52)/-2.6651460d0/
data gp2pm3(52)/7.7551210d0/
data gpppm3(52)/7.7775920d0/
data gsppm3(52)/8.1691450d0/
data gsspm3(52)/10.2550730d0/
data hsppm3(52)/3.7724620d0/
data upppm3(52)/-46.3140990d0/
data usspm3(52)/-44.9380360d0/
data zppm3(52)/1.6475550d0/
data zspm3(52)/4.1654920d0/
! DATA FOR ELEMENT 53 IODINE
data alppm3(53)/1.9901850d0/
data betapp(53)/-5.8947030d0/
data betasp(53)/-14.4942340d0/
data gp2pm3(53)/5.9664070d0/
data gpppm3(53)/7.2883300d0/
data gsppm3(53)/14.9904060d0/
data gsspm3(53)/13.6319430d0/
data hsppm3(53)/2.6300350d0/
data upppm3(53)/-61.0915820d0/
data usspm3(53)/-96.4540370d0/
data zdpm3(53)/1.0000000d0/
data zppm3(53)/2.4543540d0/
data zspm3(53)/7.0010130d0/
! DATA FOR ELEMENT 80 MERCURY
data alppm3(80)/1.5293770d0/
data betapp(80)/-3.4640310d0/
data betasp(80)/-3.1013650d0/
data gp2pm3(80)/16.0007400d0/
data gpppm3(80)/14.7092830d0/
data gsppm3(80)/10.6392970d0/
data gsspm3(80)/6.6247200d0/
data hsppm3(80)/2.0363110d0/
data upppm3(80)/-18.3307510d0/
data usspm3(80)/-17.7622290d0/
data zppm3(80)/2.4799510d0/
data zspm3(80)/1.4768850d0/
! DATA FOR ELEMENT 81 THALLIUM
data alppm3(81)/1.3409510d0/
data betapp(81)/-7.9467990d0/
data betasp(81)/-1.0844950d0/
data gp2pm3(81)/8.9627270d0/
data gpppm3(81)/4.9927850d0/
data gsppm3(81)/11.2238830d0/
data gsspm3(81)/10.4604120d0/
data hsppm3(81)/2.5304060d0/
data upppm3(81)/-26.9206370d0/
data usspm3(81)/-30.0531700d0/
data zppm3(81)/1.9694450d0/
data zspm3(81)/6.8679210d0/
! DATA FOR ELEMENT 82 LEAD
data alppm3(82)/1.6200450d0/
data betapp(82)/-1.3954300d0/
data betasp(82)/-6.1260240d0/
data gp2pm3(82)/5.0456510d0/
data gpppm3(82)/5.1837800d0/
data gsppm3(82)/6.7937820d0/
data gsspm3(82)/7.0119920d0/
data hsppm3(82)/1.5663020d0/
data upppm3(82)/-24.4258340d0/
data usspm3(82)/-30.3227560d0/
data zppm3(82)/1.8924180d0/
data zspm3(82)/3.1412890d0/
! DATA FOR ELEMENT 83 BISMUTH
data alppm3(83)/1.8574310d0/
data betapp(83)/-5.8001520d0/
data betasp(83)/-5.6072830d0/
data gp2pm3(83)/8.3354470d0/
data gpppm3(83)/8.6960070d0/
data gsppm3(83)/6.1033080d0/
data gsspm3(83)/4.9894800d0/
data hsppm3(83)/0.5991220d0/
data upppm3(83)/-35.5210260d0/
data usspm3(83)/-33.4959380d0/
data zppm3(83)/1.9349350d0/
data zspm3(83)/4.9164510d0/
! DATA FOR ELEMENT 102 CAPPED BOND
data alppm3(102)/2.5441341d0/
data betasp(102)/-9999999.0000000d0/
data gsspm3(102)/12.8480000d0/
data hsppm3(102)/0.1000000d0/
data usspm3(102)/-11.9062760d0/
data zdpm3(102)/0.3000000d0/
data zppm3(102)/0.3000000d0/
data zspm3(102)/4.0000000d0/
! DATA FOR THE " ++ " SPARKLE
data alppm3(103)/1.5D0/
! DATA FOR THE " + " SPARKLE
data alppm3(104)/1.5d0/
! DATA FOR THE " -- " SPARKLE
data alppm3(105)/1.5d0/
! DATA FOR THE " - " SPARKLE
data alppm3(106)/1.5d0/
data guesp1(1,1)/ 1.1287500D0/
data guesp2(1,1)/ 5.0962820D0/
data guesp3(1,1)/ 1.5374650D0/
data guesp1(1,2)/ - 1.0603290D0/
data guesp2(1,2)/ 6.0037880D0/
data guesp3(1,2)/ 1.5701890D0/
data guesp1(3,1)/ - 0.4500000D0/
data guesp2(3,1)/ 5.0000000D0/
data guesp3(3,1)/ 1.0000000D0/
data guesp1(3,2)/ 0.8000000D0/
data guesp2(3,2)/ 6.5000000D0/
data guesp3(3,2)/ 1.0000000D0/
data guesp1(4,1)/ 1.6315720D0/
data guesp2(4,1)/ 2.6729620D0/
data guesp3(4,1)/ 1.7916860D0/
data guesp1(4,2)/ - 2.1109590D0/
data guesp2(4,2)/ 1.9685940D0/
data guesp3(4,2)/ 1.7558710D0/
data guesp1(6,1)/ 0.0501070D0/
data guesp2(6,1)/ 6.0031650D0/
data guesp3(6,1)/ 1.6422140D0/
data guesp1(6,2)/ 0.0507330D0/
data guesp2(6,2)/ 6.0029790D0/
data guesp3(6,2)/ 0.8924880D0/
data guesp1(7,1)/ 1.5016740D0/
data guesp2(7,1)/ 5.9011480D0/
data guesp3(7,1)/ 1.7107400D0/
data guesp1(7,2)/ - 1.5057720D0/
data guesp2(7,2)/ 6.0046580D0/
data guesp3(7,2)/ 1.7161490D0/
data guesp1(8,1)/ - 1.1311280D0/
data guesp2(8,1)/ 6.0024770D0/
data guesp3(8,1)/ 1.6073110D0/
data guesp1(8,2)/ 1.1378910D0/
data guesp2(8,2)/ 5.9505120D0/
data guesp3(8,2)/ 1.5983950D0/
data guesp1(9,1)/ - 0.0121660D0/
data guesp2(9,1)/ 6.0235740D0/
data guesp3(9,1)/ 1.8568590D0/
data guesp1(9,2)/ - 0.0028520D0/
data guesp2(9,2)/ 6.0037170D0/
data guesp3(9,2)/ 2.6361580D0/
data guesp1(12,1)/ 2.1170500D0/
data guesp2(12,1)/ 6.0094770D0/
data guesp3(12,1)/ 2.0844060D0/
data guesp1(12,2)/ - 2.5477670D0/
data guesp2(12,2)/ 4.3953700D0/
data guesp3(12,2)/ 2.0636740D0/
data guesp1(13,1)/ - 0.4730900D0/
data guesp2(13,1)/ 1.9158250D0/
data guesp3(13,1)/ 1.4517280D0/
data guesp1(13,2)/ - 0.1540510D0/
data guesp2(13,2)/ 6.0050860D0/
data guesp3(13,2)/ 2.5199970D0/
data guesp1(14,1)/ - 0.3906000D0/
data guesp2(14,1)/ 6.0000540D0/
data guesp3(14,1)/ 0.6322620D0/
data guesp1(14,2)/ 0.0572590D0/
data guesp2(14,2)/ 6.0071830D0/
data guesp3(14,2)/ 2.0199870D0/
data guesp1(15,1)/ - 0.6114210D0/
data guesp2(15,1)/ 1.9972720D0/
data guesp3(15,1)/ 0.7946240D0/
data guesp1(15,2)/ - 0.0939350D0/
data guesp2(15,2)/ 1.9983600D0/
data guesp3(15,2)/ 1.9106770D0/
data guesp1(16,1)/ - 0.3991910D0/
data guesp2(16,1)/ 6.0006690D0/
data guesp3(16,1)/ 0.9621230D0/
data guesp1(16,2)/ - 0.0548990D0/
data guesp2(16,2)/ 6.0018450D0/
data guesp3(16,2)/ 1.5799440D0/
data guesp1(17,1)/ - 0.1715910D0/
data guesp2(17,1)/ 6.0008020D0/
data guesp3(17,1)/ 1.0875020D0/
data guesp1(17,2)/ - 0.0134580D0/
data guesp2(17,2)/ 1.9666180D0/
data guesp3(17,2)/ 2.2928910D0/
data guesp1(30,1)/ - 0.1112340D0/
data guesp2(30,1)/ 6.0014780D0/
data guesp3(30,1)/ 1.5160320D0/
data guesp1(30,2)/ - 0.1323700D0/
data guesp2(30,2)/ 1.9958390D0/
data guesp3(30,2)/ 2.5196420D0/
data guesp1(31,1)/ - 0.5601790D0/
data guesp2(31,1)/ 5.6232730D0/
data guesp3(31,1)/ 1.5317800D0/
data guesp1(31,2)/ - 0.2727310D0/
data guesp2(31,2)/ 1.9918430D0/
data guesp3(31,2)/ 2.1838640D0/
data guesp1(32,1)/ 0.9631726D0/
data guesp2(32,1)/ 6.0120134D0/
data guesp3(32,1)/ 2.1633655D0/
data guesp1(32,2)/ - 0.9593891D0/
data guesp2(32,2)/ 5.7491802D0/
data guesp3(32,2)/ 2.1693724D0/
data guesp1(33,1)/ - 0.4600950D0/
data guesp2(33,1)/ 1.9831150D0/
data guesp3(33,1)/ 1.0867930D0/
data guesp1(33,2)/ - 0.0889960D0/
data guesp2(33,2)/ 1.9929440D0/
data guesp3(33,2)/ 2.1400580D0/
data guesp1(34,1)/ 0.0478730D0/
data guesp2(34,1)/ 6.0074000D0/
data guesp3(34,1)/ 2.0817170D0/
data guesp1(34,2)/ 0.1147200D0/
data guesp2(34,2)/ 6.0086720D0/
data guesp3(34,2)/ 1.5164230D0/
data guesp1(35,1)/ 0.9604580D0/
data guesp2(35,1)/ 5.9765080D0/
data guesp3(35,1)/ 2.3216540D0/
data guesp1(35,2)/ - 0.9549160D0/
data guesp2(35,2)/ 5.9447030D0/
data guesp3(35,2)/ 2.3281420D0/
data guesp1(49,1)/ - 0.3431380D0/
data guesp2(49,1)/ 1.9940340D0/
data guesp3(49,1)/ 1.6255160D0/
data guesp1(49,2)/ - 0.1095320D0/
data guesp2(49,2)/ 5.6832170D0/
data guesp3(49,2)/ 2.8670090D0/
data guesp1(50,1)/ - 0.1503530D0/
data guesp2(50,1)/ 6.0056940D0/
data guesp3(50,1)/ 1.7046420D0/
data guesp1(50,2)/ - 0.0444170D0/
data guesp2(50,2)/ 2.2573810D0/
data guesp3(50,2)/ 2.4698690D0/
data guesp1(51,1)/ 3.0020280D0/
data guesp2(51,1)/ 6.0053420D0/
data guesp3(51,1)/ 0.8530600D0/
data guesp1(51,2)/ - 0.0188920D0/
data guesp2(51,2)/ 6.0114780D0/
data guesp3(51,2)/ 2.7933110D0/
data guesp1(52,1)/ 0.0333910D0/
data guesp2(52,1)/ 5.9563790D0/
data guesp3(52,1)/ 2.2775750D0/
data guesp1(52,2)/ - 1.9218670D0/
data guesp2(52,2)/ 4.9732190D0/
data guesp3(52,2)/ 0.5242430D0/
data guesp1(53,1)/ - 0.1314810D0/
data guesp2(53,1)/ 5.2064170D0/
data guesp3(53,1)/ 1.7488240D0/
data guesp1(53,2)/ - 0.0368970D0/
data guesp2(53,2)/ 6.0101170D0/
data guesp3(53,2)/ 2.7103730D0/
data guesp1(80,1)/ 1.0827200D0/
data guesp2(80,1)/ 6.4965980D0/
data guesp3(80,1)/ 1.1951460D0/
data guesp1(80,2)/ - 0.0965530D0/
data guesp2(80,2)/ 3.9262810D0/
data guesp3(80,2)/ 2.6271600D0/
data guesp1(81,1)/ - 1.3613990D0/
data guesp2(81,1)/ 3.5572260D0/
data guesp3(81,1)/ 1.0928020D0/
data guesp1(81,2)/ - 0.0454010D0/
data guesp2(81,2)/ 2.3069950D0/
data guesp3(81,2)/ 2.9650290D0/
data guesp1(82,1)/ - 0.1225760D0/
data guesp2(82,1)/ 6.0030620D0/
data guesp3(82,1)/ 1.9015970D0/
data guesp1(82,2)/ - 0.0566480D0/
data guesp2(82,2)/ 4.7437050D0/
data guesp3(82,2)/ 2.8618790D0/
data guesp1(83,1)/ 2.5816930D0/
data guesp2(83,1)/ 5.0940220D0/
data guesp3(83,1)/ 0.4997870D0/
data guesp1(83,2)/ 0.0603200D0/
data guesp2(83,2)/ 6.0015380D0/
data guesp3(83,2)/ 2.4279700D0/
end module Parameters_for_PM3_C
|