-- Andreas, 2015-10-05, issue reported by mechvel, test case by Jesper Cockx
-- {-# OPTIONS -v tc.term.exlam:20 #-}
abstract
foo : Set₁
foo = Set
where
AbstractSet₁ : Set₂
AbstractSet₁ = Set₁
f : Set → AbstractSet₁
f = λ { _ → Set }
{-# NON_TERMINATING #-}
loop : Set → Set
loop = λ { A → loop A }
|
Formal statement is: lemma continuous_on_closure: "continuous_on (closure S) f \<longleftrightarrow> (\<forall>x e. x \<in> closure S \<and> 0 < e \<longrightarrow> (\<exists>d. 0 < d \<and> (\<forall>y. y \<in> S \<and> dist y x < d \<longrightarrow> dist (f y) (f x) < e)))" (is "?lhs = ?rhs") Informal statement is: A function $f$ is continuous on the closure of a set $S$ if and only if for every $x \in \overline{S}$ and every $\epsilon > 0$, there exists a $\delta > 0$ such that for all $y \in S$ with $|y - x| < \delta$, we have $|f(y) - f(x)| < \epsilon$. |
Require Import Essentials.Notations.
Require Import Essentials.Types.
Require Import Essentials.Facts_Tactics.
Require Import Category.Main.
Require Import Topos.SubObject_Classifier.
Require Import Basic_Cons.Terminal Basic_Cons.PullBack.
Require Import Coq_Cats.Type_Cat.Type_Cat Coq_Cats.Type_Cat.CCC.
Require Import Coq.Logic.ChoiceFacts.
Require Coq.Logic.ClassicalFacts.
Local Axiom PropExt : ClassicalFacts.prop_extensionality.
Local Axiom ConstructiveIndefiniteDescription_Type : forall T : Type, ConstructiveIndefiniteDescription_on T.
(** The type Prop is the subobject classifier for Type_Cat, with ⊤ mapping the single element of
the singleton set to (True : Prop). *)
Section Type_Cat_characteristic_function_unique.
Context {A B : Type} (F : @Monic Type_Cat A B) (h : B → Prop) (hpb : is_PullBack (mono_morphism F) (fun _ => TT) h (fun _ => True)).
Theorem Type_Cat_characteristic_function_unique : h = fun x => (exists y : A, (mono_morphism F) y = x).
Proof.
extensionality x.
apply PropExt; split.
{
intros Hx.
cut ((fun _ : UNIT => h x) = (fun _ => True)).
{
intros H.
set (W := equal_f (is_pullback_morph_ex_com_1 hpb UNIT (fun _ => x) (fun _ => TT) H) TT).
cbn in W.
eexists; exact W.
}
{
extensionality y; apply PropExt; split; trivial.
}
}
{
intros [y []].
set (W := (equal_f (is_pullback_morph_com hpb))).
cbn in W.
rewrite W; trivial.
}
Qed.
End Type_Cat_characteristic_function_unique.
Local Hint Resolve UNIT_SINGLETON.
Program Definition Type_Cat_SubObject_Classifier : SubObject_Classifier Type_Cat :=
{|
SOC := Prop;
SOC_morph := fun _ : UNIT => True;
SOC_char := fun A B f x => exists y : A, (mono_morphism f) y = x;
SO_pulback :=
fun A B f =>
{|
is_pullback_morph_ex :=
fun p' pm1 pm2 pmc x => proj1_sig (ConstructiveIndefiniteDescription_Type A _ match eq_sym (equal_f pmc x) in _ = y return y with eq_refl => I end)
|}
|}.
Next Obligation.
Proof.
extensionality x.
apply PropExt; split; intros H; auto.
exists x; trivial.
Qed.
Next Obligation.
Proof.
extensionality x.
match goal with
[|- mono_morphism ?f (proj1_sig ?A) = _ ] => destruct A as [y Hy]
end.
trivial.
Qed.
Next Obligation.
Proof.
match goal with
[g : (_ ≫–> _)%morphism |- _] =>
match goal with
[H : (fun w => (mono_morphism g) (_ w)) = (fun x => (mono_morphism g) (_ x)) |- _] =>
apply (mono_morphism_monomorphic g) in H
end
end.
auto.
Qed.
Next Obligation.
Proof.
etransitivity; [|symmetry]; eapply Type_Cat_characteristic_function_unique; eassumption.
Qed. |
lemma csqrt_0 [simp]: "csqrt 0 = 0" |
use TestModule, only : local_cut => cut
type(local_cut) :: c, cut
write(*, *) c%a%<caret>x
end |
lemma emeasure_SUP_chain: assumes sets: "\<And>i. i \<in> A \<Longrightarrow> sets (M i) = sets N" "X \<in> sets N" assumes ch: "Complete_Partial_Order.chain (\<le>) (M ` A)" and "A \<noteq> {}" shows "emeasure (SUP i\<in>A. M i) X = (SUP i\<in>A. emeasure (M i) X)" |
Require Import Coq.QArith.QArith.
Require Import Coq.MSets.MSetPositive.
Require Import Coq.Classes.Morphisms.
Require Import Coq.Setoids.Setoid.
Require Export Coq.ZArith.ZArith.
Require Import Coq.Lists.List.
Require Import PerformanceExperiments.Harness.
Require PerformanceExperiments.Sample.
Import ListNotations.
Local Open Scope list_scope. Local Open Scope Z_scope.
Definition sieve' (fuel : nat) (max : Z)
:= List.rev
(fst
(@nat_rect
(fun _ => list Z (* primes *) * PositiveSet.t (* composites *) * positive (* next_prime *) -> list Z (* primes *) * PositiveSet.t (* composites *))
(fun '(primes, composites, next_prime) => (primes, composites))
(fun _ rec '(primes, composites, next_prime)
=> rec
(if (PositiveSet.mem next_prime composites || (Z.pos next_prime >? max))%bool%Z
then (primes, composites, Pos.succ next_prime)
else (Z.pos next_prime :: primes,
List.fold_right
PositiveSet.add
composites
(List.map (fun n => Pos.mul (Pos.of_nat (S n)) next_prime)
(List.seq 0 (Z.to_nat (max / Z.pos next_prime)))),
Pos.succ next_prime)))
fuel
(nil, PositiveSet.empty, 2%positive))).
Definition sieve (n : Z)
:= Eval cbv [sieve'] in sieve' (Z.to_nat n) n.
Global Arguments Pos.to_nat !_ / .
Notation goal n := (sieve n = nil) (only parsing).
Ltac start _ := cbv [sieve].
Ltac verify_form term :=
lazymatch term with
| nil => idtac
| cons ?n ?rest
=> let n' := (eval vm_compute in n) in
constr_eq n n';
verify_form rest
end.
Ltac verify _ :=
lazymatch goal with
| [ |- ?lhs = nil ]
=> verify_form lhs
end.
Inductive red_kind := vm | native | cbv | lazy | cbn | simpl.
Local Notation "'eta_kind' ( k' => f ) k"
:= match k with
| vm => subst! vm for k' in f
| native => subst! native for k' in f
| cbv => subst! cbv for k' in f
| lazy => subst! lazy for k' in f
| cbn => subst! cbn for k' in f
| simpl => subst! simpl for k' in f
end
(only parsing, at level 70, k' ident).
Local Lemma sanity : forall T f k, eta_kind (k => f k) k = f k :> T.
Proof. intros; repeat match goal with |- context[match ?e with _ => _ end] => destruct e end; reflexivity. Qed.
Definition size_of_kind (k : red_kind) (arg : Z) : Q
:= let x := inject_Z arg in
match k with
| vm
=> 9.56E-04 + 9.75E-06*x + 1.55E-08*x^2
| native
=> (0.0963 + 6.75E-06*x + 4.29E-09*x^2)
(* * 2 *)
| cbv
=> 1.93E-03 + 4.25E-05*x + 3.01E-07*x^2
| lazy
=> 1.07E-03 + 3.93E-05*x + 7.6E-07*x^2
| cbn
=> -2.11 + 0.262*x + -3.49E-03*x^2 + 3.16E-05*x^3
| simpl
=> -1.48 + 0.162*x + -1.52E-03*x^2 + 2.45E-05*x^3
end%Q.
Definition max_input_of_kind (k : red_kind) : option Z
:= match k with
| vm => None
| native => None
| cbv => None
| lazy => None
| cbn => None
| simpl => None
end.
(* Definition args_of_size (test_tac_n : nat) (s : size)
:= match test_tac_n, s with
| 0%nat, SuperFast => [(2, 3, 1); (5, 49, 2)]
| 1%nat, SuperFast => [(2, 3, 1); (5, 1199, 2)]
| 2%nat, SuperFast => [(2, 3, 1); (5, 449, 2)]
| 3%nat, SuperFast => [(2, 3, 1); (5, 499, 2)]
| 4%nat, SuperFast => [(2, 3, 1); (5, 39, 2)]
| 5%nat, SuperFast => [(2, 3, 1); (5, 39, 2)]
| 6%nat, SuperFast => [(2, 3, 1); (5, 39, 2)]
| 0%nat, Fast => [(51, 4999, 2)]
| 1%nat, Fast => [(1201, 4999, 2)]
| 2%nat, Fast => [(451, 3999, 2)]
| 3%nat, Fast => [(501, 4999, 2)]
| 4%nat, Fast => [(41, 4999, 2)]
| 5%nat, Fast => [(41, 79, 2)]
| 6%nat, Fast => [(41, 79, 2)]
| 2%nat, Medium => [(4001, 4999, 2)]
| 5%nat, Medium => [(81, 149, 2)]
| 6%nat, Medium => [(81, 149, 2)]
| 5%nat, Slow => [(151, 189, 2)]
| 6%nat, Slow => [(151, 189, 2)]
| 5%nat, VerySlow => [(191, 4999, 2)]
| 6%nat, VerySlow => [(191, 4999, 2)]
| 0%nat, _ | 1%nat, _ | 2%nat, _ | 3%nat, _ | 4%nat, _ => []
| _, _ => []
end%Z.
*)
Definition args_of_size' (k : red_kind) (s : size) : list Z
:= Eval cbv beta iota in
eta_size
(s'
=> if match s' with Sanity => true | _ => false end
then [2; 3; 4; 5; 6; 7; 8; 9; 10]
else eta_kind
(k'
=> Sample.generate_inputs
(T:=Z)
2
(size_of_kind k')
(Qseconds_of_size s')
(Qstandard_max_seconds_of_size s')
Sample.default_max_points
(max_input_of_kind k'))
k)
s.
Local Set NativeCompute Profiling.
Local Set NativeCompute Timing.
(* Takes about 0.6 seconds *)
Time Definition args_of_size (k : red_kind) (s : size)
:= Eval native_compute in eta_size (s' => eta_kind (k' => args_of_size' k' s') k) s.
Ltac mkgoal kind n := constr:(goal n).
Ltac redgoal _ := start ().
Ltac describe_goal n :=
let n := (eval vm_compute in n) in
idtac "Params: n=" n.
Ltac time_solve_goal kind
:= lazymatch kind with
| vm
=> fun n
=> time "vm_compute-regression-quadratic" vm_compute
| native
=> fun n
=> let G := match goal with |- ?G => G end in
cut G; [ intros _ | ];
[ time "native_compute(1)-regression-quadratic" native_compute
| time "native_compute(2)-regression-quadratic" native_compute;
shelve ]
| cbv
=> fun n
=> time "cbv-regression-quadratic" cbv
| lazy
=> fun n
=> time "lazy-regression-quadratic" lazy
| cbn
=> fun n
=> time "cbn-regression-cubic" cbn
| simpl
=> fun n
=> time "simpl-regression-cubic" simpl
end.
(**
<<<
#!/usr/bin/env python3
print(r'''(**
<<<
''')
print(open(__file__, 'r').read())
print(r'''>>>
*)''')
for i, c in enumerate(('vm', 'native', 'cbv', 'lazy', 'cbn', 'simpl')):
print(f'Ltac mkgoal{i} := mkgoal constr:({c}).\nLtac time_solve_goal{i} := time_solve_goal constr:({c}).\nLtac run{i} sz := Harness.runtests_verify_sanity (args_of_size ({c})) describe_goal mkgoal{i} redgoal time_solve_goal{i} verify sz.\n')
>>>
*)
Ltac mkgoal0 := mkgoal constr:(vm).
Ltac time_solve_goal0 := time_solve_goal constr:(vm).
Ltac run0 sz := Harness.runtests_verify_sanity (args_of_size (vm)) describe_goal mkgoal0 redgoal time_solve_goal0 verify sz.
Ltac mkgoal1 := mkgoal constr:(native).
Ltac time_solve_goal1 := time_solve_goal constr:(native).
Ltac run1 sz := Harness.runtests_verify_sanity (args_of_size (native)) describe_goal mkgoal1 redgoal time_solve_goal1 verify sz.
Ltac mkgoal2 := mkgoal constr:(cbv).
Ltac time_solve_goal2 := time_solve_goal constr:(cbv).
Ltac run2 sz := Harness.runtests_verify_sanity (args_of_size (cbv)) describe_goal mkgoal2 redgoal time_solve_goal2 verify sz.
Ltac mkgoal3 := mkgoal constr:(lazy).
Ltac time_solve_goal3 := time_solve_goal constr:(lazy).
Ltac run3 sz := Harness.runtests_verify_sanity (args_of_size (lazy)) describe_goal mkgoal3 redgoal time_solve_goal3 verify sz.
Ltac mkgoal4 := mkgoal constr:(cbn).
Ltac time_solve_goal4 := time_solve_goal constr:(cbn).
Ltac run4 sz := Harness.runtests_verify_sanity (args_of_size (cbn)) describe_goal mkgoal4 redgoal time_solve_goal4 verify sz.
Ltac mkgoal5 := mkgoal constr:(simpl).
Ltac time_solve_goal5 := time_solve_goal constr:(simpl).
Ltac run5 sz := Harness.runtests_verify_sanity (args_of_size (simpl)) describe_goal mkgoal5 redgoal time_solve_goal5 verify sz.
Global Set NativeCompute Timing.
Global Open Scope Z_scope.
(*
Goal True.
cut (goal 10).
shelve.
cbn.
run5 Sanity.
*)
|
\section{Applications to the boosting algorithm}
\begin{frame}{Going back to the original idea}
\begin{myblock}{Our hope was to...}{bg=yellow!05,fg=black}{bg=yellow!20, fg=black}
\begin{enumerate}
\item<1-> Adaptively control the complexity of each tree
\item<1-> Automatically stop the boosting procedure
%\item<4-> In a highly efficient manner
\end{enumerate}
\end{myblock}
\visible<2->{
\begin{myblock}{What we do: Two inequalities}{bg=green!05,fg=black}{bg=green!20, fg=black}
\begin{enumerate}
\item<2-> For two hierarchical trees, $q^0$ and $q^1$, where $q^1$ has one more split than $q^0$, don't split if
\begin{align*}
\E\left[\hat{l}(\response,f(\features;\hat{\mathbf{w}}^0,\hat{q}^0)) - \hat{l}(\response,f(\features;\hat{\mathbf{w}}^1,\hat{q}^1))\right] + C_m(\hat{\mathbf{w}}^0,\hat{q}^0)
- C_m(\hat{\mathbf{w}}^1,\hat{q}^1)
\end{align*}
is smaller than zero.
\item<3-> Stop the iterative boosting algorithm when
\begin{align*}
\frac{\delta\left(\delta - 2\right)}{2n} \sum_{t\in\setleafnodes_k}\frac{G_{tk}^2}{H_{tk}}
+ \delta C_m\left(\hat{\mathbf{w}}_{t,k},q_{t,k}\right) > 0.
\end{align*}
\end{enumerate}
\end{myblock}
}
\end{frame}
\begin{frame}{The algorithm}
% \resizebox{10cm}{8cm}{
\small\begin{tabbing}
\hspace{2em} \= \hspace{2em} \= \hspace{2em} \= \\
{\bfseries Input}: \\
\> - A training set $\data_n=\{(x_i, y_{i})\}_{i=1}^n$,\\
\> - a differentiable loss $l(y,f(x))$,\\
\> - a learning rate $\delta$,\\
\> \colorbox{blue!20}{- boosting iterations $K$},\\
\> \colorbox{blue!20}{- one or more tree-complexity regularization criteria.}\\
1. Initialize model with a constant value:
$f^{(0)}(\features) = \hat{\eta}= \underset{\eta}{\arg\min} \sum_{i=1}^n \loss(y_i, \eta).$\\
2. \colorbox{blue!20}{{\bfseries for} $k = 1$ to $K$:} \colorbox{orange!30}{{\bfseries while} the inequality (2) evaluates to \textbf{\texttt{false}} } \\
\> $i)$ Compute derivatives $g_i$ and $h_i$ for all $i=1:n$.\\
\> $ii)$ Determine $q_k$ by the iterative binary splitting procedure until\\
\>\> \colorbox{blue!20}{a regularization criterion is reached.} \colorbox{orange!30}{ the inequality (1) is \textbf{\texttt{true}} }\\
\> $iii)$ Fit the leaf weights $\mathbf{w}$, given $q_k$.\\
\> $iv)$ Update the model with a scaled tree:
$f^{(k)}(\features) = f^{(k-1)}(\features) + \delta f_k(\features).$\\
{\bfseries end \colorbox{blue!20}{for} \colorbox{orange!30}{while}}\\
3. Output the model: {\bfseries Return} $f^{(K)}(\features)$.%=\sum_{k=0}^{K}f_k(\features)$.\\
\end{tabbing}
\end{frame}
\begin{frame}{Does it work?}
\begin{itemize}
\item The tree-boosting animation in the introduction was generated by this algorithm.
\end{itemize}
\visible<2->{
\begin{figure}
\centering
\includegraphics<2->[height=4.5cm,width=7cm]{figures/loss_vs_numtrees.pdf}
\caption{Training loss (black), test loss (orange), and estimated generalization error (blue) for a tree-boosting ensemble trained on 1000 observations from a linear model: $\response\sim N(\features, 1)$. The blue line visualizes inequality 2.}
\end{figure}
}
\end{frame}
\begin{frame}{ISLR and ESL datasets}
\begin{itemize}
\item Comparisons on real data
\item Every dataset randomly split into training and test datasets 100 different ways
\item Average test scores (relative to XGB) and standard deviations (parenthesis)
\end{itemize}
\centering\resizebox{10cm}{2cm}{
\begin{tabular}{@{\extracolsep{4pt}}lcccccc}
\\[-1.8ex]\hline
\hline \\[-1.8ex]
Dataset & \multicolumn{1}{c}{Dimensions} & \multicolumn{1}{c}{GBTorch} & \multicolumn{1}{c}{GLM} & \multicolumn{1}{c}{Random forest} & \multicolumn{1}{c}{XGBoost} \\
\hline \\[-1.8ex]
Boston & $506 \times 14$ & 1.07 (0.162) & 1.3 (0.179) & 0.876 (0.15) & 1 (0.176) \\
Ozone & $111\times 4$ &0.827 (0.22) & 0.666 (0.131) & 0.669 (0.182) & 1 (0.202) \\
Auto & $392\times 311$ &1.16 (0.136) & 11.1 (14.5) & 0.894 (0.134) & 1 (0.188) \\
Carseats & $400\times 12$ & 1.2 (0.168) & 0.413 (0.0432) & 1.16 (0.141) & 1 (0.115) \\
College & $777\times 18$& 1.3 (0.948) & 0.55 (0.154) & 1.07 (0.906) & 1 (0.818) \\
Hitters & $263\times 20$ & 1.05 (0.362) & 1.21 (0.347) & 0.796 (0.31) & 1 (0.318) \\
Wage & $3000\times 26$ & 1.96 (1.72) & 289 (35.4) & 82.2 (21.3) & 1 (1.01) \\
Caravan & $5822\times 86$ & 1.02 (0.0508) & 1.12 (0.115) & 1.31 (0.168) & 1 (0.0513) \\
Default & $10000\times 4$ & 0.938 (0.068) & 0.902 (0.0698) & 2.83 (0.51) & 1 (0.0795) \\
OJ & $1070\times 18$ & 0.996 (0.0496) & 0.952 (0.0721) & 1.17 (0.183) & 1 (0.0703) \\
Smarket & $1250\times 7$ & 0.999 (0.00285) & 1 (0.00651) & 1.04 (0.0164) & 1 (0.00259) \\
Weekly & $1089\times 7$ & 0.992 (0.0082) & 0.995 (0.0123) & 1.02 (0.0195) & 1 (0.00791) \\
\hline \\[-1.8ex]
\end{tabular}
}
\end{frame}
\begin{frame}{Computational considerations}
\begin{myblock}{In general...}{bg=yellow!05,fg=black}{bg=yellow!20, fg=black}
\begin{itemize}
\item Let $k$-fold cross-validation be used to tune a standard tree-boosting implementation using ``early-stopping''.
\item Consider $p$ hyperparameters, each having $r$ candidate values.
\item<1-> Then our implementation is approximately $k \times r^p + 1$ times faster (see the illustration below).
% \item<2-> Should give similar results to $p=4$ (tree-complexity criteria).
\end{itemize}
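A purely illustrative calculation (the numbers below are hypothetical, not taken from the experiments): with $k=10$ folds and $p=2$ hyperparameters, each with $r=10$ candidate values, the speed-up factor would be
\begin{align*}
k \times r^p + 1 = 10 \cdot 10^2 + 1 = 1001.
\end{align*}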
\end{myblock}
\visible<2->{
\begin{myblock}{A comparison with XGB}{bg=green!05,fg=black}{bg=green!20, fg=black}
\begin{itemize}
\item<2-> On the Caravan dataset ($5822\times 86$ classification), our implementation took 2.68 seconds to train.
\item<3-> Using a 30\% validation set, XGB took 4.85 seconds
\item<4-> About one minute when using $10$-fold CV to tune the number of boosting iterations
\item<5-> About 16 minutes to learn one additional hyperparameter
\item<6> About 2.65 hours with yet another additional hyperparameter
\end{itemize}
\end{myblock}
}
\end{frame}
% LAST SLIDE in Applications
\begin{frame}{The researcher enters the ML competition}
\begin{itemize}
\item Would he win?
\end{itemize}
\visible<2->{
\begin{myblock}{There are additional techniques for improvement}{bg=blue!5,fg=black}{bg=blue!10, fg=black}
Most notably...
\begin{itemize}
% \item<2-> Most notably...
\item<3-> L1-L2 regularization
\item<3-> Stochastic sampling of both rows and columns
\item<3-> Our trees are optimal if they all were the last (unscaled) tree
\end{itemize}
\end{myblock}
}
\visible<4->{
\begin{myblock}{But there are benefits!}{bg=yellow!05,fg=black}{bg=yellow!20, fg=black}
%But he would have an advantage
\begin{itemize}
\item The key to many ML competitions is feature engineering
\item Possibility of very quickly (and automatically) testing for relevant features
\end{itemize}
\end{myblock}
}
\end{frame} |
C
C
C Copyright (C) 2000, 2001 Silicon Graphics, Inc. All Rights Reserved.
C
C This program is free software; you can redistribute it and/or modify it
C under the terms of version 2.1 of the GNU Lesser General Public License
C as published by the Free Software Foundation.
C
C This program is distributed in the hope that it would be useful, but
C WITHOUT ANY WARRANTY; without even the implied warranty of
C MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
C
C Further, this software is distributed without any warranty that it is
C free of the rightful claim of any third person regarding infringement
C or the like. Any license provided herein, whether implied or
C otherwise, applies only to this software file. Patent licenses, if
C any, provided herein do not apply to combinations of this program with
C other software, or any other product whatsoever.
C
C You should have received a copy of the GNU Lesser General Public
C License along with this program; if not, write the Free Software
C Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
C USA.
C
C Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pky,
C Mountain View, CA 94043, or:
C
C http://www.sgi.com
C
C For further information regarding this notice, see:
C
C http://oss.sgi.com/projects/GenInfo/NoticeExplan
C
C
subroutine isortb( ad, n, x, incx, id, incd )
implicit none
* ... Scalar arguments ...
integer incx, incd, n
character*1 ad
* ... Vector arguments ...
integer id( 1 )
integer x( * )
* ... Local vector ...
* For use in a call to GETTMC(). Acquire system information,
* specifically, the number of banks.
* ... Local variables ...
integer ldn1, iend, iord, k, kiord
integer i1, i2, j, kk, ll, kbd
integer iarg, imin, imax, info
logical asc, des
integer amin, amax, x1, x2
* ... External routines ...
integer ihost_banks@
* ... Begin Execution ...
*
CDIR$ ID "@(#) libu/sci/isortb.f 92.1 10/20/99 12:36:37"
*
iarg = numarg()
* Acquire banksize. GETTMC is a system call that retrieves
* system information. You pass it the address of the 128-word
* array, MCTABLE, and it returns the information. In location
* 2 of MCTABLE is the bank size of the target system. I
* need this value in order to effectively govern when
* I am going to do loop inversions.
kbd = ihost_banks@()
* Test input parameters
des = ad .eq. 'D' .or. ad .eq. 'd'
asc = ad .eq. 'A' .or. ad .eq. 'a'
info = 0
if( .not.des .and. .not.asc ) then
info = 1
elseif( n .le. 0 ) then
info = 2
elseif( incx .le. 0 ) then
info = 4
elseif( iarg .eq. 6 ) then
if( incd .le. 0 ) info = 6
elseif( iarg.ne.4 ) then
info = 7
endif
if( info .ne. 0 ) then
call xersor( 'ISORTB ', info )
goto 1500
endif
ldn1 = int( log( real( n ) - 0.5 ) / log( 2. ) )
* Ascending order
if( asc ) then
if( iarg .eq. 4 ) goto 10
if( iarg .eq. 6 ) goto 400
* In-place sort
10 continue
if( n .eq. 1 ) return
cmic$ parallel shared( incx, kbd, ldn1, n, x )
cmic$1 private ( amax, amin, i1, i2, iend, iord, j, k, kiord, kk )
cmic$2 private ( ll, x1, x2 )
* This is the outer loop. This governs the size of
* the segments to be merged.
do 300 ll = ldn1, 0, -1
iord = 2**ll
if( iord .ge. kbd .or. 2*iord*iord .gt. n ) goto 130
*
* NOTE 1:
* Suppose iord is 8. What this doubly nested DO-loop
* does, in effect, is that it starts out with the
* first 8 elements. It splits this segment in two
* and compares each of the first 4 elements with
* the corresponding element from the second set of
* 4 elements. It then goes to the next segment of
* 8 elements and repeats the process until the end of
* the array is reached.
*
cmic$ do parallel
do 120 j = 1, iord
cdir$ ivdep
do 110 i1 = j, n-iord, 2*iord
i2 = i1 + iord
x1 = x( 1+incx*(i1-1) )
x2 = x( 1+incx*(i2-1) )
amin = cvmgm( x2, x1, x2-x1 )
amax = cvmgm( x1, x2, x2-x1 )
x( 1+incx*(i1-1) ) = amin
x( 1+incx*(i2-1) ) = amax
110 continue
120 continue
goto 160
*
* NOTE 2:
* This is an inverted version of the above loop. Loop
* inversion helps with performance. I always try to
* keep the inner loops long for vectorization and
* the outer loops short for parallel processing.
*
130 continue
cmic$ do parallel
do 150 j = 1, n-iord, 2*iord
iend = min( j+iord-1, n-iord )
cdir$ ivdep
do 140 i1 = j, iend
i2 = i1 + iord
x1 = x( 1+incx*(i1-1) )
x2 = x( 1+incx*(i2-1) )
amin = cvmgm( x2, x1, x2-x1 )
amax = cvmgm( x1, x2, x2-x1 )
x( 1+incx*(i1-1) ) = amin
x( 1+incx*(i2-1) ) = amax
140 continue
150 continue
*
* NOTE 3:
* This triply nested DO-loop performs the task of "merging"
* the results from the previous steps. Going back to
* segment length of 8, it finds out how many such segments
* exist in the array. For the sake of convenience, suppose
* the size of the array is 64. That means there are 8
* segments of length 8. What this part of the code
* does is to first merge the first segment and the fifth
* segment, the second segment and the sixth, etc. Then it
* does the first and third, the second and fourth, etc.
* Finally it does the first and second, the third and
* fourth. When doing the merge of, say, the first
* and second segments of 8, the upper 4 elements of
* the first segment are compared-exchanged with the
* lower 4-elements of the second segment and so it goes.
* The illustration in Knuth is instructive.
*
160 continue
do 230 kk = ldn1-ll, 1, -1
k = 2**kk-1
kiord = k*iord
if( iord .ge. kbd .or. 2*iord*iord .gt. n ) goto 190
cmic$ do parallel
do 180 j = 1, iord
cdir$ ivdep
do 170 i1 = j+iord, n-kiord, 2*iord
i2 = i1 + kiord
x1 = x( 1+incx*(i1-1) )
x2 = x( 1+incx*(i2-1) )
amin = cvmgm( x2, x1, x2-x1 )
amax = cvmgm( x1, x2, x2-x1 )
x( 1+incx*(i1-1) ) = amin
x( 1+incx*(i2-1) ) = amax
170 continue
180 continue
goto 230
*
* NOTE 4:
* Inverted version of the previous doubly nested DO-loop.
* The rest of the code consists of variations of the
* present code except that it may be doing
* the sort in descending order and/or indexed.
*
190 continue
cmic$ do parallel
do 210 j = 1, n-kiord-iord, 2*iord
iend = min( j+2*iord-1, n-kiord )
cdir$ ivdep
do 200 i1 = j+iord, iend
i2 = i1 + kiord
x1 = x( 1+incx*(i1-1) )
x2 = x( 1+incx*(i2-1) )
amin = cvmgm( x2, x1, x2-x1 )
amax = cvmgm( x1, x2, x2-x1 )
x( 1+incx*(i1-1) ) = amin
x( 1+incx*(i2-1) ) = amax
200 continue
210 continue
230 continue
300 continue
cmic$ end parallel
goto 610
* Indexed sort
400 continue
if( n .eq. 1 ) then
id( 1 ) = 1
return
endif
*
do 405 i1 = 1, n
id( 1+incd*(i1-1) ) = i1
405 continue
*
cmic$ parallel shared ( id, incd, incx, kbd, ldn1, n, x )
cmic$1 private ( i1, i2, iend, imax, imin, iord, j, k, kiord, kk )
cmic$2 private ( ll, x1, x2 )
do 600 ll = ldn1, 0, -1
iord = 2**ll
if( iord .ge. kbd .or. 2*iord*iord .gt. n ) goto 430
*
cmic$ do parallel
do 420 j = 1, iord
cdir$ ivdep
do 410 i1 = j, n-iord, 2*iord
i2 = i1 + iord
x1 = x( 1+incx*( id( 1+incd*( i1-1 ) )-1 ) )
x2 = x( 1+incx*( id( 1+incd*( i2-1 ) )-1 ) )
imin = cvmgm( id( 1+incd*(i2-1) ),
& id( 1+incd*(i1-1) ),
& x2-x1 )
imax = cvmgm( id( 1+incd*(i1-1) ),
& id( 1+incd*(i2-1) ),
& x2-x1 )
id( 1+incd*( i1-1 ) ) = imin
id( 1+incd*( i2-1 ) ) = imax
410 continue
420 continue
goto 460
430 continue
cmic$ do parallel
do 450 j = 1, n-iord, 2*iord
iend = min( j+iord-1, n-iord )
cdir$ ivdep
do 440 i1 = j, iend
i2 = i1 + iord
x1 = x( 1+incx*( id( 1+incd*( i1-1 ) )-1 ) )
x2 = x( 1+incx*( id( 1+incd*( i2-1 ) )-1 ) )
imin = cvmgm( id( 1+incd*(i2-1) ),
& id( 1+incd*(i1-1) ),
& x2-x1 )
imax = cvmgm( id( 1+incd*(i1-1) ),
& id( 1+incd*(i2-1) ),
& x2-x1 )
id( 1+incd*( i1-1 ) ) = imin
id( 1+incd*( i2-1 ) ) = imax
440 continue
450 continue
460 continue
do 530 kk = ldn1-ll, 1, -1
k = 2**kk-1
kiord = k*iord
if( iord .ge. kbd .or. 2*iord*iord .gt. n ) goto 490
cmic$ do parallel
do 480 j = 1, iord
cdir$ ivdep
do 470 i1 = j+iord, n-kiord, 2*iord
i2 = i1 + kiord
x1 = x( 1+incx*( id( 1+incd*( i1-1 ) )-1 ) )
x2 = x( 1+incx*( id( 1+incd*( i2-1 ) )-1 ) )
imin = cvmgm( id( 1+incd*(i2-1) ),
& id( 1+incd*(i1-1) ),
& x2-x1 )
imax = cvmgm( id( 1+incd*(i1-1) ),
& id( 1+incd*(i2-1) ),
& x2-x1 )
id( 1+incd*( i1-1 ) ) = imin
id( 1+incd*( i2-1 ) ) = imax
470 continue
480 continue
goto 530
490 continue
cmic$ do parallel
do 510 j = 1, n-kiord-iord, 2*iord
iend = min( j+2*iord-1, n-kiord )
cdir$ ivdep
do 500 i1 = j+iord, iend
i2 = i1 + kiord
x1 = x( 1+incx*( id( 1+incd*( i1-1 ) )-1 ) )
x2 = x( 1+incx*( id( 1+incd*( i2-1 ) )-1 ) )
imin = cvmgm( id( 1+incd*(i2-1) ),
& id( 1+incd*(i1-1) ),
& x2-x1 )
imax = cvmgm( id( 1+incd*(i1-1) ),
& id( 1+incd*(i2-1) ),
& x2-x1 )
id( 1+incd*( i1-1 ) ) = imin
id( 1+incd*( i2-1 ) ) = imax
500 continue
510 continue
530 continue
600 continue
cmic$ end parallel
610 continue
elseif( des ) then
if( iarg .eq. 4 ) goto 710
if( iarg .eq. 6 ) goto 1100
* In-place sort
710 continue
cmic$ parallel shared( incx, kbd, ldn1, n, x )
cmic$1 private ( amax, amin, i1, i2, iend, iord, j, k, kiord, kk )
cmic$2 private ( ll, x1, x2 )
do 1000 ll = ldn1, 0, -1
iord = 2**ll
if( iord .ge. kbd .or. 2*iord*iord .gt. n ) goto 830
*
cmic$ do parallel
do 820 j = 1, iord
cdir$ ivdep
do 810 i1 = j, n-iord, 2*iord
i2 = i1 + iord
x1 = x( 1+incx*(i1-1) )
x2 = x( 1+incx*(i2-1) )
amin = cvmgm( x2, x1, x2-x1 )
amax = cvmgm( x1, x2, x2-x1 )
x( 1+incx*(i1-1) ) = amax
x( 1+incx*(i2-1) ) = amin
810 continue
820 continue
goto 860
830 continue
cmic$ do parallel
do 850 j = 1, n-iord, 2*iord
iend = min( j+iord-1, n-iord )
cdir$ ivdep
do 840 i1 = j, iend
i2 = i1 + iord
x1 = x( 1+incx*(i1-1) )
x2 = x( 1+incx*(i2-1) )
amin = cvmgm( x2, x1, x2-x1 )
amax = cvmgm( x1, x2, x2-x1 )
x( 1+incx*(i1-1) ) = amax
x( 1+incx*(i2-1) ) = amin
840 continue
850 continue
860 continue
do 930 kk = ldn1-ll, 1, -1
k = 2**kk-1
kiord = k*iord
if( iord .ge. kbd .or. 2*iord*iord .gt. n ) goto 890
cmic$ do parallel
do 880 j = 1, iord
cdir$ ivdep
do 870 i1 = j+iord, n-kiord, 2*iord
i2 = i1 + kiord
x1 = x( 1+incx*(i1-1) )
x2 = x( 1+incx*(i2-1) )
amin = cvmgm( x2, x1, x2-x1 )
amax = cvmgm( x1, x2, x2-x1 )
x( 1+incx*(i1-1) ) = amax
x( 1+incx*(i2-1) ) = amin
870 continue
880 continue
goto 930
890 continue
cmic$ do parallel
do 910 j = 1, n-kiord-iord, 2*iord
iend = min( j+2*iord-1, n-kiord )
cdir$ ivdep
do 900 i1 = j+iord, iend
i2 = i1 + kiord
x1 = x( 1+incx*(i1-1) )
x2 = x( 1+incx*(i2-1) )
amin = cvmgm( x2, x1, x2-x1 )
amax = cvmgm( x1, x2, x2-x1 )
x( 1+incx*(i1-1) ) = amax
x( 1+incx*(i2-1) ) = amin
900 continue
910 continue
930 continue
1000 continue
cmic$ end parallel
goto 1310
* Indexed sort
1100 continue
*
i2 = 1
do 1105 i1 = 1, n
id( i2 ) = i1
i2 = i2 + incd
1105 continue
*
cmic$ parallel shared ( id, incd, incx, kbd, ldn1, n, x )
cmic$1 private ( i1, i2, iend, imax, imin, iord, j, k, kiord, kk )
cmic$2 private ( ll, x1, x2 )
do 1300 ll = ldn1, 0, -1
iord = 2**ll
if( iord .ge. kbd .or. 2*iord*iord .gt. n ) goto 1130
*
cmic$ do parallel
do 1120 j = 1, iord
cdir$ ivdep
do 1110 i1 = j, n-iord, 2*iord
i2 = i1 + iord
x1 = x( 1+incx*( id( 1+incd*( i1-1 ) )-1 ) )
x2 = x( 1+incx*( id( 1+incd*( i2-1 ) )-1 ) )
imin = cvmgm( id( 1+incd*(i2-1) ),
& id( 1+incd*(i1-1) ),
& x2-x1 )
imax = cvmgm( id( 1+incd*(i1-1) ),
& id( 1+incd*(i2-1) ),
& x2-x1 )
id( 1+incd*( i1-1 ) ) = imax
id( 1+incd*( i2-1 ) ) = imin
1110 continue
1120 continue
goto 1160
1130 continue
cmic$ do parallel
do 1150 j = 1, n-iord, 2*iord
iend = min( j+iord-1, n-iord )
cdir$ ivdep
do 1140 i1 = j, iend
i2 = i1 + iord
x1 = x( 1+incx*( id( 1+incd*( i1-1 ) )-1 ) )
x2 = x( 1+incx*( id( 1+incd*( i2-1 ) )-1 ) )
imin = cvmgm( id( 1+incd*(i2-1) ),
& id( 1+incd*(i1-1) ),
& x2-x1 )
imax = cvmgm( id( 1+incd*(i1-1) ),
& id( 1+incd*(i2-1) ),
& x2-x1 )
id( 1+incd*( i1-1 ) ) = imax
id( 1+incd*( i2-1 ) ) = imin
1140 continue
1150 continue
1160 continue
do 1230 kk = ldn1-ll, 1, -1
k = 2**kk-1
kiord = k*iord
if( iord .ge. kbd .or. 2*iord*iord .gt. n ) goto 1190
cmic$ do parallel
do 1180 j = 1, iord
cdir$ ivdep
do 1170 i1 = j+iord, n-kiord, 2*iord
i2 = i1 + kiord
x1 = x( 1+incx*( id( 1+incd*( i1-1 ) )-1 ) )
x2 = x( 1+incx*( id( 1+incd*( i2-1 ) )-1 ) )
imin = cvmgm( id( 1+incd*(i2-1) ),
& id( 1+incd*(i1-1) ),
& x2-x1 )
imax = cvmgm( id( 1+incd*(i1-1) ),
& id( 1+incd*(i2-1) ),
& x2-x1 )
id( 1+incd*( i1-1 ) ) = imax
id( 1+incd*( i2-1 ) ) = imin
1170 continue
1180 continue
goto 1230
1190 continue
cmic$ do parallel
do 1210 j = 1, n-kiord-iord, 2*iord
iend = min( j+2*iord-1, n-kiord )
cdir$ ivdep
do 1200 i1 = j+iord, iend
i2 = i1 + kiord
x1 = x( 1+incx*( id( 1+incd*( i1-1 ) )-1 ) )
x2 = x( 1+incx*( id( 1+incd*( i2-1 ) )-1 ) )
imin = cvmgm( id( 1+incd*(i2-1) ),
& id( 1+incd*(i1-1) ),
& x2-x1 )
imax = cvmgm( id( 1+incd*(i1-1) ),
& id( 1+incd*(i2-1) ),
& x2-x1 )
id( 1+incd*( i1-1 ) ) = imax
id( 1+incd*( i2-1 ) ) = imin
1200 continue
1210 continue
1230 continue
1300 continue
cmic$ end parallel
1310 continue
endif
1500 continue
return
end
|
Require Import Coq.Program.Syntax.
Require Export ZArith.
Require Import Coq.Program.Basics.
Require Import SetoidTactics.
Require Import SetoidClass.
Require Import Coq.Strings.String.
Require Import Coq.Strings.Ascii.
Require Import Coq.Lists.List.
Require Import HDI.Util.
Require Import HDI.Syntax.
Require Import HDI.Heap.
Require Import HDI.OpSem.
Require Import HDI.Bisimulation.
Require Import HDI.Safety.
Require Export HDI.Predicates.
Local Open Scope Z.
Local Open Scope stmt.
Definition hoare_double (x: predicate heap) (s: stmt) :=
forall h, h |= x -> safe h s.
Notation "|-{{ x }} s" :=
(hoare_double x%pred s%stmt)
(no associativity, at level 65, s at level 42).
Lemma hd_weaken: forall x y s,
|-{{y}} s ->
x |-- y ->
|-{{x}} s.
Proof.
unfold hoare_double.
intuition auto.
Qed.
Add Parametric Morphism : hoare_double
with signature entails --> eq ==> impl
as hd_d.
Proof. firstorder. Qed.
Lemma hd_exists: forall A x s,
(forall a:A, |-{{x a}} s) ->
|-{{Ex_ a:A, x a}} s.
Proof. firstorder. Qed.
Lemma hd_forall: forall A (a:A) x s,
|-{{x a}} s ->
|-{{All_ a:A, x a}} s.
Proof. firstorder. Qed.
Lemma ret_ok: forall v F k,
|-{{F}} k v ->
|-{{F}} x <- ret v; k x.
Proof.
unfold hoare_double; auto with safe.
Qed.
Lemma read_ok: forall a v F k,
|-{{a |-> v && F}} k v ->
|-{{a |-> v && F}} x <- read a; k x.
Proof.
unfold hoare_double; eauto with safe pred.
Qed.
Definition pred_free (x: predicate heap) (a:Z) :=
forall h v, h |= x -> upd_heap h a v |= x.
Lemma write_ok: forall a v v0 F k,
pred_free F a ->
|-{{a |-> v && F}} k 1 ->
|-{{a |-> v0 && F}} write a v >>= k.
Proof.
unfold hoare_double; eauto with safe pred.
intros.
eapply safe_write;
[ firstorder | eauto with pred ].
Qed.
|
-- An ATP local hint can be only a postulate, function or a data
-- constructor.
-- This error is detected by Syntax.Translation.ConcreteToAbstract.
module ATPBadLocalHint3 where
postulate
D : Set
_≡_ : D → D → Set
zero : D
succ : D → D
data N : D → Set where
zN : N zero
sN : ∀ {n} → N n → N (succ n)
refl : ∀ n → N n → n ≡ n
refl n Nn = prf
where
postulate prf : n ≡ n
{-# ATP prove prf Nn #-}
|
rebol [
title: "RSWF script uploader"
file: %upload-script.r
name: 'upload-rswf-script
purpose: "To upload script to web"
comment: ""
author: "oldes"
date: 7-10-2005
require: [
rs-project %url-encode
]
]
rs/run 'url-encode
do %colorize-rswf.r
rswf-web-url: http://box.lebeda.web/~hmm/rswf/
upload-rswf-script: func[rswf-file [file!] /box /local data swf-version rswfdata][
print ["Uploading:" rswf-file]
file-parts: split-path rswf-file
if not parse form last file-parts [copy name to ".rswf" 5 skip end][
print "The file must be .rswf dialect file!"
return false
]
rswf/string-replace-pairs: either box [
rswf-web-url: http://box.lebeda.ws/~hmm/rswf/
[
"http://127.0.0.1:81/projects-web/box.lebeda.ws/latest/web/"
"http://box.lebeda.ws/"
"http://box.lebeda.web/"
"http://box.lebeda.ws/"
]
][
rswf-web-url: http://box.lebeda.web/~hmm/rswf/
[]
]
swfdata: make-swf/compressed rswf-file
rswfdata: read rswf-file
set [header code] load/next/header rswf-file
if not integer? swf-version: header/type [
if none? swf-version: select [
swf 4
swf4 4
swf5 5
swf6 6
mx 6
mx2004 7
swf7 7
swf8 8
] swf-version [ swf-version: 6 ]
]
clrdata: colorize-rswf code
foreach [s t] rswf/string-replace-pairs [
clrdata: replace/all clrdata s t
]
to-sql-date: func[value [date!]][
rejoin [value/year "-" value/month "-" value/day
either value: value/time [
rejoin [" " value/hour ":" value/minute ":" value/second]
][""]
]
]
print read/custom rswf-web-url/submit.php reduce [
'post rejoin [
"type=" "example"
"&name=" url-encode name
"&title=" url-encode form either error? try [header/title][name][header/title]
"&author=" url-encode form either error? try [header/author][""][header/author]
"&email=" url-encode form either error? try [header/email ][""][header/email ]
"&bgcolor=" copy/part skip form to-binary
either error? try [header/background ][255.255.255][header/background ]
2 6
"&swfversion=" swf-version
"&swfwidth=" header/size/x
"&swfheight=" header/size/y
"&swfrate=" header/rate
"&created=" url-encode to-sql-date either none? header/date [modified? rswf-file][header/date ]
"&modified=" url-encode to-sql-date modified? rswf-file
"&purpose=" url-encode form either none? header/purpose [""][header/purpose]
"&comment=" url-encode form either none? header/comment [""][header/comment]
"&jscode=" url-encode form either error? try [header/js ][""][header/js]
"&related="
"&swf=" url-encode enbase/base swfdata 64
"&code=" url-encode enbase/base rswfdata 64
"&colorized=" url-encode enbase/base clrdata 64
]
]
rswf/string-replace-pairs: none
]
comment {
id SERIAL,
name VARCHAR(50) NOT NULL,
type VARCHAR(20),
status SMALLINT,
title TEXT,
author TEXT,
email TEXT,
bgcolor CHAR(7),
swfversion SMALLINT,
swfwidth SMALLINT,
swfheight SMALLINT,
swfsize INT,
rswfsize INT,
created TIMESTAMP,
modified TIMESTAMP,
purpose TEXT,
comment TEXT,
jscode TEXT,
related TEXT[],
code TEXT,
colorized TEXT,
}
;upload-rswf-script %../test.rswf
do %/i/rebol/rs/utils/foreach-file.r
upload-all-rswfs: func[/local errors value][
errors: []
foreach-file %examples/ [
if parse/all form file ["swf" thru ".rswf" end] [
probe file
if error? set/any 'value try [
upload-rswf-script/box rejoin [rswf-root-dir %examples/ file]
][
print "!!! ERRROR: "
probe value: disarm value none
append errors file
append errors value
]
]
]
if not empty? errors [
print ["### ERRORS:" mold errors]
]
] |
State Before: R : Type u_3
S : Type u_4
inst✝⁴ : CommRing R
inst✝³ : Nontrivial R
inst✝² : LinearOrderedCommRing S
n : Type u_2
inst✝¹ : Fintype n
inst✝ : DecidableEq n
ι : Type u_1
s : Finset ι
c : ι → R
A : ι → Matrix n n R
abv : AbsoluteValue R S
x : S
hx : ∀ (k : ι) (i j : n), ↑abv (A k i j) ≤ x
y : S
hy : ∀ (k : ι), ↑abv (c k) ≤ y
⊢ ↑abv (det (∑ k in s, c k • A k)) ≤ Nat.factorial (Fintype.card n) • (card s • y * x) ^ Fintype.card n State After: no goals Tactic: simpa only [smul_mul_assoc] using
det_sum_le s fun k i j =>
calc
abv (c k * A k i j) = abv (c k) * abv (A k i j) := abv.map_mul _ _
_ ≤ y * x := mul_le_mul (hy k) (hx k i j) (abv.nonneg _) ((abv.nonneg _).trans (hy k)) |
Require Coq.Arith.Arith.
Require Coq.Arith.EqNat.
Require Import Coq.Lists.List.
Import ListNotations.
Import NPeano.
Theorem silly1 : forall (n m o p : nat),
n = m ->
[n;o] = [n;p] ->
[n;o] = [m;p].
Proof.
intros n m o p eq1 eq2.
rewrite <- eq1.
apply eq2.
Qed.
Theorem silly_ex :
(forall n, even n = true -> odd (S n) = true) ->
even 3 = true ->
odd 4 = true.
Proof.
intros H.
apply H.
Qed.
Theorem silly3_firsttry : forall (n : nat),
true = EqNat.beq_nat n 5 ->
EqNat.beq_nat (S (S n)) 7 = true.
Proof.
intros n H.
symmetry.
apply H.
Qed.
Theorem rev_exercise1 : forall (l l' : list nat),
l = rev l' ->
l' = rev l.
Proof.
intros l l' H.
rewrite H.
symmetry.
apply rev_involutive.
Qed.
Theorem trans_eq : forall (X : Type) (n m o : X),
n = m -> m = o -> n = o.
Proof.
intros X n m o eq1 eq2.
rewrite eq1, eq2.
reflexivity.
Qed.
Definition minustwo (n : nat) : nat :=
n - 2.
Example trans_eq_exercise : forall (n m o p : nat),
m = (minustwo o) ->
(n + p) = m ->
(n + p) = (minustwo o).
Proof.
intros n m o p H1 H2.
apply trans_eq with m; assumption.
Qed.
|
lemma (in ring_of_sets) finite_Union [intro]: "finite X \<Longrightarrow> X \<subseteq> M \<Longrightarrow> \<Union>X \<in> M" |
'''
compile_GDTTS.py
Updated: 3/19/18
This script reads all the LGA output files stored in the /scores/ subdirectory
found in the data folder, parses out the GDTTS score and writes the scores and ids
to a csv file.
'''
import os
import numpy as np
from tqdm import tqdm
# Data Parameters
data_folder = '../../../data/T0882/'
################################################################################
if __name__ == '__main__':
# Set paths relative to this file
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Read scores
ids = []
scores = []
for data_path in tqdm(sorted(os.listdir(data_folder+'scores'))):
with open(data_folder+'scores/'+data_path, 'r') as f:
for l in f:
if l.startswith('SUMMARY(GDT)'):
score = float(l.split()[6])/100.0
scores.append(score)
ids.append(data_path)
# Write scores
with open(data_folder+data_folder.split('/')[-2]+'.csv', 'w') as f:
f.write('ID,GDT_TS\n')
for i in range(len(ids)):
f.write(ids[i] + ',' + str(scores[i]) + '\n')
|
Important: This product does not include the techniques used in the performance video. Instead, it includes a similar-looking, much more practical variation of the effect.
Copyright ©2012 Magicians: The Gathering (DarkSleightZ) - It's eS productions. All Rights Reserved.
I figured it out, but it's still an awesome trick.
However, reversedmagic, if you think you figured it out, I would like to tell you that you are probably wrong. Even the product we sell is (as mentioned) not what you see in the performance video. It does have the exact same possibilities though.
For instance, do you think that the performance involves palming? Yes? Well.. nope, sorry, it doesn't.
oh goodness, i really don't want to see those generic "i know how you did it" YouTube comments on here.. I thought you were safe from those on here Mark! :P Also... am I not considered an active member anymore? I only see the standard price or is there no member discount for this one?
Theheron, be sure you are logged in before entering the Store section. It still shows all discounts for you.
Why was another deck used to make a selection...?
Awesome trick! I think I'm going to buy it!
A little question: the price is $2 and I live in France, so I pay in €. What do I have to do to buy it with PayPal?
If you already have a PayPal account, then the currency conversion will occur automatically, so you don't have to worry about that.
If you don't and you want to register for one, you need to at least have a debit or a credit card, unless someone else has already put some money in your PayPal account (in which case you don't need the debit or credit card when paying).
Do you also have information about how and when we get the video after paying, please?
After paying, you will receive an e-mail with all necessary information automatically. You then login and download your purchased files.
In case it doesn't tell you how much time there is left before the download is finished, you can check the product thread and see how big the file actually is.
Hope that answers it all.
How big of a group are we talking about?
And if you have to, you can do it angle proof easily.
Got it! Amazing as always, Mark! |
#############################################################################
#File: ggplot2_Introduction_&_Customization.r
#Purpose: Demonstrate how to produce and customize plots using the ggplot
# function in the ggplot2 package. We will create and customize a scatter
# plot, a bar plot, a stacked histogram, and a categorical boxplot.
# We will also plot multiple plots on one panel.
#By: Will Bartsch - EPA-MED, Duluth
#Date: 7/16/14
#############################################################################
#Load the necessary packages.
library(ggplot2)
library(plyr)
library(grid)
#Create a data frame to use for the plots.
dat <- data.frame(condition = rep(c("A", "B", "C"), each=30),
response=rep(c("Yes", "No"), each=15, times=3),
xvar = 50:139 + rnorm(90,sd=15),
yvar = 50:139 + rnorm(90,sd=15))
head(dat)
dim(dat)
########################## A scatter plot #############################
# Basic plot using base R
plot(dat$xvar, dat$yvar)
# Basic plot using the 'qplot' function in ggplot2
qplot(dat$xvar, dat$yvar)
# Basic plot using the 'ggplot' function in ggplot2
ggplot(dat, aes(y=yvar, x=xvar)) + geom_point()
# Change the symbols to a solid triangle and color code according to
# "condition"
# A plot of all the symbols to find the triangle
plot(1:25, pch=c(1:25))
text(1:25, pos=3, offset=0.4)
# A colorblind friendly palette (create it for later use)
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442",
"#0072B2", "#D55E00", "#CC79A7")
# Create the plot
sp <- ggplot(dat, aes(y=yvar, x=xvar, color=condition))
sp + geom_point(shape=17)
# Add a title and axis labels
sp + geom_point(shape=17)+
ggtitle("Data Presentation") + xlab("X") + ylab("Y")
# Same plot with a black and white theme
sp + geom_point(shape=17)+
ggtitle("Data Presentation") + xlab("X") + ylab("Y") + theme_bw()
# Format the legend
sp + geom_point(shape=17, size=3)+
ggtitle("Data Presentation") + xlab("X") + ylab("Y") +
scale_colour_manual("Group", breaks=c("A", "B", "C"),
labels=c("Apple", "Banana", "Cherry"), values=cbbPalette) + # Change the labels and title, change the color palette
theme(legend.background=element_rect(color="black", fill="gray"), # Make the legend background gray with a black border
legend.text=element_text(size=16), #Change label text size
legend.title=element_text(size=16, face="bold"), #Change title text
legend.key.size=unit(1, "cm"), #Change Size of symbol box
legend.position = c(0.89,0.135)) #Set position
# Format Axis, Labels and Title (Maintain previous formatting) and add
# trend lines
sp + geom_point(shape=17, size=3)+
ggtitle("Data Presentation") + xlab("X") + ylab("Y") +
scale_colour_manual("Group", breaks=c("A", "B", "C"),
labels=c("Apple", "Banana", "Cherry"), values=cbbPalette)+
theme(legend.background=element_rect(color="black", fill="gray"),
legend.text=element_text(size=16),
legend.title=element_text(size=16, face="bold"),
legend.key.size=unit(1, "cm"),
legend.position = c(0.895,0.135))+
theme(axis.title=element_text(size=16), #Change axis title size
axis.text=element_text(size=12), #Change axis label size
plot.title=element_text(size=20, face="italic"))+ #Change plot title size and make it italic
xlim(0,175) + ylim(0, 175) + #Reset the axis ranges
#scale_y_continuous(limits = c(0,175), expand = c(0,0))+ #Reset the axis ranges and force it to start at 0
#scale_x_continuous(limits = c(0,175), expand = c(0,0))+ #Reset the axis ranges and force it to start at 0
geom_smooth(method=lm, fill=NA, size=1) #Add the linear regression trend lines
############## A bar plot of means with standard error bars #################
# Summarize the data by mean and standard deviation using ddply from
# the plyr package
dat_mean <- ddply(dat, .(condition), summarize, avg=mean(yvar),
stdev=sd(yvar))
dat_mean$std_error <- dat_mean$stdev/sqrt(30)
# Make a basic plot
bp <- ggplot(dat_mean, aes(y=avg, x=condition))
bp + geom_bar(stat="identity", color="black", fill="black")+
geom_errorbar(aes(ymin=avg-std_error,
ymax=avg+std_error), color='red', width=0.5)
# Customize the plot with white background and no grid lines
bp + geom_bar(stat="identity")+ geom_errorbar(aes(ymin=avg-std_error,
ymax=avg+std_error), color='red', width=0.5, size=1.5) +
ggtitle("Example\nBar Plot") +
xlab(expression(paste(italic("Condition")))) +
ylab(expression(paste("NH"[4]^" + ",mu,"g/L")))+
theme(panel.background=element_rect(fill="white"),
panel.grid=element_blank(),
panel.border=element_rect(color="black", fill=NA))
##################### A faceted histogram plot ###########################
hp <- ggplot(dat, aes(x=xvar, fill=condition))
hp + geom_histogram(color="black", binwidth=15) +
facet_grid(condition ~ .) + ggtitle("Results") +
xlab("X") + ylab("Count") +
scale_y_continuous(breaks=seq(0, 12, 3))+
scale_fill_hue("Variable", l=20) #Darken the color scheme and label legend
# If you don't want a legend, use the following line in your script:
theme(legend.position="none")
#################### Categorical Box Plot ###################
boxp <- ggplot(dat, aes(y=yvar, x=condition, fill=response))
boxp + geom_boxplot() + ggtitle("Was There\nA\nResponse")+
xlab("Condition") + ylab("Y Variable")
#################### Plotting Multiple Plots on One Panel ###################
# Multiple plot function (from www.cookbook-r.com)
#
# ggplot objects can be passed in ..., or to plotlist
# (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
require(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
#Make multiple plots and save them as an object
p1 <- ggplot(dat, aes(y=yvar, x=xvar, color=condition)) +
geom_point(shape=17)
p2 <- ggplot(dat, aes(y=yvar, x=xvar, color=condition)) +
geom_smooth(method=lm)
p3 <- ggplot(dat_mean, aes(y=avg, x=condition)) +
geom_bar(stat="identity", color="black", fill="black")+
geom_errorbar(aes(ymin=avg-1.96*stdev,
ymax=avg+1.96*stdev), color='red', width=0.5)
p4 <- ggplot(dat, aes(y=yvar, x=condition)) +
geom_boxplot()
# Plot them
multiplot(p1,p2,p3,p4, cols=2)
multiplot(p1,p2, cols=2)
multiplot(p1,p2, cols=1)
multiplot(p1,p2,p3, layout=matrix(c(1,2,3,3), nrow=2, byrow=TRUE))
multiplot(p1,p2,p3, layout=matrix(c(1,2,1,3), ncol=2, byrow=TRUE))
############################# Resources ################################
#
# Cookbook for R
# Available online and a great resource. Go to the Graphs section:
# www.cookbook-r.com
#
#
# Use R! ggplot2 - Springer
#
########################################################################
|
namespace Lua
export
%foreign "x, y => x + y"
plus : Int -> Int -> Int
export -- there is now also a `W` local variable, accessible anywhere from FFI for convenience
-- `W` is a special table standing for the %MkWorld value
%foreign "callback, _ => callback('abc')(22)(W)"
callback : (callback : String -> Int -> PrimIO String) -> PrimIO String
export
%foreign "x => idris.inspect(x)|inspect"
inspect : ty -> String
export
%foreign "x, y, f => f(x)(y)"
apply2 : a -> (0 b : Type {- for demo purposes -}) -> b -> (a -> b -> c) -> c
export
%foreign "_ => print(W)"
printWorld : IO ()
main : IO ()
main = do printLn $ Lua.plus 1 3
str <- fromPrim $ callback (\str, i => toPrim $ pure $ str ++ " " ++ show i)
putStrLn str
putStrLn (inspect $ with List [the Int 1, 2, 3])
putStrLn $ apply2 "abc" _ "def" (++)
printWorld
|
lemma open_INT [continuous_intros, intro]: "finite A \<Longrightarrow> \<forall>x\<in>A. open (B x) \<Longrightarrow> open (\<Inter>x\<in>A. B x)" |
Inductive month : Set :=
| January | February | March | April | May
| June | July | August | September | October
| November | December.
Print month_rect.
Print month_ind.
Print month_rec.
Inductive season : Set :=
| Winter | Spring | Summer | Fall.
Print month_rec.
Definition season_for_month : month -> season
:= month_rec (fun _ => season)
Winter Winter Winter
Spring Spring Spring
Summer Summer Summer
Fall Fall Fall.
Print season_for_month.
Definition season_for_month' (m : month) : season :=
match m with
| January => Winter
| February => Winter
| March => Winter
| April => Spring
| May => Spring
| June => Spring
| July => Summer
| August => Summer
| September => Summer
| October => Fall
| November => Fall
| December => Fall
end.
Theorem month_equal :
forall m : month, m = January \/ m = February \/ m = March \/
m = April \/ m = May \/ m = June \/
m = July \/ m = August \/ m = September \/
m = October \/ m = November \/ m = December.
Proof.
intro m.
pattern m.
Check month_ind.
apply month_ind.
left. reflexivity.
right. left. reflexivity.
right. right. left. reflexivity.
right. right. right. left. reflexivity.
right. right. right. right. left. reflexivity.
right. right. right. right. right. left. reflexivity.
right. right. right. right. right. right. left. reflexivity.
right. right. right. right. right. right. right. left. reflexivity.
right. right. right. right. right. right. right. right. left. reflexivity.
right. right. right. right. right. right. right. right. right. left. reflexivity.
right. right. right. right. right. right. right. right. right.
right. left. reflexivity.
right. right. right. right. right. right. right. right. right.
right. right. reflexivity.
Qed.
(* right? right. *)
Theorem bool_equal_0 : forall b : bool, b = true \/ b = false.
Proof.
Check bool_ind.
Check or_introl.
Check or_intror.
Check refl_equal.
Check (bool_ind (fun b : bool => b = true \/ b = false)
(or_introl _ (refl_equal true))
(or_intror _ (refl_equal false))).
exact (bool_ind (fun b : bool => b = true \/ b = false)
(or_introl _ (refl_equal true))
(or_intror _ (refl_equal false))).
Qed.
Theorem t : True \/ True.
Proof.
Print or_introl.
Print True.
exact (or_introl True I).
Qed.
(* yep.. still don't know what I'm doing... :-[ *)
Theorem bool_equal_1 : forall b : bool, b = true \/ b = false.
Proof.
intro b.
pattern b.
Check bool_ind.
apply bool_ind.
left.
reflexivity.
right.
reflexivity.
Qed.
Definition f_0 (b : bool) :=
match b with
| true => 1
| false => 2
end.
Print f_0.
Definition month_length (leap : bool) (m : month) : nat :=
match m with
| January => 31
| February => if leap then 29 else 28
| March => 31
| April => 30
| May => 31
| June => 30
| July => 31
| August => 31
| September => 30
| October => 31
| November => 30
| December => 31
end.
Check month_rec.
Definition month_length' (leap : bool) : month -> nat :=
month_rec (fun (m : month) => nat)
31
(if leap then 29 else 28)
31
30
31
30
31
31
30
31
30
31.
Definition month_length'' (leap : bool) (m : month) : nat :=
match m with
| February => if leap then 29 else 28
| April => 30
| June => 30
| September => 30
| November => 30
| otherwise => 31
end.
Eval compute in month_length'' false April.
Section ex_6_6.
Definition bool_and (b1 b2 : bool) : bool :=
match b1 with
| true => b2
| false => false
end.
Definition bool_or (b1 b2 : bool) : bool :=
match b1 with
| true => true
| false => b2
end.
Definition bool_xor (b1 b2 : bool) : bool :=
match b1 with
| false => b2
| true => match b2 with
| true => false
| false => true
end
end.
Definition bool_not (b : bool) : bool :=
match b with
| false => true
| true => false
end.
Definition bool_eq (b1 b2 : bool) : bool :=
match b1 with
| false => bool_not b2
| true => b2
end.
Theorem bool_xor_not_eq_iso : forall b1 b2 : bool,
bool_xor b1 b2 = bool_not (bool_eq b1 b2).
Proof.
intros b1 b2.
unfold bool_xor.
unfold bool_not.
unfold bool_eq.
pattern b1.
apply bool_ind.
reflexivity.
pattern b2.
apply bool_ind.
simpl.
reflexivity.
simpl.
reflexivity.
Qed.
Theorem bool_not_and_iso_or_not_not : forall b1 b2 : bool,
bool_not (bool_and b1 b2) =
bool_or (bool_not b1) (bool_not b2).
Proof.
intros b1 b2.
unfold bool_or.
pattern b1.
apply bool_ind.
simpl.
reflexivity.
simpl.
reflexivity.
Qed.
Theorem bool_not_not_iso : forall b : bool, bool_not (bool_not b) = b.
Proof.
apply bool_ind.
simpl.
reflexivity.
simpl.
reflexivity.
Qed.
Theorem bool_or_not_iso_true : forall b : bool, bool_or b (bool_not b) = true.
Proof.
apply bool_ind.
simpl.
reflexivity.
simpl.
reflexivity.
Qed.
Theorem bool_eq_iso_eq : forall b1 b2 : bool, b1 = b2 -> bool_eq b1 b2 = true.
Proof.
intros b1 b2.
pattern b2.
apply bool_ind.
intros H_b1_eq_true.
rewrite -> H_b1_eq_true.
simpl bool_eq.
reflexivity.
intros H_b1_eq_false.
rewrite -> H_b1_eq_false.
simpl bool_eq.
reflexivity.
Qed.
Theorem not_or_eq_and_not_not : forall b1 b2 : bool,
bool_not (bool_or b1 b2) = bool_and (bool_not b1)
(bool_not b2).
Proof.
intros b1 b2.
pattern b1.
apply bool_ind.
simpl.
reflexivity.
simpl.
reflexivity.
Qed.
Theorem or_and_and_eq_and_or : forall b1 b2 b3 : bool,
bool_or (bool_and b1 b3) (bool_and b2 b3) =
bool_and (bool_or b1 b2) b3.
Proof.
intros b1 b2 b3.
pattern b1.
apply bool_ind.
simpl.
pattern b2.
apply bool_ind.
simpl.
unfold bool_or.
pattern b3.
apply bool_ind.
reflexivity.
reflexivity.
pattern b3.
apply bool_ind.
simpl.
reflexivity.
simpl.
reflexivity.
simpl.
reflexivity.
Qed.
End ex_6_6.
Require Import ZArith.
Inductive plane : Set := point : Z -> Z -> plane.
Print plane_ind.
Definition abscissa (p : plane) : Z :=
match p with point x y => x end.
Reset plane.
Record plane : Set := point { abscissa : Z; ordinate : Z }.
Print plane.
Print abscissa.
Open Scope Z_scope.
Definition ex_6_8 (p : plane) : Z :=
let (abscissa, ordinate) := p
in Zabs abscissa + Zabs ordinate.
Eval compute in ex_6_8 (point 1 3).
Inductive vehicle : Set :=
| bicycle : nat -> vehicle
| motorized : nat -> nat -> vehicle.
Print vehicle_ind.
Definition nb_wheels (v : vehicle) : nat :=
match v with
| bicycle _ => 2%nat
| motorized _ n => n
end.
Definition nb_seats (v : vehicle) : nat :=
match v with
| bicycle n => n
| motorized n _ => n
end.
Check vehicle_rec.
Definition nb_seats' (v : vehicle) : nat :=
vehicle_rec (fun _ => nat)
(fun n => n)
(fun n _ => n)
v.
Print nb_seats'.
Open Scope nat_scope.
Theorem at_least_28 : forall (leap:bool) (m:month), 28 <= month_length leap m.
Proof.
intros leap m.
case m ; simpl ; auto with arith.
case leap; simpl ; auto with arith.
Qed.
Reset at_least_28.
Theorem at_least_28 : forall (leap:bool) (m:month), 28 <= month_length leap m.
Proof.
intros leap m.
case m.
simpl month_length.
Check le_n.
Check le_S.
apply le_S.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
case leap.
apply le_S.
apply le_n.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_n.
simpl month_length.
apply le_S.
apply le_S.
apply le_S.
apply le_n.
Qed.
(* that was tiring *)
Print at_least_28.
Print le_S.
Definition next_month (m : month) : month :=
match m with
January => February | February => March | March => April
| April => May | May => June | June => July
| July => August | August => September | September => October
| October => November | November => December | December => January
end.
(* this version is way less interesting. *)
Theorem next_auguest_then_july_boring :
forall m : month, m = July -> next_month m = August.
Proof.
intros m.
intros H_m_eq_july.
rewrite -> H_m_eq_july.
simpl.
reflexivity.
Qed.
Theorem next_august_then_july :
forall m : month, next_month m = August -> m = July.
Proof.
intros m.
case m ; simpl ; intros H_next_eq; discriminate H_next_eq || reflexivity.
  (* The above was very compact, but I think some connectives would
     improve readability. *)
Qed.
(* I am having a hard time understanding the evidence function of this: *)
Print next_august_then_july.
(* a bit complex no? *)
Theorem not_January_eq_February : ~January = February.
Proof.
unfold not.
intros H_january_eq_february.
change ((fun m:month => match m with January => True | _ => False end)
February).
rewrite <- H_january_eq_february.
trivial.
Qed.
Print month_rect.
Definition is_January' (m:month) : Prop :=
match m with
January => True
| _ => False
end.
Definition is_January (m:month) : Prop :=
month_rect (fun _ => Prop)
True
False
False
False
False
False
False
False
False
False
False
False
m.
Eval compute in is_January January.
Eval compute in is_January March.
Theorem not_true_false : ~true = false.
Proof.
unfold not.
intros H_eq.
change ((fun (v:bool) => match v with true => True | false => False end)
false).
rewrite <- H_eq.
trivial.
Qed.
Print not_true_false.
Theorem not_bike_eq_moto : forall (n m : nat), ~(bicycle n = motorized n m).
Proof.
intros n m.
unfold not.
intro H_eq.
change ((fun (v : vehicle) => match v with
bicycle _ => True
| motorized _ _ => False
end)
(motorized n m)).
rewrite <- H_eq.
trivial.
Qed.
Theorem not_bike_eq_moto' : forall (n m : nat), ~(bicycle n = motorized n m).
Proof.
intros n m.
unfold not.
intros H_eq.
discriminate H_eq.
Qed.
|
subroutine filrec(string,filnam,skey,nkey)
c
c takes a character string and reads from it a filename, and an
c skey and/or nkey for a record. blanks, commas, or equal signs can
c separate the inputs on the command line.
c
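c      example (illustrative values only): the string 'mydata.bin CU001 12'
c      would give filnam = 'mydata.bin', skey = 'CU001', and nkey = 12
c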
character*128 temp, words(3)
character*(*) string , filnam , skey
c
nkey = 0
skey = ' '
nwords = 3
call bwords(string,nwords,words)
c---- first word is filename
filnam = words(1)
nwords = nwords - 1
c---- second word is nkey or skey
temp = words(2)
c---- determine if second//third word is nkey/skey
c skeys are exactly 5 characters long,
c nkeys are never more than 3 characters long
50 continue
nwords = nwords - 1
call triml(temp)
ilen = istrln(temp)
if(ilen.eq.5) then
skey = temp
call upper(skey)
elseif(ilen.eq.4) then
call fstop('error reading skey or nkey from '//temp)
else
call str2in(temp, nkey, ierr)
end if
c---- the third word, if it exists
if (nwords.eq.1) then
temp = words(3)
call triml(temp)
go to 50
end if
1000 return
c end subroutine filrec
end
|
State Before: α : Type ?u.35577
β : Type ?u.35580
ι : Type ?u.35583
u : ℕ → ℝ
c : ℝ
hc : 0 ≤ c
n : ℕ
h : ∀ (k : ℕ), k < n → c * u k ≤ u (k + 1)
⊢ c ^ n * u 0 ≤ u n State After: no goals Tactic: apply (monotone_mul_left_of_nonneg hc).seq_le_seq n _ _ h <;>
simp [_root_.pow_succ, mul_assoc, le_refl] |
(* Title: HOL/Types_To_Sets/Examples/Group_On_With.thy
Author: Fabian Immler, TU München
*)
theory Group_On_With
imports
Prerequisites
"../Types_To_Sets"
begin
subsection \<open>\<^emph>\<open>on\<close> carrier set \<^emph>\<open>with\<close> explicit group operations\<close>
locale semigroup_add_on_with =
fixes S::"'a set" and pls::"'a\<Rightarrow>'a\<Rightarrow>'a"
assumes add_assoc: "a \<in> S \<Longrightarrow> b \<in> S \<Longrightarrow> c \<in> S \<Longrightarrow> pls (pls a b) c = pls a (pls b c)"
assumes add_mem: "a \<in> S \<Longrightarrow> b \<in> S \<Longrightarrow> pls a b \<in> S"
locale ab_semigroup_add_on_with = semigroup_add_on_with +
assumes add_commute: "a \<in> S \<Longrightarrow> b \<in> S \<Longrightarrow> pls a b = pls b a"
locale comm_monoid_add_on_with = ab_semigroup_add_on_with +
fixes z
assumes add_zero: "a \<in> S \<Longrightarrow> pls z a = a"
assumes zero_mem: "z \<in> S"
begin
lemma carrier_ne: "S \<noteq> {}" using zero_mem by auto
end
definition "sum_with pls z f S =
(if \<exists>C. f ` S \<subseteq> C \<and> comm_monoid_add_on_with C pls z then
Finite_Set.fold (pls o f) z S else z)"
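text \<open>When the addition and zero are those of an existing \<open>comm_monoid_add\<close> instance on the
  whole type, \<open>sum_with\<close> agrees with the ordinary \<open>sum\<close>; this is the lemma \<open>sum_with\<close>
  proved below.\<close>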
lemma sum_with_empty[simp]: "sum_with pls z f {} = z"
by (auto simp: sum_with_def)
lemma sum_with_cases[case_names comm zero]:
"P (sum_with pls z f S)"
if "\<And>C. f ` S \<subseteq> C \<Longrightarrow> comm_monoid_add_on_with C pls z \<Longrightarrow> P (Finite_Set.fold (pls o f) z S)"
"(\<And>C. comm_monoid_add_on_with C pls z \<Longrightarrow> (\<exists>s\<in>S. f s \<notin> C)) \<Longrightarrow> P z"
using that
by (auto simp: sum_with_def)
context comm_monoid_add_on_with begin
lemma sum_with_infinite: "infinite A \<Longrightarrow> sum_with pls z g A = z"
by (induction rule: sum_with_cases) auto
context begin
private abbreviation "pls' \<equiv> \<lambda>x y. pls (if x \<in> S then x else z) (if y \<in> S then y else z)"
lemma fold_pls'_mem: "Finite_Set.fold (pls' \<circ> g) z A \<in> S"
if "g ` A \<subseteq> S"
proof cases
assume A: "finite A"
interpret comp_fun_commute "pls' o g"
using that
using add_assoc add_commute add_mem zero_mem
by unfold_locales auto
from fold_graph_fold[OF A] have "fold_graph (pls' \<circ> g) z A (Finite_Set.fold (pls' \<circ> g) z A)" .
from fold_graph_closed_lemma[OF this, of S "pls' \<circ> g"]
add_assoc add_commute add_mem zero_mem
show ?thesis
by auto
qed (use add_assoc add_commute add_mem zero_mem in simp)
lemma fold_pls'_eq: "Finite_Set.fold (pls' \<circ> g) z A = Finite_Set.fold (pls \<circ> g) z A"
if "g ` A \<subseteq> S"
using add_assoc add_commute add_mem zero_mem that
by (intro fold_closed_eq[where B=S]) auto
lemma sum_with_mem: "sum_with pls z g A \<in> S" if "g ` A \<subseteq> S"
proof -
interpret comp_fun_commute "pls' o g"
using add_assoc add_commute add_mem zero_mem that
by unfold_locales auto
have "\<exists>C. g ` A \<subseteq> C \<and> comm_monoid_add_on_with C pls z"
using that comm_monoid_add_on_with_axioms by auto
then show ?thesis
using fold_pls'_mem[OF that]
by (simp add: sum_with_def fold_pls'_eq that)
qed
lemma sum_with_insert:
"sum_with pls z g (insert x A) = pls (g x) (sum_with pls z g A)"
if g_into: "g x \<in> S" "g ` A \<subseteq> S"
and A: "finite A" and x: "x \<notin> A"
proof -
interpret comp_fun_commute "pls' o g"
using add_assoc add_commute add_mem zero_mem g_into
by unfold_locales auto
have "Finite_Set.fold (pls \<circ> g) z (insert x A) = Finite_Set.fold (pls' \<circ> g) z (insert x A)"
using g_into
by (subst fold_pls'_eq) auto
also have "\<dots> = pls' (g x) (Finite_Set.fold (pls' \<circ> g) z A)"
unfolding fold_insert[OF A x]
by (auto simp: o_def)
also have "\<dots> = pls (g x) (Finite_Set.fold (pls' \<circ> g) z A)"
proof -
from fold_graph_fold[OF A] have "fold_graph (pls' \<circ> g) z A (Finite_Set.fold (pls' \<circ> g) z A)" .
from fold_graph_closed_lemma[OF this, of S "pls' \<circ> g"] add_assoc add_commute add_mem zero_mem
have "Finite_Set.fold (pls' \<circ> g) z A \<in> S"
by auto
then show ?thesis
using g_into by auto
qed
also have "Finite_Set.fold (pls' \<circ> g) z A = Finite_Set.fold (pls \<circ> g) z A"
using g_into
by (subst fold_pls'_eq) auto
finally
have "Finite_Set.fold (pls \<circ> g) z (insert x A) = pls (g x) (Finite_Set.fold (pls \<circ> g) z A)" .
moreover
have "\<exists>C. g ` insert x A \<subseteq> C \<and> comm_monoid_add_on_with C pls z"
"\<exists>C. g ` A \<subseteq> C \<and> comm_monoid_add_on_with C pls z"
using that (1,2) comm_monoid_add_on_with_axioms by auto
ultimately show ?thesis
by (simp add: sum_with_def)
qed
end
end
locale ab_group_add_on_with = comm_monoid_add_on_with +
fixes mns um
assumes ab_left_minus: "a \<in> S \<Longrightarrow> pls (um a) a = z"
assumes ab_diff_conv_add_uminus: "a \<in> S \<Longrightarrow> b \<in> S \<Longrightarrow> mns a b = pls a (um b)"
assumes uminus_mem: "a \<in> S \<Longrightarrow> um a \<in> S"
subsection \<open>obvious instances (by type class constraints)\<close>
lemma semigroup_add_on_with[simp]: "semigroup_add_on_with (UNIV::'a::semigroup_add set) (+)"
by (auto simp: semigroup_add_on_with_def ac_simps)
lemma semigroup_add_on_with_Ball_def: "semigroup_add_on_with S pls \<longleftrightarrow>
(\<forall>a\<in>S. \<forall>b\<in>S. \<forall>c\<in>S. pls (pls a b) c = pls a (pls b c)) \<and> (\<forall>a\<in>S. \<forall>b\<in>S. pls a b \<in> S)"
by (auto simp: semigroup_add_on_with_def)
lemma ab_semigroup_add_on_with_Ball_def:
"ab_semigroup_add_on_with S pls \<longleftrightarrow> semigroup_add_on_with S pls \<and> (\<forall>a\<in>S. \<forall>b\<in>S. pls a b = pls b a)"
by (auto simp: ab_semigroup_add_on_with_def ab_semigroup_add_on_with_axioms_def)
lemma ab_semigroup_add_on_with[simp]: "ab_semigroup_add_on_with (UNIV::'a::ab_semigroup_add set) (+)"
by (auto simp: ab_semigroup_add_on_with_Ball_def ac_simps)
lemma comm_monoid_add_on_with_Ball_def:
"comm_monoid_add_on_with S pls z \<longleftrightarrow> ab_semigroup_add_on_with S pls \<and> (\<forall>a\<in>S. pls z a = a) \<and> z \<in> S"
by (auto simp: comm_monoid_add_on_with_def comm_monoid_add_on_with_axioms_def)
lemma comm_monoid_add_on_with[simp]: "comm_monoid_add_on_with UNIV (+) (0::'a::comm_monoid_add)"
by (auto simp: comm_monoid_add_on_with_Ball_def ab_semigroup_add_on_with_Ball_def
semigroup_add_on_with_Ball_def ac_simps)
lemma ab_group_add_on_with_Ball_def:
"ab_group_add_on_with S pls z mns um \<longleftrightarrow> comm_monoid_add_on_with S pls z \<and>
(\<forall>a\<in>S. pls (um a) a = z) \<and> (\<forall>a\<in>S. \<forall>b\<in>S. mns a b = pls a (um b)) \<and> (\<forall>a\<in>S. um a \<in> S)"
by (auto simp: ab_group_add_on_with_def ab_group_add_on_with_axioms_def)
lemma ab_group_add_on_with[simp]: "ab_group_add_on_with (UNIV::'a::ab_group_add set) (+) 0 (-) uminus"
by (auto simp: ab_group_add_on_with_Ball_def)
lemma sum_with: "sum f S = sum_with (+) 0 f S"
proof (induction rule: sum_with_cases)
case (comm C)
then show ?case
unfolding sum.eq_fold
by simp
next
case zero
from zero[OF comm_monoid_add_on_with]
show ?case by simp
qed
subsection \<open>transfer rules\<close>
lemma semigroup_add_on_with_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "bi_unique A"
shows "(rel_set A ===> (A ===> A ===> A) ===> (=)) semigroup_add_on_with semigroup_add_on_with"
unfolding semigroup_add_on_with_Ball_def
by transfer_prover
lemma Domainp_applyI:
includes lifting_syntax
shows "(A ===> B) f g \<Longrightarrow> A x y \<Longrightarrow> Domainp B (f x)"
by (auto simp: rel_fun_def)
lemma Domainp_apply2I:
includes lifting_syntax
shows "(A ===> B ===> C) f g \<Longrightarrow> A x y \<Longrightarrow> B x' y' \<Longrightarrow> Domainp C (f x x')"
by (force simp: rel_fun_def)
lemma right_total_semigroup_add_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "right_total A" "bi_unique A"
shows "((A ===> A ===> A) ===> (=)) (semigroup_add_on_with (Collect (Domainp A))) class.semigroup_add"
proof (intro rel_funI)
fix x y assume xy[transfer_rule]: "(A ===> A ===> A) x y"
show "semigroup_add_on_with (Collect (Domainp A)) x = class.semigroup_add y"
unfolding semigroup_add_on_with_def class.semigroup_add_def
by transfer (auto intro!: Domainp_apply2I[OF xy])
qed
lemma ab_semigroup_add_on_with_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "bi_unique A"
shows
"(rel_set A ===> (A ===> A ===> A) ===> (=)) ab_semigroup_add_on_with ab_semigroup_add_on_with"
unfolding ab_semigroup_add_on_with_Ball_def by transfer_prover
lemma right_total_ab_semigroup_add_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "right_total A" "bi_unique A"
shows
"((A ===> A ===> A) ===> (=)) (ab_semigroup_add_on_with (Collect (Domainp A))) class.ab_semigroup_add"
unfolding class.ab_semigroup_add_def class.ab_semigroup_add_axioms_def ab_semigroup_add_on_with_Ball_def
by transfer_prover
lemma comm_monoid_add_on_with_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "bi_unique A"
shows
"(rel_set A ===> (A ===> A ===> A) ===> A ===> (=)) comm_monoid_add_on_with comm_monoid_add_on_with"
unfolding comm_monoid_add_on_with_Ball_def
by transfer_prover
lemma right_total_comm_monoid_add_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "right_total A" "bi_unique A"
shows
"((A ===> A ===> A) ===> A ===> (=))
(comm_monoid_add_on_with (Collect (Domainp A))) class.comm_monoid_add"
proof (intro rel_funI)
fix p p' z z'
assume [transfer_rule]: "(A ===> A ===> A) p p'" "A z z'"
show "comm_monoid_add_on_with (Collect (Domainp A)) p z = class.comm_monoid_add p' z'"
unfolding class.comm_monoid_add_def class.comm_monoid_add_axioms_def comm_monoid_add_on_with_Ball_def
apply transfer
using \<open>A z z'\<close>
by auto
qed
lemma ab_group_add_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "right_total A" "bi_unique A"
shows "((A ===> A ===> A) ===> A ===> (A ===> A ===> A) ===> (A ===> A)===> (=))
(ab_group_add_on_with (Collect (Domainp A))) class.ab_group_add"
proof (intro rel_funI)
fix p p' z z' m m' um um'
assume [transfer_rule]:
"(A ===> A ===> A) p p'" "A z z'" "(A ===> A ===> A) m m'"
and um[transfer_rule]: "(A ===> A) um um'"
show "ab_group_add_on_with (Collect (Domainp A)) p z m um = class.ab_group_add p' z' m' um'"
unfolding class.ab_group_add_def class.ab_group_add_axioms_def ab_group_add_on_with_Ball_def
by transfer (use um in \<open>auto simp: rel_fun_def\<close>)
qed
lemma ab_group_add_on_with_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "right_total A" "bi_unique A"
shows
"(rel_set A ===> (A ===> A ===> A) ===> A ===> (A ===> A ===> A) ===> (A ===> A)===> (=))
ab_group_add_on_with ab_group_add_on_with"
unfolding class.ab_group_add_def class.ab_group_add_axioms_def ab_group_add_on_with_Ball_def
by transfer_prover
lemma ex_comm_monoid_add_around_imageE:
includes lifting_syntax
assumes ex_comm: "\<exists>C. f ` S \<subseteq> C \<and> comm_monoid_add_on_with C pls zero"
assumes transfers: "(A ===> A ===> A) pls pls'" "A zero zero'" "Domainp (rel_set B) S"
and in_dom: "\<And>x. x \<in> S \<Longrightarrow> Domainp A (f x)"
obtains C where "comm_monoid_add_on_with C pls zero" "f ` S \<subseteq> C" "Domainp (rel_set A) C"
proof -
from ex_comm obtain C0 where C0: "f ` S \<subseteq> C0" and comm: "comm_monoid_add_on_with C0 pls zero"
by auto
define C where "C = C0 \<inter> Collect (Domainp A)"
have "comm_monoid_add_on_with C pls zero"
using comm Domainp_apply2I[OF \<open>(A ===> A ===> A) pls pls'\<close>] \<open>A zero zero'\<close>
by (auto simp: comm_monoid_add_on_with_Ball_def ab_semigroup_add_on_with_Ball_def
semigroup_add_on_with_def C_def)
moreover have "f ` S \<subseteq> C" using C0
by (auto simp: C_def in_dom)
moreover have "Domainp (rel_set A) C" by (auto simp: C_def Domainp_set)
ultimately show ?thesis ..
qed
lemma sum_with_transfer[transfer_rule]:
includes lifting_syntax
assumes [transfer_rule]: "right_total A" "bi_unique A" "bi_unique B"
shows "((A ===> A ===> A) ===> A ===> (B ===> A) ===> rel_set B ===> A)
sum_with sum_with"
proof (safe intro!: rel_funI)
fix pls pls' zero zero' f g S T
assume transfer_pls[transfer_rule]: "(A ===> A ===> A) pls pls'"
and transfer_zero[transfer_rule]: "A zero zero'"
assume transfer_g[transfer_rule]: "(B ===> A) f g"
and transfer_T[transfer_rule]: "rel_set B S T"
show "A (sum_with pls zero f S) (sum_with pls' zero' g T)"
proof cases
assume ex_comm: "\<exists>C. f ` S \<subseteq> C \<and> comm_monoid_add_on_with C pls zero"
have Domains: "Domainp (rel_set B) S" "(\<And>x. x \<in> S \<Longrightarrow> Domainp A (f x))"
using transfer_T transfer_g
by auto (meson Domainp_applyI rel_set_def)
from ex_comm_monoid_add_around_imageE[OF ex_comm transfer_pls transfer_zero Domains]
obtain C where comm: "comm_monoid_add_on_with C pls zero"
and C: "f ` S \<subseteq> C"
and "Domainp (rel_set A) C"
by auto
then obtain C' where [transfer_rule]: "rel_set A C C'" by auto
interpret comm: comm_monoid_add_on_with C pls zero by fact
have C': "g ` T \<subseteq> C'"
by transfer (rule C)
have comm': "comm_monoid_add_on_with C' pls' zero'"
by transfer (rule comm)
then interpret comm': comm_monoid_add_on_with C' pls' zero' .
from C' comm' have ex_comm': "\<exists>C. g ` T \<subseteq> C \<and> comm_monoid_add_on_with C pls' zero'" by auto
show ?thesis
using transfer_T C C'
proof (induction S arbitrary: T rule: infinite_finite_induct)
case (infinite S)
note [transfer_rule] = infinite.prems
from infinite.hyps have "infinite T" by transfer
then show ?case by (simp add: sum_with_def infinite.hyps \<open>A zero zero'\<close>)
next
case [transfer_rule]: empty
have "T = {}" by transfer rule
then show ?case by (simp add: sum_with_def \<open>A zero zero'\<close>)
next
case (insert x F)
note [transfer_rule] = insert.prems(1)
have [simp]: "finite T"
by transfer (simp add: insert.hyps)
obtain y where [transfer_rule]: "B x y" and y: "y \<in> T"
by (meson insert.prems insertI1 rel_setD1)
define T' where "T' = T - {y}"
have T_def: "T = insert y T'"
by (auto simp: T'_def y)
define sF where "sF = sum_with pls zero f F"
define sT where "sT = sum_with pls' zero' g T'"
have [simp]: "y \<notin> T'" "finite T'"
by (auto simp: y T'_def)
have "rel_set B (insert x F - {x}) T'"
unfolding T'_def
by transfer_prover
then have transfer_T'[transfer_rule]: "rel_set B F T'"
using insert.hyps
by simp
from insert.prems have "f ` F \<subseteq> C" "g ` T' \<subseteq> C'"
by (auto simp: T'_def)
from insert.IH[OF transfer_T' this] have [transfer_rule]: "A sF sT" by (auto simp: sF_def sT_def o_def)
have rew: "(sum_with pls zero f (insert x F)) = pls (f x) (sum_with pls zero f F)"
apply (subst comm.sum_with_insert)
subgoal using insert.prems by auto
subgoal using insert.prems by auto
subgoal by fact
subgoal by fact
subgoal by auto
done
have rew': "(sum_with pls' zero' g (insert y T')) = pls' (g y) (sum_with pls' zero' g T')"
apply (subst comm'.sum_with_insert)
subgoal
apply transfer
using insert.prems by auto
subgoal
apply transfer
using insert.prems by auto
subgoal by fact
subgoal by fact
subgoal by auto
done
have "A (sum_with pls zero f (insert x F)) (sum_with pls' zero' g (insert y T'))"
unfolding sT_def[symmetric] sF_def[symmetric] rew rew'
by transfer_prover
then show ?case
by (simp add: T_def)
qed
next
assume *: "\<nexists>C. f ` S \<subseteq> C \<and> comm_monoid_add_on_with C pls zero"
then have **: "\<nexists>C'. g ` T \<subseteq> C' \<and> comm_monoid_add_on_with C' pls' zero'"
by transfer simp
show ?thesis
by (simp add: sum_with_def * ** \<open>A zero zero'\<close>)
qed
qed
subsection \<open>Rewrite rules to make \<open>ab_group_add\<close> operations explicit\<close>
named_theorems explicit_ab_group_add
lemmas [explicit_ab_group_add] = sum_with
subsection \<open>Locale defining \<open>ab_group_add\<close>-Operations in a local type\<close>
locale local_typedef_ab_group_add_on_with = local_typedef S s +
ab_group_add_on_with S
for S ::"'b set" and s::"'s itself"
begin
lemma mem_minus_lt: "x \<in> S \<Longrightarrow> y \<in> S \<Longrightarrow> mns x y \<in> S"
using ab_diff_conv_add_uminus[of x y] add_mem[of x "um y"] uminus_mem[of y]
by auto
context includes lifting_syntax begin
definition plus_S::"'s \<Rightarrow> 's \<Rightarrow> 's" where "plus_S = (rep ---> rep ---> Abs) pls"
definition minus_S::"'s \<Rightarrow> 's \<Rightarrow> 's" where "minus_S = (rep ---> rep ---> Abs) mns"
definition uminus_S::"'s \<Rightarrow> 's" where "uminus_S = (rep ---> Abs) um"
definition zero_S::"'s" where "zero_S = Abs z"
lemma plus_S_transfer[transfer_rule]: "(cr_S ===> cr_S ===> cr_S) pls plus_S"
unfolding plus_S_def
by (auto simp: cr_S_def add_mem intro!: rel_funI)
lemma minus_S_transfer[transfer_rule]: "(cr_S ===> cr_S ===> cr_S) mns minus_S"
unfolding minus_S_def
by (auto simp: cr_S_def mem_minus_lt intro!: rel_funI)
lemma uminus_S_transfer[transfer_rule]: "(cr_S ===> cr_S) um uminus_S"
unfolding uminus_S_def
by (auto simp: cr_S_def uminus_mem intro!: rel_funI)
lemma zero_S_transfer[transfer_rule]: "cr_S z zero_S"
unfolding zero_S_def
by (auto simp: cr_S_def zero_mem intro!: rel_funI)
end
sublocale type: ab_group_add plus_S "zero_S::'s" minus_S uminus_S
apply unfold_locales
subgoal by transfer (rule add_assoc)
subgoal by transfer (rule add_commute)
subgoal by transfer (rule add_zero)
subgoal by transfer (rule ab_left_minus)
subgoal by transfer (rule ab_diff_conv_add_uminus)
done
context includes lifting_syntax begin
lemma sum_transfer[transfer_rule]:
"((A===>cr_S) ===> rel_set A ===> cr_S) (sum_with pls z) type.sum"
if [transfer_rule]: "bi_unique A"
proof (safe intro!: rel_funI)
fix f g I J
assume fg[transfer_rule]: "(A ===> cr_S) f g" and rel_set: "rel_set A I J"
show "cr_S (sum_with pls z f I) (type.sum g J)"
using rel_set
proof (induction I arbitrary: J rule: infinite_finite_induct)
case (infinite I)
note [transfer_rule] = infinite.prems
from infinite.hyps have "infinite J" by transfer
with infinite.hyps show ?case
by (simp add: zero_S_transfer sum_with_infinite)
next
case [transfer_rule]: empty
have "J = {}" by transfer rule
then show ?case by (simp add: zero_S_transfer)
next
case (insert x F)
note [transfer_rule] = insert.prems
have [simp]: "finite J"
by transfer (simp add: insert.hyps)
obtain y where [transfer_rule]: "A x y" and y: "y \<in> J"
by (meson insert.prems insertI1 rel_setD1)
define J' where "J' = J - {y}"
have T_def: "J = insert y J'"
by (auto simp: J'_def y)
define sF where "sF = sum_with pls z f F"
define sT where "sT = type.sum g J'"
have [simp]: "y \<notin> J'" "finite J'"
by (auto simp: y J'_def)
have "rel_set A (insert x F - {x}) J'"
unfolding J'_def
by transfer_prover
then have "rel_set A F J'"
using insert.hyps
by simp
from insert.IH[OF this] have [transfer_rule]: "cr_S sF sT" by (auto simp: sF_def sT_def)
have f_S: "f x \<in> S" "f ` F \<subseteq> S"
using \<open>A x y\<close> fg insert.prems
by (auto simp: rel_fun_def cr_S_def rel_set_def)
have "cr_S (pls (f x) sF) (plus_S (g y) sT)" by transfer_prover
then have "cr_S (sum_with pls z f (insert x F)) (type.sum g (insert y J'))"
by (simp add: sum_with_insert insert.hyps type.sum.insert_remove sF_def[symmetric]
sT_def[symmetric] f_S)
then show ?case
by (simp add: T_def)
qed
qed
end
end
subsection \<open>transfer theorems from \<^term>\<open>class.ab_group_add\<close> to \<^term>\<open>ab_group_add_on_with\<close>\<close>
context ab_group_add_on_with begin
context includes lifting_syntax assumes ltd: "\<exists>(Rep::'s \<Rightarrow> 'a) (Abs::'a \<Rightarrow> 's). type_definition Rep Abs S" begin
interpretation local_typedef_ab_group_add_on_with pls z mns um S "TYPE('s)" by unfold_locales fact
text\<open>Get theorem names:\<close>
print_locale! ab_group_add
lemmas lt_sum_mono_neutral_cong_left = sum.mono_neutral_cong_left
[var_simplified explicit_ab_group_add,
unoverload_type 'c,
OF type.comm_monoid_add_axioms,
untransferred]
end
lemmas sum_mono_neutral_cong_left =
lt_sum_mono_neutral_cong_left
[cancel_type_definition,
OF carrier_ne,
simplified pred_fun_def, simplified]
end
end |
lemma measure_subadditive_countably: assumes A: "range A \<subseteq> sets M" and fin: "(\<Sum>i. emeasure M (A i)) \<noteq> \<infinity>" shows "measure M (\<Union>i. A i) \<le> (\<Sum>i. measure M (A i))" |
/-
Copyright (c) 2021 Martin Zinkevich. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Martin Zinkevich
-/
import measure_theory.integration
import probability_theory.independence
/-!
# Integration in Probability Theory
Integration results for independent random variables. Specifically, for two
independent random variables X and Y over the extended non-negative
reals, `E[X * Y] = E[X] * E[Y]`, and similar results.
-/
noncomputable theory
open set measure_theory
open_locale ennreal
variables {α : Type*}
namespace probability_theory
/-- This (roughly) proves that if a random variable `f` is independent of an event `T`,
   then restricting the random variable to `T` gives
   `E[f * indicator T c 0] = E[f] * E[indicator T c 0]`. It is useful for
`lintegral_mul_eq_lintegral_mul_lintegral_of_independent_measurable_space`. -/
lemma lintegral_mul_indicator_eq_lintegral_mul_lintegral_indicator
{Mf : measurable_space α} [M : measurable_space α] {μ : measure α}
(hMf : Mf ≤ M) (c : ℝ≥0∞) {T : set α} (h_meas_T : M.measurable_set' T)
(h_ind : indep_sets Mf.measurable_set' {T} μ)
{f : α → ℝ≥0∞} (h_meas_f : @measurable α ℝ≥0∞ Mf _ f) :
∫⁻ a, f a * T.indicator (λ _, c) a ∂μ =
∫⁻ a, f a ∂μ * ∫⁻ a, T.indicator (λ _, c) a ∂μ :=
begin
revert f,
have h_mul_indicator : ∀ g, measurable g → measurable (λ a, g a * T.indicator (λ x, c) a) :=
λ g h_mg, h_mg.mul (measurable_const.indicator h_meas_T),
apply measurable.ennreal_induction,
{ intros c' s' h_meas_s',
simp_rw [← inter_indicator_mul],
rw [lintegral_indicator _ (measurable_set.inter (hMf _ h_meas_s') (h_meas_T)),
lintegral_indicator _ (hMf _ h_meas_s'),
lintegral_indicator _ h_meas_T],
simp only [measurable_const, lintegral_const, univ_inter, lintegral_const_mul,
measurable_set.univ, measure.restrict_apply],
rw h_ind, { ring }, { apply h_meas_s' }, { simp } },
{ intros f' g h_univ h_meas_f' h_meas_g h_ind_f' h_ind_g,
have h_measM_f' := h_meas_f'.mono hMf le_rfl,
have h_measM_g := h_meas_g.mono hMf le_rfl,
simp_rw [pi.add_apply, right_distrib],
rw [lintegral_add (h_mul_indicator _ h_measM_f') (h_mul_indicator _ h_measM_g),
lintegral_add h_measM_f' h_measM_g, right_distrib, h_ind_f', h_ind_g] },
{ intros f h_meas_f h_mono_f h_ind_f,
have h_measM_f := λ n, (h_meas_f n).mono hMf le_rfl,
simp_rw [ennreal.supr_mul],
rw [lintegral_supr h_measM_f h_mono_f, lintegral_supr, ennreal.supr_mul],
{ simp_rw [← h_ind_f] },
{ exact λ n, h_mul_indicator _ (h_measM_f n) },
{ intros m n h_le a, apply ennreal.mul_le_mul _ le_rfl, apply h_mono_f h_le } },
end
/-- This (roughly) proves that if `f` and `g` are independent random variables,
then `E[f * g] = E[f] * E[g]`. However, instead of directly using the independence
of the random variables, it uses the independence of measurable spaces for the
domains of `f` and `g`. This is similar to the sigma-algebra approach to
independence. See `lintegral_mul_eq_lintegral_mul_lintegral_of_independent_fn` for
a more common variant of the product of independent variables. -/
lemma lintegral_mul_eq_lintegral_mul_lintegral_of_independent_measurable_space
{Mf : measurable_space α} {Mg : measurable_space α} [M : measurable_space α]
{μ : measure α} (hMf : Mf ≤ M) (hMg : Mg ≤ M)
(h_ind : indep Mf Mg μ)
(f g : α → ℝ≥0∞) (h_meas_f : @measurable α ℝ≥0∞ Mf _ f)
(h_meas_g : @measurable α ℝ≥0∞ Mg _ g) :
∫⁻ a, f a * g a ∂μ = ∫⁻ a, f a ∂μ * ∫⁻ a, g a ∂μ :=
begin
revert g,
have h_meas_Mg : ∀ ⦃f : α → ℝ≥0∞⦄, @measurable α ℝ≥0∞ Mg _ f → measurable f,
{ intros f' h_meas_f', apply h_meas_f'.mono hMg le_rfl },
have h_measM_f := h_meas_f.mono hMf le_rfl,
apply measurable.ennreal_induction,
{ intros c s h_s,
apply lintegral_mul_indicator_eq_lintegral_mul_lintegral_indicator hMf _ (hMg _ h_s) _ h_meas_f,
apply probability_theory.indep_sets_of_indep_sets_of_le_right h_ind,
rw singleton_subset_iff, apply h_s },
{ intros f' g h_univ h_measMg_f' h_measMg_g h_ind_f' h_ind_g',
have h_measM_f' := h_meas_Mg h_measMg_f',
have h_measM_g := h_meas_Mg h_measMg_g,
simp_rw [pi.add_apply, left_distrib],
rw [lintegral_add h_measM_f' h_measM_g,
lintegral_add (h_measM_f.mul h_measM_f') (h_measM_f.mul h_measM_g),
left_distrib, h_ind_f', h_ind_g'] },
{ intros f' h_meas_f' h_mono_f' h_ind_f',
have h_measM_f' := λ n, h_meas_Mg (h_meas_f' n),
simp_rw [ennreal.mul_supr],
rw [lintegral_supr, lintegral_supr h_measM_f' h_mono_f', ennreal.mul_supr],
{ simp_rw [← h_ind_f'] },
{ apply λ (n : ℕ), h_measM_f.mul (h_measM_f' n) },
{ apply λ (n : ℕ) (m : ℕ) (h_le : n ≤ m) a, ennreal.mul_le_mul le_rfl
(h_mono_f' h_le a) } }
end
/-- This proves that if `f` and `g` are independent random variables,
then `E[f * g] = E[f] * E[g]`. -/
lemma lintegral_mul_eq_lintegral_mul_lintegral_of_indep_fun [M : measurable_space α]
(μ : measure α) (f g : α → ℝ≥0∞) (h_meas_f : measurable f) (h_meas_g : measurable g)
(h_indep_fun : indep_fun (borel ennreal) (borel ennreal) f g μ) :
∫⁻ (a : α), (f * g) a ∂μ = ∫⁻ (a : α), f a ∂μ * ∫⁻ (a : α), g a ∂μ :=
begin
apply lintegral_mul_eq_lintegral_mul_lintegral_of_independent_measurable_space
(measurable_iff_comap_le.1 h_meas_f) (measurable_iff_comap_le.1 h_meas_g),
apply h_indep_fun,
repeat { apply measurable.of_comap_le le_rfl },
end
end probability_theory
|
module Scene.Material.Diffuse where
import Numeric.LinearAlgebra
import Scene.Base
import Math.Color
import Math.Ray
import Scene.Monad
import Control.Applicative
import Control.Monad.Random
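-- Despite its name, this repeats the action until the result satisfies the predicate
-- (i.e. it retries while the predicate fails) and returns the first satisfying result.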
doWhileM :: Monad m => (a -> Bool) -> m a -> m a
doWhileM cond prog = prog >>= (\x -> if cond x then return x else doWhileM cond prog)
diffuse :: (Color -> Color) -> Material
diffuse absorb (Ray p normal) (Ray q incidence) = do
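  -- rejection-sample a point inside the unit ball, then offset it by the surface normal
  -- to bias the bounce direction toward the normal (a roughly Lambertian scatter)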
s_unit <- doWhileM (\s -> s <.> s < 1) $ vec3 <$> getRandomR (-1,1) <*> getRandomR (-1,1) <*> getRandomR (-1,1)
let s = scale (norm_2 normal) s_unit + normal
ahead <- trace (Ray p s)
return $! absorb ahead |
(***************************************************************************)
(* Formalization of the Chou, Gao and Zhang's decision procedure. *)
(* Julien Narboux ([email protected]) *)
(* LIX/INRIA FUTURS 2004-2006 *)
(* University of Strasbourg 2008 *)
(***************************************************************************)
Require Import area_method.
(** Transitivity of the parallel predicate expressed constructively *)
Theorem parallel_transitivity :
forall A B C D E F : Point,
A <> B ->
on_parallel C D A B ->
on_parallel E F C D ->
parallel A B E F.
Proof.
area_method.
Qed.
(** Pseudo-transitivity of the parallel predicate expressed constructively *)
Theorem parallel_pseudo_transitivity :
forall A B C D E F : Point,
A <> B ->
on_parallel C D A B ->
on_parallel E F A B ->
parallel C D E F.
Proof.
area_method.
Qed.
(** If AB and CD are two parallel and congruent segments then
AC is parallel to BD *)
Theorem parallellogram_second_parallel :
forall A B C D : Point,
on_parallel_d D C A B 1 ->
parallel A C B D.
Proof.
area_method.
Qed.
(** The construction of a parallelogram using the fact that the diagonals intersect
 at their midpoint *)
Theorem parallellogram_construction :
forall A B C D I : Point,
is_midpoint I A C ->
on_line_d D I B (-(1)) ->
parallel C D A B.
Proof.
area_method.
Qed.
(** An example where a complex sequence of constructions is compiled
into higher level constructions to ease the elimination process *)
Lemma example_construction_simplification:
forall A B C D E F G Line_3_b Line_6_b,
on_line C A B ->
on_parallel Line_3_b C A D ->
inter_ll E C Line_3_b B D ->
on_parallel Line_6_b C F A ->
inter_ll G C Line_6_b F B ->
parallel E G D F.
Proof.
area_method.
Qed.
Theorem parallellogram_construction_2 :
forall A B C D I : Point,
is_midpoint I A C ->
on_line_d D I B (-(1)) ->
parallel C D A B /\ parallel A D B C.
Proof.
area_method.
Qed.
(** We show that the diagonals of a parallelogram intersect at their midpoint *)
Theorem parallelogram_midpoint :
forall A B C D I : Point,
on_parallel_d D C A B (0-1) ->
inter_ll I A C B D ->
A<>C ->
A<>I ->
parallel A I A C ->
A ** I / A**C = 1 / 2.
Proof.
am_before_field.
intuition.
Ffield.
Qed.
Theorem Prop51Hartshornebis :
forall A B C D E : Point,
~ Col D A C ->
~ Col A B C ->
is_midpoint D A B ->
is_midpoint E A C ->
parallel D E B C ->
B <> C ->
D ** E / B ** C = 1 / 2.
Proof.
area_method.
Qed.
|
import LMT
variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)]
example {a1 a2 a3 : A I E} :
(((a2).write i3 (v3)).write i2 (v3)) = (a2) → (v3) ≠ ((a2).read i3) → False := by
arr
|
module Group_property
import Monoid
import Group
import congruence
%access public export
||| Property 1 - Identity is unique for groups
total
Group_property_1 : (grp : Type) -> ((*) : grp -> grp -> grp) ->
(IsGroup grp (*)) -> (e1 : grp) -> (e2 :grp) ->
(IsIdentity grp (*) e1) -> (IsIdentity grp (*) e2) ->
(e1 = e2)
Group_property_1 grp (*) pf_grp e1 e2 pf1 pf2 = trans (sym(snd (pf2 e1))) (fst (pf1 e2))
||| Property 2 - Inverse of an element is unique
total
Group_property_2 : (grp : Type) -> ((*) : grp -> grp -> grp) -> (pfgrp : IsGroup grp (*)) ->
(a : grp) -> (b : grp) -> (c : grp) ->
(IsInverse grp (*) (fst(snd pfgrp)) a b) ->
(IsInverse grp (*) (fst(snd pfgrp)) a c) -> (b = c)
Group_property_2 grp (*) pfgrp a b c pfb pfc = let
pfAss = fst pfgrp
pfid = (fst (snd pfgrp))
in
rewrite (sym (fst ((snd pfid) b))) in
(rewrite (sym (fst pfc)) in
(rewrite (sym (pfAss b a c)) in
(rewrite (snd pfb) in
(rewrite (snd ((snd pfid) c)) in Refl))))
||| Property 3 - b = c implies a*b = a*c
total
Group_property_3 : (grp : Type) -> ((*) : grp -> grp -> grp) -> (pfgrp : IsGroup grp (*)) ->
(a : grp) -> (b : grp) -> (c : grp) -> (b = c) -> (a*b = a*c)
Group_property_3 grp (*) pfgrp a b c pfEq = cong pfEq
||| Property 4 - a*b = a*c implies b = c
total
Group_property_4 : (grp : Type) -> ((*) : grp -> grp -> grp) -> (pfgrp : IsGroup grp (*)) ->
(a : grp) -> (b : grp) -> (c : grp) -> (a*b = a*c) -> (b = c)
Group_property_4 grp (*) pfgrp a b c pfEq = let pfid = (fst (snd (snd pfgrp)))
a_inv = fst(Inv_with_pf grp (*) pfgrp a)
pf_inv = snd(Inv_with_pf grp (*) pfgrp a)
pfAss = fst pfgrp
in
(rewrite (sym (snd ((snd pfid) b))) in
(rewrite (sym (snd ((snd pfid) c))) in
(rewrite (sym (snd pf_inv)) in
(rewrite (pfAss a_inv a b) in
(rewrite (pfAss a_inv a c) in
(Group_property_3 grp (*) pfgrp a_inv (a*b) (a*c) pfEq))))))
||| Property 5 - b = c implies b*a = c*a
total
Group_property_5 : (grp : Type) -> ((*) : grp -> grp -> grp) -> (pfgrp : IsGroup grp (*)) ->
(a : grp) -> (b : grp) -> (c : grp) -> (b = c) -> ( (b*a) = (c*a) )
Group_property_5 grp (*) pfgrp a b c pfEq = (congruence grp grp b c (\x : grp => x*a) pfEq)
||| Property 6 - b*a = c*a implies b = c
total
Group_property_6 : (grp : Type) -> ((*) : grp -> grp -> grp) -> (pfgrp : IsGroup grp (*)) ->
(a : grp) -> (b : grp) -> (c : grp) -> (b*a = c*a) -> (b = c)
Group_property_6 grp (*) pfgrp a b c pfEq = let pfid = (fst (snd (snd pfgrp)))
a_inv = fst(Inv_with_pf grp (*) pfgrp a)
pf_inv = snd(Inv_with_pf grp (*) pfgrp a)
pfAss = fst pfgrp
in
(rewrite (sym (fst ((snd pfid) b))) in
(rewrite (sym (fst ((snd pfid) c))) in
(rewrite (sym (fst pf_inv)) in
(rewrite (sym (pfAss b a a_inv)) in
(rewrite (sym (pfAss c a a_inv)) in
(Group_property_5 grp (*) pfgrp a_inv (b*a) (c*a) pfEq))))))
||| Auxiliary proof 1 - The two identities mentioned in the definition of the group
||| (one in (fst (snd pfgrp)), the other in (fst (snd (snd pfgrp)))) are equal.
total
Group_aux_pf_1 : (grp : Type) -> ((*) : grp -> grp -> grp) -> (pfgrp : (IsGroup grp (*))) ->
( (fst (Group_id grp (*) pfgrp)) = (fst (fst (snd (snd pfgrp)))) )
Group_aux_pf_1 grp (*) pfgrp = let
pfid1 = (Group_id grp (*) pfgrp)
e1 = (fst pfid1)
pf1 = (snd pfid1)
pfid2 = (fst (snd (snd pfgrp)))
e2 = (fst pfid2)
pf2 = (snd pfid2)
in
(Group_property_1 grp (*) pfgrp e1 e2 pf1 pf2)
||| Property 7 - One sided inverse is two sided inverse
total
Group_property_7 : (grp : Type) -> ((*) : grp -> grp -> grp) -> (pfgrp : (IsGroup grp (*))) -> (a : grp)
-> (b : grp) -> ( (a*b) = (fst (Group_id grp (*) pfgrp)) )
-> (IsInverse grp (*) (Group_id grp (*) pfgrp) a b)
Group_property_7 grp (*) pfgrp a b pfEq = let
pfid1 = Group_id grp (*) pfgrp -- Identity in IdentityExists
pfid2 = fst (snd (snd pfgrp)) -- Identity in InverseExists
e1 = (fst pfid1)
e2 = (fst pfid2)
a_inv = Inv grp (*) pfgrp a
a_inv_pf = snd (Inv_with_pf grp (*) pfgrp a)
a_inv_pf1 = (fst a_inv_pf) -- Proof that a*a_inv = e2
a_inv_pf2 = (snd a_inv_pf) -- Proof that a_inv*a = e2
pfId_eq = Group_aux_pf_1 grp (*) pfgrp -- proof that e1 = e2
pf1 = trans a_inv_pf1 (sym pfId_eq) -- proof that a*a_inv = e1
pf2 = trans pf1 (sym pfEq) -- proof that a*a_inv = a*b
pf3 = Group_property_4 grp (*) pfgrp a a_inv b pf2 -- proof that a_inv b
pf4 = trans a_inv_pf2 (sym pfId_eq) -- proof that a_inv*a = e1
pf5 = Group_property_5 grp (*) pfgrp a a_inv b pf3 -- proof that a_inv*a = b*a
pf6 = trans (sym pf5) pf4
in
(pfEq, pf6)
||| Property 8 - If f : dom -> cod is a group homomorphism, then f(inv(a)) = inv(f(a))
total
Group_property_8 : (dom : Type) -> ((*) : dom -> dom -> dom) -> (pfdom : IsGroup dom (*)) ->
(cod : Type) -> ((+) : cod -> cod -> cod) -> (pfcod : IsGroup cod (+)) ->
(f : dom -> cod) -> (pfhom : (Hom dom (*) pfdom cod (+) pfcod f)) ->
(a : dom) -> ( (f (Inv dom (*) pfdom a)) = (Inv cod (+) pfcod (f a)) )
Group_property_8 dom (*) pfdom cod (+) pfcod f pfhom a = let
pfid1_dom = (fst (snd pfdom))
e_dom1 = (fst pfid1_dom) -- identity in the IdentityExists
e_dom1_pf = (snd pfid1_dom) -- proof that e_dom1 is the identity
pfid2_dom = (fst (snd (snd pfdom)))
e_dom2 = (fst pfid2_dom) -- identity in the InverseExists
                 e_dom2_pf = (snd pfid2_dom)          -- proof that it is the identity
pf_eq_dom_id12 = (Group_property_1 dom (*) pfdom e_dom1 e_dom2
e_dom1_pf e_dom2_pf) -- proof that e_dom1 and e_dom2 are equal
pfid1_cod = (fst (snd pfcod))
e_cod1 = (fst pfid1_cod) -- identity in the IdentityExists
e_cod1_pf = (snd pfid1_cod) -- proof that e_cod1 is the identity
pfid2_cod = (fst (snd (snd pfcod)))
e_cod2 = (fst pfid2_cod) -- identity in the InverseExists
                 e_cod2_pf = (snd pfid2_cod)          -- proof that it is the identity
pf_eq_cod_id12 = (Group_property_1 cod (+) pfcod e_cod1 e_cod2
e_cod1_pf e_cod2_pf) -- proof that e_cod1 and e_cod2 are equal
a_inv_with_pf = snd (snd (snd pfdom)) a -- (Inv_with_pf dom (*) pfdom a)
a_inv = fst a_inv_with_pf
b = (f a_inv)
c = (Inv cod (+) pfcod (f a))
pf_id_to_id = (fst pfhom)
pf_res = (snd pfhom)
--aux_pf1 : ( ( Group_id dom (*) pfdom ) = (fst (snd pfdom)) )
--aux_pf1 = Refl -- proof that fst (Group_id dom (*) pfdom) = fst (fst (snd pfdom))
pf1 = (pf_res a a_inv) -- proof that f(a * a_inv) = f a + f a_inv
pf2 = (fst (snd a_inv_with_pf)) -- proof that a * a_inv = e_dom2
                 pf3 = congruence dom cod (a * a_inv) e_dom2 f pf2 -- proof that f (a * a_inv) = f e_dom2
                 pf4 = trans (sym pf1) pf3 -- proof that f a + f a_inv = f e_dom2
pf5 = (Group_property_1 cod (+) pfcod
(f e_dom1) e_cod1 pf_id_to_id e_cod1_pf) -- proof that f e_dom1 = e_cod1
pf6 = congruence dom cod e_dom1 e_dom2 f pf_eq_dom_id12-- proof that f e_dom1 = f e_dom2
pf7 = trans (trans pf4 (sym pf6)) pf5
in
(Group_property_2 cod (+) pfcod (f a) b c (pf7,?rhs1) ?rhs2)
|
module examplesPaperJFP.Equality where
infix 4 _≡_
data _≡_ {a} {A : Set a} (x : A) : A → Set a where
refl : x ≡ x
-- obsolete, now in Agda.Builtin.Equality: {-# BUILTIN EQUALITY _≡_ #-}
-- No longer exists in Agda: {-# BUILTIN REFL refl #-}
|
/-
Copyright (c) 2018 Violeta Hernández Palacios, Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Violeta Hernández Palacios, Mario Carneiro
! This file was ported from Lean 3 source module set_theory.ordinal.fixed_point
! leanprover-community/mathlib commit 0dd4319a17376eda5763cd0a7e0d35bbaaa50e83
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.SetTheory.Ordinal.Arithmetic
import Mathlib.SetTheory.Ordinal.Exponential
/-!
# Fixed points of normal functions
We prove various statements about the fixed points of normal ordinal functions. We state them in
three forms: as statements about type-indexed families of normal functions, as statements about
ordinal-indexed families of normal functions, and as statements about a single normal function. For
the most part, the first case encompasses the others.
Moreover, we prove some lemmas about the fixed points of specific normal functions.
## Main definitions and results
* `nfpFamily`, `nfpBFamily`, `nfp`: the next fixed point of a (family of) normal function(s).
* `fp_family_unbounded`, `fp_bfamily_unbounded`, `fp_unbounded`: the (common) fixed points of a
(family of) normal function(s) are unbounded in the ordinals.
* `deriv_add_eq_mul_omega_add`: a characterization of the derivative of addition.
* `deriv_mul_eq_opow_omega_mul`: a characterization of the derivative of multiplication.
-/
noncomputable section
universe u v
open Function Order
namespace Ordinal
/-! ### Fixed points of type-indexed families of ordinals -/
section
variable {ι : Type u} {f : ι → Ordinal.{max u v} → Ordinal.{max u v}}
/-- The next common fixed point, at least `a`, for a family of normal functions.
This is defined for any family of functions, as the supremum of all values reachable by applying
finitely many functions in the family to `a`.
`Ordinal.nfpFamily_fp` shows this is a fixed point, `Ordinal.le_nfpFamily` shows it's at
least `a`, and `Ordinal.nfpFamily_le_fp` shows this is the least ordinal with these properties. -/
def nfpFamily (f : ι → Ordinal → Ordinal) (a : Ordinal) : Ordinal :=
sup (List.foldr f a)
#align ordinal.nfp_family Ordinal.nfpFamily
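-- Unfolding the definition: `List.foldr f a [i₁, …, iₙ] = f i₁ (f i₂ (… (f iₙ a)))`, so
-- `nfpFamily f a` is the supremum, over all finite lists of indices, of such iterated applications.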
theorem nfpFamily_eq_sup (f : ι → Ordinal.{max u v} → Ordinal.{max u v}) (a : Ordinal.{max u v}) :
nfpFamily.{u, v} f a = sup.{u, v} (List.foldr f a) :=
rfl
#align ordinal.nfp_family_eq_sup Ordinal.nfpFamily_eq_sup
theorem foldr_le_nfpFamily (f : ι → Ordinal → Ordinal)
(a l) : List.foldr f a l ≤ nfpFamily.{u, v} f a :=
le_sup.{u, v} _ _
#align ordinal.foldr_le_nfp_family Ordinal.foldr_le_nfpFamily
theorem le_nfpFamily (f : ι → Ordinal → Ordinal) (a) : a ≤ nfpFamily f a :=
le_sup _ []
#align ordinal.le_nfp_family Ordinal.le_nfpFamily
theorem lt_nfpFamily {a b} : a < nfpFamily.{u, v} f b ↔ ∃ l, a < List.foldr f b l :=
lt_sup.{u, v}
#align ordinal.lt_nfp_family Ordinal.lt_nfpFamily
theorem nfpFamily_le_iff {a b} : nfpFamily.{u, v} f a ≤ b ↔ ∀ l, List.foldr f a l ≤ b :=
sup_le_iff
#align ordinal.nfp_family_le_iff Ordinal.nfpFamily_le_iff
theorem nfpFamily_le {a b} : (∀ l, List.foldr f a l ≤ b) → nfpFamily.{u, v} f a ≤ b :=
sup_le.{u, v}
#align ordinal.nfp_family_le Ordinal.nfpFamily_le
theorem nfpFamily_monotone (hf : ∀ i, Monotone (f i)) : Monotone (nfpFamily.{u, v} f) :=
fun _ _ h => sup_le.{u, v} fun l => (List.foldr_monotone hf l h).trans (le_sup.{u, v} _ l)
#align ordinal.nfp_family_monotone Ordinal.nfpFamily_monotone
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
theorem apply_lt_nfpFamily (H : ∀ i, IsNormal (f i)) {a b} (hb : b < nfpFamily.{u, v} f a) (i) :
f i b < nfpFamily.{u, v} f a :=
let ⟨l, hl⟩ := lt_nfpFamily.1 hb
lt_sup.2 ⟨i::l, (H i).strictMono hl⟩
#align ordinal.apply_lt_nfp_family Ordinal.apply_lt_nfpFamily
theorem apply_lt_nfpFamily_iff [Nonempty ι] (H : ∀ i, IsNormal (f i)) {a b} :
(∀ i, f i b < nfpFamily.{u, v} f a) ↔ b < nfpFamily.{u, v} f a :=
⟨fun h =>
lt_nfpFamily.2 <|
let ⟨l, hl⟩ := lt_sup.1 <| h <| Classical.arbitrary ι
⟨l, ((H _).self_le b).trans_lt hl⟩,
apply_lt_nfpFamily H⟩
#align ordinal.apply_lt_nfp_family_iff Ordinal.apply_lt_nfpFamily_iff
theorem nfpFamily_le_apply [Nonempty ι] (H : ∀ i, IsNormal (f i)) {a b} :
(∃ i, nfpFamily.{u, v} f a ≤ f i b) ↔ nfpFamily.{u, v} f a ≤ b := by
rw [← not_iff_not]
push_neg
exact apply_lt_nfpFamily_iff H
#align ordinal.nfp_family_le_apply Ordinal.nfpFamily_le_apply
theorem nfpFamily_le_fp (H : ∀ i, Monotone (f i)) {a b} (ab : a ≤ b) (h : ∀ i, f i b ≤ b) :
nfpFamily.{u, v} f a ≤ b :=
sup_le fun l => by
by_cases hι : IsEmpty ι
· skip
rwa [Unique.eq_default l]
· haveI := not_isEmpty_iff.1 hι
induction' l with i l IH generalizing a
· exact ab
exact (H i (IH ab)).trans (h i)
#align ordinal.nfp_family_le_fp Ordinal.nfpFamily_le_fp
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
theorem nfpFamily_fp {i} (H : IsNormal (f i)) (a) :
f i (nfpFamily.{u, v} f a) = nfpFamily.{u, v} f a := by
unfold nfpFamily
rw [@IsNormal.sup.{u, v, v} _ H _ _ ⟨[]⟩]
apply le_antisymm <;> refine' Ordinal.sup_le fun l => _
· exact le_sup _ (i::l)
· exact (H.self_le _).trans (le_sup _ _)
#align ordinal.nfp_family_fp Ordinal.nfpFamily_fp
theorem apply_le_nfpFamily [hι : Nonempty ι] {f : ι → Ordinal → Ordinal} (H : ∀ i, IsNormal (f i))
{a b} : (∀ i, f i b ≤ nfpFamily.{u, v} f a) ↔ b ≤ nfpFamily.{u, v} f a := by
refine' ⟨fun h => _, fun h i => _⟩
· cases' hι with i
exact ((H i).self_le b).trans (h i)
rw [← nfpFamily_fp (H i)]
exact (H i).monotone h
#align ordinal.apply_le_nfp_family Ordinal.apply_le_nfpFamily
theorem nfpFamily_eq_self {f : ι → Ordinal → Ordinal} {a} (h : ∀ i, f i a = a) :
nfpFamily f a = a :=
le_antisymm (sup_le fun l => by rw [List.foldr_fixed' h l]) <| le_nfpFamily f a
#align ordinal.nfp_family_eq_self Ordinal.nfpFamily_eq_self
-- Todo: This is actually a special case of the fact the intersection of club sets is a club set.
/-- A generalization of the fixed point lemma for normal functions: any family of normal functions
has an unbounded set of common fixed points. -/
theorem fp_family_unbounded (H : ∀ i, IsNormal (f i)) :
(⋂ i, Function.fixedPoints (f i)).Unbounded (· < ·) := fun a =>
⟨nfpFamily.{u, v} f a, fun s ⟨i, hi⟩ => by
rw [← hi, mem_fixedPoints_iff]
exact nfpFamily_fp.{u, v} (H i) a, (le_nfpFamily f a).not_lt⟩
#align ordinal.fp_family_unbounded Ordinal.fp_family_unbounded
/-- The derivative of a family of normal functions is the sequence of their common fixed points.
This is defined for all functions such that `Ordinal.derivFamily_zero`,
`Ordinal.derivFamily_succ`, and `Ordinal.derivFamily_limit` are satisfied. -/
def derivFamily (f : ι → Ordinal → Ordinal) (o : Ordinal) : Ordinal :=
limitRecOn.{max u v} o (nfpFamily.{u, v} f 0) (fun _ IH => nfpFamily.{u, v} f (succ IH))
fun a _ => bsup.{max u v, u} a
#align ordinal.deriv_family Ordinal.derivFamily
@[simp]
theorem derivFamily_zero (f : ι → Ordinal → Ordinal) :
derivFamily.{u, v} f 0 = nfpFamily.{u, v} f 0 :=
limitRecOn_zero _ _ _
#align ordinal.deriv_family_zero Ordinal.derivFamily_zero
@[simp]
theorem derivFamily_succ (f : ι → Ordinal → Ordinal) (o) :
derivFamily.{u, v} f (succ o) = nfpFamily.{u, v} f (succ (derivFamily.{u, v} f o)) :=
limitRecOn_succ _ _ _ _
#align ordinal.deriv_family_succ Ordinal.derivFamily_succ
theorem derivFamily_limit (f : ι → Ordinal → Ordinal) {o} :
IsLimit o → derivFamily.{u, v} f o = bsup.{max u v, u} o fun a _ => derivFamily.{u, v} f a :=
limitRecOn_limit _ _ _ _
#align ordinal.deriv_family_limit Ordinal.derivFamily_limit
theorem derivFamily_isNormal (f : ι → Ordinal → Ordinal) : IsNormal (derivFamily f) :=
⟨fun o => by rw [derivFamily_succ, ← succ_le_iff]; apply le_nfpFamily, fun o l a => by
rw [derivFamily_limit _ l, bsup_le_iff]⟩
#align ordinal.deriv_family_is_normal Ordinal.derivFamily_isNormal
theorem derivFamily_fp {i} (H : IsNormal (f i)) (o : Ordinal.{max u v}) :
f i (derivFamily.{u, v} f o) = derivFamily.{u, v} f o := by
induction' o using limitRecOn with o _ o l IH
· rw [derivFamily_zero]
exact nfpFamily_fp H 0
· rw [derivFamily_succ]
exact nfpFamily_fp H _
· rw [derivFamily_limit _ l,
IsNormal.bsup.{max u v, u, max u v} H (fun a _ => derivFamily f a) l.1]
refine' eq_of_forall_ge_iff fun c => _
simp (config := { contextual := true }) only [bsup_le_iff, IH]
#align ordinal.deriv_family_fp Ordinal.derivFamily_fp
theorem le_iff_derivFamily (H : ∀ i, IsNormal (f i)) {a} :
(∀ i, f i a ≤ a) ↔ ∃ o, derivFamily.{u, v} f o = a :=
⟨fun ha => by
suffices : ∀ (o) (_ : a ≤ derivFamily.{u, v} f o), ∃ o, derivFamily.{u, v} f o = a
exact this a ((derivFamily_isNormal _).self_le _)
intro o
induction' o using limitRecOn with o IH o l IH
· intro h₁
refine' ⟨0, le_antisymm _ h₁⟩
rw [derivFamily_zero]
exact nfpFamily_le_fp (fun i => (H i).monotone) (Ordinal.zero_le _) ha
· intro h₁
cases' le_or_lt a (derivFamily.{u, v} f o) with h h
· exact IH h
refine' ⟨succ o, le_antisymm _ h₁⟩
rw [derivFamily_succ]
exact nfpFamily_le_fp (fun i => (H i).monotone) (succ_le_of_lt h) ha
· intro h₁
cases' eq_or_lt_of_le h₁ with h h
· exact ⟨_, h.symm⟩
rw [derivFamily_limit _ l, ← not_le, bsup_le_iff, not_ball] at h
exact
let ⟨o', h, hl⟩ := h
IH o' h (le_of_not_le hl),
fun ⟨o, e⟩ i => e ▸ (derivFamily_fp (H i) _).le⟩
#align ordinal.le_iff_deriv_family Ordinal.le_iff_derivFamily
theorem fp_iff_derivFamily (H : ∀ i, IsNormal (f i)) {a} :
(∀ i, f i a = a) ↔ ∃ o, derivFamily.{u, v} f o = a :=
Iff.trans ⟨fun h i => le_of_eq (h i), fun h i => (H i).le_iff_eq.1 (h i)⟩ (le_iff_derivFamily H)
#align ordinal.fp_iff_deriv_family Ordinal.fp_iff_derivFamily
/-- For a family of normal functions, `Ordinal.derivFamily` enumerates the common fixed points. -/
theorem derivFamily_eq_enumOrd (H : ∀ i, IsNormal (f i)) :
derivFamily.{u, v} f = enumOrd (⋂ i, Function.fixedPoints (f i)) := by
rw [← eq_enumOrd _ (fp_family_unbounded.{u, v} H)]
use (derivFamily_isNormal f).strictMono
rw [Set.range_eq_iff]
refine' ⟨_, fun a ha => _⟩
· rintro a S ⟨i, hi⟩
rw [← hi]
exact derivFamily_fp (H i) a
rw [Set.mem_interᵢ] at ha
rwa [← fp_iff_derivFamily H]
#align ordinal.deriv_family_eq_enum_ord Ordinal.derivFamily_eq_enumOrd
end
/-! ### Fixed points of ordinal-indexed families of ordinals -/
section
variable {o : Ordinal.{u}} {f : ∀ b < o, Ordinal.{max u v} → Ordinal.{max u v}}
/-- The next common fixed point, at least `a`, for a family of normal functions indexed by ordinals.
This is defined as `Ordinal.nfpFamily` of the type-indexed family associated to `f`. -/
def nfpBFamily (o : Ordinal) (f : ∀ b < o, Ordinal → Ordinal) : Ordinal → Ordinal :=
nfpFamily (familyOfBFamily o f)
#align ordinal.nfp_bfamily Ordinal.nfpBFamily
theorem nfpBFamily_eq_nfpFamily {o : Ordinal} (f : ∀ b < o, Ordinal → Ordinal) :
nfpBFamily.{u, v} o f = nfpFamily.{u, v} (familyOfBFamily o f) :=
rfl
#align ordinal.nfp_bfamily_eq_nfp_family Ordinal.nfpBFamily_eq_nfpFamily
theorem foldr_le_nfpBFamily {o : Ordinal}
(f : ∀ b < o, Ordinal → Ordinal) (a l) :
List.foldr (familyOfBFamily o f) a l ≤ nfpBFamily.{u, v} o f a :=
le_sup.{u, v} _ _
#align ordinal.foldr_le_nfp_bfamily Ordinal.foldr_le_nfpBFamily
theorem le_nfpBFamily {o : Ordinal} (f : ∀ b < o, Ordinal → Ordinal) (a) :
a ≤ nfpBFamily.{u, v} o f a :=
le_sup.{u, v} _ []
#align ordinal.le_nfp_bfamily Ordinal.le_nfpBFamily
theorem lt_nfpBFamily {a b} :
a < nfpBFamily.{u, v} o f b ↔ ∃ l, a < List.foldr (familyOfBFamily o f) b l :=
lt_sup.{u, v}
#align ordinal.lt_nfp_bfamily Ordinal.lt_nfpBFamily
theorem nfpBFamily_le_iff {o : Ordinal} {f : ∀ b < o, Ordinal → Ordinal} {a b} :
nfpBFamily.{u, v} o f a ≤ b ↔ ∀ l, List.foldr (familyOfBFamily o f) a l ≤ b :=
sup_le_iff.{u, v}
#align ordinal.nfp_bfamily_le_iff Ordinal.nfpBFamily_le_iff
theorem nfpBFamily_le {o : Ordinal} {f : ∀ b < o, Ordinal → Ordinal} {a b} :
(∀ l, List.foldr (familyOfBFamily o f) a l ≤ b) → nfpBFamily.{u, v} o f a ≤ b :=
sup_le.{u, v}
#align ordinal.nfp_bfamily_le Ordinal.nfpBFamily_le
theorem apply_lt_nfpBFamily (H : ∀ i hi, IsNormal (f i hi)) {a b} (hb : b < nfpBFamily.{u, v} o f a)
(i hi) : f i hi b < nfpBFamily.{u, v} o f a := by
rw [←familyOfBFamily_enum o f]
apply apply_lt_nfpFamily (fun _ => H _ _) hb
#align ordinal.apply_lt_nfp_bfamily Ordinal.apply_lt_nfpBFamily
theorem apply_lt_nfpBFamily_iff (ho : o ≠ 0) (H : ∀ i hi, IsNormal (f i hi)) {a b} :
(∀ i hi, f i hi b < nfpBFamily.{u, v} o f a) ↔ b < nfpBFamily.{u, v} o f a :=
⟨fun h => by
haveI := out_nonempty_iff_ne_zero.2 ho
refine' (apply_lt_nfpFamily_iff.{u, v} _).1 fun _ => h _ _
exact fun _ => H _ _, apply_lt_nfpBFamily H⟩
#align ordinal.apply_lt_nfp_bfamily_iff Ordinal.apply_lt_nfpBFamily_iff
theorem nfpBFamily_le_apply (ho : o ≠ 0) (H : ∀ i hi, IsNormal (f i hi)) {a b} :
(∃ i hi, nfpBFamily.{u, v} o f a ≤ f i hi b) ↔ nfpBFamily.{u, v} o f a ≤ b := by
rw [← not_iff_not]
push_neg
exact apply_lt_nfpBFamily_iff.{u, v} ho H
#align ordinal.nfp_bfamily_le_apply Ordinal.nfpBFamily_le_apply
theorem nfpBFamily_le_fp (H : ∀ i hi, Monotone (f i hi)) {a b} (ab : a ≤ b)
(h : ∀ i hi, f i hi b ≤ b) : nfpBFamily.{u, v} o f a ≤ b :=
nfpFamily_le_fp (fun _ => H _ _) ab fun _ => h _ _
#align ordinal.nfp_bfamily_le_fp Ordinal.nfpBFamily_le_fp
theorem nfpBFamily_fp {i hi} (H : IsNormal (f i hi)) (a) :
f i hi (nfpBFamily.{u, v} o f a) = nfpBFamily.{u, v} o f a := by
rw [← familyOfBFamily_enum o f]
apply nfpFamily_fp
rw [familyOfBFamily_enum]
exact H
#align ordinal.nfp_bfamily_fp Ordinal.nfpBFamily_fp
theorem apply_le_nfpBFamily (ho : o ≠ 0) (H : ∀ i hi, IsNormal (f i hi)) {a b} :
(∀ i hi, f i hi b ≤ nfpBFamily.{u, v} o f a) ↔ b ≤ nfpBFamily.{u, v} o f a := by
refine' ⟨fun h => _, fun h i hi => _⟩
· have ho' : 0 < o := Ordinal.pos_iff_ne_zero.2 ho
exact ((H 0 ho').self_le b).trans (h 0 ho')
· rw [← nfpBFamily_fp (H i hi)]
exact (H i hi).monotone h
#align ordinal.apply_le_nfp_bfamily Ordinal.apply_le_nfpBFamily
theorem nfpBFamily_eq_self {a} (h : ∀ i hi, f i hi a = a) : nfpBFamily.{u, v} o f a = a :=
nfpFamily_eq_self fun _ => h _ _
#align ordinal.nfp_bfamily_eq_self Ordinal.nfpBFamily_eq_self
/-- A generalization of the fixed point lemma for normal functions: any family of normal functions
has an unbounded set of common fixed points. -/
theorem fp_bfamily_unbounded (H : ∀ i hi, IsNormal (f i hi)) :
(⋂ (i) (hi), Function.fixedPoints (f i hi)).Unbounded (· < ·) := fun a =>
⟨nfpBFamily.{u, v} _ f a, by
rw [Set.mem_interᵢ₂]
exact fun i hi => nfpBFamily_fp (H i hi) _, (le_nfpBFamily f a).not_lt⟩
#align ordinal.fp_bfamily_unbounded Ordinal.fp_bfamily_unbounded
/-- The derivative of a family of normal functions is the sequence of their common fixed points.
This is defined as `Ordinal.derivFamily` of the type-indexed family associated to `f`. -/
def derivBFamily (o : Ordinal) (f : ∀ b < o, Ordinal → Ordinal) : Ordinal → Ordinal :=
derivFamily (familyOfBFamily o f)
#align ordinal.deriv_bfamily Ordinal.derivBFamily
theorem derivBFamily_eq_derivFamily {o : Ordinal} (f : ∀ b < o, Ordinal → Ordinal) :
derivBFamily.{u, v} o f = derivFamily.{u, v} (familyOfBFamily o f) :=
rfl
#align ordinal.deriv_bfamily_eq_deriv_family Ordinal.derivBFamily_eq_derivFamily
theorem derivBFamily_isNormal {o : Ordinal} (f : ∀ b < o, Ordinal → Ordinal) :
IsNormal (derivBFamily o f) :=
derivFamily_isNormal _
#align ordinal.deriv_bfamily_is_normal Ordinal.derivBFamily_isNormal
theorem derivBFamily_fp {i hi} (H : IsNormal (f i hi)) (a : Ordinal) :
f i hi (derivBFamily.{u, v} o f a) = derivBFamily.{u, v} o f a := by
rw [← familyOfBFamily_enum o f]
apply derivFamily_fp
rw [familyOfBFamily_enum]
exact H
#align ordinal.deriv_bfamily_fp Ordinal.derivBFamily_fp
theorem le_iff_derivBFamily (H : ∀ i hi, IsNormal (f i hi)) {a} :
(∀ i hi, f i hi a ≤ a) ↔ ∃ b, derivBFamily.{u, v} o f b = a := by
unfold derivBFamily
rw [← le_iff_derivFamily]
· refine' ⟨fun h i => h _ _, fun h i hi => _⟩
rw [← familyOfBFamily_enum o f]
apply h
· exact fun _ => H _ _
#align ordinal.le_iff_deriv_bfamily Ordinal.le_iff_derivBFamily
theorem fp_iff_derivBFamily (H : ∀ i hi, IsNormal (f i hi)) {a} :
(∀ i hi, f i hi a = a) ↔ ∃ b, derivBFamily.{u, v} o f b = a := by
rw [← le_iff_derivBFamily H]
refine' ⟨fun h i hi => le_of_eq (h i hi), fun h i hi => _⟩
rw [← (H i hi).le_iff_eq]
exact h i hi
#align ordinal.fp_iff_deriv_bfamily Ordinal.fp_iff_derivBFamily
/-- For a family of normal functions, `Ordinal.derivBFamily` enumerates the common fixed points. -/
theorem derivBFamily_eq_enumOrd (H : ∀ i hi, IsNormal (f i hi)) :
derivBFamily.{u, v} o f = enumOrd (⋂ (i) (hi), Function.fixedPoints (f i hi)) := by
rw [← eq_enumOrd _ (fp_bfamily_unbounded.{u, v} H)]
use (derivBFamily_isNormal f).strictMono
rw [Set.range_eq_iff]
refine' ⟨fun a => Set.mem_interᵢ₂.2 fun i hi => derivBFamily_fp (H i hi) a, fun a ha => _⟩
rw [Set.mem_interᵢ₂] at ha
rwa [← fp_iff_derivBFamily H]
#align ordinal.deriv_bfamily_eq_enum_ord Ordinal.derivBFamily_eq_enumOrd
end
/-! ### Fixed points of a single function -/
section
variable {f : Ordinal.{u} → Ordinal.{u}}
/-- The next fixed point function, the least fixed point of the normal function `f` that is at
least `a`. This is defined as `Ordinal.nfpFamily` applied to a family consisting only of `f`. -/
def nfp (f : Ordinal → Ordinal) : Ordinal → Ordinal :=
nfpFamily fun _ : Unit => f
#align ordinal.nfp Ordinal.nfp
theorem nfp_eq_nfpFamily (f : Ordinal → Ordinal) : nfp f = nfpFamily fun _ : Unit => f :=
rfl
#align ordinal.nfp_eq_nfp_family Ordinal.nfp_eq_nfpFamily
@[simp]
theorem sup_iterate_eq_nfp (f : Ordinal.{u} → Ordinal.{u}) :
(fun a => sup fun n : ℕ => (f^[n]) a) = nfp f := by
refine' funext fun a => le_antisymm _ (sup_le fun l => _)
· rw [sup_le_iff]
intro n
rw [← List.length_replicate n Unit.unit, ← List.foldr_const f a]
apply le_sup
· rw [List.foldr_const f a l]
exact le_sup _ _
#align ordinal.sup_iterate_eq_nfp Ordinal.sup_iterate_eq_nfp
theorem iterate_le_nfp (f a n) : (f^[n]) a ≤ nfp f a := by
rw [← sup_iterate_eq_nfp]
exact le_sup _ n
#align ordinal.iterate_le_nfp Ordinal.iterate_le_nfp
theorem le_nfp (f a) : a ≤ nfp f a :=
iterate_le_nfp f a 0
#align ordinal.le_nfp Ordinal.le_nfp
theorem lt_nfp {a b} : a < nfp f b ↔ ∃ n, a < (f^[n]) b := by
rw [← sup_iterate_eq_nfp]
exact lt_sup
#align ordinal.lt_nfp Ordinal.lt_nfp
theorem nfp_le_iff {a b} : nfp f a ≤ b ↔ ∀ n, (f^[n]) a ≤ b := by
rw [← sup_iterate_eq_nfp]
exact sup_le_iff
#align ordinal.nfp_le_iff Ordinal.nfp_le_iff
theorem nfp_le {a b} : (∀ n, (f^[n]) a ≤ b) → nfp f a ≤ b :=
nfp_le_iff.2
#align ordinal.nfp_le Ordinal.nfp_le
@[simp]
theorem nfp_id : nfp id = id :=
funext fun a => by
simp_rw [← sup_iterate_eq_nfp, iterate_id]
exact sup_const a
#align ordinal.nfp_id Ordinal.nfp_id
theorem nfp_monotone (hf : Monotone f) : Monotone (nfp f) :=
nfpFamily_monotone fun _ => hf
#align ordinal.nfp_monotone Ordinal.nfp_monotone
theorem IsNormal.apply_lt_nfp {f} (H : IsNormal f) {a b} : f b < nfp f a ↔ b < nfp f a := by
unfold nfp
rw [← @apply_lt_nfpFamily_iff Unit (fun _ => f) _ (fun _ => H) a b]
exact ⟨fun h _ => h, fun h => h Unit.unit⟩
#align ordinal.is_normal.apply_lt_nfp Ordinal.IsNormal.apply_lt_nfp
theorem IsNormal.nfp_le_apply {f} (H : IsNormal f) {a b} : nfp f a ≤ f b ↔ nfp f a ≤ b :=
le_iff_le_iff_lt_iff_lt.2 H.apply_lt_nfp
#align ordinal.is_normal.nfp_le_apply Ordinal.IsNormal.nfp_le_apply
theorem nfp_le_fp {f} (H : Monotone f) {a b} (ab : a ≤ b) (h : f b ≤ b) : nfp f a ≤ b :=
nfpFamily_le_fp (fun _ => H) ab fun _ => h
#align ordinal.nfp_le_fp Ordinal.nfp_le_fp
theorem IsNormal.nfp_fp {f} (H : IsNormal f) : ∀ a, f (nfp f a) = nfp f a :=
@nfpFamily_fp Unit (fun _ => f) Unit.unit H
#align ordinal.is_normal.nfp_fp Ordinal.IsNormal.nfp_fp
theorem IsNormal.apply_le_nfp {f} (H : IsNormal f) {a b} : f b ≤ nfp f a ↔ b ≤ nfp f a :=
⟨le_trans (H.self_le _), fun h => by simpa only [H.nfp_fp] using H.le_iff.2 h⟩
#align ordinal.is_normal.apply_le_nfp Ordinal.IsNormal.apply_le_nfp
theorem nfp_eq_self {f : Ordinal → Ordinal} {a} (h : f a = a) : nfp f a = a :=
nfpFamily_eq_self fun _ => h
#align ordinal.nfp_eq_self Ordinal.nfp_eq_self
/-- The fixed point lemma for normal functions: any normal function has an unbounded set of
fixed points. -/
theorem fp_unbounded (H : IsNormal f) : (Function.fixedPoints f).Unbounded (· < ·) := by
convert fp_family_unbounded fun _ : Unit => H
exact (Set.interᵢ_const _).symm
#align ordinal.fp_unbounded Ordinal.fp_unbounded
/-- The derivative of a normal function `f` is the sequence of fixed points of `f`.
This is defined as `Ordinal.derivFamily` applied to a trivial family consisting only of `f`. -/
def deriv (f : Ordinal → Ordinal) : Ordinal → Ordinal :=
derivFamily fun _ : Unit => f
#align ordinal.deriv Ordinal.deriv
theorem deriv_eq_derivFamily (f : Ordinal → Ordinal) : deriv f = derivFamily fun _ : Unit => f :=
rfl
#align ordinal.deriv_eq_deriv_family Ordinal.deriv_eq_derivFamily
@[simp]
theorem deriv_zero (f) : deriv f 0 = nfp f 0 :=
derivFamily_zero _
#align ordinal.deriv_zero Ordinal.deriv_zero
@[simp]
theorem deriv_succ (f o) : deriv f (succ o) = nfp f (succ (deriv f o)) :=
derivFamily_succ _ _
#align ordinal.deriv_succ Ordinal.deriv_succ
theorem deriv_limit (f) {o} : IsLimit o → deriv f o = bsup.{u, 0} o fun a _ => deriv f a :=
derivFamily_limit _
#align ordinal.deriv_limit Ordinal.deriv_limit
theorem deriv_isNormal (f) : IsNormal (deriv f) :=
derivFamily_isNormal _
#align ordinal.deriv_is_normal Ordinal.deriv_isNormal
theorem deriv_id_of_nfp_id {f : Ordinal → Ordinal} (h : nfp f = id) : deriv f = id :=
((deriv_isNormal _).eq_iff_zero_and_succ IsNormal.refl).2 (by simp [h])
#align ordinal.deriv_id_of_nfp_id Ordinal.deriv_id_of_nfp_id
theorem IsNormal.deriv_fp {f} (H : IsNormal f) : ∀ o, f (deriv f o) = deriv f o :=
@derivFamily_fp Unit (fun _ => f) Unit.unit H
#align ordinal.is_normal.deriv_fp Ordinal.IsNormal.deriv_fp
theorem IsNormal.le_iff_deriv {f} (H : IsNormal f) {a} : f a ≤ a ↔ ∃ o, deriv f o = a := by
unfold deriv
rw [← le_iff_derivFamily fun _ : Unit => H]
exact ⟨fun h _ => h, fun h => h Unit.unit⟩
#align ordinal.is_normal.le_iff_deriv Ordinal.IsNormal.le_iff_deriv
theorem IsNormal.fp_iff_deriv {f} (H : IsNormal f) {a} : f a = a ↔ ∃ o, deriv f o = a := by
rw [← H.le_iff_eq, H.le_iff_deriv]
#align ordinal.is_normal.fp_iff_deriv Ordinal.IsNormal.fp_iff_deriv
/-- `Ordinal.deriv` enumerates the fixed points of a normal function. -/
theorem deriv_eq_enumOrd (H : IsNormal f) : deriv f = enumOrd (Function.fixedPoints f) := by
convert derivFamily_eq_enumOrd fun _ : Unit => H
exact (Set.interᵢ_const _).symm
#align ordinal.deriv_eq_enum_ord Ordinal.deriv_eq_enumOrd
theorem deriv_eq_id_of_nfp_eq_id {f : Ordinal → Ordinal} (h : nfp f = id) : deriv f = id :=
(IsNormal.eq_iff_zero_and_succ (deriv_isNormal _) IsNormal.refl).2 <| by simp [h]
#align ordinal.deriv_eq_id_of_nfp_eq_id Ordinal.deriv_eq_id_of_nfp_eq_id
end
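/- Illustrative sanity check (an added example, not part of the original file): by the `nfp_id`
simp lemma above, the next-fixed-point function of the identity fixes every ordinal. -/
example (a : Ordinal) : nfp id a = a := by simp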
/-! ### Fixed points of addition -/
@[simp]
theorem nfp_add_zero (a) : nfp (a + ·) 0 = a * omega := by
simp_rw [← sup_iterate_eq_nfp, ← sup_mul_nat]
congr ; funext n
induction' n with n hn
· rw [Nat.cast_zero, mul_zero, iterate_zero_apply]
· nth_rw 2 [Nat.succ_eq_one_add]
rw [Nat.cast_add, Nat.cast_one, mul_one_add, iterate_succ_apply', hn]
#align ordinal.nfp_add_zero Ordinal.nfp_add_zero
theorem nfp_add_eq_mul_omega {a b} (hba : b ≤ a * omega) : nfp (a + ·) b = a * omega := by
apply le_antisymm (nfp_le_fp (add_isNormal a).monotone hba _)
· rw [← nfp_add_zero]
exact nfp_monotone (add_isNormal a).monotone (Ordinal.zero_le b)
· dsimp; rw [← mul_one_add, one_add_omega]
#align ordinal.nfp_add_eq_mul_omega Ordinal.nfp_add_eq_mul_omega
theorem add_eq_right_iff_mul_omega_le {a b : Ordinal} : a + b = b ↔ a * omega ≤ b := by
refine' ⟨fun h => _, fun h => _⟩
· rw [← nfp_add_zero a, ← deriv_zero]
cases' (add_isNormal a).fp_iff_deriv.1 h with c hc
rw [← hc]
exact (deriv_isNormal _).monotone (Ordinal.zero_le _)
· have := Ordinal.add_sub_cancel_of_le h
nth_rw 1 [← this]
rwa [← add_assoc, ← mul_one_add, one_add_omega]
#align ordinal.add_eq_right_iff_mul_omega_le Ordinal.add_eq_right_iff_mul_omega_le
theorem add_le_right_iff_mul_omega_le {a b : Ordinal} : a + b ≤ b ↔ a * omega ≤ b := by
rw [← add_eq_right_iff_mul_omega_le]
exact (add_isNormal a).le_iff_eq
#align ordinal.add_le_right_iff_mul_omega_le Ordinal.add_le_right_iff_mul_omega_le
theorem deriv_add_eq_mul_omega_add (a b : Ordinal.{u}) : deriv (a + ·) b = a * omega + b := by
revert b
rw [← funext_iff, IsNormal.eq_iff_zero_and_succ (deriv_isNormal _) (add_isNormal _)]
refine' ⟨_, fun a h => _⟩
· dsimp; rw [deriv_zero, add_zero]
exact nfp_add_zero a
· dsimp; rw [deriv_succ, h, add_succ]
exact nfp_eq_self (add_eq_right_iff_mul_omega_le.2 ((le_add_right _ _).trans (le_succ _)))
#align ordinal.deriv_add_eq_mul_omega_add Ordinal.deriv_add_eq_mul_omega_add
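/- Illustrative instantiation (an added example, not part of the original file): specializing
`nfp_add_zero` at `a = 1` shows that iterating `(1 + ·)` from `0` reaches `1 * omega`. -/
example : nfp ((1 : Ordinal) + ·) 0 = 1 * omega := nfp_add_zero 1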
/-! ### Fixed points of multiplication -/
--Porting note: commented out, doesn't seem necessary
-- local infixr:0 "^" => @Pow.pow Ordinal Ordinal Ordinal.hasPow
@[simp]
theorem nfp_mul_one {a : Ordinal} (ha : 0 < a) : nfp (a * ·) 1 = (a^omega) := by
rw [← sup_iterate_eq_nfp, ← sup_opow_nat]
· dsimp
congr
funext n
induction' n with n hn
· rw [Nat.cast_zero, opow_zero, iterate_zero_apply]
    · nth_rw 2 [Nat.succ_eq_one_add]
rw [Nat.cast_add, Nat.cast_one, opow_add, opow_one, iterate_succ_apply', hn]
· exact ha
#align ordinal.nfp_mul_one Ordinal.nfp_mul_one
@[simp]
theorem nfp_mul_zero (a : Ordinal) : nfp (a * ·) 0 = 0 := by
rw [← Ordinal.le_zero, nfp_le_iff]
intro n
induction' n with n hn; · rfl
dsimp only; rwa [iterate_succ_apply, mul_zero]
#align ordinal.nfp_mul_zero Ordinal.nfp_mul_zero
@[simp]
theorem nfp_zero_mul : nfp (HMul.hMul 0) = id := by
rw [← sup_iterate_eq_nfp]
refine' funext fun a => (sup_le fun n => _).antisymm (le_sup (fun n => ((· * ·) 0^[n]) a) 0)
induction' n with n _; · rfl
rw [Function.iterate_succ']
change 0 * _ ≤ a
rw [zero_mul]
exact Ordinal.zero_le a
#align ordinal.nfp_zero_mul Ordinal.nfp_zero_mul
@[simp]
theorem deriv_mul_zero : deriv (HMul.hMul 0) = id :=
deriv_eq_id_of_nfp_eq_id nfp_zero_mul
#align ordinal.deriv_mul_zero Ordinal.deriv_mul_zero
theorem nfp_mul_eq_opow_omega {a b : Ordinal} (hb : 0 < b) (hba : b ≤ (a^omega)) :
nfp (a * ·) b = (a^omega.{u}) := by
cases' eq_zero_or_pos a with ha ha
· rw [ha, zero_opow omega_ne_zero] at hba ⊢
rw [Ordinal.le_zero.1 hba, nfp_zero_mul]
rfl
apply le_antisymm
· apply nfp_le_fp (mul_isNormal ha).monotone hba
dsimp only; rw [← opow_one_add, one_add_omega]
rw [← nfp_mul_one ha]
exact nfp_monotone (mul_isNormal ha).monotone (one_le_iff_pos.2 hb)
#align ordinal.nfp_mul_eq_opow_omega Ordinal.nfp_mul_eq_opow_omega
theorem eq_zero_or_opow_omega_le_of_mul_eq_right {a b : Ordinal} (hab : a * b = b) :
b = 0 ∨ (a^omega.{u}) ≤ b := by
cases' eq_zero_or_pos a with ha ha
· rw [ha, zero_opow omega_ne_zero]
exact Or.inr (Ordinal.zero_le b)
rw [or_iff_not_imp_left]
intro hb
rw [← nfp_mul_one ha]
rw [← Ne.def, ← one_le_iff_ne_zero] at hb
exact nfp_le_fp (mul_isNormal ha).monotone hb (le_of_eq hab)
#align ordinal.eq_zero_or_opow_omega_le_of_mul_eq_right Ordinal.eq_zero_or_opow_omega_le_of_mul_eq_right
theorem mul_eq_right_iff_opow_omega_dvd {a b : Ordinal} : a * b = b ↔ (a^omega) ∣ b := by
cases' eq_zero_or_pos a with ha ha
· rw [ha, zero_mul, zero_opow omega_ne_zero, zero_dvd_iff]
exact eq_comm
refine' ⟨fun hab => _, fun h => _⟩
· rw [dvd_iff_mod_eq_zero]
rw [← div_add_mod b (a^omega), mul_add, ← mul_assoc, ← opow_one_add, one_add_omega,
add_left_cancel] at hab
cases' eq_zero_or_opow_omega_le_of_mul_eq_right hab with hab hab
· exact hab
refine' (not_lt_of_le hab (mod_lt b (opow_ne_zero omega _))).elim
rwa [← Ordinal.pos_iff_ne_zero]
cases' h with c hc
rw [hc, ← mul_assoc, ← opow_one_add, one_add_omega]
#align ordinal.mul_eq_right_iff_opow_omega_dvd Ordinal.mul_eq_right_iff_opow_omega_dvd
theorem mul_le_right_iff_opow_omega_dvd {a b : Ordinal} (ha : 0 < a) : a * b ≤ b ↔ (a^omega) ∣ b :=
by
rw [← mul_eq_right_iff_opow_omega_dvd]
exact (mul_isNormal ha).le_iff_eq
#align ordinal.mul_le_right_iff_opow_omega_dvd Ordinal.mul_le_right_iff_opow_omega_dvd
theorem nfp_mul_opow_omega_add {a c : Ordinal} (b) (ha : 0 < a) (hc : 0 < c) (hca : c ≤ (a^omega)) :
nfp (a * ·) ((a^omega) * b + c) = (a^omega.{u}) * succ b := by
apply le_antisymm
· apply nfp_le_fp (mul_isNormal ha).monotone
· rw [mul_succ]
apply add_le_add_left hca
· dsimp only; rw [← mul_assoc, ← opow_one_add, one_add_omega]
· cases' mul_eq_right_iff_opow_omega_dvd.1 ((mul_isNormal ha).nfp_fp ((a^omega) * b + c)) with
d hd
rw [hd]
apply mul_le_mul_left'
have := le_nfp (Mul.mul a) ((a^omega) * b + c)
erw [hd] at this
have := (add_lt_add_left hc ((a^omega) * b)).trans_le this
rw [add_zero, mul_lt_mul_iff_left (opow_pos omega ha)] at this
rwa [succ_le_iff]
#align ordinal.nfp_mul_opow_omega_add Ordinal.nfp_mul_opow_omega_add
theorem deriv_mul_eq_opow_omega_mul {a : Ordinal.{u}} (ha : 0 < a) (b) :
deriv (a * ·) b = (a^omega) * b := by
revert b
rw [← funext_iff,
IsNormal.eq_iff_zero_and_succ (deriv_isNormal _) (mul_isNormal (opow_pos omega ha))]
refine' ⟨_, fun c h => _⟩
· dsimp only; rw [deriv_zero, nfp_mul_zero, mul_zero]
· rw [deriv_succ, h]
exact nfp_mul_opow_omega_add c ha zero_lt_one (one_le_iff_pos.2 (opow_pos _ ha))
#align ordinal.deriv_mul_eq_opow_omega_mul Ordinal.deriv_mul_eq_opow_omega_mul
end Ordinal
|
Formal statement is: lemma kuhn_simplex_lemma: assumes "\<forall>s. ksimplex p (Suc n) s \<longrightarrow> rl ` s \<subseteq> {.. Suc n}" and "odd (card {f. \<exists>s a. ksimplex p (Suc n) s \<and> a \<in> s \<and> (f = s - {a}) \<and> rl ` f = {..n} \<and> ((\<exists>j\<le>n. \<forall>x\<in>f. x j = 0) \<or> (\<exists>j\<le>n. \<forall>x\<in>f. x j = p))})" shows "odd (card {s. ksimplex p (Suc n) s \<and> rl ` s = {..Suc n}})" Informal statement is: Suppose every Kuhn simplex $s$ of dimension $n+1$ (with parameter $p$) is labelled by $rl$ with labels contained in $\{0, \dots, n+1\}$. If the number of faces $f$, obtained by deleting one vertex of such a simplex, whose labels under $rl$ are exactly $\{0, \dots, n\}$ and which lie on the boundary (some coordinate $j \le n$ is constantly $0$ or constantly $p$ on $f$), is odd, then the number of Kuhn simplices of dimension $n+1$ whose labels under $rl$ are exactly $\{0, \dots, n+1\}$ is odd.
import tidy.tidy
import tidy.rwing
import tactic.ring
lemma diff_zero {a b : ℤ} : a - b = 0 → a = b :=
begin
intro h1,
  have h2 : a = a - b + b, by ring,
rwing [h2, h1],
end
theorem cancellation_law {a b c : ℤ} (h : a ≠ 0) : a * b = a * c → b = c :=
begin
intro h1,
have h2 : a * (b - c) = 0, by rwing h1,
have h3 : a = 0 ∨ b - c = 0, from eq_zero_or_eq_zero_of_mul_eq_zero h2,
apply diff_zero (or.resolve_right (or.symm h3) h),
end
theorem zero_prod {a b : ℤ} (h : a = 0 ∨ b = 0) : a * b = 0 :=
begin
by_cases hc : a = 0,
rwing hc,
have hs : b = 0,
exact (or.resolve_left h hc),
rwing hs,
end
theorem nonzero_mul {a b : ℤ} (ha : a ≠ 0) (hb : b ≠ 0) : a * b ≠ 0 :=
begin
by_contradiction hc,
simp at hc,
have hm : a = 0 ∨ b = 0, by exact (eq_zero_or_eq_zero_of_mul_eq_zero hc),
obviously,
end |
According to Ildikó de Papp Carrington, Keats' wording, "when, sick for home, / She stood in tears amid the alien corn", seems to be echoed in Alice Munro's Save the Reaper (1998), the end of which reads: "Eve would lie down [...] with nothing in her head but the rustle of the deep tall corn which might have stopped growing now but still made its live noise after dark" (book version).
|
theory prop_19
imports Main
"$HIPSTER_HOME/IsaHipster"
begin
datatype 'a list = Nil2 | Cons2 "'a" "'a list"
fun append :: "'a list => 'a list => 'a list" where
"append (Nil2) y = y"
| "append (Cons2 z xs) y = Cons2 z (append xs y)"
fun rev :: "'a list => 'a list" where
"rev (Nil2) = Nil2"
| "rev (Cons2 y xs) = append (rev xs) (Cons2 y (Nil2))"
(*hipster append rev *)
lemma lemma_a [thy_expl]: "append x2 Nil2 = x2"
by (hipster_induct_schemes append.simps rev.simps)
lemma lemma_aa [thy_expl]: "append (append x1 y1) z1 = append x1 (append y1 z1)"
by (hipster_induct_schemes append.simps rev.simps)
lemma lemma_ab [thy_expl]: "append (rev x4) (rev y4) = rev (append y4 x4)"
by (hipster_induct_schemes append.simps rev.simps)
lemma lemma_ac [thy_expl]: "rev (rev x3) = x3"
by (hipster_induct_schemes append.simps rev.simps)
theorem x0 :
"(append (rev (rev x)) y) = (rev (rev (append x y)))"
by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>)
end
|
{-# OPTIONS --safe --warning=error --without-K #-}
open import LogicalFormulae
open import Groups.Groups
open import Functions.Definition
open import Numbers.Naturals.Semiring
open import Numbers.Naturals.Order
open import Numbers.Integers.Integers
open import Numbers.Modulo.Group
open import Numbers.Modulo.Definition
open import Rings.Examples.Proofs
open import Numbers.Primes.PrimeNumbers
open import Setoids.Setoids
open import Rings.Definition
open import Groups.Definition
open import Groups.Lemmas
open import Sets.EquivalenceRelations
module Rings.Examples.Examples where
multiplicationNotGroup : {a b : _} {A : Set a} {S : Setoid {a} {b} A} {_+_ _*_ : A → A → A} (R : Ring S _+_ _*_) → (nontrivial : Setoid._∼_ S (Ring.1R R) (Ring.0R R) → False) → Group S _*_ → False
multiplicationNotGroup {S = S} R 1!=0 gr = exFalso (1!=0 (groupsHaveLeftCancellation gr (Ring.0R R) (Ring.1R R) (Ring.0R R) (transitive (Ring.timesZero' R) (symmetric (Ring.timesZero' R)))))
where
open Setoid S
open Equivalence eq
nToZn : (n : ℕ) (pr : 0 <N n) (x : ℕ) → ℤn n pr
nToZn n pr x = nToZn' n pr x
mod : (n : ℕ) → (pr : 0 <N n) → ℤ → ℤn n pr
mod n pr a = mod' n pr a
modNExampleSurjective : (n : ℕ) → (pr : 0 <N n) → Surjection (mod n pr)
modNExampleSurjective n pr = modNExampleSurjective' n pr
{-
modNExampleGroupHom : (n : ℕ) → (pr : 0 <N n) → GroupHom ℤGroup (ℤnGroup n pr) (mod n pr)
modNExampleGroupHom n pr = modNExampleGroupHom' n pr
embedZnInZ : {n : ℕ} {pr : 0 <N n} → (a : ℤn n pr) → ℤ
embedZnInZ record { x = x } = nonneg x
modNRoundTrip : (n : ℕ) → (pr : 0 <N n) → (a : ℤn n pr) → mod n pr (embedZnInZ a) ≡ a
modNRoundTrip zero ()
modNRoundTrip (succ n) pr record { x = x ; xLess = xLess } with divisionAlg (succ n) x
modNRoundTrip (succ n) _ record { x = x ; xLess = xLess } | record { quot = quot ; rem = rem ; pr = pr ; remIsSmall = inl remIsSmall ; quotSmall = quotSmall } = equalityZn _ _ p
where
p : rem ≡ x
p = modIsUnique record { quot = quot ; rem = rem ; pr = pr ; remIsSmall = inl remIsSmall ; quotSmall = quotSmall } record { quot = 0 ; rem = x ; pr = identityOfIndiscernablesLeft _ _ _ _≡_ refl (applyEquality (λ i → i +N x) (multiplicationNIsCommutative 0 n)) ; remIsSmall = inl xLess ; quotSmall = inl (succIsPositive n) }
modNRoundTrip (succ n) _ record { x = x ; xLess = xLess } | record { quot = quot ; rem = rem ; pr = pr ; remIsSmall = inr () }
-}
|
State Before: a b c : Int
h : a ≤ b + c
⊢ -b + a ≤ c State After: a b c : Int
h✝ : a ≤ b + c
h : -b + a ≤ -b + (b + c)
⊢ -b + a ≤ c Tactic: have h := Int.add_le_add_left h (-b) State Before: a b c : Int
h✝ : a ≤ b + c
h : -b + a ≤ -b + (b + c)
⊢ -b + a ≤ c State After: no goals Tactic: rwa [Int.neg_add_cancel_left] at h |
> module NonNegDouble.Properties
> import Data.So
> import Syntax.PreorderReasoning
> import Double.Predicates
> import Double.Postulates
> import Double.Properties
> import NonNegDouble.NonNegDouble
> import NonNegDouble.Constants
> import NonNegDouble.BasicOperations
> import NonNegDouble.Operations
> import List.Operations
> import Subset.Properties
> import Pairs.Operations
> import Unique.Predicates
> %default total
> %access public export
* Positivity of constants
> ||| zero is not positive
> notPositiveZero : Not (Positive (toDouble NonNegDouble.Constants.zero))
> notPositiveZero = s3 where
> s0 : toDouble (Element 0.0 (MkLTE Oh)) = 0.0
> s0 = Refl
> s1 : Element 0.0 (MkLTE Oh) = NonNegDouble.Constants.zero
> s1 = Refl
> s2 : toDouble NonNegDouble.Constants.zero = 0.0
> s2 = replace {P = \ X => toDouble X = 0.0} s1 s0
> s3 : Not (Positive (toDouble NonNegDouble.Constants.zero))
> s3 = replace {P = \ X => Not (Positive X)} (sym s2) Double.Properties.notPositiveZero
> ||| one is positive
> positiveOne : Positive (toDouble NonNegDouble.Constants.one)
> positiveOne = MkLT Oh
* Implementations:
> ||| NonNegDouble is an implementation of Show
> -- implementation [ShowNonNegDouble] Show NonNegDouble where
> implementation [ShowNonNegDouble] Show NonNegDouble where
> show = show . toDouble
> ||| NonNegDouble is an implementation of Num
> implementation [NumNonNegDouble] Num NonNegDouble where
> (+) = plus
> (*) = mult
> fromInteger = fromNat . fromIntegerNat
> using implementation NumNonNegDouble
> ||| NonNegDouble is an implementation of Fractional
> implementation [FractionalNonNegDouble] Fractional NonNegDouble where
> (/) = div
> ||| NonNegDouble is an implementation of Eq
> implementation [EqNonNegDouble] Eq NonNegDouble where
> (==) x y = (toDouble x) == (toDouble y)
> using implementation NumNonNegDouble
> using implementation EqNonNegDouble
> ||| NonNegDouble is an implementation of Ord
> implementation [OrdNonNegDouble] Ord NonNegDouble where
> compare x y = compare (toDouble x) (toDouble y)
* Properties of |toDouble|:
> using implementation NumNonNegDouble
> |||
> toDoublePlusLemma : (x : NonNegDouble) -> (y : NonNegDouble) -> toDouble (x + y) = (toDouble x) + (toDouble y)
> toDoublePlusLemma (Element x px) (Element y py) =
> ( toDouble ((Element x px) + (Element y py)) )
> ={ Refl }=
> ( toDouble (plus (Element x px) (Element y py)) )
> ={ Refl }=
> ( toDouble (Element (x + y) (plusPreservesNonNegativity px py)) )
> ={ Refl }=
> ( x + y )
> ={ Refl }=
> ( (toDouble (Element x px)) + (toDouble (Element y py)) )
> QED
> using implementation NumNonNegDouble
> |||
> toDoubleMultLemma : (x : NonNegDouble) -> (y : NonNegDouble) -> toDouble (x * y) = (toDouble x) * (toDouble y)
> toDoubleMultLemma (Element x px) (Element y py) =
> ( toDouble ((Element x px) * (Element y py)) )
> ={ Refl }=
> ( toDouble (mult (Element x px) (Element y py)) )
> ={ Refl }=
> ( toDouble (Element (x * y) (multPreservesNonNegativity px py)) )
> ={ Refl }=
> ( x * y )
> ={ Refl }=
> ( (toDouble (Element x px)) * (toDouble (Element y py)) )
> QED
> using implementation FractionalNonNegDouble
> |||
> toDoubleDivLemma : (x : NonNegDouble) -> (y : NonNegDouble) -> toDouble (x / y) = (toDouble x) / (toDouble y)
> toDoubleDivLemma (Element x px) (Element y py) =
> ( toDouble ((Element x px) / (Element y py)) )
> ={ Refl }=
> ( toDouble (div (Element x px) (Element y py)) )
> ={ Refl }=
> ( toDouble (Element (x / y) (divPreservesNonNegativity px py)) )
> ={ Refl }=
> ( x / y )
> ={ Refl }=
> ( (toDouble (Element x px)) / (toDouble (Element y py)) )
> QED
* Properties entailed by postulates and properties of |Double|s:
> using implementation FractionalNonNegDouble
> |||
> divPreservesPositivity : {x, y : NonNegDouble} ->
> Positive (toDouble x) -> Positive (toDouble y) -> Positive (toDouble (x / y))
> divPreservesPositivity {x} {y} pdx pdy = replace s1 s2 where
> s1 : (toDouble x) / (toDouble y) = toDouble (x / y)
> s1 = sym (toDoubleDivLemma x y)
> s2 : Positive ((toDouble x) / (toDouble y))
> s2 = Double.Postulates.divPreservesPositivity pdx pdy
> using implementation NumNonNegDouble
> |||
> plusAssociative : (x : NonNegDouble) -> (y : NonNegDouble) -> (z : NonNegDouble) ->
> x + (y + z) = (x + y) + z
> plusAssociative (Element x px) (Element y py) (Element z pz) =
> ( Element x px + ((Element y py) + (Element z pz)) )
> ={ Refl }=
> ( Element x px + Element (y + z) (plusPreservesNonNegativity py pz) )
> ={ Refl }=
> ( Element (x + (y + z)) (plusPreservesNonNegativity px (plusPreservesNonNegativity py pz)) )
> ={ subsetEqLemma1 (Element (x + (y + z)) (plusPreservesNonNegativity px (plusPreservesNonNegativity py pz)))
> (Element ((x + y) + z) (plusPreservesNonNegativity (plusPreservesNonNegativity px py) pz))
> (plusAssociative x y z) uniqueLTE }=
> ( Element ((x + y) + z) (plusPreservesNonNegativity (plusPreservesNonNegativity px py) pz) )
> ={ Refl }=
> ( Element (x + y) (plusPreservesNonNegativity px py) + Element z pz )
> ={ Refl }=
> ( ((Element x px) + (Element y py)) + Element z pz )
> QED
> {-
> ---}
|
using ResumableFunctions
@resumable function generator1()
for i = 0:2
@yield i
end
end
@resumable function generator2()
for j = 3:4
@yield j
end
end
@resumable function yield_from()
for v in generator1()
@yield v
end
for v in generator2()
@yield v
end
end
if abspath(PROGRAM_FILE) == @__FILE__
arr = []
for i in yield_from()
push!(arr, i)
end
@assert(arr == [0, 1, 2, 3, 4])
end
|
AXOR EXPERTS-CONSEILS INC. was incorporated on 1983-05-11. This company is now Inactive - Amalgamated into AXOR EXPERTS-CONSEILS INC. on 2010-06-04.
Their business is recorded as a non-distributing corporation with 50 or fewer shareholders. So far this company has been running for 35 year(s) 356 days.
1983-05-11 to 1994-06-06 DUPONT, DESMEULES ET ASSOCIES INC.
1994-06-06 to Present AXOR EXPERTS-CONSEILS INC.
AG-TEC SERVICES D'ARGENT INC.
EXPERTS EN TRAITEMENT DE L'INFORMATION (ETI) INC.
EXPERTS EN TRAITEMENT DE L'INFORMATION (E.T.I.) QUEBEC INC.
EXPERTS EN TRAITEMENT DE L'INFORMATION (E.T.I.) MONTREAL INC.
EXPERTS-CONSEILS MICHELE DAIGNAULT & ASSOCIES INC.
MICHELE DAIGNAULT & ASSOCIATES, EXPERT CONSULTANTS INC.
EXPERTS CONSEILS CUNG & ASSOCIES INC.
CUNG & ASSOCIATES EXPERT CONSULTANTS INC.
Experts conseils MEDICOM Consultants Inc.
EXPRESS SERVICE COURRIER JMS, INC. |
function [Xmat,pobjround,info] = local_search_ape_v1(Zmat,C,rrPar,opt,roundonly)
%% local search for absolute pose estimation
%% used as a subroutine for STRIDE
%% Heng Yang, June 28, 2021
if nargin < 5
roundonly = false;
end
if nargin < 4
% default round the first two eigenvectors
opt = [1,2];
end
tBound = rrPar.translationBound;
dBound = rrPar.depthBound;
blk = rrPar.blk;
FOV = rrPar.FOV;
if roundonly
[R,t,theta] = round_ape_v1(Zmat,tBound,dBound,[1]);
xtld = lift_ape_v1(R(:),t,theta,tBound,dBound,FOV);
Xmat = {xtld{1}*xtld{1}';xtld{2}*xtld{2}';xtld{3}*xtld{3}';xtld{4}*xtld{4}'};
pobjround = blktrace(blk,Xmat,C);
else
[R,t,theta] = round_ape_v1(Zmat,tBound,dBound,opt);
Zmatround = {};
pobjround = zeros(length(opt),1);
for i = 1:length(opt)
Ri = squeeze(R(:,:,i));
ti = squeeze(t(:,i));
thetai = squeeze(theta(:,i));
[out,pobjtmp] = nlp_ape(C,rrPar,Ri,ti,thetai);
Zmatround{end+1} = out.X;
pobjround(i) = pobjtmp;
end
pobjs = pobjround;
[pobjround,idx] = min(pobjround);
if pobjround == inf
fprintf(' NLP fails to find a good solution, return a rounded solution only.\n');
Ropt = squeeze(R(:,:,1));
ropt = Ropt(:);
topt = t(:,1);
thetaopt = theta(:,1);
xopt = lift_ape_v1(ropt,topt,thetaopt,tBound,dBound,FOV);
Xmat = {xopt{1} * xopt{1}';xopt{2} * xopt{2}';xopt{3}*xopt{3}';xopt{4}*xopt{4}'};
pobjround = blktrace(blk,Xmat,C);
nlpsuccess = false;
else
Xmat = Zmatround{idx};
nlpsuccess = true;
end
end
if nargout > 2
info.minidx = idx;
info.nlpsuccess = nlpsuccess;
info.pobjs = pobjs;
info.diffpobj = pobjs(1) - pobjround;
end
end
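% Illustrative call (an added sketch, not part of the original file); assumes Zmat, C and
% rrPar come from the surrounding STRIDE pipeline, and rounds the first two eigenvectors:
%   [Xmat, pobjround, info] = local_search_ape_v1(Zmat, C, rrPar, [1, 2]);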
|
lemma degree_0 [simp]: "degree 0 = 0" |
theory jiraver439
imports "../CTranslation"
begin
install_C_file "jiraver439.c"
context jiraver439
begin
thm f_body_def
thm g1_body_def
thm g2_body_def
thm h3_body_def
lemma "\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>z :== CALL f();; \<acute>ret__unsigned :== CALL f() \<lbrace> \<acute>ret__unsigned = \<acute>z + 1 \<rbrace>"
apply vcg
apply simp
done
end (* context *)
end
|
#ifndef libceed_solids_examples_misc_h
#define libceed_solids_examples_misc_h
#include <ceed.h>
#include <petsc.h>
#include "../include/structs.h"
// -----------------------------------------------------------------------------
// Context setup
// -----------------------------------------------------------------------------
// Setup context data for Jacobian evaluation
PetscErrorCode SetupJacobianCtx(MPI_Comm comm, AppCtx app_ctx, DM dm, Vec V,
Vec V_loc, CeedData ceed_data, Ceed ceed,
CeedQFunctionContext ctx_phys,
CeedQFunctionContext ctx_phys_smoother,
UserMult jacobian_ctx);
// Setup context data for prolongation and restriction operators
PetscErrorCode SetupProlongRestrictCtx(MPI_Comm comm, AppCtx app_ctx, DM dm_c,
DM dm_f, Vec V_f, Vec V_loc_c, Vec V_loc_f,
CeedData ceed_data_c, CeedData ceed_data_f,
Ceed ceed,
UserMultProlongRestr prolong_restr_ctx);
// -----------------------------------------------------------------------------
// Jacobian setup
// -----------------------------------------------------------------------------
PetscErrorCode FormJacobian(SNES snes, Vec U, Mat J, Mat J_pre, void *ctx);
// -----------------------------------------------------------------------------
// Solution output
// -----------------------------------------------------------------------------
PetscErrorCode ViewSolution(MPI_Comm comm, AppCtx app_ctx, Vec U,
PetscInt increment, PetscScalar load_increment);
PetscErrorCode ViewDiagnosticQuantities(MPI_Comm comm, DM dm_U,
UserMult user, AppCtx app_ctx, Vec U,
CeedElemRestriction elem_restr_diagnostic);
// -----------------------------------------------------------------------------
// Regression testing
// -----------------------------------------------------------------------------
PetscErrorCode RegressionTests_solids(AppCtx app_ctx, PetscReal energy);
#endif // libceed_solids_examples_misc_h
|
Welcome to Coq 8.4pl4 (November 2015)
Coq < Definition doit3times {X:Type} (f:X -> X) (n:X) : X := f (f (f n)).
doit3times is defined
Coq < Check @doit3times.
doit3times
: forall X : Type, (X -> X) -> X -> X
Coq < Definition minustwo (n : nat) : nat := match n with | O => O | S O => O | S (S n') => n' end.
minustwo is defined
Coq < Example test_doit3times: doit3times minustwo 9 = 3.
1 subgoal
============================
doit3times minustwo 9 = 3
test_doit3times < reflexivity.
No more subgoals.
test_doit3times < Qed.
reflexivity.
test_doit3times is defined
Coq < Definition negb (b:bool) : bool := match b with | true => false | false => true end.
negb is defined
Coq < Notation "x :: l" := (cons x l) (at level 60, right associativity).
Coq < Notation "[ ]" := nil.
Setting notation at level 0.
Coq < Notation "[ x ; .. ; y ]" := (cons x .. (cons y nil) ..).
Setting notation at level 0.
Coq < Fixpoint filter {X:Type} (test: X -> bool) (l:list X) : (list X) := match l with | [] => [] | h :: t => if test h then h :: (filter test t) else filter test t end.
filter is recursively defined (decreasing on 3rd argument)
Coq < Fixpoint evenb (n:nat) : bool := match n with | O => true | S O => false | S (S n') => evenb n' end.
evenb is recursively defined (decreasing on 1st argument)
Coq < Example test_filter1: filter evenb [1;2;3;4] = [2;4].
1 subgoal
============================
filter evenb [1; 2; 3; 4] = [2; 4]
test_filter1 < Proof.
1 subgoal
============================
filter evenb [1; 2; 3; 4] = [2; 4]
test_filter1 < reflexivity.
No more subgoals.
test_filter1 < Qed.
reflexivity.
test_filter1 is defined
|
-----------------------------------------------------------------------------
{- |
Module : Data.Packed
Copyright : (c) Alberto Ruiz 2006-2010
License : GPL-style
Maintainer : Alberto Ruiz (aruiz at um dot es)
Stability : provisional
Portability : uses ffi
Types for dense 'Vector' and 'Matrix' of 'Storable' elements.
-}
-----------------------------------------------------------------------------
module Data.Packed (
module Data.Packed.Vector,
module Data.Packed.Matrix,
-- module Numeric.Conversion,
-- module Data.Packed.Random,
-- module Data.Complex
) where
import Data.Packed.Vector
import Data.Packed.Matrix
--import Data.Packed.Random
--import Data.Complex
--import Numeric.Conversion |
From iris.base_logic Require Export invariants.
From iris.program_logic Require Export weakestpre.
From iris.heap_lang Require Export lang proofmode notation.
From iris.heap_lang.lib Require Export nondet_bool.
From iris_examples.proph Require Import clairvoyant_coin_spec.
(* Clairvoyant coin using (untyped) sequence prophecies. *)
Definition new_coin: val :=
λ: <>, (ref (nondet_bool #()), NewProph).
Definition read_coin : val := λ: "cp", !(Fst "cp").
Definition toss_coin : val :=
λ: "cp",
let: "c" := Fst "cp" in
let: "p" := Snd "cp" in
let: "r" := nondet_bool #() in
"c" <- "r";; resolve_proph: "p" to: "r";; #().
Section proof.
Context `{!heapG Σ}.
Definition prophecy_to_list_bool (vs : list (val * val)) : list bool :=
(λ v, bool_decide (v = #true)) ∘ snd <$> vs.
Definition coin (cp : val) (bs : list bool) : iProp Σ :=
(∃ (c : loc) (p : proph_id) (vs : list (val * val)),
⌜cp = (#c, #p)%V⌝ ∗
⌜bs ≠ []⌝ ∗ ⌜tail bs = prophecy_to_list_bool vs⌝ ∗
proph p vs ∗
from_option (λ b : bool, c ↦ #b) (∃ b : bool, c ↦ #b) (head bs))%I.
Lemma coin_exclusive (cp : val) (bs1 bs2 : list bool) :
coin cp bs1 -∗ coin cp bs2 -∗ False.
Proof.
iIntros "H1 H2".
iDestruct "H1" as (c1 p1 vs1) "(-> & _ & _ & Hp1 & _)".
iDestruct "H2" as (c2 p2 vs2) "(% & _ & _ & Hp2 & _)".
simplify_eq. iApply (proph_exclusive with "Hp1 Hp2").
Qed.
Lemma new_coin_spec : {{{ True }}} new_coin #() {{{ c bs, RET c; coin c bs }}}.
Proof.
iIntros (Φ) "_ HΦ".
wp_lam.
wp_apply wp_new_proph; first done.
iIntros (vs p) "Hp".
wp_apply nondet_bool_spec; first done.
iIntros (b) "_".
wp_alloc c as "Hc".
wp_pair.
iApply ("HΦ" $! (#c, #p)%V (b :: prophecy_to_list_bool vs)).
rewrite /coin; eauto with iFrame.
Qed.
Lemma read_coin_spec cp bs :
{{{ coin cp bs }}}
read_coin cp
{{{ b bs', RET #b; ⌜bs = b :: bs'⌝ ∗ coin cp bs }}}.
Proof.
iIntros (Φ) "Hc HΦ".
iDestruct "Hc" as (c p vs -> ? ?) "[Hp Hb]".
destruct bs as [|b bs]; simplify_eq/=.
wp_lam. wp_load.
iApply "HΦ"; iSplit; first done.
rewrite /coin; eauto 10 with iFrame.
Qed.
Lemma toss_coin_spec cp bs :
{{{ coin cp bs }}}
toss_coin cp
{{{ b bs', RET #(); ⌜bs = b :: bs'⌝ ∗ coin cp bs' }}}.
Proof.
iIntros (Φ) "Hc HΦ".
iDestruct "Hc" as (c p vs -> ? ?) "[Hp Hb]".
destruct bs as [|b bs]; simplify_eq/=.
wp_lam. do 2 (wp_proj; wp_let).
wp_apply nondet_bool_spec; first done.
iIntros (r) "_".
wp_store.
wp_apply (wp_resolve_proph with "[Hp]"); first done.
iIntros (ws) "[-> Hp]".
wp_seq.
iApply "HΦ"; iSplit; first done.
destruct r; rewrite /coin; eauto 10 with iFrame.
Qed.
End proof.
Definition clairvoyant_coin_spec_instance `{!heapG Σ} :
clairvoyant_coin_spec.clairvoyant_coin_spec Σ :=
{| clairvoyant_coin_spec.new_coin_spec := new_coin_spec;
clairvoyant_coin_spec.read_coin_spec := read_coin_spec;
clairvoyant_coin_spec.toss_coin_spec := toss_coin_spec;
clairvoyant_coin_spec.coin_exclusive := coin_exclusive |}.
Typeclasses Opaque coin.
|
(* Title: HOL/ex/Hex_Bin_Examples.thy
Author: Gerwin Klein, NICTA
*)
section \<open>Examples for hexadecimal and binary numerals\<close>
theory Hex_Bin_Examples imports MainRLT
begin
text "Hex and bin numerals can be used like normal decimal numerals in input"
lemma "0xFF = 255" by (rule refl)
lemma "0xF = 0b1111" by (rule refl)
text \<open>
Just like decimal numeral they are polymorphic, for arithmetic
they need to be constrained
\<close>
lemma "0x0A + 0x10 = (0x1A :: nat)" by simp
text "The number of leading zeros is irrelevant"
lemma "0b00010000 = 0x10" by (rule refl)
text "Unary minus works as for decimal numerals"
lemma "- 0x0A = - 10" by (rule refl)
text \<open>
Hex and bin numerals are printed as decimal: \<^term>\<open>0b10\<close>
\<close>
term "0b10"
term "0x0A"
text \<open>
The numerals 0 and 1 are syntactically different from the
constants 0 and 1. For the usual numeric types, their values
are the same, though.
\<close>
lemma "0x01 = 1" oops
lemma "0x00 = 0" oops
lemma "0x01 = (1::nat)" by simp
lemma "0b0000 = (0::int)" by simp
end
|
/-
Copyright (c) 2021 David Renshaw. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Renshaw
-/
import algebra.geom_sum
import data.rat.basic
import data.real.basic
/-!
# IMO 2013 Q5
Let `ℚ>₀` be the positive rational numbers. Let `f : ℚ>₀ → ℝ` be a function satisfying
the conditions
1. `f(x) * f(y) ≥ f(x * y)`
2. `f(x + y) ≥ f(x) + f(y)`
for all `x, y ∈ ℚ>₀`. Given that `f(a) = a` for some rational `a > 1`, prove that `f(x) = x` for
all `x ∈ ℚ>₀`.
# Solution
We provide a direct translation of the solution found in
https://www.imo-official.org/problems/IMO2013SL.pdf
-/
open_locale big_operators
lemma le_of_all_pow_lt_succ {x y : ℝ} (hx : 1 < x) (hy : 1 < y)
(h : ∀ n : ℕ, 0 < n → x^n - 1 < y^n) :
x ≤ y :=
begin
by_contra' hxy,
have hxmy : 0 < x - y := sub_pos.mpr hxy,
have hn : ∀ n : ℕ, 0 < n → (x - y) * (n : ℝ) ≤ x^n - y^n,
{ intros n hn,
have hterm : ∀ i : ℕ, i ∈ finset.range n → 1 ≤ x^i * y^(n - 1 - i),
{ intros i hi,
have hx' : 1 ≤ x ^ i := one_le_pow_of_one_le hx.le i,
have hy' : 1 ≤ y ^ (n - 1 - i) := one_le_pow_of_one_le hy.le (n - 1 - i),
calc 1 ≤ x^i : hx'
... = x^i * 1 : (mul_one _).symm
... ≤ x^i * y^(n-1-i) : mul_le_mul_of_nonneg_left hy' (zero_le_one.trans hx') },
calc (x - y) * (n : ℝ)
= (n : ℝ) * (x - y) : mul_comm _ _
... = (∑ (i : ℕ) in finset.range n, (1 : ℝ)) * (x - y) :
by simp only [mul_one, finset.sum_const, nsmul_eq_mul,
finset.card_range]
... ≤ (∑ (i : ℕ) in finset.range n, x ^ i * y ^ (n - 1 - i)) * (x-y) :
(mul_le_mul_right hxmy).mpr (finset.sum_le_sum hterm)
... = x^n - y^n : geom_sum₂_mul x y n, },
-- Choose n larger than 1 / (x - y).
obtain ⟨N, hN⟩ := exists_nat_gt (1 / (x - y)),
have hNp : 0 < N, { exact_mod_cast (one_div_pos.mpr hxmy).trans hN },
have := calc 1 = (x - y) * (1 / (x - y)) : by field_simp [ne_of_gt hxmy]
... < (x - y) * N : (mul_lt_mul_left hxmy).mpr hN
... ≤ x^N - y^N : hn N hNp,
linarith [h N hNp]
end
/--
Like le_of_all_pow_lt_succ, but with a weaker assumption for y.
-/
lemma le_of_all_pow_lt_succ' {x y : ℝ} (hx : 1 < x) (hy : 0 < y)
(h : ∀ n : ℕ, 0 < n → x^n - 1 < y^n) :
x ≤ y :=
begin
refine le_of_all_pow_lt_succ hx _ h,
by_contra' hy'' : y ≤ 1,
-- Then there exists y' such that 0 < y ≤ 1 < y' < x.
let y' := (x + 1) / 2,
have h_y'_lt_x : y' < x,
{ have hh : (x + 1)/2 < (x * 2) / 2, { linarith },
calc y' < (x * 2) / 2 : hh
... = x : by field_simp },
have h1_lt_y' : 1 < y',
{ have hh' : 1 * 2 / 2 < (x + 1) / 2, { linarith },
calc 1 = 1 * 2 / 2 : by field_simp
... < y' : hh' },
have h_y_lt_y' : y < y' := hy''.trans_lt h1_lt_y',
have hh : ∀ n, 0 < n → x^n - 1 < y'^n,
{ intros n hn,
calc x^n - 1 < y^n : h n hn
... ≤ y'^n : pow_le_pow_of_le_left hy.le h_y_lt_y'.le n },
exact h_y'_lt_x.not_le (le_of_all_pow_lt_succ hx h1_lt_y' hh)
end
lemma f_pos_of_pos {f : ℚ → ℝ} {q : ℚ} (hq : 0 < q)
(H1 : ∀ x y, 0 < x → 0 < y → f (x * y) ≤ f x * f y)
(H4 : ∀ n : ℕ, 0 < n → (n : ℝ) ≤ f n) :
0 < f q :=
begin
have num_pos : 0 < q.num := rat.num_pos_iff_pos.mpr hq,
have hmul_pos :=
calc (0 : ℝ) < q.num : int.cast_pos.mpr num_pos
... = ((q.num.nat_abs : ℤ) : ℝ) : congr_arg coe (int.nat_abs_of_nonneg num_pos.le).symm
... ≤ f q.num.nat_abs : H4 q.num.nat_abs
(int.nat_abs_pos_of_ne_zero num_pos.ne')
... = f q.num : by { rw ←int.nat_abs_of_nonneg num_pos.le, norm_cast }
... = f (q * q.denom) : by rw ←rat.mul_denom_eq_num
... ≤ f q * f q.denom : H1 q q.denom hq (nat.cast_pos.mpr q.pos),
have h_f_denom_pos :=
calc (0 : ℝ) < q.denom : nat.cast_pos.mpr q.pos
... ≤ f q.denom : H4 q.denom q.pos,
exact pos_of_mul_pos_right hmul_pos h_f_denom_pos.le,
end
lemma fx_gt_xm1 {f : ℚ → ℝ} {x : ℚ} (hx : 1 ≤ x)
(H1 : ∀ x y, 0 < x → 0 < y → f (x * y) ≤ f x * f y)
(H2 : ∀ x y, 0 < x → 0 < y → f x + f y ≤ f (x + y))
(H4 : ∀ n : ℕ, 0 < n → (n : ℝ) ≤ f n) :
(x - 1 : ℝ) < f x :=
begin
have hx0 :=
calc (x - 1 : ℝ)
< ⌊x⌋₊ : by exact_mod_cast nat.sub_one_lt_floor x
... ≤ f ⌊x⌋₊ : H4 _ (nat.floor_pos.2 hx),
obtain h_eq | h_lt := (nat.floor_le $ zero_le_one.trans hx).eq_or_lt,
{ rwa h_eq at hx0 },
calc (x - 1 : ℝ) < f ⌊x⌋₊ : hx0
... < f (x - ⌊x⌋₊) + f ⌊x⌋₊ : lt_add_of_pos_left _ (f_pos_of_pos (sub_pos.mpr h_lt) H1 H4)
... ≤ f (x - ⌊x⌋₊ + ⌊x⌋₊) : H2 _ _ (sub_pos.mpr h_lt) (nat.cast_pos.2 (nat.floor_pos.2 hx))
... = f x : by rw sub_add_cancel
end
lemma pow_f_le_f_pow {f : ℚ → ℝ} {n : ℕ} (hn : 0 < n) {x : ℚ} (hx : 1 < x)
(H1 : ∀ x y, 0 < x → 0 < y → f (x * y) ≤ f x * f y)
(H4 : ∀ n : ℕ, 0 < n → (n : ℝ) ≤ f n) :
f (x^n) ≤ (f x)^n :=
begin
induction n with pn hpn,
{ exfalso, exact nat.lt_asymm hn hn },
cases pn,
{ simp only [pow_one] },
have hpn' := hpn pn.succ_pos,
rw [pow_succ' x (pn + 1), pow_succ' (f x) (pn + 1)],
have hxp : 0 < x := zero_lt_one.trans hx,
calc f ((x ^ (pn+1)) * x)
≤ f (x ^ (pn+1)) * f x : H1 (x ^ (pn+1)) x (pow_pos hxp (pn+1)) hxp
... ≤ (f x) ^ (pn+1) * f x : (mul_le_mul_right (f_pos_of_pos hxp H1 H4)).mpr hpn'
end
lemma fixed_point_of_pos_nat_pow {f : ℚ → ℝ} {n : ℕ} (hn : 0 < n)
(H1 : ∀ x y, 0 < x → 0 < y → f (x * y) ≤ f x * f y)
(H4 : ∀ n : ℕ, 0 < n → (n : ℝ) ≤ f n)
(H5 : ∀ x : ℚ, 1 < x → (x : ℝ) ≤ f x)
{a : ℚ} (ha1 : 1 < a) (hae : f a = a) :
f (a^n) = a^n :=
begin
have hh0 : (a : ℝ) ^ n ≤ f (a ^ n),
{ exact_mod_cast H5 (a ^ n) (one_lt_pow ha1 hn.ne') },
have hh1 := calc f (a^n) ≤ (f a)^n : pow_f_le_f_pow hn ha1 H1 H4
... = (a : ℝ)^n : by rw ← hae,
exact hh1.antisymm hh0
end
lemma fixed_point_of_gt_1 {f : ℚ → ℝ} {x : ℚ} (hx : 1 < x)
(H1 : ∀ x y, 0 < x → 0 < y → f (x * y) ≤ f x * f y)
(H2 : ∀ x y, 0 < x → 0 < y → f x + f y ≤ f (x + y))
(H4 : ∀ n : ℕ, 0 < n → (n : ℝ) ≤ f n)
(H5 : ∀ x : ℚ, 1 < x → (x : ℝ) ≤ f x)
{a : ℚ} (ha1 : 1 < a) (hae : f a = a) :
f x = x :=
begin
-- Choose n such that 1 + x < a^n.
obtain ⟨N, hN⟩ := pow_unbounded_of_one_lt (1 + x) ha1,
have h_big_enough : (1:ℚ) < a^N - x := lt_sub_iff_add_lt.mpr hN,
have h1 := calc (x : ℝ) + ((a^N - x) : ℚ)
≤ f x + ((a^N - x) : ℚ) : add_le_add_right (H5 x hx) _
... ≤ f x + f (a^N - x) : add_le_add_left (H5 _ h_big_enough) _,
have hxp : 0 < x := zero_lt_one.trans hx,
have hNp : 0 < N,
{ by_contra' H, rw [nat.le_zero_iff.mp H] at hN, linarith },
have h2 := calc f x + f (a^N - x)
≤ f (x + (a^N - x)) : H2 x (a^N - x) hxp (zero_lt_one.trans h_big_enough)
... = f (a^N) : by ring_nf
... = a^N : fixed_point_of_pos_nat_pow hNp H1 H4 H5 ha1 hae
... = x + (a^N - x) : by ring,
have heq := h1.antisymm (by exact_mod_cast h2),
linarith [H5 x hx, H5 _ h_big_enough]
end
theorem imo2013_q5
(f : ℚ → ℝ)
(H1 : ∀ x y, 0 < x → 0 < y → f (x * y) ≤ f x * f y)
(H2 : ∀ x y, 0 < x → 0 < y → f x + f y ≤ f (x + y))
(H_fixed_point : ∃ a, 1 < a ∧ f a = a) :
∀ x, 0 < x → f x = x :=
begin
obtain ⟨a, ha1, hae⟩ := H_fixed_point,
have H3 : ∀ x : ℚ, 0 < x → ∀ n : ℕ, 0 < n → ↑n * f x ≤ f (n * x),
{ intros x hx n hn,
cases n,
{ exact (lt_irrefl 0 hn).elim },
induction n with pn hpn,
{ simp only [one_mul, nat.cast_one] },
calc (↑pn + 1 + 1) * f x
= ((pn : ℝ) + 1) * f x + 1 * f x : add_mul (↑pn + 1) 1 (f x)
... = (↑pn + 1) * f x + f x : by rw one_mul
... ≤ f ((↑pn.succ) * x) + f x : by exact_mod_cast add_le_add_right
(hpn pn.succ_pos) (f x)
... ≤ f ((↑pn + 1) * x + x) : by exact_mod_cast H2 _ _
(mul_pos pn.cast_add_one_pos hx) hx
... = f ((↑pn + 1) * x + 1 * x) : by rw one_mul
... = f ((↑pn + 1 + 1) * x) : congr_arg f (add_mul (↑pn + 1) 1 x).symm },
have H4 : ∀ n : ℕ, 0 < n → (n : ℝ) ≤ f n,
{ intros n hn,
have hf1 : 1 ≤ f 1,
{ have a_pos : (0 : ℝ) < a := rat.cast_pos.mpr (zero_lt_one.trans ha1),
suffices : ↑a * 1 ≤ ↑a * f 1, from (mul_le_mul_left a_pos).mp this,
calc ↑a * 1 = ↑a : mul_one ↑a
... = f a : hae.symm
... = f (a * 1) : by rw mul_one
... ≤ f a * f 1 : (H1 a 1) (zero_lt_one.trans ha1) zero_lt_one
... = ↑a * f 1 : by rw hae },
calc (n : ℝ) = (n : ℝ) * 1 : (mul_one _).symm
... ≤ (n : ℝ) * f 1 : mul_le_mul_of_nonneg_left hf1 (nat.cast_nonneg _)
... ≤ f (n * 1) : H3 1 zero_lt_one n hn
... = f n : by rw mul_one },
have H5 : ∀ x : ℚ, 1 < x → (x : ℝ) ≤ f x,
{ intros x hx,
have hxnm1 : ∀ n : ℕ, 0 < n → (x : ℝ)^n - 1 < (f x)^n,
{ intros n hn,
calc (x : ℝ)^n - 1 < f (x^n) : by exact_mod_cast fx_gt_xm1 (one_le_pow_of_one_le hx.le n)
H1 H2 H4
... ≤ (f x)^n : pow_f_le_f_pow hn hx H1 H4 },
have hx' : 1 < (x : ℝ) := by exact_mod_cast hx,
have hxp : 0 < x := zero_lt_one.trans hx,
exact le_of_all_pow_lt_succ' hx' (f_pos_of_pos hxp H1 H4) hxnm1 },
have h_f_commutes_with_pos_nat_mul : ∀ n : ℕ, 0 < n → ∀ x : ℚ, 0 < x → f (n * x) = n * f x,
{ intros n hn x hx,
have h2 : f (n * x) ≤ n * f x,
{ cases n,
{ exfalso, exact nat.lt_asymm hn hn },
cases n,
{ simp only [one_mul, nat.cast_one] },
have hfneq : f (n.succ.succ) = n.succ.succ,
{ have := fixed_point_of_gt_1
(nat.one_lt_cast.mpr (nat.succ_lt_succ n.succ_pos)) H1 H2 H4 H5 ha1 hae,
rwa (rat.cast_coe_nat n.succ.succ) at this },
rw ← hfneq,
exact H1 (n.succ.succ : ℚ) x (nat.cast_pos.mpr hn) hx },
exact h2.antisymm (H3 x hx n hn) },
-- For the final calculation, we expand x as (2*x.num) / (2*x.denom), because
-- we need the top of the fraction to be strictly greater than 1 in order
-- to apply fixed_point_of_gt_1.
intros x hx,
let x2denom := 2 * x.denom,
let x2num := 2 * x.num,
have hx2pos := calc 0 < x.denom : x.pos
... < x.denom + x.denom : lt_add_of_pos_left x.denom x.pos
... = 2 * x.denom : by ring,
have hxcnez : (x.denom : ℚ) ≠ (0 : ℚ) := ne_of_gt (nat.cast_pos.mpr x.pos),
have hx2cnezr : (x2denom : ℝ) ≠ (0 : ℝ) := nat.cast_ne_zero.mpr (ne_of_gt hx2pos),
have hrat_expand2 := calc x = x.num / x.denom : by exact_mod_cast rat.num_denom.symm
... = x2num / x2denom : by { field_simp [-rat.num_div_denom], linarith },
have h_denom_times_fx :=
calc (x2denom : ℝ) * f x = f (x2denom * x) : (h_f_commutes_with_pos_nat_mul
x2denom hx2pos x hx).symm
... = f (x2denom * (x2num / x2denom)) : by rw hrat_expand2
... = f x2num : by { congr, field_simp, ring },
have h_fx2num_fixed : f x2num = x2num,
{ have hx2num_gt_one : (1 : ℚ) < (2 * x.num : ℤ),
{ norm_cast, linarith [rat.num_pos_iff_pos.mpr hx] },
have hh := fixed_point_of_gt_1 hx2num_gt_one H1 H2 H4 H5 ha1 hae,
rwa (rat.cast_coe_int x2num) at hh },
calc f x = f x * 1 : (mul_one (f x)).symm
... = f x * (x2denom / x2denom) : by rw ←(div_self hx2cnezr)
... = (f x * x2denom) / x2denom : mul_div_assoc' (f x) _ _
... = (x2denom * f x) / x2denom : by rw mul_comm
... = f x2num / x2denom : by rw h_denom_times_fx
... = x2num / x2denom : by rw h_fx2num_fixed
... = (((x2num : ℚ) / (x2denom : ℚ) : ℚ) : ℝ) : by norm_cast
... = x : by rw ←hrat_expand2
end
|
/* gsl_histogram_copy.c
* Copyright (C) 2000 Simone Piccardi
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/***************************************************************
*
* File gsl_histogram_copy.c:
* Routine to copy an histogram.
* Need GSL library and headers.
*
* Author: S. Piccardi
* Jan. 2000
*
***************************************************************/
#include <config.h>
#include <stdlib.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_histogram.h>
/*
* gsl_histogram_copy:
* copy the contents of an histogram into another
*/
int
gsl_histogram_memcpy (gsl_histogram * dest, const gsl_histogram * src)
{
size_t n = src->n;
size_t i;
if (dest->n != src->n)
{
GSL_ERROR ("histograms have different sizes, cannot copy",
GSL_EINVAL);
}
for (i = 0; i <= n; i++)
{
dest->range[i] = src->range[i];
}
for (i = 0; i < n; i++)
{
dest->bin[i] = src->bin[i];
}
return GSL_SUCCESS;
}
/*
* gsl_histogram_duplicate:
* duplicate an histogram creating
* an identical new one
*/
gsl_histogram *
gsl_histogram_clone (const gsl_histogram * src)
{
size_t n = src->n;
size_t i;
gsl_histogram *h;
h = gsl_histogram_calloc_range (n, src->range);
if (h == 0)
{
GSL_ERROR_VAL ("failed to allocate space for histogram struct",
GSL_ENOMEM, 0);
}
for (i = 0; i < n; i++)
{
h->bin[i] = src->bin[i];
}
return h;
}
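/* Illustrative usage (an added sketch, not part of the original source):
 *
 *   gsl_histogram *copy = gsl_histogram_clone (h);    duplicate h
 *   ...
 *   gsl_histogram_memcpy (copy, h);                   refresh the copy in place
 */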
|
State Before: n m : Nat
⊢ succ n - succ m = n - m State After: no goals Tactic: induction m with
| zero => exact rfl
| succ m ih => apply congrArg pred ih State Before: case zero
n : Nat
⊢ succ n - succ zero = n - zero State After: no goals Tactic: exact rfl State Before: case succ
n m : Nat
ih : succ n - succ m = n - m
⊢ succ n - succ (succ m) = n - succ m State After: no goals Tactic: apply congrArg pred ih |
import data.real.irrational
import topology.basic
import algebra.order.floor
--OUTPUT 1
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℝ) (h1 : ¬ (α ∈ ℚ)) :
let subset_of_unit : set ℝ := {a | ∃ a', (a : ℝ) = a' % 1 ∧ a' ∈ ℤ} in
let dense_subset : set ℝ → Prop := λ (S : set ℝ), ∀ x, x ∈ S → ∃ y, y ∈ S ∧ |y - x| < (1 : ℝ) in
subset_of_unit ⊆ (Icc 0 1) ∧ dense_subset subset_of_unit :=
begin
have h2 : ∀ (i j : ℤ), i ≠ j → {i * α % 1} ≠ {j * α % 1},
from sorry,
have h3 : ∀ (i j : ℤ), i ≠ j → {i * α % 1} ≠ {j * α % 1},
from sorry,
have h4 : ∀ (i j : ℤ), i ≠ j → {i * α % 1} ≠ {j * α % 1},
from sorry,
have h5 : {a | ∃ a', (a : ℝ) = a' % 1 ∧ a' ∈ ℤ} ⊆ (Icc 0 1),
from sorry,
have h6 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h7 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h8 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h9 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h10 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h11 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h12 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h13 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h14 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h15 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h16 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h17 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h18 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h19 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h20 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h21 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h22 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h23 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h24 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h25 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h26 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h27 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h28 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h29 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h30 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h31 : ∀ (S : set ℝ), dense_subset S → ∀ x, x ∈ S → ∃ y : ℝ, y ∈ S ∧ |y - x| < (1 : ℝ),
from sorry,
have h32 : ∀ (S : set ℝ), dense_subset S → ∀ x
end --Needs more than 2000 tokens!
--OUTPUT 2
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit : ∀ (α : ℝ) (α_irrational : ¬ ∃ (i j : ℤ), α = i / j), ∃ (S : set ℝ) (S_infinite : infinite S) (S_dense : is_dense S),
(∀ (n : ℤ), ∃ (a : ℝ), a ∈ S ∧ a = n / α) ∧
(∀ (a : ℝ), a ∈ S → ∀ (b : ℝ), b ∈ S → ∀ (n : ℤ), ∃ (a_b : ℝ), a_b ∈ S ∧ a_b = |a - b| ∧ a_b = n / α) :=
begin
assume (α : ℝ) (α_irrational : ¬ ∃ (i j : ℤ), α = i / j),
let S := {x : ℝ | ∃ (i : ℤ), x = i / α},
have S_infinite : infinite S, from sorry,
have S_dense : is_dense S, from sorry,
have S_nonempty : nonempty S, from sorry,
have S_in_unit_interval : ∀ (a : ℝ), a ∈ S → ∃ (i : ℤ), a = i / α ∧ a ∈ (Icc (0 : ℝ) 1), from sorry,
have S_bounded : bounded S, from sorry,
have h1 : ∀ (a b : ℝ), a ∈ S ∧ b ∈ S → (∃ (i j : ℤ), a = i / α ∧ b = j / α) ∧ ¬ (a = b), from sorry,
have h2 : ∀ (a : ℝ), a ∈ S → (∃ (i : ℤ), a = i / α) ∧ ∀ (b : ℝ), b ∈ S → ¬ (a = b), from sorry,
have h3 : ∀ (a : ℝ), a ∈ S → (∃ (i : ℤ), a = i / α) ∧ ∀ (b : ℝ), b ∈ S → |a - b| ∈ S, from sorry,
use S,
have h4 : (∀ (n : ℤ), ∃ (a : ℝ), a ∈ S ∧ a = n / α) ∧
(∀ (a : ℝ), a ∈ S → ∀ (b : ℝ), b ∈ S → ∀ (n : ℤ), ∃ (a_b : ℝ), a_b ∈ S ∧ a_b = |a - b| ∧ a_b = n / α), from sorry,
show (∀ (n : ℤ), ∃ (a : ℝ), a ∈ S ∧ a = n / α) ∧
(∀ (a : ℝ), a ∈ S → ∀ (b : ℝ), b ∈ S → ∀ (n : ℤ), ∃ (a_b : ℝ), a_b ∈ S ∧ a_b = |a - b| ∧ a_b = n / α), from sorry,
end
--OUTPUT 3
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=irrational_orbit_dense (α : ℝ) (h1 : irrational α) : ∃ S, dense S ∧ S ⊆ {x : ℝ | 0 ≤ x ∧ x ≤ 1} :=
begin
have h2 : ∀ (i : ℤ) (j : ℤ), i ≠ j → ¬(i * α) = (j * α),
from sorry,
have h3 : ∃ S, infinite S ∧ S ⊆ {x : ℝ | 0 ≤ x ∧ x ≤ 1},
from sorry,
have h4 : ∃ S, dense S ∧ S ⊆ {x : ℝ | 0 ≤ x ∧ x ≤ 1},
from sorry,
show ∃ S, dense S ∧ S ⊆ {x : ℝ | 0 ≤ x ∧ x ≤ 1}, from sorry,
end
--OUTPUT 4
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_irrational_orbit (α : ℝ) (hα : α ∉ ℚ) :
∀ ε > 0, ∃ N ∈ ℤ, ∀ i > N, ∃ j ∈ ℤ, |(i*α) - j| < ε :=
begin
assume (ε : ℝ) (hε : ε > 0),
have h1 : ∃ N ∈ ℤ, ∀ i > N, ∃ j ∈ ℤ, |i*α - j| < ε, from sorry,
show ∃ N ∈ ℤ, ∀ i > N, ∃ j ∈ ℤ, |(i*α) - j| < ε, from sorry,
end
--OUTPUT 5
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit {α : Type*} [linear_ordered_field α] (α_irr : ¬ is_rat α) :
∀ (a : α),
let
S : set α := {(n : ℤ) ↦ n * a | n : ℤ}
in
∀ (y : α), ∀ (ε : α), ε > 0 → ∃ (x : α), x ∈ S ∧ abs(y - x) < ε :=
begin
assume (a : α) (y : α) (ε : α),
assume (h1 : ε > 0),
have h2 : ∀ (a b : ℤ), a ≠ b → ∃ (n : ℤ), (n : α) * a ≠ (n : α) * b, from by {
assume (a b : ℤ) (h2 : a ≠ b),
have h3 : (a - b) ≠ 0, from sorry,
have h4 : ∃ (n : ℤ), (n : α) * (a - b) ≠ 0, from by {
have h5 : ∃ (n : ℤ), (n : α) * (a - b) = 1, from sorry,
have h6 : ∃ (n : ℤ), (n : α) * (a - b) ≠ 0, from sorry,
show ∃ (n : ℤ), (n : α) * (a - b) ≠ 0, from sorry,
},
cases h4 with (n : ℤ) (h4 : (n : α) * (a - b) ≠ 0),
use n,
show (n : α) * a ≠ (n : α) * b, from sorry,
},
have h3 : ∀ (a b : ℤ), a ≠ b → ∃ (n : ℤ), (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S, from by {
assume (a b : ℤ) (h3 : a ≠ b),
have h4 : ∃ (n : ℤ), (n : α) * a ≠ (n : α) * b, from sorry,
cases h4 with (n : ℤ) (h4 : (n : α) * a ≠ (n : α) * b),
use n,
show (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S, from sorry,
},
have h4 : ∀ (a b : ℤ), a ≠ b → ∃ (n : ℤ), (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S ∧ abs((n : α) * a - (n : α) * b) < ε, from by {
assume (a b : ℤ) (h4 : a ≠ b),
have h5 : ∃ (n : ℤ), (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S, from sorry,
cases h5 with (n : ℤ) (h5 : (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S),
have h6 : ∃ (n : ℤ), abs((n : α) * a - (n : α) * b) < ε, from sorry,
cases h6 with (n : ℤ) (h6 : abs((n : α) * a - (n : α) * b) < ε),
use n,
show (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S ∧ abs((n : α) * a - (n : α) * b) < ε, from sorry,
},
have h5 : ∀ (a b : ℤ), a ≠ b → ∃ (n : ℤ), (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S ∧ abs((n : α) * a - (n : α) * b) < ε ∧ abs((n : α) * a - y) < ε, from by {
assume (a b : ℤ) (h5 : a ≠ b),
have h6 : ∃ (n : ℤ), (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S ∧ abs((n : α) * a - (n : α) * b) < ε, from sorry,
cases h6 with (n : ℤ) (h6 : (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S ∧ abs((n : α) * a - (n : α) * b) < ε),
have h7 : ∃ (n : ℤ), abs((n : α) * a - y) < ε, from sorry,
cases h7 with (n : ℤ) (h7 : abs((n : α) * a - y) < ε),
use n,
show (n : α) * a ≠ (n : α) * b ∧ (n : α) * a ∈ S ∧ (n : α) * b ∈ S ∧ abs((n : α) * a - (n : α) * b) < ε ∧ abs((n : α) * a - y) < ε, from sorry,
},
show ∃ (x : α), x ∈ S ∧ abs(y - x) < ε, from sorry,
end
--OUTPUT 6
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℝ) (h : α ∉ ℚ) : ∃ S : set ℝ,
(∀ (i j : ℤ), i ≠ j → {i • α} ≠ {j • α}) ∧
∀ (y : ℝ), y ∈ [0,1] → ∃ (x : ℝ), x ∈ S ∧ |y - x| < 1 :=
sorry
/--`theorem`
Product of two irrational numbers is irrational
`proof`
Let $\alpha$ and $\beta$ be irrational numbers. We will show that $\alpha \beta$ is irrational.
Suppose $\alpha \beta \in \mathbb{Q}$. Then there exist $m, n \in \mathbb{Z}$, $m>0$, $n>0$ such that
$$
\alpha \beta=\frac{m}{n}.
$$
We know that $\alpha \notin \mathbb{Q}$, so $\frac{m}{n} \neq \alpha$. But, since $\alpha$ is irrational, we must have $\frac{m}{n}>\alpha$.
Since $\frac{m}{n}>\alpha$ and $\alpha \beta=\frac{m}{n}$, we have $\alpha \beta>\alpha^2$.
Multiplying by $\alpha$, we get $\alpha^2 \beta>\alpha^3$.
Since $\alpha \beta$ is irrational, we have $\alpha^2 \beta \neq \alpha^3$. But, since $\alpha^2 \beta>\alpha^3$, we must have $\alpha^2 \beta>\alpha^3$.
Thus, we have $\alpha^2 \beta>\alpha^3$ and $\alpha^2 \beta \neq \alpha^3$.
This is a contradiction, since if $a>b$ and $a \neq b$, then $a$ is not less than $b$.
Hence, $\alpha \beta$ must be irrational.
QED
-/
theorem product_of_two_irrationals (α β : ℝ) (h1 : α ∉ ℚ) (h2 : β ∉ ℚ) : α * β ∉ ℚ :=
sorry
end
--OUTPUT 7
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=irrational_orbit_dense (α : ℝ) (h : irrational α) : ∀ x ∈ set.Icc 0 1, ∃ y ∈ set.Icc 0 1, y ≠ x ∧ y ∈ {n : ℤ | n * α} :=
begin
assume (x : ℝ) (hx : x ∈ set.Icc 0 1),
let S := {n : ℤ | n * α},
have h1 : ∀ i j : ℤ, (i : ℝ) * α ≠ (j : ℝ) * α,
from begin
assume (i j : ℤ) (h1 : (i : ℝ) * α = (j : ℝ) * α),
have h2 : (i : ℝ) * α - ⌊(i : ℝ) * α⌋ = (j : ℝ) * α - ⌊(j : ℝ) * α⌋, from sorry,
have h3 : (i : ℝ) * α - ⌊(i : ℝ) * α⌋ = (j : ℝ) * α - ⌊(j : ℝ) * α⌋, from sorry,
have h4 : α = (⌊(i : ℝ) * α⌋ - ⌊(j : ℝ) * α⌋)/(i - j), from sorry,
have h5 : α ∈ ℚ, from sorry,
have h6 : irrational α, from sorry,
have h7 : ¬ (α ∈ ℚ), from sorry,
show false, from sorry,
end,
have h2 : infinite (S : set ℝ), from sorry,
have h3 : ∃ x ∈ S, ∀ ε > 0, ∃ y ∈ S, y ≠ x ∧ |y - x| < ε, from sorry,
obtain x0 hx0, from h3,
have h4 : ∀ ε > 0, ∃ y ∈ S, y ≠ x0 ∧ |y - x0| < ε, from sorry,
have h5 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ y ∈ S, y ≠ x0 ∧ ε' < ε, from sorry,
have h6 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ y ∈ S, y ≠ x0 ∧ ε' < ε ∧ ε'' < ε, from sorry,
have h7 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ n : ℤ, n ≠ 0 ∧ ε' < ε ∧ ε'' < ε ∧ (n : ℝ) * α ≠ x0, from sorry,
have h8 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ n : ℤ, n ≠ 0 ∧ ε' < ε ∧ ε'' < ε ∧ (n : ℝ) * α ≠ x0 ∧ (n : ℝ) * α ∈ S, from sorry,
have h9 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ n : ℤ, n ≠ 0 ∧ ε' < ε ∧ ε'' < ε ∧ (n : ℝ) * α ≠ x0 ∧ (n : ℝ) * α ∈ S ∧ (n : ℝ) * α ∈ set.Icc 0 1, from sorry,
have h10 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ n : ℤ, n ≠ 0 ∧ ε' < ε ∧ ε'' < ε ∧ (n : ℝ) * α ≠ x0 ∧ (n : ℝ) * α ∈ S ∧ (n : ℝ) * α ∈ set.Icc 0 1 ∧ (n : ℝ) * α ≠ x, from sorry,
have h11 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ n : ℤ, n ≠ 0 ∧ ε' < ε ∧ ε'' < ε ∧ (n : ℝ) * α ≠ x0 ∧ (n : ℝ) * α ∈ S ∧ (n : ℝ) * α ∈ set.Icc 0 1 ∧ (n : ℝ) * α ≠ x ∧ (n : ℝ) * α ∈ {n : ℤ | n * α}, from sorry,
have h12 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ n : ℤ, n ≠ 0 ∧ ε' < ε ∧ ε'' < ε ∧ (n : ℝ) * α ≠ x0 ∧ (n : ℝ) * α ∈ S ∧ (n : ℝ) * α ∈ set.Icc 0 1 ∧ (n : ℝ) * α ≠ x ∧ (n : ℝ) * α ∈ {n : ℤ | n * α} ∧ (n : ℝ) * α ∈ set.Icc 0 1, from sorry,
have h13 : ∀ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ n : ℤ, n ≠ 0 ∧ ε' < ε ∧ ε'' < ε ∧ (n : ℝ) * α ≠ x0 ∧ (n : ℝ) * α ∈ S ∧ (n : ℝ) * α ∈ set.Icc 0 1 ∧ (n : ℝ) * α ≠ x ∧ (n : ℝ) * α ∈ {n : ℤ | n * α} ∧ (n : ℝ) * α ∈ set.Icc 0 1 ∧ (n : ℝ) * α ∈ S, from sorry,
have h14 : ∃ ε > 0, ∃ (ε' : ℝ) > 0, ∃ (ε'' : ℝ) > 0, ∃ n : ℤ, n ≠ 0 ∧ ε' < ε ∧ ε'' < ε ∧ (n : ℝ) * α ≠ x0 ∧ (n : ℝ) * α ∈ S ∧ (n : ℝ) * α ∈ set.Icc 0 1 ∧ (n : ℝ) * α ≠ x ∧ (n : ℝ) * α ∈ {n : ℤ | n * α} ∧ (n : ℝ) * α ∈ set.Icc 0 1 ∧ (n : ℝ) * α ∈ S, from sorry,
obtain (ε : ℝ) (h15 : ε > 0) (ε' : ℝ) (h16 : ε' > 0) (ε'' : ℝ) (h17 : ε'' > 0) (n : ℤ) (hn : n ≠ 0) (hn0 : (n : ℝ) * α ≠ x0) (hn1 : (n : ℝ) * α ∈ S) (hn2 : (n : ℝ) * α ∈ set.Icc 0 1) (hn3 : (n : ℝ) * α ≠ x) (hn4 : (n : ℝ) * α ∈ {n : ℤ | n * α}) (hn5 : (n : ℝ) * α ∈ set.Icc 0 1) (hn6 : (n :
end --Needs more than 2000 tokens!
--OUTPUT 8
theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=dense_irrational_orbit (α : ℝ) [irrational α] : ∃ S : set ℝ, ∀ x ∈ S, x ∈ Icc 0 1 ∧ ∀ y ∈ S, x ≠ y → x ≠ y ∧ dense S :=
begin
have h1 : ∀ (i j : ℤ), i ≠ j → {i * α} ≠ {j * α}, from sorry,
have h2 : ∀ i : ℤ, {i * α} ∈ Icc 0 1, from sorry,
have h3 : ∀ i : ℤ, {i * α} ∈ set.range (λ (i : ℤ), {i * α}), from sorry,
have h4 : set.range (λ (i : ℤ), {i * α}) ⊆ Icc 0 1, from sorry,
have h5 : dense (set.range (λ (i : ℤ), {i * α})), from sorry,
have h6 : set.range (λ (i : ℤ), {i * α}) ≠ ∅, from sorry,
show ∃ S : set ℝ, ∀ x ∈ S, x ∈ Icc 0 1 ∧ ∀ y ∈ S, x ≠ y → x ≠ y ∧ dense S,
from ⟨set.range (λ (i : ℤ), {i * α}),
sorry,
sorry,
sorry,
sorry,
sorry⟩,
end
/- FEW SHOT PROMPTS TO CODEX(START)
/--`theorem`
Power Set is Closed under Intersection
Let $S$ be a set.
Let $\powerset S$ be the power set of $S$.
Then:
:$\forall A, B \in \powerset S: A \cap B \in \powerset S$
`proof`
Let $A, B \in \powerset S$.
Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$.
From Intersection is Subset we have that $A \cap B \subseteq A$.
It follows from Subset Relation is Transitive that $A \cap B \subseteq S$.
Thus $A \cap B \in \powerset S$ and closure is proved.
{{qed}}
-/
theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S :=
begin
assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S),
have h1 : (A ⊆ S) ∧ (B ⊆ S), from sorry,
have h2 : (A ∩ B) ⊆ A, from sorry,
have h3 : (A ∩ B) ⊆ S, from sorry,
show (A ∩ B) ∈ 𝒫 S, from sorry,
end
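-- A term-mode sketch of the same closure fact (not part of the prompt above). The
-- lemma names `set.mem_powerset_iff`, `set.inter_subset_left` and `set.subset.trans`
-- are assumed mathlib names; treat them as placeholders if they differ.
example {α : Type*} (S A B : set α) (hA : A ∈ 𝒫 S) (hB : B ∈ 𝒫 S) : (A ∩ B) ∈ 𝒫 S :=
set.mem_powerset_iff.mpr
  (set.subset.trans (set.inter_subset_left A B) (set.mem_powerset_iff.mp hA))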
/--`theorem`
Square of Sum
:$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$
`proof`
Follows from the distribution of multiplication over addition:
{{begin-eqn}}
{{eqn | l = \left({x + y}\right)^2
| r = \left({x + y}\right) \cdot \left({x + y}\right)
}}
{{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right)
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x^2 + 2xy + y^2
| c =
}}
{{end-eqn}}
{{qed}}
-/
theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
begin
calc (x + y)^2 = (x+y)*(x+y) : by sorry
... = x*(x+y) + y*(x+y) : by sorry
... = x*x + x*y + y*x + y*y : by sorry
... = x^2 + 2*x*y + y^2 : by sorry,
end
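-- Each `sorry` in the calc above is an elementary ring identity over ℝ; in mathlib
-- the whole statement can typically be discharged in one step (a sketch, not part
-- of the prompt above):
example (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) := by ring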
/--`theorem`
Identity of Group is Unique
Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$.
`proof`
From Group has Latin Square Property, there exists a unique $x \in G$ such that:
:$a x = b$
and there exists a unique $y \in G$ such that:
:$y a = b$
Setting $b = a$, this becomes:
There exists a unique $x \in G$ such that:
:$a x = a$
and there exists a unique $y \in G$ such that:
:$y a = a$
These $x$ and $y$ are both $e$, by definition of identity element.
{{qed}}
-/
theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a :=
begin
have h1 : ∀ a b : G, ∃! x : G, a * x = b, from sorry,
have h2 : ∀ a b : G, ∃! y : G, y * a = b, from sorry,
have h3 : ∀ a : G, ∃! x : G, a * x = a, from sorry,
have h4 : ∀ a : G, ∃! y : G, y * a = a, from sorry,
have h5 : ∀ a : G, classical.some (h3 a) = (1 : G), from sorry,
have h6 : ∀ a : G, classical.some (h4 a) = (1 : G), from sorry,
show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by {
use (1 : G),
have h7 : ∀ e : G, (∀ a : G, e * a = a ∧ a * e = a) → e = 1, from by {
assume (e : G) (h7 : ∀ a : G, e * a = a ∧ a * e = a),
have h8 : ∀ a : G, e = classical.some (h3 a), from sorry,
have h9 : ∀ a : G, e = classical.some (h4 a), from sorry,
show e = (1 : G), from sorry,
},
sorry,
}
end
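-- A shorter term-mode sketch of the same uniqueness statement (not part of the
-- prompt above); it only uses `one_mul` and `mul_one`, and builds the `∃!` from a
-- witness together with a uniqueness proof.
example {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a :=
⟨1, λ a, ⟨one_mul a, mul_one a⟩,
 λ e he, (mul_one e).symm.trans (he 1).1⟩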
/--`theorem`
Squeeze Theorem for Real Numbers
Let $\sequence {x_n}$, $\sequence {y_n}$ and $\sequence {z_n}$ be sequences in $\R$.
Let $\sequence {y_n}$ and $\sequence {z_n}$ both be convergent to the following limit:
:$\ds \lim_{n \mathop \to \infty} y_n = l, \lim_{n \mathop \to \infty} z_n = l$
Suppose that:
:$\forall n \in \N: y_n \le x_n \le z_n$
Then:
:$x_n \to l$ as $n \to \infty$
that is:
:$\ds \lim_{n \mathop \to \infty} x_n = l$
`proof`
From Negative of Absolute Value:
:$\size {x - l} < \epsilon \iff l - \epsilon < x < l + \epsilon$
Let $\epsilon > 0$.
We need to prove that:
:$\exists N: \forall n > N: \size {x_n - l} < \epsilon$
As $\ds \lim_{n \mathop \to \infty} y_n = l$ we know that:
:$\exists N_1: \forall n > N_1: \size {y_n - l} < \epsilon$
As $\ds \lim_{n \mathop \to \infty} z_n = l$ we know that:
:$\exists N_2: \forall n > N_2: \size {z_n - l} < \epsilon$
Let $N = \max \set {N_1, N_2}$.
Then if $n > N$, it follows that $n > N_1$ and $n > N_2$.
So:
:$\forall n > N: l - \epsilon < y_n < l + \epsilon$
:$\forall n > N: l - \epsilon < z_n < l + \epsilon$
But:
:$\forall n \in \N: y_n \le x_n \le z_n$
So:
:$\forall n > N: l - \epsilon < y_n \le x_n \le z_n < l + \epsilon$
and so:
:$\forall n > N: l - \epsilon < x_n < l + \epsilon$
So:
:$\forall n > N: \size {x_n - l} < \epsilon$
Hence the result.
{{qed}}
-/
theorem squeeze_theorem_real_numbers (x y z : ℕ → ℝ) (l : ℝ) :
let seq_limit : (ℕ → ℝ) → ℝ → Prop := λ (u : ℕ → ℝ) (l : ℝ), ∀ ε > 0, ∃ N, ∀ n > N, |u n - l| < ε in
seq_limit y l → seq_limit z l → (∀ n : ℕ, (y n) ≤ (x n) ∧ (x n) ≤ (z n)) → seq_limit x l :=
begin
assume seq_limit (h2 : seq_limit y l) (h3 : seq_limit z l) (h4 : ∀ (n : ℕ), y n ≤ x n ∧ x n ≤ z n) (ε),
have h5 : ∀ x, |x - l| < ε ↔ (((l - ε) < x) ∧ (x < (l + ε))),
from sorry,
assume (h7 : ε > 0),
cases h2 ε h7 with N1 h8,
cases h3 ε h7 with N2 h9,
let N := max N1 N2,
use N,
have h10 : ∀ n > N, n > N1 ∧ n > N2 := sorry,
have h11 : ∀ n > N, (((l - ε) < (y n)) ∧ ((y n) ≤ (x n))) ∧ (((x n) ≤ (z n)) ∧ ((z n) < l+ε)),
from sorry,
have h15 : ∀ n > N, ((l - ε) < (x n)) ∧ ((x n) < (l+ε)),
from sorry,
show ∀ (n : ℕ), n > N → |x n - l| < ε,
from sorry,
end
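-- The squeeze argument is also available in mathlib as a single lemma; the sketch
-- below assumes the name and signature of `tendsto_of_tendsto_of_tendsto_of_le_of_le`
-- (treat it as an assumption if that name has changed).
example (x y z : ℕ → ℝ) (l : ℝ)
  (hy : filter.tendsto y filter.at_top (nhds l))
  (hz : filter.tendsto z filter.at_top (nhds l))
  (h : ∀ n : ℕ, y n ≤ x n ∧ x n ≤ z n) :
  filter.tendsto x filter.at_top (nhds l) :=
tendsto_of_tendsto_of_tendsto_of_le_of_le hy hz (λ n, (h n).1) (λ n, (h n).2)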
/--`theorem`
Density of irrational orbit
The fractional parts of the integer multiples of an irrational number form a dense subset of the unit interval
`proof`
Let $\alpha$ be an irrational number. Then for distinct $i, j \in \mathbb{Z}$, we must have $\{i \alpha\} \neq\{j \alpha\}$. If this were not true, then
$$
i \alpha-\lfloor i \alpha\rfloor=\{i \alpha\}=\{j \alpha\}=j \alpha-\lfloor j \alpha\rfloor,
$$
which yields the false statement $\alpha=\frac{\lfloor i \alpha\rfloor-\lfloor j \alpha\rfloor}{i-j} \in \mathbb{Q}$. Hence,
$$
S:=\{\{i \alpha\} \mid i \in \mathbb{Z}\}
$$
is an infinite subset of $\left[0,1\right]$.
By the Bolzano-Weierstrass theorem, $S$ has a limit point in $[0, 1]$. One can thus find pairs of elements of $S$ that are arbitrarily close. Since (the absolute value of) the difference of any two elements of $S$ is also an element of $S$, it follows that $0$ is a limit point of $S$.
To show that $S$ is dense in $[0, 1]$, consider $y \in[0,1]$, and $\epsilon>0$. Then by selecting $x \in S$ such that $\{x\}<\epsilon$ (which exists as $0$ is a limit point), and $N$ such that $N \cdot\{x\} \leq y<(N+1) \cdot\{x\}$, we get: $|y-\{N x\}|<\epsilon$.
QED
-/
theorem
FEW SHOT PROMPTS TO CODEX(END)-/
|
[STATEMENT]
lemma vec_list: "vec_of_list (list_of_vec v) = v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vec_of_list (list_of_vec v) = v
[PROOF STEP]
apply transfer
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>v. \<exists>n f. v = (n, mk_vec n f) \<and> True \<Longrightarrow> (length (case v of (n, v) \<Rightarrow> map v [0..<n]), mk_vec (length (case v of (n, v) \<Rightarrow> map v [0..<n])) ((!) (case v of (n, v) \<Rightarrow> map v [0..<n]))) = v
[PROOF STEP]
unfolding mk_vec_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>v. \<exists>n f. v = (n, \<lambda>i. if i < n then f i else undef_vec (i - n)) \<and> True \<Longrightarrow> (length (case v of (n, v) \<Rightarrow> map v [0..<n]), \<lambda>i. if i < length (case v of (n, v) \<Rightarrow> map v [0..<n]) then (case v of (n, v) \<Rightarrow> map v [0..<n]) ! i else undef_vec (i - length (case v of (n, v) \<Rightarrow> map v [0..<n]))) = v
[PROOF STEP]
by auto |
Module NatDefs.
Theorem S_n_eq_add : forall n : nat,
1 + n = S n.
Proof.
intros.
simpl.
reflexivity.
Qed.
Theorem mult_S_1 : forall n m : nat,
S n = m ->
m * (1 + n) = m * m.
Proof.
intros.
rewrite S_n_eq_add.
rewrite H.
reflexivity.
Qed.
|
Formal statement is: lemma emeasure_Un_null_set: assumes "A \<in> sets M" "B \<in> null_sets M" shows "emeasure M (A \<union> B) = emeasure M A" Informal statement is: If $A$ is a measurable set and $B$ is a null set, then $A \cup B$ is measurable and has the same measure as $A$. |
[STATEMENT]
lemma not_ed_ou_neqs:
assumes "\<not>ed_ou (X :: 'a::topological_space set)"
shows
"I \<noteq> I \<circ> (K :: 'a set \<Rightarrow> 'a set)"
"K \<noteq> K \<circ> (I :: 'a set \<Rightarrow> 'a set)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. I \<noteq> I \<circ> K &&& K \<noteq> K \<circ> I
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<not> ed_ou X
goal (1 subgoal):
1. I \<noteq> I \<circ> K &&& K \<noteq> K \<circ> I
[PROOF STEP]
unfolding ed_ou_def
[PROOF STATE]
proof (prove)
using this:
I \<circ> K \<noteq> K \<circ> I
goal (1 subgoal):
1. I \<noteq> I \<circ> K &&& K \<noteq> K \<circ> I
[PROOF STEP]
by (simp_all add: fun_eq_iff I_K K_def C_def o_apply)
(metis (no_types, opaque_lifting) closure_eq_empty disjoint_eq_subset_Compl double_complement interior_Int interior_complement set_eq_subset)+ |
State Before: 𝕜 : Type u_1
𝕜₂ : Type ?u.210790
𝕝 : Type ?u.210793
𝕝₂ : Type ?u.210796
E : Type u_2
F : Type ?u.210802
G : Type ?u.210805
ι : Type u_3
ι' : Type ?u.210811
inst✝⁴ : NormedField 𝕜
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : Nonempty ι
inst✝ : TopologicalSpace E
p : SeminormFamily 𝕜 E ι
hp : WithSeminorms p
U : Set E
⊢ IsOpen U ↔ ∀ (x : E), x ∈ U → ∃ s r, r > 0 ∧ ball (Finset.sup s p) x r ⊆ U State After: no goals Tactic: simp_rw [← WithSeminorms.mem_nhds_iff hp _ U, isOpen_iff_mem_nhds] |
import tactic --hide
/-Lemma
This is getting silly now!
-/
lemma lemma_9 (P Q R : Prop) : ((Q → P) → P) → (Q → R) → (R → P) → P :=
begin
intros h1 h2 h3,
apply h1,
intro hQ,
apply h3,
apply h2,
exact hQ,
end |
theory Proofs
imports Main
begin
(* Problem: *)
lemma
assumes "\<forall>x. \<forall>y. x + y = y + x"
and "\<forall>x. \<forall>y. \<forall>z. x + (y + z) = (x + y) + z"
and "\<forall>x. 0 + x = x"
shows "u + v = 0 \<longrightarrow> (v + w) + u = w"
term u
proof
(* from assms(3) have "0 + x = x" *)
from assms(3) have "0 + (x::'c) = x" by (rule allE)
oops
(* fully manual *)
lemma
fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixr "\<oplus>" 80)
and e :: 'a
assumes "\<forall>x. \<forall>y. x \<oplus> y = y \<oplus> x"
and "\<forall>x. \<forall>y. \<forall>z. x \<oplus> (y \<oplus> z) = (x \<oplus> y) \<oplus> z"
and "\<forall>x. e \<oplus> x = x"
shows "u \<oplus> v = e \<longrightarrow> (v \<oplus> w) \<oplus> u = w"
proof
assume A: "u \<oplus> v = e"
thm assms
thm assms(1)
(* -------------------------------------------------- *)
from assms(1) have "\<forall>y. u \<oplus> y = y \<oplus> u" by (rule allE)
then have comm1: "u \<oplus> v = v \<oplus> u" by (rule allE)
from assms(1) have "\<forall>y. u \<oplus> y = y \<oplus> u" by (rule allE)
then have comm2: "u \<oplus> w = w \<oplus> u" by (rule allE)
from assms(2) have "\<forall>y. \<forall>z. v \<oplus> (y \<oplus> z) = (v \<oplus> y) \<oplus> z" by (rule allE)
then have "\<forall>z. v \<oplus> (u \<oplus> z) = (v \<oplus> u) \<oplus> z" by (rule allE)
then have assoc1: "v \<oplus> (u \<oplus> w) = (v \<oplus> u) \<oplus> w" by (rule allE)
from assms(2) have "\<forall>y. \<forall>z. v \<oplus> (y \<oplus> z) = (v \<oplus> y) \<oplus> z" by (rule allE)
then have "\<forall>z. v \<oplus> (w \<oplus> z) = (v \<oplus> w) \<oplus> z" by (rule allE)
then have assoc2: "v \<oplus> (w \<oplus> u) = (v \<oplus> w) \<oplus> u" by (rule allE)
(* -------------------------------------------------- *)
from assms(3) have "e \<oplus> w = w" by (rule allE)
with A have "(u \<oplus> v) \<oplus> w = w" by (rule ssubst)
with comm1 have "(v \<oplus> u) \<oplus> w = w" by (rule subst)
with assoc1 have "v \<oplus> u \<oplus> w = w" by (rule ssubst)
with comm2 have "v \<oplus> (w \<oplus> u) = w" by (rule subst)
with assoc2 show "(v \<oplus> w) \<oplus> u = w" by (rule subst)
qed
(* more automated *)
lemma
fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixr "\<oplus>" 80)
and e :: 'a
assumes "\<forall>x. \<forall>y. x \<oplus> y = y \<oplus> x"
and "\<forall>x. \<forall>y. \<forall>z. x \<oplus> (y \<oplus> z) = (x \<oplus> y) \<oplus> z"
and "\<forall>x. e \<oplus> x = x"
shows "u \<oplus> v = e \<longrightarrow> (v \<oplus> w) \<oplus> u = w"
proof
assume A: "u \<oplus> v = e"
from assms(3) have "e \<oplus> w = w" ..
then have "(u \<oplus> v) \<oplus> w = w"
by (simp add: A)
then have "(v \<oplus> u) \<oplus> w = w"
by (simp add: assms(1))
then have "v \<oplus> u \<oplus> w = w"
by (simp add: assms(2))
then have "v \<oplus> (w \<oplus> u) = w"
by (simp add: assms(1))
then show "(v \<oplus> w) \<oplus> u = w"
by (simp add: assms(2))
qed
(* fully automated *)
lemma
fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixr "\<oplus>" 80)
and e :: 'a
assumes "\<forall>x. \<forall>y. x \<oplus> y = y \<oplus> x"
and "\<forall>x. \<forall>y. \<forall>z. x \<oplus> (y \<oplus> z) = (x \<oplus> y) \<oplus> z"
and "\<forall>x. e \<oplus> x = x"
shows "u \<oplus> v = e \<longrightarrow> (v \<oplus> w) \<oplus> u = w"
by (simp add: assms)
end |
SUBROUTINE FLIBCHK(A)
COMPLEX*16 A(*)
INTRINSIC DIMAG
*
* Perform complex arith and I/O to link in f77 libs
*
A(1) = (A(1) * A(2)) / A(3)
IF (DIMAG(A(1)) .EQ. 99999.97) STOP
PRINT*,' SUCCESS'
*
RETURN
END
|
State Before: R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
⊢ ⁅⊥, N⁆ = ⊥ State After: R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
⊢ ⁅⊥, N⁆ ≤ ⊥ Tactic: suffices ⁅(⊥ : LieIdeal R L), N⁆ ≤ ⊥ by exact le_bot_iff.mp this State Before: R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
⊢ ⁅⊥, N⁆ ≤ ⊥ State After: R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
⊢ {m | ∃ x n, ⁅↑x, ↑n⁆ = m} ⊆ ↑⊥ Tactic: rw [lieIdeal_oper_eq_span, lieSpan_le] State Before: R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
⊢ {m | ∃ x n, ⁅↑x, ↑n⁆ = m} ⊆ ↑⊥ State After: case intro.mk.intro
R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
m : M
x : L
hx : x ∈ ⊥
n : { x // x ∈ N }
hn : ⁅↑{ val := x, property := hx }, ↑n⁆ = m
⊢ m ∈ ↑⊥ Tactic: rintro m ⟨⟨x, hx⟩, n, hn⟩ State Before: case intro.mk.intro
R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
m : M
x : L
hx : x ∈ ⊥
n : { x // x ∈ N }
hn : ⁅↑{ val := x, property := hx }, ↑n⁆ = m
⊢ m ∈ ↑⊥ State After: case intro.mk.intro
R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
m : M
x : L
hx : x ∈ ⊥
n : { x // x ∈ N }
hn : ⁅↑{ val := x, property := hx }, ↑n⁆ = m
⊢ ⁅↑{ val := x, property := hx }, ↑n⁆ ∈ ↑⊥ Tactic: rw [← hn] State Before: case intro.mk.intro
R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
m : M
x : L
hx : x ∈ ⊥
n : { x // x ∈ N }
hn : ⁅↑{ val := x, property := hx }, ↑n⁆ = m
⊢ ⁅↑{ val := x, property := hx }, ↑n⁆ ∈ ↑⊥ State After: case intro.mk.intro
R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
m : M
x : L
n : { x // x ∈ N }
hx : x ∈ ⊥
hn : ⁅↑{ val := x, property := hx }, ↑n⁆ = m
⊢ ⁅↑{ val := x, property := hx }, ↑n⁆ ∈ ↑⊥ Tactic: change x ∈ (⊥ : LieIdeal R L) at hx State Before: case intro.mk.intro
R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
m : M
x : L
n : { x // x ∈ N }
hx : x ∈ ⊥
hn : ⁅↑{ val := x, property := hx }, ↑n⁆ = m
⊢ ⁅↑{ val := x, property := hx }, ↑n⁆ ∈ ↑⊥ State After: case intro.mk.intro
R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
m : M
x : L
n : { x // x ∈ N }
hx✝ : x ∈ ⊥
hx : x = 0
hn : ⁅↑{ val := x, property := hx✝ }, ↑n⁆ = m
⊢ ⁅↑{ val := x, property := hx✝ }, ↑n⁆ ∈ ↑⊥ Tactic: rw [mem_bot] at hx State Before: case intro.mk.intro
R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
m : M
x : L
n : { x // x ∈ N }
hx✝ : x ∈ ⊥
hx : x = 0
hn : ⁅↑{ val := x, property := hx✝ }, ↑n⁆ = m
⊢ ⁅↑{ val := x, property := hx✝ }, ↑n⁆ ∈ ↑⊥ State After: no goals Tactic: simp [hx] State Before: R : Type u
L : Type v
M : Type w
M₂ : Type w₁
inst✝¹⁰ : CommRing R
inst✝⁹ : LieRing L
inst✝⁸ : LieAlgebra R L
inst✝⁷ : AddCommGroup M
inst✝⁶ : Module R M
inst✝⁵ : LieRingModule L M
inst✝⁴ : LieModule R L M
inst✝³ : AddCommGroup M₂
inst✝² : Module R M₂
inst✝¹ : LieRingModule L M₂
inst✝ : LieModule R L M₂
N N' : LieSubmodule R L M
I J : LieIdeal R L
N₂ : LieSubmodule R L M₂
this : ⁅⊥, N⁆ ≤ ⊥
⊢ ⁅⊥, N⁆ = ⊥ State After: no goals Tactic: exact le_bot_iff.mp this |
/-
Copyright (c) 2021 Jireh Loreaux. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jireh Loreaux
-/
import algebra.algebra.spectrum
import analysis.special_functions.pow
import analysis.special_functions.exponential
import analysis.complex.liouville
import analysis.analytic.radius_liminf
/-!
# The spectrum of elements in a complete normed algebra
This file contains the basic theory for the resolvent and spectrum of a Banach algebra.
## Main definitions
* `spectral_radius : ℝ≥0∞`: supremum of `∥k∥₊` for all `k ∈ spectrum 𝕜 a`
## Main statements
* `spectrum.is_open_resolvent_set`: the resolvent set is open.
* `spectrum.is_closed`: the spectrum is closed.
* `spectrum.subset_closed_ball_norm`: the spectrum is a subset of closed disk of radius
equal to the norm.
* `spectrum.is_compact`: the spectrum is compact.
* `spectrum.spectral_radius_le_nnnorm`: the spectral radius is bounded above by the norm.
* `spectrum.has_deriv_at_resolvent`: the resolvent function is differentiable on the resolvent set.
* `spectrum.pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius`: Gelfand's formula for the
spectral radius in Banach algebras over `ℂ`.
* `spectrum.nonempty`: the spectrum of any element in a complex Banach algebra is nonempty.
* `normed_division_ring.alg_equiv_complex_of_complete`: **Gelfand-Mazur theorem** For a complex
Banach division algebra, the natural `algebra_map ℂ A` is an algebra isomorphism whose inverse
is given by selecting the (unique) element of `spectrum ℂ a`
## TODO
* compute all derivatives of `resolvent a`.
-/
open_locale ennreal
/-- The *spectral radius* is the supremum of the `nnnorm` (`∥⬝∥₊`) of elements in the spectrum,
coerced into an element of `ℝ≥0∞`. Note that it is possible for `spectrum 𝕜 a = ∅`. In this
case, `spectral_radius a = 0`. It is also possible that `spectrum 𝕜 a` be unbounded (though
not for Banach algebras, see `spectrum.is_bounded`, below). In this case,
`spectral_radius a = ∞`. -/
noncomputable def spectral_radius (𝕜 : Type*) {A : Type*} [normed_field 𝕜] [ring A]
[algebra 𝕜 A] (a : A) : ℝ≥0∞ :=
⨆ k ∈ spectrum 𝕜 a, ∥k∥₊
variables {𝕜 : Type*} {A : Type*}
namespace spectrum
section spectrum_compact
variables [normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A]
local notation `σ` := spectrum 𝕜
local notation `ρ` := resolvent_set 𝕜
local notation `↑ₐ` := algebra_map 𝕜 A
lemma mem_resolvent_set_of_spectral_radius_lt {a : A} {k : 𝕜} (h : spectral_radius 𝕜 a < ∥k∥₊) :
k ∈ ρ a :=
not_not.mp $ λ hn, h.not_le $ le_supr₂ k hn
variable [complete_space A]
lemma is_open_resolvent_set (a : A) : is_open (ρ a) :=
units.is_open.preimage ((algebra_map_isometry 𝕜 A).continuous.sub continuous_const)
lemma is_closed (a : A) : is_closed (σ a) :=
(is_open_resolvent_set a).is_closed_compl
lemma mem_resolvent_of_norm_lt {a : A} {k : 𝕜} (h : ∥a∥ < ∥k∥) :
k ∈ ρ a :=
begin
rw [resolvent_set, set.mem_set_of_eq, algebra.algebra_map_eq_smul_one],
have hk : k ≠ 0 := ne_zero_of_norm_ne_zero (by linarith [norm_nonneg a]),
let ku := units.map (↑ₐ).to_monoid_hom (units.mk0 k hk),
have hku : ∥-a∥ < ∥(↑ku⁻¹:A)∥⁻¹ := by simpa [ku, algebra_map_isometry] using h,
simpa [ku, sub_eq_add_neg, algebra.algebra_map_eq_smul_one] using (ku.add (-a) hku).is_unit,
end
lemma norm_le_norm_of_mem {a : A} {k : 𝕜} (hk : k ∈ σ a) :
∥k∥ ≤ ∥a∥ :=
le_of_not_lt $ mt mem_resolvent_of_norm_lt hk
lemma subset_closed_ball_norm (a : A) :
σ a ⊆ metric.closed_ball (0 : 𝕜) (∥a∥) :=
λ k hk, by simp [norm_le_norm_of_mem hk]
lemma is_bounded (a : A) : metric.bounded (σ a) :=
(metric.bounded_iff_subset_ball 0).mpr ⟨∥a∥, subset_closed_ball_norm a⟩
theorem is_compact [proper_space 𝕜] (a : A) : is_compact (σ a) :=
metric.is_compact_of_is_closed_bounded (is_closed a) (is_bounded a)
theorem spectral_radius_le_nnnorm (a : A) :
spectral_radius 𝕜 a ≤ ∥a∥₊ :=
by { refine supr₂_le (λ k hk, _), exact_mod_cast norm_le_norm_of_mem hk }
open ennreal polynomial
variable (𝕜)
theorem spectral_radius_le_pow_nnnorm_pow_one_div (a : A) (n : ℕ) :
spectral_radius 𝕜 a ≤ ∥a ^ (n + 1)∥₊ ^ (1 / (n + 1) : ℝ) :=
begin
refine supr₂_le (λ k hk, _),
/- apply easy direction of the spectral mapping theorem for polynomials -/
have pow_mem : k ^ (n + 1) ∈ σ (a ^ (n + 1)),
by simpa only [one_mul, algebra.algebra_map_eq_smul_one, one_smul, aeval_monomial, one_mul,
eval_monomial] using subset_polynomial_aeval a (monomial (n + 1) (1 : 𝕜)) ⟨k, hk, rfl⟩,
/- power of the norm is bounded by norm of the power -/
have nnnorm_pow_le : (↑(∥k∥₊ ^ (n + 1)) : ℝ≥0∞) ≤ ↑∥a ^ (n + 1)∥₊,
by simpa only [norm_to_nnreal, nnnorm_pow k (n+1)]
using coe_mono (real.to_nnreal_mono (norm_le_norm_of_mem pow_mem)),
/- take (n + 1)ᵗʰ roots and clean up the left-hand side -/
have hn : 0 < ((n + 1) : ℝ), by exact_mod_cast nat.succ_pos',
convert monotone_rpow_of_nonneg (one_div_pos.mpr hn).le nnnorm_pow_le,
erw [coe_pow, ←rpow_nat_cast, ←rpow_mul, mul_one_div_cancel hn.ne', rpow_one],
end
end spectrum_compact
section resolvent
open filter asymptotics
variables [nondiscrete_normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A] [complete_space A]
local notation `ρ` := resolvent_set 𝕜
local notation `↑ₐ` := algebra_map 𝕜 A
theorem has_deriv_at_resolvent {a : A} {k : 𝕜} (hk : k ∈ ρ a) :
has_deriv_at (resolvent a) (-(resolvent a k) ^ 2) k :=
begin
have H₁ : has_fderiv_at ring.inverse _ (↑ₐk - a) := has_fderiv_at_ring_inverse hk.unit,
have H₂ : has_deriv_at (λ k, ↑ₐk - a) 1 k,
{ simpa using (algebra.linear_map 𝕜 A).has_deriv_at.sub_const a },
simpa [resolvent, sq, hk.unit_spec, ← ring.inverse_unit hk.unit] using H₁.comp_has_deriv_at k H₂,
end
/- TODO: Once there is sufficient API for bornology, we should get a nice filter / asymptotics
version of this, for example: `tendsto (resolvent a) (cobounded 𝕜) (𝓝 0)` or more specifically
`is_O (resolvent a) (λ z, z⁻¹) (cobounded 𝕜)`. -/
lemma norm_resolvent_le_forall (a : A) :
∀ ε > 0, ∃ R > 0, ∀ z : 𝕜, R ≤ ∥z∥ → ∥resolvent a z∥ ≤ ε :=
begin
obtain ⟨c, c_pos, hc⟩ := (@normed_ring.inverse_one_sub_norm A _ _).exists_pos,
rw [is_O_with_iff, eventually_iff, metric.mem_nhds_iff] at hc,
rcases hc with ⟨δ, δ_pos, hδ⟩,
simp only [cstar_ring.norm_one, mul_one] at hδ,
intros ε hε,
have ha₁ : 0 < ∥a∥ + 1 := lt_of_le_of_lt (norm_nonneg a) (lt_add_one _),
have min_pos : 0 < min (δ * (∥a∥ + 1)⁻¹) (ε * c⁻¹),
from lt_min (mul_pos δ_pos (inv_pos.mpr ha₁)) (mul_pos hε (inv_pos.mpr c_pos)),
refine ⟨(min (δ * (∥a∥ + 1)⁻¹) (ε * c⁻¹))⁻¹, inv_pos.mpr min_pos, (λ z hz, _)⟩,
have hnz : z ≠ 0 := norm_pos_iff.mp (lt_of_lt_of_le (inv_pos.mpr min_pos) hz),
replace hz := inv_le_of_inv_le min_pos hz,
rcases (⟨units.mk0 z hnz, units.coe_mk0 hnz⟩ : is_unit z) with ⟨z, rfl⟩,
have lt_δ : ∥z⁻¹ • a∥ < δ,
{ rw [units.smul_def, norm_smul, units.coe_inv', norm_inv],
calc ∥(z : 𝕜)∥⁻¹ * ∥a∥ ≤ δ * (∥a∥ + 1)⁻¹ * ∥a∥
: mul_le_mul_of_nonneg_right (hz.trans (min_le_left _ _)) (norm_nonneg _)
... < δ
: by { conv { rw mul_assoc, to_rhs, rw (mul_one δ).symm },
exact mul_lt_mul_of_pos_left
((inv_mul_lt_iff ha₁).mpr ((mul_one (∥a∥ + 1)).symm ▸ (lt_add_one _))) δ_pos } },
rw [←inv_smul_smul z (resolvent a (z : 𝕜)), units_smul_resolvent_self, resolvent,
algebra.algebra_map_eq_smul_one, one_smul, units.smul_def, norm_smul, units.coe_inv', norm_inv],
calc _ ≤ ε * c⁻¹ * c : mul_le_mul (hz.trans (min_le_right _ _)) (hδ (mem_ball_zero_iff.mpr lt_δ))
(norm_nonneg _) (mul_pos hε (inv_pos.mpr c_pos)).le
... = _ : inv_mul_cancel_right₀ c_pos.ne.symm ε,
end
end resolvent
section one_sub_smul
open continuous_multilinear_map ennreal formal_multilinear_series
open_locale nnreal ennreal
variables
[nondiscrete_normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A]
variable (𝕜)
/-- In a Banach algebra `A` over a nondiscrete normed field `𝕜`, for any `a : A` the
power series with coefficients `a ^ n` represents the function `(1 - z • a)⁻¹` in a disk of
radius `∥a∥₊⁻¹`. -/
lemma has_fpower_series_on_ball_inverse_one_sub_smul [complete_space A] (a : A) :
has_fpower_series_on_ball (λ z : 𝕜, ring.inverse (1 - z • a))
(λ n, continuous_multilinear_map.mk_pi_field 𝕜 (fin n) (a ^ n)) 0 (∥a∥₊)⁻¹ :=
{ r_le :=
begin
refine le_of_forall_nnreal_lt (λ r hr, le_radius_of_bound_nnreal _ (max 1 ∥(1 : A)∥₊) (λ n, _)),
rw [←norm_to_nnreal, norm_mk_pi_field, norm_to_nnreal],
cases n,
{ simp only [le_refl, mul_one, or_true, le_max_iff, pow_zero] },
{ refine le_trans (le_trans (mul_le_mul_right' (nnnorm_pow_le' a n.succ_pos) (r ^ n.succ)) _)
(le_max_left _ _),
{ by_cases ∥a∥₊ = 0,
{ simp only [h, zero_mul, zero_le', pow_succ], },
{ rw [←coe_inv h, coe_lt_coe, nnreal.lt_inv_iff_mul_lt h] at hr,
simpa only [←mul_pow, mul_comm] using pow_le_one' hr.le n.succ } } }
end,
r_pos := ennreal.inv_pos.mpr coe_ne_top,
has_sum := λ y hy,
begin
have norm_lt : ∥y • a∥ < 1,
{ by_cases h : ∥a∥₊ = 0,
{ simp only [nnnorm_eq_zero.mp h, norm_zero, zero_lt_one, smul_zero] },
{ have nnnorm_lt : ∥y∥₊ < ∥a∥₊⁻¹,
by simpa only [←coe_inv h, mem_ball_zero_iff, metric.emetric_ball_nnreal] using hy,
rwa [←coe_nnnorm, ←real.lt_to_nnreal_iff_coe_lt, real.to_nnreal_one, nnnorm_smul,
←nnreal.lt_inv_iff_mul_lt h] } },
simpa [←smul_pow, (normed_ring.summable_geometric_of_norm_lt_1 _ norm_lt).has_sum_iff]
using (normed_ring.inverse_one_sub _ norm_lt).symm,
end }
variable {𝕜}
lemma is_unit_one_sub_smul_of_lt_inv_radius {a : A} {z : 𝕜} (h : ↑∥z∥₊ < (spectral_radius 𝕜 a)⁻¹) :
is_unit (1 - z • a) :=
begin
by_cases hz : z = 0,
{ simp only [hz, is_unit_one, sub_zero, zero_smul] },
{ let u := units.mk0 z hz,
suffices hu : is_unit (u⁻¹ • 1 - a),
{ rwa [is_unit.smul_sub_iff_sub_inv_smul, inv_inv u] at hu },
{ rw [units.smul_def, ←algebra.algebra_map_eq_smul_one, ←mem_resolvent_set_iff],
refine mem_resolvent_set_of_spectral_radius_lt _,
rwa [units.coe_inv', nnnorm_inv, coe_inv (nnnorm_ne_zero_iff.mpr
(units.coe_mk0 hz ▸ hz : (u : 𝕜) ≠ 0)), lt_inv_iff_lt_inv] } }
end
/-- In a Banach algebra `A` over `𝕜`, for `a : A` the function `λ z, (1 - z • a)⁻¹` is
differentiable on any closed ball centered at zero of radius `r < (spectral_radius 𝕜 a)⁻¹`. -/
theorem differentiable_on_inverse_one_sub_smul [complete_space A] {a : A} {r : ℝ≥0}
(hr : (r : ℝ≥0∞) < (spectral_radius 𝕜 a)⁻¹) :
differentiable_on 𝕜 (λ z : 𝕜, ring.inverse (1 - z • a)) (metric.closed_ball 0 r) :=
begin
intros z z_mem,
apply differentiable_at.differentiable_within_at,
have hu : is_unit (1 - z • a),
{ refine is_unit_one_sub_smul_of_lt_inv_radius (lt_of_le_of_lt (coe_mono _) hr),
simpa only [norm_to_nnreal, real.to_nnreal_coe]
using real.to_nnreal_mono (mem_closed_ball_zero_iff.mp z_mem) },
have H₁ : differentiable 𝕜 (λ w : 𝕜, 1 - w • a) := (differentiable_id.smul_const a).const_sub 1,
exact differentiable_at.comp z (differentiable_at_inverse hu.unit) (H₁.differentiable_at),
end
end one_sub_smul
section gelfand_formula
open filter ennreal continuous_multilinear_map
open_locale topological_space
variables
[normed_ring A] [normed_algebra ℂ A] [complete_space A]
/-- The `limsup` relationship for the spectral radius used to prove `spectrum.gelfand_formula`. -/
lemma limsup_pow_nnnorm_pow_one_div_le_spectral_radius (a : A) :
limsup at_top (λ n : ℕ, ↑∥a ^ n∥₊ ^ (1 / n : ℝ)) ≤ spectral_radius ℂ a :=
begin
refine ennreal.inv_le_inv.mp (le_of_forall_pos_nnreal_lt (λ r r_pos r_lt, _)),
simp_rw [inv_limsup, ←one_div],
let p : formal_multilinear_series ℂ ℂ A :=
λ n, continuous_multilinear_map.mk_pi_field ℂ (fin n) (a ^ n),
suffices h : (r : ℝ≥0∞) ≤ p.radius,
{ convert h,
simp only [p.radius_eq_liminf, ←norm_to_nnreal, norm_mk_pi_field],
refine congr_arg _ (funext (λ n, congr_arg _ _)),
rw [norm_to_nnreal, ennreal.coe_rpow_def (∥a ^ n∥₊) (1 / n : ℝ), if_neg],
exact λ ha, by linarith [ha.2, (one_div_nonneg.mpr n.cast_nonneg : 0 ≤ (1 / n : ℝ))], },
{ have H₁ := (differentiable_on_inverse_one_sub_smul r_lt).has_fpower_series_on_ball r_pos,
exact ((has_fpower_series_on_ball_inverse_one_sub_smul ℂ a).exchange_radius H₁).r_le, }
end
/-- **Gelfand's formula**: Given an element `a : A` of a complex Banach algebra, the
`spectral_radius` of `a` is the limit of the sequence `∥a ^ n∥₊ ^ (1 / n)` -/
theorem pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius (a : A) :
tendsto (λ n : ℕ, ((∥a ^ n∥₊ ^ (1 / n : ℝ)) : ℝ≥0∞)) at_top (𝓝 (spectral_radius ℂ a)) :=
begin
refine tendsto_of_le_liminf_of_limsup_le _ _ (by apply_auto_param) (by apply_auto_param),
{ rw [←liminf_nat_add _ 1, liminf_eq_supr_infi_of_nat],
refine le_trans _ (le_supr _ 0),
exact le_infi₂ (λ i hi, spectral_radius_le_pow_nnnorm_pow_one_div ℂ a i) },
{ exact limsup_pow_nnnorm_pow_one_div_le_spectral_radius a },
end
/- This is the same as `pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius` but for `norm`
instead of `nnnorm`. -/
/-- **Gelfand's formula**: Given an element `a : A` of a complex Banach algebra, the
`spectral_radius` of `a` is the limit of the sequence `∥a ^ n∥₊ ^ (1 / n)` -/
end gelfand_formula
/-- In a (nontrivial) complex Banach algebra, every element has nonempty spectrum. -/
theorem nonempty {A : Type*} [normed_ring A] [normed_algebra ℂ A] [complete_space A]
[nontrivial A]
(a : A) : (spectrum ℂ a).nonempty :=
begin
  /- Suppose `σ a = ∅`; then the resolvent set is `ℂ`, any `(z • 1 - a)` is a unit, and `resolvent`
is differentiable on `ℂ`. -/
rw ←set.ne_empty_iff_nonempty,
by_contra h,
have H₀ : resolvent_set ℂ a = set.univ, by rwa [spectrum, set.compl_empty_iff] at h,
have H₁ : differentiable ℂ (λ z : ℂ, resolvent a z), from λ z,
(has_deriv_at_resolvent (H₀.symm ▸ set.mem_univ z : z ∈ resolvent_set ℂ a)).differentiable_at,
  /- The norm of the resolvent is small for all sufficiently large `z`, and by compactness and
continuity it is bounded on the complement of a large ball, thus uniformly bounded on `ℂ`.
By Liouville's theorem `λ z, resolvent a z` is constant -/
have H₂ := norm_resolvent_le_forall a,
have H₃ : ∀ z : ℂ, resolvent a z = resolvent a (0 : ℂ),
{ refine λ z, H₁.apply_eq_apply_of_bounded (bounded_iff_exists_norm_le.mpr _) z 0,
rcases H₂ 1 zero_lt_one with ⟨R, R_pos, hR⟩,
rcases (proper_space.is_compact_closed_ball (0 : ℂ) R).exists_bound_of_continuous_on
H₁.continuous.continuous_on with ⟨C, hC⟩,
use max C 1,
rintros _ ⟨w, rfl⟩,
refine or.elim (em (∥w∥ ≤ R)) (λ hw, _) (λ hw, _),
{ exact (hC w (mem_closed_ball_zero_iff.mpr hw)).trans (le_max_left _ _) },
{ exact (hR w (not_le.mp hw).le).trans (le_max_right _ _), }, },
  /- `resolvent a 0 = 0`, which is a contradiction because it isn't a unit. -/
have H₅ : resolvent a (0 : ℂ) = 0,
{ refine norm_eq_zero.mp (le_antisymm (le_of_forall_pos_le_add (λ ε hε, _)) (norm_nonneg _)),
rcases H₂ ε hε with ⟨R, R_pos, hR⟩,
simpa only [H₃ R] using (zero_add ε).symm.subst
(hR R (by exact_mod_cast (real.norm_of_nonneg R_pos.lt.le).symm.le)), },
  /- `not_is_unit_zero` is where we need `nontrivial A`; it is unavoidable. -/
exact not_is_unit_zero (H₅.subst (is_unit_resolvent.mp
(mem_resolvent_set_iff.mp (H₀.symm ▸ set.mem_univ 0)))),
end
section gelfand_mazur_isomorphism
variables [normed_division_ring A] [normed_algebra ℂ A]
local notation `σ` := spectrum ℂ
lemma algebra_map_eq_of_mem {a : A} {z : ℂ} (h : z ∈ σ a) : algebra_map ℂ A z = a :=
by rwa [mem_iff, is_unit_iff_ne_zero, not_not, sub_eq_zero] at h
/-- **Gelfand-Mazur theorem**: For a complex Banach division algebra, the natural `algebra_map ℂ A`
is an algebra isomorphism whose inverse is given by selecting the (unique) element of
`spectrum ℂ a`. In addition, `algebra_map_isometry` guarantees this map is an isometry. -/
@[simps]
noncomputable def _root_.normed_division_ring.alg_equiv_complex_of_complete
[complete_space A] : ℂ ≃ₐ[ℂ] A :=
{ to_fun := algebra_map ℂ A,
inv_fun := λ a, (spectrum.nonempty a).some,
left_inv := λ z, by simpa only [scalar_eq] using (spectrum.nonempty $ algebra_map ℂ A z).some_mem,
right_inv := λ a, algebra_map_eq_of_mem (spectrum.nonempty a).some_mem,
..algebra.of_id ℂ A }
end gelfand_mazur_isomorphism
section exp_mapping
local notation `↑ₐ` := algebra_map 𝕜 A
/-- For `𝕜 = ℝ` or `𝕜 = ℂ`, `exp 𝕜 𝕜` maps the spectrum of `a` into the spectrum of `exp 𝕜 A a`. -/
theorem exp_mem_exp [is_R_or_C 𝕜] [normed_ring A] [normed_algebra 𝕜 A] [complete_space A]
(a : A) {z : 𝕜} (hz : z ∈ spectrum 𝕜 a) : exp 𝕜 𝕜 z ∈ spectrum 𝕜 (exp 𝕜 A a) :=
begin
have hexpmul : exp 𝕜 A a = exp 𝕜 A (a - ↑ₐ z) * ↑ₐ (exp 𝕜 𝕜 z),
{ rw [algebra_map_exp_comm z, ←exp_add_of_commute (algebra.commutes z (a - ↑ₐz)).symm,
sub_add_cancel] },
let b := ∑' n : ℕ, ((1 / (n + 1).factorial) : 𝕜) • (a - ↑ₐz) ^ n,
have hb : summable (λ n : ℕ, ((1 / (n + 1).factorial) : 𝕜) • (a - ↑ₐz) ^ n),
{ refine summable_of_norm_bounded_eventually _ (real.summable_pow_div_factorial ∥a - ↑ₐz∥) _,
filter_upwards [filter.eventually_cofinite_ne 0] with n hn,
rw [norm_smul, mul_comm, norm_div, norm_one, is_R_or_C.norm_eq_abs, is_R_or_C.abs_cast_nat,
←div_eq_mul_one_div],
exact div_le_div (pow_nonneg (norm_nonneg _) n) (norm_pow_le' (a - ↑ₐz) (zero_lt_iff.mpr hn))
(by exact_mod_cast nat.factorial_pos n)
(by exact_mod_cast nat.factorial_le (lt_add_one n).le) },
have h₀ : ∑' n : ℕ, ((1 / (n + 1).factorial) : 𝕜) • (a - ↑ₐz) ^ (n + 1) = (a - ↑ₐz) * b,
{ simpa only [mul_smul_comm, pow_succ] using hb.tsum_mul_left (a - ↑ₐz) },
have h₁ : ∑' n : ℕ, ((1 / (n + 1).factorial) : 𝕜) • (a - ↑ₐz) ^ (n + 1) = b * (a - ↑ₐz),
{ simpa only [pow_succ', algebra.smul_mul_assoc] using hb.tsum_mul_right (a - ↑ₐz) },
have h₃ : exp 𝕜 A (a - ↑ₐz) = 1 + (a - ↑ₐz) * b,
{ rw exp_eq_tsum,
convert tsum_eq_zero_add (exp_series_summable' (a - ↑ₐz)),
simp only [nat.factorial_zero, nat.cast_one, _root_.div_one, pow_zero, one_smul],
exact h₀.symm },
rw [spectrum.mem_iff, is_unit.sub_iff, ←one_mul (↑ₐ(exp 𝕜 𝕜 z)), hexpmul, ←_root_.sub_mul,
commute.is_unit_mul_iff (algebra.commutes (exp 𝕜 𝕜 z) (exp 𝕜 A (a - ↑ₐz) - 1)).symm,
sub_eq_iff_eq_add'.mpr h₃, commute.is_unit_mul_iff (h₀ ▸ h₁ : (a - ↑ₐz) * b = b * (a - ↑ₐz))],
exact not_and_of_not_left _ (not_and_of_not_left _ ((not_iff_not.mpr is_unit.sub_iff).mp hz)),
end
end exp_mapping
end spectrum
namespace alg_hom
section normed_field
variables [normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A] [complete_space A]
local notation `↑ₐ` := algebra_map 𝕜 A
/-- An algebra homomorphism into the base field, as a continuous linear map (since it is
automatically bounded). -/
@[simps] def to_continuous_linear_map (φ : A →ₐ[𝕜] 𝕜) : A →L[𝕜] 𝕜 :=
φ.to_linear_map.mk_continuous_of_exists_bound $
⟨1, λ a, (one_mul ∥a∥).symm ▸ spectrum.norm_le_norm_of_mem (φ.apply_mem_spectrum _)⟩
lemma continuous (φ : A →ₐ[𝕜] 𝕜) : continuous φ := φ.to_continuous_linear_map.continuous
end normed_field
section nondiscrete_normed_field
variables [nondiscrete_normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A] [complete_space A]
local notation `↑ₐ` := algebra_map 𝕜 A
@[simp] lemma to_continuous_linear_map_norm [norm_one_class A] (φ : A →ₐ[𝕜] 𝕜) :
∥φ.to_continuous_linear_map∥ = 1 :=
continuous_linear_map.op_norm_eq_of_bounds zero_le_one
(λ a, (one_mul ∥a∥).symm ▸ spectrum.norm_le_norm_of_mem (φ.apply_mem_spectrum _))
(λ _ _ h, by simpa only [to_continuous_linear_map_apply, mul_one, map_one, norm_one] using h 1)
end nondiscrete_normed_field
end alg_hom
|
import data.real.basic
#check le_antisymm
-- BEGIN
example {x y : ℝ} (h : x ≤ y ∧ x ≠ y) : ¬ y ≤ x :=
begin
cases h with h₀ h₁,
sorry,
end
/- you will see that by_contra h' is equivalent to intro h' -/
example {x y : ℝ} : x ≤ y ∧ x ≠ y → ¬ y ≤ x :=
begin
intro h,
cases h with h₀ h₁,
intro h',
exact h₁ (le_antisymm h₀ h'),
end
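/- the by_contra spelling of the same lemma: since the goal is already a negation,
   by_contra h' behaves exactly like intro h' here (a sketch illustrating the
   comment above) -/
example {x y : ℝ} : x ≤ y ∧ x ≠ y → ¬ y ≤ x :=
begin
  rintros ⟨h₀, h₁⟩,
  by_contra h',
  exact h₁ (le_antisymm h₀ h'),
end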
/- using rcases instead of intros and cases -/
example {x y : ℝ} : x ≤ y ∧ x ≠ y → ¬ y ≤ x :=
begin
rintros ⟨h₀, h₁⟩ h',
exact h₁ (le_antisymm h₀ h'),
end
/- using λ abstraction -/
example {x y : ℝ} : x ≤ y ∧ x ≠ y → ¬ y ≤ x :=
λ ⟨h₀, h₁⟩ h', h₁ (le_antisymm h₀ h')
-- END |
State Before: ι : Type ?u.3068304
α : Type ?u.3068307
β : Type ?u.3068310
inst✝ : DecidableEq α
n : ℤ
x✝ : ↑(Equiv.addRight 1) n ≠ n
⊢ ↑(Equiv.addRight 1 ^ n) 0 = n State After: no goals Tactic: simp |
State Before: l m r ml mm mr : List Char
b e : Pos
⊢ ∃ l' r',
ValidFor l' mm r'
(Substring.extract
{ str := { data := l ++ (ml ++ mm ++ mr) ++ r }, startPos := { byteIdx := utf8Len l },
stopPos := { byteIdx := utf8Len l + utf8Len (ml ++ mm ++ mr) } }
{ byteIdx := utf8Len ml } { byteIdx := utf8Len ml + utf8Len mm }) State After: l m r ml mm mr : List Char
b e : Pos
⊢ ∃ l' r',
ValidFor l' mm r'
(if utf8Len ml + utf8Len mm ≤ utf8Len ml then { str := "", startPos := 0, stopPos := 0 }
else
{ str := { data := l ++ (ml ++ (mm ++ (mr ++ r))) },
startPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml }),
stopPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml + utf8Len mm }) }) Tactic: simp [Substring.extract] State Before: l m r ml mm mr : List Char
b e : Pos
⊢ ∃ l' r',
ValidFor l' mm r'
(if utf8Len ml + utf8Len mm ≤ utf8Len ml then { str := "", startPos := 0, stopPos := 0 }
else
{ str := { data := l ++ (ml ++ (mm ++ (mr ++ r))) },
startPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml }),
stopPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml + utf8Len mm }) }) State After: case inl
l m r ml mm mr : List Char
b e : Pos
h✝ : utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ ∃ l' r', ValidFor l' mm r' { str := "", startPos := 0, stopPos := 0 }
case inr
l m r ml mm mr : List Char
b e : Pos
h✝ : ¬utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ ∃ l' r',
ValidFor l' mm r'
{ str := { data := l ++ (ml ++ (mm ++ (mr ++ r))) },
startPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml }),
stopPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml + utf8Len mm }) } Tactic: split State Before: case inl
l m r ml mm mr : List Char
b e : Pos
h✝ : utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ ∃ l' r', ValidFor l' mm r' { str := "", startPos := 0, stopPos := 0 } State After: no goals Tactic: next h =>
rw [utf8Len_eq_zero.1 <| Nat.le_zero.1 <| (Nat.add_le_add_iff_le_left _ _ 0).1 h]
exact ⟨[], [], ⟨⟩⟩ State Before: l m r ml mm mr : List Char
b e : Pos
h : utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ ∃ l' r', ValidFor l' mm r' { str := "", startPos := 0, stopPos := 0 } State After: l m r ml mm mr : List Char
b e : Pos
h : utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ ∃ l' r', ValidFor l' [] r' { str := "", startPos := 0, stopPos := 0 } Tactic: rw [utf8Len_eq_zero.1 <| Nat.le_zero.1 <| (Nat.add_le_add_iff_le_left _ _ 0).1 h] State Before: l m r ml mm mr : List Char
b e : Pos
h : utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ ∃ l' r', ValidFor l' [] r' { str := "", startPos := 0, stopPos := 0 } State After: no goals Tactic: exact ⟨[], [], ⟨⟩⟩ State Before: case inr
l m r ml mm mr : List Char
b e : Pos
h✝ : ¬utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ ∃ l' r',
ValidFor l' mm r'
{ str := { data := l ++ (ml ++ (mm ++ (mr ++ r))) },
startPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml }),
stopPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml + utf8Len mm }) } State After: no goals Tactic: next h =>
refine ⟨l ++ ml, mr ++ r, .of_eq _ (by simp) ?_ ?_⟩ <;>
simp [Nat.min_eq_min] <;> rw [Nat.min_eq_right] <;>
simp [Nat.add_le_add_iff_le_left, Nat.le_add_right]
rw [Nat.add_assoc] State Before: l m r ml mm mr : List Char
b e : Pos
h : ¬utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ ∃ l' r',
ValidFor l' mm r'
{ str := { data := l ++ (ml ++ (mm ++ (mr ++ r))) },
startPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml }),
stopPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml + utf8Len mm }) } State After: case refine_2
l m r ml mm mr : List Char
b e : Pos
h : ¬utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ utf8Len l + (utf8Len ml + utf8Len mm) = utf8Len l + utf8Len ml + utf8Len mm Tactic: refine ⟨l ++ ml, mr ++ r, .of_eq _ (by simp) ?_ ?_⟩ <;>
simp [Nat.min_eq_min] <;> rw [Nat.min_eq_right] <;>
simp [Nat.add_le_add_iff_le_left, Nat.le_add_right] State Before: case refine_2
l m r ml mm mr : List Char
b e : Pos
h : ¬utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ utf8Len l + (utf8Len ml + utf8Len mm) = utf8Len l + utf8Len ml + utf8Len mm State After: no goals Tactic: rw [Nat.add_assoc] State Before: l m r ml mm mr : List Char
b e : Pos
h : ¬utf8Len ml + utf8Len mm ≤ utf8Len ml
⊢ { str := { data := l ++ (ml ++ (mm ++ (mr ++ r))) },
startPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml }),
stopPos :=
Pos.min { byteIdx := utf8Len l + (utf8Len ml + (utf8Len mm + utf8Len mr)) }
({ byteIdx := utf8Len l } + { byteIdx := utf8Len ml + utf8Len mm }) }.str.data =
l ++ ml ++ mm ++ (mr ++ r) State After: no goals Tactic: simp |
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* Equation (7) from the paper *)
p := x -> X2S^2*x^2:
z := (x, t) -> x^2/(8*t):
alpha := (x, t) -> (t - x^2/8)/K_FACTOR_C:
qb := (x, t) -> \
9/20 * (alpha(x, t) - 1)/sqrt(1 + params_a_b*alpha(x, t)*(alpha(x, t) - 1)) \
+ 2*p(x)/3:
(* Equation (10) in all its glory *)
fxnum := (x, t) -> \
+ (MU_GE + params_a_c*z(x, t)^ff(z(x, t))/(1 + z(x, t)^2)^2)*p(x) \
+ 146/2025 * qb(x, t)^2 \
- 73/405 * qb(x, t) * sqrt(1/2*(9/25*z(x, t)^2 + p(x)^2)) \
+ MU_GE^2/mkappa(x, t) * p(x)^2 \
+ 2*sqrt(params_a_e)*MU_GE*9/25*z(x, t)^2 \
+ params_a_e*params_a_mu*p(x)^3:
fxden := x -> \
(1 + sqrt(params_a_e)*p(x))^2:
fx := (x, t) -> fxnum(x, t)/fxden(x):
|
%% Copyright (C) 2016 Lagu
%% Copyright (C) 2017, 2019 Colin B. Macdonald
%%
%% This file is part of OctSymPy.
%%
%% OctSymPy is free software; you can redistribute it and/or modify
%% it under the terms of the GNU General Public License as published
%% by the Free Software Foundation; either version 3 of the License,
%% or (at your option) any later version.
%%
%% This software is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty
%% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
%% the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public
%% License along with this software; see the file COPYING.
%% If not, see <http://www.gnu.org/licenses/>.
%% -*- texinfo -*-
%% @documentencoding UTF-8
%% @defmethod @@sym zeros (@var{n})
%% @defmethodx @@sym zeros (@var{n}, @var{m})
%% Return a matrix whose elements are all 0.
%%
%% Example:
%% @example
%% @group
%% y = zeros (sym(3))
%% @result{} y = (sym 3×3 matrix)
%% ⎡0 0 0⎤
%% ⎢ ⎥
%% ⎢0 0 0⎥
%% ⎢ ⎥
%% ⎣0 0 0⎦
%% @end group
%% @end example
%%
%% @seealso{zeros, @@sym/ones, @@sym/eye}
%% @end defmethod
function y = zeros(varargin)
% partial workaround for issue #13: delete when/if fixed properly
if ((isa (varargin{nargin}, 'char')) && (strcmp (varargin{nargin}, 'sym')))
varargin = varargin(1:(nargin-1));
end
if (isa (varargin{end}, 'char'))
varargin = cell2nosyms (varargin);
y = zeros (varargin{:});
return
end
for i = 1:length(varargin)
varargin{i} = sym(varargin{i});
end
if (length (varargin) == 1 && ~isscalar (varargin{1}))
y = pycall_sympy__ ('return zeros(*_ins[0])', varargin{1});
else
y = pycall_sympy__ ('return zeros(*_ins)', varargin{:});
end
end
%!test
%! y = zeros(sym(2));
%! x = [0 0; 0 0];
%! assert( isequal( y, sym(x)))
%!test
%! y = zeros(sym(2), 1);
%! x = [0; 0];
%! assert( isequal( y, sym(x)))
%!test
%! y = zeros(sym(1), 2);
%! x = [0 0];
%! assert( isequal( y, sym(x)))
%!test
%! y = zeros (sym([2 3]));
%! x = sym (zeros ([2 3]));
%! assert (isequal (y, x))
%% Check types:
%!assert( isa( zeros(sym(2), 'double'), 'double'))
%!assert( isa( zeros(3, sym(3), 'single') , 'single'))
%!assert( isa( zeros(3, sym(3)), 'sym'))
%!assert( isa( zeros(3, sym(3), 'sym'), 'sym'))
%!xtest
%! % Issue #13
%! assert( isa( zeros(3, 3, 'sym'), 'sym'))
|
State Before: α : Type ?u.286751
inst✝ : Preorder α
a b c d : ℕ
lr₁ : 3 * a ≤ b + c + 1 + d
mr₂ : b + c + 1 ≤ 3 * d
mm₁ : b ≤ 3 * c
⊢ a + b + 1 ≤ 3 * (c + d + 1) State After: no goals Tactic: linarith |
module test_julia_broadcast_style
struct G{T,N} <: AbstractArray{T, N}
data::Array{T,N}
sr::String
end
#function Base.broadcastable(x::G{T,N}) where {T,N}
# @info :broadcastable
# x
#end
function Base.BroadcastStyle(::Type{G{T,N}}) where {T,N}
@info :BroadcastStyle
Broadcast.ArrayStyle{G{T,N}}()
end
function Base.size(x::G{T,N}) where {T,N}
@info :size
size(x.data)
end
function Base.similar(bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{G{T,N}}}, ::Type{ElType}) where {T,N,ElType}
@info :similar (T,N,ElType)
A = bc.args[1]
G(Array{ElType}(undef, length.(axes(bc))), A.sr)
end
function Base.getindex(x::G{T,N}, inds::Vararg{Int,N}) where {T,N}
@info :getindex
x.data[inds...]
end
function Base.setindex!(x::G{T,N}, val, inds::Vararg{Int,N}) where {T,N}
@info :setindex
x.data[inds...] = val
end
using IOCapture
(value, output) = IOCapture.capture() do
g = G{Int,1}([5,6], "")
g2 = Float32.(g)
@info :g2 g2 isa G{Float32,1}
eval(quote
@info :g3 $g2 .+ 1
function Broadcast.broadcasted(::typeof(+), ::G{T,N}, ::Int) where {T,N}
@info :broadcasted
:nothing
end
@info :g5 $g2 .+ 1
end)
end
using Test
@test endswith(output, """
1}:
│ 6.0
└ 7.0
[ Info: broadcasted
┌ Info: g5
└ Float32[5.0, 6.0] .+ 1 = :nothing
""")
end # module test_julia_broadcast_style
|
From Coqprime Require Import PocklingtonRefl.
Local Open Scope positive_scope.
Lemma primo81:
prime 4732254109989697->
prime 46357161274830326863.
Proof.
intro H.
apply (Pocklington_refl
(Ell_certif
46357161274830326863
9796
((4732254109989697,1)::nil)
0
711828
117
1521)
((Proof_certif _ H) :: nil)).
native_cast_no_check (refl_equal true).
Time Qed.
|
[STATEMENT]
lemma from_FSM_simps[simp]:
assumes "q \<in> states M"
shows
"initial (from_FSM M q) = q"
"inputs (from_FSM M q) = inputs M"
"outputs (from_FSM M q) = outputs M"
"transitions (from_FSM M q) = transitions M"
"states (from_FSM M q) = states M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (FSM.initial (from_FSM M q) = q &&& FSM.inputs (from_FSM M q) = FSM.inputs M) &&& FSM.outputs (from_FSM M q) = FSM.outputs M &&& FSM.transitions (from_FSM M q) = FSM.transitions M &&& FSM.states (from_FSM M q) = FSM.states M
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
q \<in> FSM.states M
goal (1 subgoal):
1. (FSM.initial (from_FSM M q) = q &&& FSM.inputs (from_FSM M q) = FSM.inputs M) &&& FSM.outputs (from_FSM M q) = FSM.outputs M &&& FSM.transitions (from_FSM M q) = FSM.transitions M &&& FSM.states (from_FSM M q) = FSM.states M
[PROOF STEP]
by (transfer; simp)+ |
State Before: α : Type u
β : α → Type v
inst✝ : DecidableEq α
a : α
b : β a
s : AList β
⊢ insert a b ⟦s⟧ = ⟦AList.insert a b s⟧ State After: no goals Tactic: simp [insert] |
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.meta.default
import Mathlib.Lean3Lib.init.logic
import Mathlib.Lean3Lib.init.data.nat.lemmas
import Mathlib.Lean3Lib.init.data.char.basic
namespace Mathlib
namespace char
theorem val_of_nat_eq_of_is_valid {n : ℕ} : is_valid_char n → val (of_nat n) = n := sorry
theorem val_of_nat_eq_of_not_is_valid {n : ℕ} : ¬is_valid_char n → val (of_nat n) = 0 := sorry
theorem of_nat_eq_of_not_is_valid {n : ℕ} : ¬is_valid_char n → of_nat n = of_nat 0 :=
fun (h : ¬is_valid_char n) =>
eq_of_veq
(eq.mpr
(id
(Eq._oldrec (Eq.refl (val (of_nat n) = val (of_nat 0)))
(val_of_nat_eq_of_not_is_valid h)))
(Eq.refl 0))
theorem of_nat_ne_of_ne {n₁ : ℕ} {n₂ : ℕ} (h₁ : n₁ ≠ n₂) (h₂ : is_valid_char n₁)
(h₃ : is_valid_char n₂) : of_nat n₁ ≠ of_nat n₂ :=
ne_of_vne
(eq.mpr
(id (Eq._oldrec (Eq.refl (val (of_nat n₁) ≠ val (of_nat n₂))) (val_of_nat_eq_of_is_valid h₂)))
(eq.mpr (id (Eq._oldrec (Eq.refl (n₁ ≠ val (of_nat n₂))) (val_of_nat_eq_of_is_valid h₃))) h₁))
end Mathlib |
From MetaCoq.Template Require Import All.
From MetaCoq Require Import All.
Require Import String List.
Local Open Scope string.
Import ListNotations MCMonadNotation Nat.
From MetaCoq.PCUIC Require Import
PCUICAst PCUICAstUtils PCUICInduction
PCUICLiftSubst PCUICEquality
PCUICUnivSubst PCUICTyping PCUICGeneration.
From MetaCoq.PCUIC Require Import TemplateToPCUIC.
From MetaCoq.PCUIC Require Import PCUICToTemplate.
From MetaCoq.Translations Require Import translation_utils.
From MetaCoq.Translations Require Import param_binary.
Unset Strict Unquote Universe Mode.
Load test_types.
MetaCoq Run (TC <- Translate emptyTC "nat" ;;
tmDefinition "nat_TC" TC ).
Print natᵗ.
MetaCoq Run (TC <- Translate nat_TC "list" ;;
tmDefinition "list_TC" TC ).
Print listᵗ.
MetaCoq Run (TC <- Translate list_TC "vec" ;;
tmDefinition "vec_TC" TC ).
Print vecᵗ.
MetaCoq Run (TC <- Translate list_TC "rose" ;;
tmDefinition "rose_TC" TC ).
Print roseᵗ.
MetaCoq Run (TC <- Translate list_TC "rose2" ;;
tmDefinition "rose2_TC" TC ).
Print rose2ᵗ.
MetaCoq Run (TC <- Translate list_TC "roseSA" ;;
tmDefinition "roseSA_TC" TC ).
Print roseSAᵗ.
MetaCoq Run (TC <- Translate list_TC "roseA" ;;
tmDefinition "roseA_TC" TC ).
Print roseAᵗ.
MetaCoq Run (TC <- Translate list_TC "bool" ;;
tmDefinition "bool_TC" TC ).
Print boolᵗ.
MetaCoq Run (TC <- Translate bool_TC "nonUniTest" ;;
tmDefinition "nonUniTest_TC" TC ).
Print nonUniTestᵗ.
MetaCoq Run (TC <- Translate bool_TC "nonUniDepTest" ;;
tmDefinition "nonUniDepTest_TC" TC ).
Print nonUniDepTestᵗ.
MetaCoq Run (TC <- Translate nat_TC "guardTest" ;;
tmDefinition "guardTest_TC" TC ).
Print guardTestᵗ.
MetaCoq Run (TC <- Translate vec_TC "list2" ;;
tmDefinition "list2_TC" TC ).
Print list2ᵗ.
MetaCoq Run (TC <- Translate list2_TC "dNest" ;;
tmDefinition "dNest_TC" TC ).
Print dNestᵗ.
(* TODO: parametricity of typed with functions like List.app, Addition, ... *) |
theory Pugh
imports "HOL-Analysis.Analysis"
begin
(*
problem_number:2_12a
natural language statement:
Let $(p_n)$ be a sequence and $f:\mathbb{N}\to\mathbb{N}$ a bijection. The sequence $(q_k)_{k\in\mathbb{N}}$ with $q_k=p_{f(k)}$ is called a rearrangement of $(p_n)$. Show that if $f$ is an injection, the limit of a sequence is unaffected by rearrangement.
lean statement:
theorem exercise_2_12a (f : \<nat> \<rightarrow> \<nat>) (p : \<nat> \<rightarrow> \<real>) (a : \<real>)
(hf : injective f) (hp : tendsto p at_top (𝓝 a)) :
tendsto (\<lambda> n, p (f n)) at_top (𝓝 a) :=
codex statement:
theorem lim_of_rearrangement_of_injective:
fixes f::"nat \<Rightarrow> nat" and p::"nat \<Rightarrow> 'a::real_normed_vector"
assumes "inj f" "convergent p"
shows "convergent (\<lambda>n. p (f n))"
Our comment on the codex statement: a start, but the real meaning was not preserved
*)
theorem exercise_2_12a:
fixes f::"nat \<Rightarrow> nat" and p::"nat \<Rightarrow> 'a::real_normed_vector"
assumes "inj f"
shows "(\<lambda>n. p (f n)) \<longlonglongrightarrow> a \<longleftrightarrow> p \<longlonglongrightarrow> a"
oops
(*
problem_number:2_12b
natural language statement:
Let $(p_n)$ be a sequence and $f:\mathbb{N}\to\mathbb{N}$ a bijection. The sequence $(q_k)_{k\in\mathbb{N}}$ with $q_k=p_{f(k)}$ is called a rearrangement of $(p_n)$. Show that if $f$ is a surjection, the limit of a sequence is unaffected by rearrangement.
lean statement:
theorem exercise_2_12b (f : \<nat> \<rightarrow> \<nat>) (p : \<nat> \<rightarrow> \<real>) (a : \<real>)
(hf : surjective f) (hp : tendsto p at_top (𝓝 a)) :
tendsto (\<lambda> n, p (f n)) at_top (𝓝 a) :=
codex statement:
theorem lim_of_rearrangement_of_surjection:
fixes f::"nat \<Rightarrow> nat" and p::"nat \<Rightarrow> 'a::real_normed_vector"
assumes "bij f" "surj f" "\<forall>n. p n = q (f n)" "convergent p"
shows "convergent q \<and> lim p = lim q"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_2_12b: (*The informal versions of both exercises are ambiguous: is it given that the original sequence converges?*)
fixes f::"nat \<Rightarrow> nat" and p::"nat \<Rightarrow> 'a::real_normed_vector"
assumes "surj f"
shows "(\<lambda>n. p (f n)) \<longlonglongrightarrow> a \<longleftrightarrow> p \<longlonglongrightarrow> a"
oops
(*
problem_number:2_26
natural language statement:
Prove that a set $U \subset M$ is open if and only if none of its points are limits of its complement.
lean statement:
theorem exercise_2_26 {M : Type*} [topological_space M]
(U : set M) : is_open U \<longleftrightarrow> \<forall> x \<in> U, \<not> cluster_pt x (𝓟 Uᶜ) :=
codex statement:
theorem open_iff_no_limit_point_of_complement:
fixes U::"'a::metric_space set"
shows "open U \<longleftrightarrow> \<forall>x\<in>U. \<not>(x islimpt (-U))"
Our comment on the codex statement: Syntactically correct, but with the addition of parentheses, good (type class version)
*)
theorem exercise_2_26:
fixes U::"'a::metric_space set"
shows "open U \<longleftrightarrow> (\<forall>x\<in>U. \<not>(x islimpt (-U)))"
oops
(*
problem_number:2_29
natural language statement:
Let $\mathcal{T}$ be the collection of open subsets of a metric space $\mathrm{M}$, and $\mathcal{K}$ the collection of closed subsets. Show that there is a bijection from $\mathcal{T}$ onto $\mathcal{K}$.
lean statement:
theorem exercise_2_29 (M : Type* ) [metric_space M]
(O C : set (set M))
(hO : O = {s | is_open s})
(hC : C = {s | is_closed s}) :
\<exists> f : O \<rightarrow> C, bijective f :=
codex statement:
theorem bijection_open_closed:
fixes M::"'a::metric_space set"
shows "bij_betw (\<lambda>U. closure U) (open_sets M) (closed_sets M)"
Our comment on the codex statement: interesting but clearly wrong guess of the bijection (which must be complementation)
*)
theorem exercise_2_29:
fixes M::"'a::metric_space set"
shows "\<exists>f. bij_betw f {S. open S} {S. closed S}"
oops
(*
problem_number:2_32a
natural language statement:
Show that every subset of $\mathbb{N}$ is clopen.
lean statement:
theorem exercise_2_32a (A : set \<nat>) : is_clopen A :=
codex statement:
theorem clopen_of_subset_nat:
fixes A::"nat set"
shows "closed_in (top_of_set UNIV) A \<and> open_in (top_of_set UNIV) A"
Our comment on the codex statement: close, but it didn't know about discrete_topology
*)
theorem exercise_2_32a:
fixes A::"nat set"
shows "closedin (discrete_topology UNIV) A \<and> openin (discrete_topology UNIV) A"
by simp
(*
problem_number:2_41
natural language statement:
Let $\|\cdot\|$ be any norm on $\mathbb{R}^{m}$ and let $B=\left\{x \in \mathbb{R}^{m}:\|x\| \leq 1\right\}$. Prove that $B$ is compact.
lean statement:
theorem exercise_2_41 (m : \<nat>) {X : Type*} [normed_space \<real> ((fin m) \<rightarrow> \<real>)] :
is_compact (metric.closed_ball 0 1) :=
codex statement:
theorem compact_of_norm_leq_one:
fixes m::nat and f::"nat \<Rightarrow> real"
assumes "norm f \<le> 1"
shows "compact {x::'a::euclidean_space. \<forall>i. norm (x$i) \<le> f i}"
Our comment on the codex statement: seemingly inserted its own definition of norm, and we can't capture "any norm"
*)
theorem exercise_2_41:
shows "compact {x. norm x \<le> 1}"
oops
(*
problem_number:2_46
natural language statement:
Assume that $A, B$ are compact, disjoint, nonempty subsets of $M$. Prove that there are $a_0 \in A$ and $b_0 \in B$ such that for all $a \in A$ and $b \in B$ we have $d(a_0, b_0) \leq d(a, b)$.
lean statement:
theorem exercise_2_46 {M : Type*} [metric_space M]
{A B : set M} (hA : is_compact A) (hB : is_compact B)
(hAB : disjoint A B) (hA₀ : A \<noteq> \<emptyset>) (hB₀ : B \<noteq> \<emptyset>) :
\<exists> a₀ b₀, a₀ \<in> A \<and> b₀ \<in> B \<and> \<forall> (a : M) (b : M),
a \<in> A \<rightarrow> b \<in> B \<rightarrow> dist a₀ b₀ \<le> dist a b :=
codex statement:
theorem exists_min_distance_of_compact_disjoint_nonempty:
fixes A B::"'a::metric_space set"
assumes "compact A" "compact B" "A \<inter> B = {}" "A \<noteq> {}" "B \<noteq> {}"
shows "\<exists>a b. a\<in>A \<and> b\<in>B \<and> (\<forall>a'\<in>A. \<forall>b'\<in>B. dist a b \<le> dist a' b')"
Our comment on the codex statement: perfect!
*)
theorem exercise_2_46:
fixes A B::"'a::metric_space set"
assumes "compact A" "compact B" "A \<inter> B = {}" "A \<noteq> {}" "B \<noteq> {}"
shows "\<exists>a0 b0. a0\<in>A \<and> b0\<in>B \<and> (\<forall>a\<in>A. \<forall>b\<in>B. dist a0 b0 \<le> dist a b)"
oops
(*
problem_number:2_48
natural language statement:
Prove that there is an embedding of the line as a closed subset of the plane, and there is an embedding of the line as a bounded subset of the plane, but there is no embedding of the line as a closed and bounded subset of the plane.
lean statement:
codex statement:
theorem exists_embedding_of_line_as_closed_subset_of_plane:
fixes f::"real \<Rightarrow> 'a::euclidean_space"
assumes "continuous_on UNIV f" "inj_on f UNIV" "f ` UNIV \<subseteq> (UNIV::'a set)"
shows "closedin (subtopology euclidean (UNIV::'a set)) (f ` UNIV)"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_2_48:
shows "\<exists>f::real \<Rightarrow> complex. inj f \<and> closed (range f)"
"\<exists>f::real \<Rightarrow> complex. inj f \<and> bounded (range f)"
"\<nexists>f::real \<Rightarrow> complex. inj f \<and> closed (range f) \<and> bounded (range f)"
oops
(*
problem_number:2_56
natural language statement:
Prove that the 2-sphere is not homeomorphic to the plane.
lean statement:
codex statement:
theorem sphere_not_homeomorphic_to_plane:
fixes S::"real^2 set"
assumes "S homeomorphic (sphere (0,1))"
shows False
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_2_56: "\<not> sphere (0::real^3) 1 homeomorphic (UNIV::complex set)"
oops
(*
problem_number:2_57
natural language statement:
Show that if $S$ is connected, it is not true in general that its interior is connected.
lean statement:
theorem exercise_2_57 {X : Type*} [topological_space X]
: \<exists> (S : set X), is_connected S \<and> \<not> is_connected (interior S) :=
codex statement:
theorem interior_not_connected_of_connected:
fixes S::"'a::euclidean_space set"
assumes "connected S"
shows "\<exists>T. open T \<and> connected T \<and> interior T \<subseteq> S \<and> interior T \<noteq> \<emptyset> \<and> interior T \<noteq> S"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_2_57:
shows "\<exists>S. connectedin X S \<and> \<not> connectedin X (X interior_of S)"
oops
(*
problem_number:2_79
natural language statement:
Prove that if $M$ is nonempty compact, locally path-connected and connected then it is path-connected.
lean statement:
theorem exercise_2_79
{M : Type*} [topological_space M] [compact_space M]
[loc_path_connected_space M] (hM : nonempty M)
(hM : connected_space M) : path_connected_space M :=
codex statement:
theorem path_connected_of_nonempty_compact_locally_path_connected_connected:
fixes M::"'a::topological_space set"
assumes "compact M" "nonempty M" "locally path_connected M" "connected M"
shows "path_connected M"
Our comment on the codex statement: This version does not need the first two assumptions (why is the second one ever required)
*)
theorem exercise_2_79:
fixes M::"'a::topological_space set"
assumes "compact M" "M\<noteq>{}" "locally path_connected M" "connected M"
shows "path_connected M"
by (simp add: assms(3) assms(4) connected_component_eq_self path_component_eq_connected_component_set path_connected_component_set)
(*
problem_number:2_85
natural language statement:
Suppose that $M$ is compact and that $\mathcal{U}$ is an open covering of $M$ which is redundant in the sense that each $p \in M$ is contained in at least two members of $\mathcal{U}$. Show that $\mathcal{U}$ reduces to a finite subcovering with the same property.
lean statement:
theorem exercise_2_85
(M : Type* ) [topological_space M] [compact_space M]
(U : set (set M)) (hU : \<forall> p, \<exists> (U_1 U_2 \<in> U), p \<in> U_1 \<and> p \<in> U_2 \<and> U_1 \<noteq> U_2) :
\<exists> (V : set (set M)), set.finite V \<and>
\<forall> p, \<exists> (V_1 V_2 \<in> V), p \<in> V_1 \<and> p \<in> V_2 \<and> V_1 \<noteq> V_2 :=
codex statement:
theorem finite_subcovering_of_redundant_open_covering:
fixes M::"'a::metric_space set" and U::"'a set set"
assumes "compact M" "\<forall>p\<in>M. \<exists>U_1 U_2. U_1\<in>U \<and> U_2\<in>U \<and> p\<in>U_1 \<and> p\<in>U_2"
shows "\<exists>U'. finite U' \<and> U' \<subseteq> U \<and> \<forall>p\<in>M. \<exists>U_1 U_2. U_1\<in>U' \<and> U_2\<in>U' \<and> p\<in>U_1 \<and> p\<in>U_2"
Our comment on the codex statement: very good except for missing parentheses
*)
theorem exercise_2_85:
fixes M::"'a::metric_space set" and \<U>::"'a set set"
assumes "compact M" "\<forall>p\<in>M. \<exists>V W. V\<in>\<U> \<and> W\<in>\<U> \<and> p\<in>V \<and> p\<in>W"
shows "\<exists>\<U>'. finite \<U>' \<and> \<U>' \<subseteq> \<U> \<and> (\<forall>p\<in>M. \<exists>V W. V\<in>\<U>' \<and> W\<in>\<U>' \<and> p\<in>V \<and> p\<in>W)"
oops
(*
problem_number:2_92
natural language statement:
Give a direct proof that the nested decreasing intersection of nonempty covering compact sets is nonempty.
lean statement:
theorem exercise_2_92 {\<alpha> : Type*} [topological_space \<alpha>]
{s : \<nat> \<rightarrow> set \<alpha>}
(hs : \<forall> i, is_compact (s i))
(hs : \<forall> i, (s i).nonempty)
(hs : \<forall> i, (s i) \<supset> (s (i + 1))) :
(\<Inter> i, s i).nonempty :=
codex statement:
theorem nonempty_intersection_of_nested_compact_covering_sets:
fixes K::"nat \<Rightarrow> 'a::metric_space set"
assumes "\<forall>n. compact (K n)" "\<forall>n. K n \<subseteq> K (Suc n)" "\<forall>n. K n \<noteq> {}"
shows "\<exists>x. \<forall>n. x \<in> K n"
Our comment on the codex statement: the subset inclusion was in the wrong direction! (What does "covering" mean here?)
*)
theorem exercise_2_92:
fixes K::"nat \<Rightarrow> 'a::metric_space set"
assumes "\<forall>n. compact (K n)" "\<forall>n. K (Suc n) \<subseteq> K n" "\<forall>n. K n \<noteq> {}"
shows "\<exists>x. \<forall>n. x \<in> K n"
oops
(*
problem_number:2_109
natural language statement:
A metric on $M$ is an ultrametric if for all $x, y, z \in M$, $d(x, z) \leq \max \{d(x, y), d(y, z)\} .$ Show that a metric space with an ultrametric is totally disconnected.
lean statement:
theorem exercise_2_109
{M : Type*} [metric_space M]
(h : \<forall> x y z : M, dist x z = max (dist x y) (dist y z)) :
totally_disconnected_space M :=
codex statement:
theorem totally_disconnected_of_ultrametric:
fixes M::"'a::metric_space metric"
assumes "\<forall>x y z. dist x z \<le> max (dist x y) (dist y z)"
shows "totally_disconnected (UNIV::'a set)"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_2_109: undefined oops
(*
problem_number:2_126
natural language statement:
Suppose that $E$ is an uncountable subset of $\mathbb{R}$. Prove that there exists a point $p \in \mathbb{R}$ at which $E$ condenses.
lean statement:
theorem exercise_2_126 {E : set \<real>}
(hE : \<not> set.countable E) : \<exists> (p : \<real>), cluster_pt p (𝓟 E) :=
codex statement:
theorem exists_condensation_point_of_uncountable_subset:
fixes E::"real set"
assumes "uncountable E"
shows "\<exists>p. condensation_point E p"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_2_126:
fixes E::"real set"
assumes "uncountable E"
shows "\<exists>p. p islimpt E"
oops
(*
problem_number:2_137
natural language statement:
Let $P$ be a closed perfect subset of a separable complete metric space $M$. Prove that each point of $P$ is a condensation point of $P$.
lean statement:
theorem exercise_2_137
{M : Type*} [metric_space M] [separable_space M] [complete_space M]
{P : set M} (hP : is_closed P)
(hP' : is_closed P \<and> P = {x | cluster_pt x (𝓟 P)}) :
\<forall> x \<in> P, \<forall> n \<in> (𝓝 x), \<not> set.countable n :=
codex statement:
theorem condensation_point_of_closed_perfect_subset:
fixes P::"'a::metric_space set"
assumes "closed P" "perfect P" "separable (UNIV::'a set)"
shows "\<forall>x\<in>P. condensation_point P x"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_2_137: undefined oops
(*
problem_number:2_138
natural language statement:
Given a Cantor space $M \subset R^2$, given a line segment $[p, q] \subset R^2$ with $p, q \not\in M$, and given an $\epsilon > 0$, prove that there exists a path $A$ in the $\epsilon$-neighborhood of $[p, q]$ that joins $p$ to $q$ and is disjoint from $M$.
lean statement:
codex statement:
theorem exists_path_disjoint_of_Cantor_space:
fixes M::"real set" and p q::"real^2" and \<epsilon>::real
assumes "Cantor_space M" "p \<in> (UNIV::real^2 set) - M" "q \<in> (UNIV::real^2 set) - M" "\<epsilon> > 0"
shows "\<exists>A. path A \<and> path_image A \<subseteq> ball p \<epsilon> \<union> ball q \<epsilon> \<and> pathstart A = p \<and> pathfinish A = q \<and> path_image A \<inter> M = {}"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_2_138: undefined oops
(*
problem_number:3_1
natural language statement:
Assume that $f \colon \mathbb{R} \rightarrow \mathbb{R}$ satisfies $|f(t)-f(x)| \leq|t-x|^{2}$ for all $t, x$. Prove that $f$ is constant.
lean statement:
theorem exercise_3_1 {f : \<real> \<rightarrow> \<real>}
(hf : \<forall> x y, |f x - f y| \<le> |x - y| ^ 2) :
\<exists> c, f = \<lambda> x, c :=
codex statement:
theorem constant_of_abs_diff_leq_square_diff:
fixes f::"real \<Rightarrow> real"
assumes "\<forall>x t. abs (f t - f x) \<le> (abs (t - x))^2"
shows "f constant_on UNIV"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_3_1:
fixes f::"real \<Rightarrow> real"
assumes "\<forall>x t. \<bar>f t - f x\<bar> \<le> \<bar>t - x\<bar>^2"
shows "f constant_on UNIV"
oops
(*
problem_number:3_4
natural language statement:
Prove that $\sqrt{n+1}-\sqrt{n} \rightarrow 0$ as $n \rightarrow \infty$.
lean statement:
theorem exercise_3_4 (n : \<nat>) :
tendsto (\<lambda> n, (sqrt (n + 1) - sqrt n)) at_top (𝓝 0) :=
codex statement:
theorem sqrt_succ_sub_sqrt_tendsto_zero:
shows "(\<Sum>i=0..n. 1/(sqrt (real (Suc i)) + sqrt (real i))) \<longrightarrow> 0"
Our comment on the codex statement: This went completely wrong!
*)
theorem exercise_3_4:
shows "(\<lambda>n. sqrt (real (Suc n)) - sqrt (real n)) \<longlonglongrightarrow> 0"
oops
(*
problem_number:3_11a
natural language statement:
Let $f \colon (a, b) \rightarrow \mathbb{R}$ be given. If $f''(x)$ exists, prove that \[\lim_{h \rightarrow 0} \frac{f(x - h) - 2f(x) + f(x + h)}{h^2} = f''(x).\]
lean statement:
theorem exercise_3_11a
{f : \<real> \<rightarrow> \<real>} {a b x : \<real>}
(h1 : differentiable_within_at \<real> f (set.Ioo a b) x)
(h2 : differentiable_within_at \<real> (deriv f) (set.Ioo a b) x) :
\<exists> l, tendsto (\<lambda> h, (f (x - h) - 2 * f x + f (x + h)) / h ^ 2) (𝓝 0) (𝓝 l)
\<and> deriv (deriv f) x = l :=
codex statement:
theorem limit_of_diff_of_diff_eq_diff_of_diff:
fixes f::"real \<Rightarrow> real"
assumes "\<forall>x. (f has_real_derivative f' x) (at x)" "\<forall>x. (f has_real_derivative f'' x) (at x)"
shows "(f'' ---> f'' x) (at x)"
Our comment on the codex statement: Conclusion completely wrong
*)
theorem exercise_3_11a:
fixes f::"real \<Rightarrow> real"
assumes "a < x" "x < b"
and "(f has_derivative f') (at x within {a<..<b})"
and "(f' has_derivative f'') (at x within {a<..<b})"
shows "((\<lambda>h. (f (x - h) - 2 * f x + f (x + h)) / h ^ 2) \<longlongrightarrow> f'' x) (at 0)"
oops
(*
problem_number:3_17c-i
natural language statement:
Show that the bump function $\beta(x)=e^{2} e(1-x) \cdot e(x+1)$ is smooth.
lean statement:
codex statement:
theorem smooth_of_bump_function:
fixes x::real
assumes "x\<in>{-1..1}"
shows "\<forall>n. (\<partial>^n) (\<lambda>x. exp 2 * exp (-x) * exp (x+1)) x = exp 2 * exp (-x) * exp (x+1)"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem "exercise_3_17c-i": undefined oops (* we have no way to express smoothness*)
(*
problem_number:3_17c-ii
natural language statement:
Show that the bump function $\beta(x)=e^{2} e(1-x) \cdot e(x+1)$ is identically 0 outside the interval $(-1, 1)$.
lean statement:
codex statement:
theorem bump_function_is_zero_outside_interval:
fixes x::real
shows "x\<le>-1 \<or> x\<ge>1 \<longrightarrow> (\<lambda>x. exp 2 * exp (-x) * exp (x+1)) x = 0"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem "exercise_3_17c-ii": undefined oops (* impossible to interpret this formula, and the conclusion seems to be false*)
(*
problem_number:3_18
natural language statement:
Let $L$ be any closed set in $\mathbb{R}$. Prove that there is a smooth function $f \colon \mathbb{R} \rightarrow [0, 1]$ such that $f(x) = 0$ if and only if $x \in L$.
lean statement:
codex statement:
theorem exists_smooth_function_of_closed_set:
fixes L::"real set"
assumes "closed L"
shows "\<exists>f. (\<forall>x. f x = 0 \<longleftrightarrow> x\<in>L) \<and> (\<forall>x. f differentiable (at x))"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_3_18: undefined oops (* we have no way to express smoothness*)
(*
problem_number:3_43a
natural language statement:
Let $\psi(x) = x \sin 1/x$ for $0 < x \leq 1$ and $\psi(0) = 0$. If $f \colon [-1, 1] \rightarrow \mathbb{R}$ is Riemann integrable, prove that $f \circ \psi$ is Riemann integrable.
lean statement:
codex statement:
theorem riemann_integrable_of_riemann_integrable_comp:
fixes f::"real \<Rightarrow> real" and \<psi>::"real \<Rightarrow> real"
assumes "continuous_on {0..1} \<psi>" "f integrable_on {-1..1}"
shows "(f \<circ> \<psi>) integrable_on {0..1}"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_3_43a: undefined oops (* we do not have Riemann integrals*)
(*
problem_number:3_53
natural language statement:
Given $f, g \in \mathcal{R}$, prove that $\max(f, g)$ and $\min(f, g)$ are Riemann integrable, where $\max(f, g)(x) = \max(f(x), g(x))$ and $\min(f, g)(x) = \min(f(x), g(x))$.
lean statement:
codex statement:
theorem max_min_integrable:
fixes f g::"real \<Rightarrow> real"
assumes "f integrable_on {a..b}" "g integrable_on {a..b}"
shows "(\<lambda>x. max (f x) (g x)) integrable_on {a..b}" "(\<lambda>x. min (f x) (g x)) integrable_on {a..b}"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_3_53: undefined oops(* we do not have Riemann integrals*)
(*
problem_number:3_59
natural language statement:
Prove that if $a_n \geq 0$ and $\sum a_n$ converges then $\sum \sqrt{a_n}/n$ converges.
lean statement:
codex statement:
theorem convergent_of_convergent_sum_sqrt_div_n:
fixes a::"nat \<Rightarrow> real"
assumes "\<forall>n. 0 \<le> a n" "summable a"
shows "summable (\<lambda>n. sqrt (a n) / n)"
Our comment on the codex statement: Perfect!
*)
theorem exercise_3_59:
fixes a::"nat \<Rightarrow> real"
assumes "\<forall>n. 0 \<le> a n" "summable a"
shows "summable (\<lambda>n. sqrt (a n) / n)"
oops
(*
problem_number:3_63
natural language statement:
Prove that $\sum 1/k(\log(k))^p$ converges when $p > 1$ and diverges when $p \leq 1$.
lean statement:
theorem exercise_3_63a (p : \<real>) (f : \<nat> \<rightarrow> \<real>) (hp : p > 1)
(h : f = \<lambda> k, (1 : \<real>) / (k * (log k) ^ p)) :
\<exists> l, tendsto f at_top (𝓝 l) :=
codex statement:
theorem sum_of_inverse_log_pow_p_converges_of_p_gt_1:
fixes p::real
assumes "p > 1"
shows "summable (\<lambda>n. 1 / (real n * (log (real n)) ^ p))"
Our comment on the codex statement: correct except for log (should be ln) and ^ (should be powr)
*)
theorem exercise_3_63a:
fixes p::real
assumes "p > 1"
shows "summable (\<lambda>n. 1 / (real n * (ln (real n) powr p)))"
oops
theorem exercise_3_63b:
fixes p::real
assumes "p \<le> 1"
shows "\<not> summable (\<lambda>n. 1 / (real n * (ln (real n) powr p)))"
oops
(*
problem_number:4_15a
natural language statement:
A continuous, strictly increasing function $\mu \colon (0, \infty) \rightarrow (0, \infty)$ is a modulus of continuity if $\mu(s) \rightarrow 0$ as $s \rightarrow 0$. A function $f \colon [a, b] \rightarrow \mathbb{R}$ has modulus of continuity $\mu$ if $|f(s) - f(t)| \leq \mu(|s - t|)$ for all $s, t \in [a, b]$. Prove that a function is uniformly continuous if and only if it has a modulus of continuity.
lean statement:
theorem exercise_4_15a {\<alpha> : Type*}
(a b : \<real>) (F : set (\<real> \<rightarrow> \<real>)) :
(\<forall> (x : \<real>) (\<epsilon> > 0), \<exists> (U \<in> (𝓝 x)),
(\<forall> (y z \<in> U) (f : \<real> \<rightarrow> \<real>), f \<in> F \<rightarrow> (dist (f y) (f z) < \<epsilon>)))
\<longleftrightarrow>
\<exists> (\<mu> : \<real> \<rightarrow> \<real>), \<forall> (x : \<real>), (0 : \<real>) \<le> \<mu> x \<and> tendsto \<mu> (𝓝 0) (𝓝 0) \<and>
(\<forall> (s t : \<real>) (f : \<real> \<rightarrow> \<real>), f \<in> F \<rightarrow> |(f s) - (f t)| \<le> \<mu> (|s - t|)) :=
codex statement:
theorem uniform_continuous_iff_has_modulus_of_continuity:
fixes f::"'a::metric_space \<Rightarrow> 'b::metric_space" and \<mu>::"'a \<Rightarrow> 'b"
assumes "continuous_on UNIV \<mu>" "strict_mono \<mu>" "\<mu> \<longrightarrow> 0 at_top" "\<forall>s t. s \<in> UNIV \<longrightarrow> t \<in> UNIV \<longrightarrow> dist s t \<le> \<mu> (dist s t)"
shows "uniformly_continuous_on UNIV f"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
definition "is_modulus_continuity
\<equiv> \<lambda> \<mu>. continuous_on {0<..} \<mu> \<and> strict_mono_on {0<..} \<mu> \<and> \<mu> \<in> {0<..} \<rightarrow> {0<..} \<and> (\<mu> \<longlongrightarrow> 0) (at 0 within {0<..})"
definition "has_modulus_continuity
\<equiv> \<lambda> \<mu> f a b. is_modulus_continuity \<mu> \<and> (\<forall>x \<in> {a..b}. \<forall>y \<in> {a..b}. \<bar>f x - f y\<bar> \<le> \<mu>\<bar>x-y\<bar>)"
theorem exercise_4_15a:
fixes f::"real \<Rightarrow> real"
shows "uniformly_continuous_on {a..b} f \<longleftrightarrow> (\<exists>\<mu>. has_modulus_continuity \<mu> f a b)"
oops
(*
problem_number:4_15b
natural language statement:
A continuous, strictly increasing function $\mu \colon (0, \infty) \rightarrow (0, \infty)$ is a modulus of continuity if $\mu(s) \rightarrow 0$ as $s \rightarrow 0$. A function $f \colon [a, b] \rightarrow \mathbb{R}$ has modulus of continuity $\mu$ if $|f(s) - f(t)| \leq \mu(|s - t|)$ for all $s, t \in [a, b]$. Prove that a family of functions is equicontinuous if and only if its members have a common modulus of continuity.
lean statement:
codex statement:
theorem equicontinuous_of_modulus_of_continuity:
fixes f::"'a::metric_space \<Rightarrow> 'b::metric_space" and g::"'a::metric_space \<Rightarrow> 'b::metric_space"
assumes "\<forall>x. continuous (at x) f" "\<forall>x. continuous (at x) g" "\<forall>x. continuous (at x within s) f" "\<forall>x. continuous (at x within s) g"
shows "uniformly_continuous_on s f" "uniformly_continuous_on s g"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_4_15b:
fixes \<F>::"(real \<Rightarrow> real) set"
assumes "a<b"
shows "(\<forall>e>0. \<exists>d>0. \<forall>f \<in> \<F>. \<forall>x \<in> {a..b}. \<forall>x' \<in> {a..b}. \<bar>x'-x\<bar> < d \<longrightarrow> \<bar>f x' - f x\<bar> < e)
\<longleftrightarrow> (\<exists>\<mu>. \<forall>f \<in> \<F>. has_modulus_continuity \<mu> f a b)"
oops
(*
problem_number:4_19
natural language statement:
If $M$ is compact and $A$ is dense in $M$, prove that for each $\delta > 0$ there is a finite subset $\{a_1, \ldots , a_k\} \subset A$ which is $\delta$-dense in $M$ in the sense that each $x \in M$ lies within distance $\delta$ of at least one of the points $a_1,\ldots, a_k$.
lean statement:
theorem exercise_4_19 {M : Type*} [metric_space M]
[compact_space M] (A : set M) (hA : dense A) (\<delta> : \<real>) (h\<delta> : \<delta> > 0) :
\<exists> (A_fin : set M), A_fin \<subset> A \<and> set.finite A_fin \<and> \<forall> (x : M), \<exists> i \<in> A_fin, dist x i < \<delta> :=
codex statement:
theorem exists_finite_delta_dense_of_compact_dense:
fixes M::"'a::metric_space set" and A::"'a set"
assumes "compact M" "A \<subseteq> M" "dense A"
shows "\<exists>A'. finite A' \<and> A' \<subseteq> A \<and> \<forall>x\<in>M. \<exists>a\<in>A'. dist x a < \<delta>"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_4_19:
fixes M::"'a::metric_space set" and A::"'a set"
assumes "compact M" "A \<subseteq> M" "M \<subseteq> closure A" "\<delta> > 0"
shows "\<exists>A'. finite A' \<and> A' \<subseteq> A \<and> (\<forall>x\<in>M. \<exists>a\<in>A'. dist x a < \<delta>)"
oops
(*
problem_number:4_36a
natural language statement:
Suppose that the ODE $x' = f(x)$ on $\mathbb{R}$ is bounded, $|f(x)| \leq M$ for all x. Prove that no solution of the ODE escapes to infinity in finite time.
lean statement:
codex statement:
theorem no_solution_escapes_to_infinity_in_finite_time:
fixes f::"real \<Rightarrow> real"
assumes "\<forall>x. abs (f x) \<le> M"
shows "\<forall>x0 t. \<exists>x. x0 + t * f x0 = x"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_4_36a: undefined oops
(*
problem_number:4_42
natural language statement:
Prove that $\mathbb{R}$ cannot be expressed as the countable union of Cantor sets.
lean statement:
codex statement:
theorem cantor_set_not_union_of_countable_cantor_sets:
fixes C::"real set"
assumes "\<forall>x\<in>C. \<exists>a b. x = a + b \<and> a \<in> cantor \<and> b \<in> cantor" "countable C"
shows "False"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_4_42: undefined oops(* we do not have a formalisation of Cantor sets*)
(*
problem_number:5_2
natural language statement:
Let $L$ be the vector space of continuous linear transformations from a normed space $V$ to a normed space $W$. Show that the operator norm makes $L$ a normed space.
lean statement:
theorem exercise_5_2 {V : Type*} [normed_add_comm_group V]
[normed_space \<complex> V] {W : Type*} [normed_add_comm_group W] [normed_space \<complex> W] :
normed_space \<complex> (continuous_linear_map (id \<complex>) V W) :=
codex statement:
theorem norm_of_linear_transformation_is_norm:
fixes V::"'a::real_normed_vector normed_vector" and W::"'b::real_normed_vector normed_vector"
assumes "linear f"
shows "norm f = \<parallel>f\<parallel>"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_5_2: undefined oops
(*
problem_number:5_20
natural language statement:
Assume that $U$ is a connected open subset of $\mathbb{R}^n$ and $f \colon U \rightarrow \mathbb{R}^m$ is differentiable everywhere on $U$. If $(Df)_p = 0$ for all $p \in U$, show that $f$ is constant.
lean statement:
codex statement:
theorem constant_of_differentiable_zero:
fixes f::"'a::euclidean_space \<Rightarrow> 'b::euclidean_space"
assumes "connected U" "open U" "\<forall>x\<in>U. f differentiable (at x)" "\<forall>x\<in>U. (D f) x = 0"
shows "f constant_on U"
Our comment on the codex statement: not bad, but we have no D!
*)
theorem exercise_5_20:
fixes f::"'a::euclidean_space \<Rightarrow> 'b::euclidean_space"
assumes "connected U" "open U" "\<forall>x\<in>U. (f has_derivative (\<lambda>x. 0)) (at x)"
shows "f constant_on U"
by (smt (verit, best) assms constant_on_def has_derivative_zero_unique_connected)
(*
problem_number:5_22
natural language statement:
If $Y$ is a metric space and $f \colon [a, b] \times Y \rightarrow \mathbb{R}$ is continuous, show that $F(y) = \int^b_a f(x,y) dx$ is continuous.
lean statement:
codex statement:
theorem continuous_of_continuous_integral:
fixes f::"'a::metric_space \<Rightarrow> 'b::metric_space \<Rightarrow> 'c::metric_space"
assumes "continuous_on (UNIV::'a set) (\<lambda>y. \<integral> {a..b} (f x y) dx)"
shows "continuous_on (UNIV::'b set) (\<lambda>y. \<integral> {a..b} (f x y) dx)"
Our comment on the codex statement: Wrong all the way
*)
theorem exercise_5_22: (*Lebesgue integrals seem to be expected here*)
fixes f::"real * 'a::metric_space \<Rightarrow> real"
assumes "continuous_on UNIV f"
shows "\<forall>y\<in>Y. integrable (lebesgue_on {a..b}) (\<lambda>x. f (x,y))" "continuous_on Y (\<lambda>y. integral\<^sup>L (lebesgue_on {a..b}) (\<lambda>x. f (x,y)))"
oops
(*
problem_number:5_43a
natural language statement:
Suppose that $T \colon R^n \rightarrow R^m$ has rank $k$. Show there exists a $\delta > 0$ such that if $S \colon R^n \rightarrow R^m$ and $||S - T|| < \delta$ then $S$ has rank $\geq k$.
lean statement:
codex statement:
theorem exists_delta_of_rank_leq_rank_of_norm_lt_delta:
fixes T::"'a::euclidean_space \<Rightarrow> 'b::euclidean_space" and S::"'a::euclidean_space \<Rightarrow> 'b::euclidean_space"
assumes "linear T" "linear S" "rank T = k"
shows "\<exists>\<delta>>0. \<forall>S. linear S \<longrightarrow> (\<parallel>S - T\<parallel> < \<delta> \<longrightarrow> rank S \<ge> k)"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_5_43a: undefined oops
(*
problem_number:6_38
natural language statement:
If $f$ and $g$ are integrable prove that their maximum and minimum are integrable.
lean statement:
codex statement:
theorem integrable_max_min:
fixes f g::"'a::euclidean_space \<Rightarrow> 'b::banach"
assumes "integrable M f" "integrable M g"
shows "integrable M (\<lambda>x. max (f x) (g x))" "integrable M (\<lambda>x. min (f x) (g x))"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_6_38:
fixes f g::"'a::euclidean_space \<Rightarrow> real"
assumes "integrable M f" "integrable M g"
shows "integrable M (\<lambda>x. max (f x) (g x))" "integrable M (\<lambda>x. min (f x) (g x))"
using assms by blast+
(*
problem_number:6_39
natural language statement:
Suppose that $f$ and $g$ are measurable and their squares are integrable. Prove that $fg$ is measurable, integrable, and $\int fg \leq \sqrt{\int f^2} \sqrt{\int g^2}$.
lean statement:
codex statement:
theorem integrable_of_integrable_square:
fixes f g::"'a::euclidean_space \<Rightarrow> real"
assumes "integrable lborel f" "integrable lborel g"
shows "integrable lborel (\<lambda>x. f x * g x)"
Our comment on the codex statement: <YOU CAN LEAVE YOUR COMMENT HERE>
*)
theorem exercise_6_39:
fixes f g::"'a::euclidean_space \<Rightarrow> real"
assumes "f \<in> borel_measurable borel" "integrable lborel (\<lambda>x. f x ^ 2)"
assumes "g \<in> borel_measurable borel" "integrable lborel (\<lambda>x. g x ^ 2)"
shows "(\<lambda>x. f x * g x) \<in> borel_measurable borel" "integrable lborel (\<lambda>x. f x * g x)"
"integral\<^sup>L lborel (\<lambda>x. f x * g x) \<le> sqrt (integral\<^sup>L lborel (\<lambda>x. f x ^ 2)) * sqrt (integral\<^sup>L lborel (\<lambda>x. g x ^ 2))"
apply (simp add: borel_measurable_times assms)
oops
(*
problem_number:6_43
natural language statement:
Prove that $g(y) = \int_0^\infty e^{-x} \sin(x + y) dx$ is differentiable and find $g'(y)$.
lean statement:
codex statement:
theorem diff_integral_of_exp_sin:
fixes y::real
shows "((\<lambda>x. exp (-x) * sin (x + y)) has_vector_derivative (exp (-y) * cos y)) (at y)"
Our comment on the codex statement: Rather scrambled
*)
theorem exercise_6_43:
fixes y::real
shows "(\<lambda>y::real. integral\<^sup>L (lebesgue_on {0<..}) (\<lambda>x. exp (-x) * sin (x + y))) differentiable_on {0<..}"
oops
(*
problem_number:6_49a
natural language statement:
Prove that $f \colon \mathbb{R} \rightarrow \mathbb{R}$ is Lebesgue measurable if and only if the preimage of every Borel set is a Lebesgue measurable.
lean statement:
codex statement:
theorem lebesgue_measurable_of_preimage_borel_is_lebesgue_measurable:
fixes f::"'a::euclidean_space \<Rightarrow> 'b::euclidean_space"
assumes "\<forall>s. borel_measurable s \<longrightarrow> borel_measurable (f -` s)"
shows "lebesgue_measurable f"
Our comment on the codex statement: not completely wrong
*)
theorem exercise_6_49a: (*Not sure I have formalised measurable sets and functions correctly*)
assumes "\<forall>S. S \<in> fmeasurable borel \<longrightarrow> (f -` S) \<in> fmeasurable borel"
shows "f \<in> measurable borel borel"
oops
end
|
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes, Abhimanyu Pallavi Sudhir, Jean Lo, Calle Sönne, Benjamin Davidson
! This file was ported from Lean 3 source module analysis.special_functions.trigonometric.arctan
! leanprover-community/mathlib commit f2ce6086713c78a7f880485f7917ea547a215982
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Analysis.SpecialFunctions.Trigonometric.Complex
/-!
# The `arctan` function.
Inequalities, derivatives,
and `real.tan` as a `local_homeomorph` between `(-(π / 2), π / 2)` and the whole line.
-/
noncomputable section
namespace Real
open Set Filter
open Topology Real
theorem tan_add {x y : ℝ}
(h :
((∀ k : ℤ, x ≠ (2 * k + 1) * π / 2) ∧ ∀ l : ℤ, y ≠ (2 * l + 1) * π / 2) ∨
(∃ k : ℤ, x = (2 * k + 1) * π / 2) ∧ ∃ l : ℤ, y = (2 * l + 1) * π / 2) :
tan (x + y) = (tan x + tan y) / (1 - tan x * tan y) := by
simpa only [← Complex.ofReal_inj, Complex.ofReal_sub, Complex.ofReal_add, Complex.ofReal_div,
Complex.ofReal_mul, Complex.ofReal_tan] using
@Complex.tan_add (x : ℂ) (y : ℂ) (by convert h <;> norm_cast)
#align real.tan_add Real.tan_add
theorem tan_add' {x y : ℝ}
(h : (∀ k : ℤ, x ≠ (2 * k + 1) * π / 2) ∧ ∀ l : ℤ, y ≠ (2 * l + 1) * π / 2) :
tan (x + y) = (tan x + tan y) / (1 - tan x * tan y) :=
tan_add (Or.inl h)
#align real.tan_add' Real.tan_add'
theorem tan_two_mul {x : ℝ} : tan (2 * x) = 2 * tan x / (1 - tan x ^ 2) := by
simpa only [← Complex.ofReal_inj, Complex.ofReal_sub, Complex.ofReal_div, Complex.ofReal_pow,
Complex.ofReal_mul, Complex.ofReal_tan, Complex.ofReal_bit0, Complex.ofReal_one] using
Complex.tan_two_mul
#align real.tan_two_mul Real.tan_two_mul
theorem tan_ne_zero_iff {θ : ℝ} : tan θ ≠ 0 ↔ ∀ k : ℤ, θ ≠ k * π / 2 := by
rw [← Complex.ofReal_ne_zero, Complex.ofReal_tan, Complex.tan_ne_zero_iff] <;> norm_cast
#align real.tan_ne_zero_iff Real.tan_ne_zero_iff
theorem tan_eq_zero_iff {θ : ℝ} : tan θ = 0 ↔ ∃ k : ℤ, θ = k * π / 2 := by
rw [← not_iff_not, not_exists, ← Ne, tan_ne_zero_iff]
#align real.tan_eq_zero_iff Real.tan_eq_zero_iff
theorem tan_int_mul_pi_div_two (n : ℤ) : tan (n * π / 2) = 0 :=
tan_eq_zero_iff.mpr (by use n)
#align real.tan_int_mul_pi_div_two Real.tan_int_mul_pi_div_two
theorem continuousOn_tan : ContinuousOn tan { x | cos x ≠ 0 } :=
by
suffices ContinuousOn (fun x => sin x / cos x) { x | cos x ≠ 0 }
by
have h_eq : (fun x => sin x / cos x) = tan :=
by
ext1 x
rw [tan_eq_sin_div_cos]
rwa [h_eq] at this
exact continuous_on_sin.div continuous_on_cos fun x => id
#align real.continuous_on_tan Real.continuousOn_tan
@[continuity]
theorem continuous_tan : Continuous fun x : { x | cos x ≠ 0 } => tan x :=
continuousOn_iff_continuous_restrict.1 continuousOn_tan
#align real.continuous_tan Real.continuous_tan
theorem continuousOn_tan_Ioo : ContinuousOn tan (Ioo (-(π / 2)) (π / 2)) :=
by
refine' ContinuousOn.mono continuous_on_tan fun x => _
simp only [and_imp, mem_Ioo, mem_set_of_eq, Ne.def]
rw [cos_eq_zero_iff]
rintro hx_gt hx_lt ⟨r, hxr_eq⟩
cases le_or_lt 0 r
· rw [lt_iff_not_ge] at hx_lt
refine' hx_lt _
rw [hxr_eq, ← one_mul (π / 2), mul_div_assoc, ge_iff_le, mul_le_mul_right (half_pos pi_pos)]
simp [h]
· rw [lt_iff_not_ge] at hx_gt
refine' hx_gt _
rw [hxr_eq, ← one_mul (π / 2), mul_div_assoc, ge_iff_le, neg_mul_eq_neg_mul,
mul_le_mul_right (half_pos pi_pos)]
have hr_le : r ≤ -1 := by rwa [Int.lt_iff_add_one_le, ← le_neg_iff_add_nonpos_right] at h
rw [← le_sub_iff_add_le, mul_comm, ← le_div_iff]
· norm_num
rw [← Int.cast_one, ← Int.cast_neg]
norm_cast
exact hr_le
· exact zero_lt_two
#align real.continuous_on_tan_Ioo Real.continuousOn_tan_Ioo
theorem surjOn_tan : SurjOn tan (Ioo (-(π / 2)) (π / 2)) univ :=
have := neg_lt_self pi_div_two_pos
continuousOn_tan_Ioo.surjOn_of_tendsto (nonempty_Ioo.2 this)
(by simp [tendsto_tan_neg_pi_div_two, this]) (by simp [tendsto_tan_pi_div_two, this])
#align real.surj_on_tan Real.surjOn_tan
theorem tan_surjective : Function.Surjective tan := fun x => surjOn_tan.subset_range trivial
#align real.tan_surjective Real.tan_surjective
theorem image_tan_Ioo : tan '' Ioo (-(π / 2)) (π / 2) = univ :=
univ_subset_iff.1 surjOn_tan
#align real.image_tan_Ioo Real.image_tan_Ioo
/-- `real.tan` as an `order_iso` between `(-(π / 2), π / 2)` and `ℝ`. -/
def tanOrderIso : Ioo (-(π / 2)) (π / 2) ≃o ℝ :=
(strictMonoOn_tan.OrderIso _ _).trans <|
(OrderIso.setCongr _ _ image_tan_Ioo).trans OrderIso.Set.univ
#align real.tan_order_iso Real.tanOrderIso
/-- Inverse of the `tan` function, returns values in the range `-π / 2 < arctan x` and
`arctan x < π / 2` -/
@[pp_nodot]
noncomputable def arctan (x : ℝ) : ℝ :=
tanOrderIso.symm x
#align real.arctan Real.arctan
@[simp]
theorem tan_arctan (x : ℝ) : tan (arctan x) = x :=
tanOrderIso.apply_symm_apply x
#align real.tan_arctan Real.tan_arctan
theorem arctan_mem_Ioo (x : ℝ) : arctan x ∈ Ioo (-(π / 2)) (π / 2) :=
Subtype.coe_prop _
#align real.arctan_mem_Ioo Real.arctan_mem_Ioo
@[simp]
theorem range_arctan : range arctan = Ioo (-(π / 2)) (π / 2) :=
((EquivLike.surjective _).range_comp _).trans Subtype.range_coe
#align real.range_arctan Real.range_arctan
theorem arctan_tan {x : ℝ} (hx₁ : -(π / 2) < x) (hx₂ : x < π / 2) : arctan (tan x) = x :=
Subtype.ext_iff.1 <| tanOrderIso.symm_apply_apply ⟨x, hx₁, hx₂⟩
#align real.arctan_tan Real.arctan_tan
theorem cos_arctan_pos (x : ℝ) : 0 < cos (arctan x) :=
cos_pos_of_mem_Ioo <| arctan_mem_Ioo x
#align real.cos_arctan_pos Real.cos_arctan_pos
theorem cos_sq_arctan (x : ℝ) : cos (arctan x) ^ 2 = 1 / (1 + x ^ 2) := by
rw [one_div, ← inv_one_add_tan_sq (cos_arctan_pos x).ne', tan_arctan]
#align real.cos_sq_arctan Real.cos_sq_arctan
theorem sin_arctan (x : ℝ) : sin (arctan x) = x / sqrt (1 + x ^ 2) := by
rw [← tan_div_sqrt_one_add_tan_sq (cos_arctan_pos x), tan_arctan]
#align real.sin_arctan Real.sin_arctan
theorem cos_arctan (x : ℝ) : cos (arctan x) = 1 / sqrt (1 + x ^ 2) := by
rw [one_div, ← inv_sqrt_one_add_tan_sq (cos_arctan_pos x), tan_arctan]
#align real.cos_arctan Real.cos_arctan
theorem arctan_lt_pi_div_two (x : ℝ) : arctan x < π / 2 :=
(arctan_mem_Ioo x).2
#align real.arctan_lt_pi_div_two Real.arctan_lt_pi_div_two
theorem neg_pi_div_two_lt_arctan (x : ℝ) : -(π / 2) < arctan x :=
(arctan_mem_Ioo x).1
#align real.neg_pi_div_two_lt_arctan Real.neg_pi_div_two_lt_arctan
theorem arctan_eq_arcsin (x : ℝ) : arctan x = arcsin (x / sqrt (1 + x ^ 2)) :=
Eq.symm <| arcsin_eq_of_sin_eq (sin_arctan x) (mem_Icc_of_Ioo <| arctan_mem_Ioo x)
#align real.arctan_eq_arcsin Real.arctan_eq_arcsin
theorem arcsin_eq_arctan {x : ℝ} (h : x ∈ Ioo (-(1 : ℝ)) 1) :
arcsin x = arctan (x / sqrt (1 - x ^ 2)) := by
rw [arctan_eq_arcsin, div_pow, sq_sqrt, one_add_div, div_div, ← sqrt_mul, mul_div_cancel',
sub_add_cancel, sqrt_one, div_one] <;>
nlinarith [h.1, h.2]
#align real.arcsin_eq_arctan Real.arcsin_eq_arctan
@[simp]
theorem arctan_zero : arctan 0 = 0 := by simp [arctan_eq_arcsin]
#align real.arctan_zero Real.arctan_zero
theorem arctan_eq_of_tan_eq {x y : ℝ} (h : tan x = y) (hx : x ∈ Ioo (-(π / 2)) (π / 2)) :
arctan y = x :=
injOn_tan (arctan_mem_Ioo _) hx (by rw [tan_arctan, h])
#align real.arctan_eq_of_tan_eq Real.arctan_eq_of_tan_eq
@[simp]
theorem arctan_one : arctan 1 = π / 4 :=
arctan_eq_of_tan_eq tan_pi_div_four <| by constructor <;> linarith [pi_pos]
#align real.arctan_one Real.arctan_one
@[simp]
theorem arctan_neg (x : ℝ) : arctan (-x) = -arctan x := by simp [arctan_eq_arcsin, neg_div]
#align real.arctan_neg Real.arctan_neg
theorem arctan_eq_arccos {x : ℝ} (h : 0 ≤ x) : arctan x = arccos (sqrt (1 + x ^ 2))⁻¹ :=
by
rw [arctan_eq_arcsin, arccos_eq_arcsin]; swap; · exact inv_nonneg.2 (sqrt_nonneg _)
congr 1
rw [← sqrt_inv, sq_sqrt, ← one_div, one_sub_div, add_sub_cancel', sqrt_div, sqrt_sq h]
all_goals positivity
#align real.arctan_eq_arccos Real.arctan_eq_arccos
-- The junk values for `arccos` and `sqrt` make this true even for `1 < x`.
theorem arccos_eq_arctan {x : ℝ} (h : 0 < x) : arccos x = arctan (sqrt (1 - x ^ 2) / x) :=
by
rw [arccos, eq_comm]
refine' arctan_eq_of_tan_eq _ ⟨_, _⟩
· rw [tan_pi_div_two_sub, tan_arcsin, inv_div]
· linarith only [arcsin_le_pi_div_two x, pi_pos]
· linarith only [arcsin_pos.2 h]
#align real.arccos_eq_arctan Real.arccos_eq_arctan
@[continuity]
theorem continuous_arctan : Continuous arctan :=
continuous_subtype_val.comp tanOrderIso.toHomeomorph.continuous_invFun
#align real.continuous_arctan Real.continuous_arctan
theorem continuousAt_arctan {x : ℝ} : ContinuousAt arctan x :=
continuous_arctan.ContinuousAt
#align real.continuous_at_arctan Real.continuousAt_arctan
/-- `real.tan` as a `local_homeomorph` between `(-(π / 2), π / 2)` and the whole line. -/
def tanLocalHomeomorph : LocalHomeomorph ℝ ℝ
where
toFun := tan
invFun := arctan
source := Ioo (-(π / 2)) (π / 2)
target := univ
map_source' := mapsTo_univ _ _
map_target' y hy := arctan_mem_Ioo y
left_inv' x hx := arctan_tan hx.1 hx.2
right_inv' y hy := tan_arctan y
open_source := isOpen_Ioo
open_target := isOpen_univ
continuous_toFun := continuousOn_tan_Ioo
continuous_invFun := continuous_arctan.ContinuousOn
#align real.tan_local_homeomorph Real.tanLocalHomeomorph
@[simp]
theorem coe_tanLocalHomeomorph : ⇑tanLocalHomeomorph = tan :=
rfl
#align real.coe_tan_local_homeomorph Real.coe_tanLocalHomeomorph
@[simp]
theorem coe_tanLocalHomeomorph_symm : ⇑tanLocalHomeomorph.symm = arctan :=
rfl
#align real.coe_tan_local_homeomorph_symm Real.coe_tanLocalHomeomorph_symm
end Real
|
module LibA where
import LibB
|
{-# OPTIONS --without-K #-}
module NTypes.Sigma where
open import NTypes
open import PathOperations
open import PathStructure.Sigma
open import Transport
open import Types
Σ-isSet : ∀ {a b} {A : Set a} {B : A → Set b} →
isSet A → (∀ x → isSet (B x)) → isSet (Σ A B)
Σ-isSet {A = A} {B = B} A-set B-set x y p q
= split-eq p ⁻¹
· ap₂-dep-eq {B = B} _,_
(ap π₁ p)
(ap π₁ q)
π₁-eq
(tr-∘ π₁ p (π₂ x) ⁻¹ · apd π₂ p)
(tr-∘ π₁ q (π₂ x) ⁻¹ · apd π₂ q)
π₂-eq
· split-eq q
where
split-eq : (p : x ≡ y) →
ap₂-dep {B = B} _,_ (ap π₁ p)
(tr-∘ π₁ p (π₂ x) ⁻¹ · apd π₂ p) ≡ p
split-eq = π₂ (π₂ (π₂ split-merge-eq))
π₁-eq : ap π₁ p ≡ ap π₁ q
π₁-eq = A-set _ _ (ap π₁ p) (ap π₁ q)
π₂-eq : tr (λ z → tr B z (π₂ x) ≡ π₂ y) π₁-eq
(tr-∘ π₁ p (π₂ x) ⁻¹ · apd π₂ p)
≡ tr-∘ π₁ q (π₂ x) ⁻¹ · apd π₂ q
π₂-eq = B-set _ (tr B (ap π₁ q) (π₂ x)) (π₂ y)
(tr
(λ z → tr B z (π₂ x) ≡ π₂ y)
π₁-eq
(tr-∘ π₁ p (π₂ x) ⁻¹ · apd π₂ p))
(tr-∘ π₁ q (π₂ x) ⁻¹ · apd π₂ q)
|
# Collection of Simple Random Sample from Population of Websites
## Simple Random Sample
Below is a program that creates a simple random sample of websites according to Alexa Analytics' Top One Million sites CSV. The function `create_sample` creates a random integer between 0 and 999,999. That number acts as the index for the site we will get from the population CSV (e.g. The site at index 0 is google.com).
Once we have selected a site for our sample, we need to resolve its IP address (note: we will be using only IPv4 addresses, as Alexa only offers those in their dataset and all websites should handle both IPv4 and IPv6). If we are unable to resolve the address, then we will randomly select another site to replace it.
When sampling is completed, the sample dataset (domains and IPv4 addresses) is exported to a CSV file.
```python
from random import randint
import pandas as pd
import socket
# List of top one million sites according to Alexa Analytics/Website Ranking
# https://s3.amazonaws.com/alexa-static/top-1m.csv.zip
top_sites = pd.read_csv('top-1m.csv', header=None)[1]
# n is sample size
n=1000
# Dictionary used for stored sample data
sample = {
'Website Domain' : [],
'IPv4 Address' : []
}
def create_sample(n):
i = 0
while i < n:
i += 1
# Get random number between 0 and 999,999
random_index = randint(0, len(top_sites) - 1)
# If the site has not already been selected, add it to our data set
if not top_sites[random_index] in sample['Website Domain']:
try:
# print("\033[0mGetting IPv4 Address for %s..." % top_sites[random_index])
ipv4 = socket.gethostbyname(top_sites[random_index])
# If we can't resolve the IP from the host name, replace it with a different host name
except:
# print("\033[1mFailed. Selecting new site for sample.")
i -= 1
continue
sample['Website Domain'].append(top_sites[random_index])
sample['IPv4 Address'].append(ipv4)
create_sample(n)
# Save sample to a CSV file
dataset = pd.DataFrame.from_dict(sample)
dataset.to_csv('website_sample.csv')
dataset
```
# Use Dataset Instead of Creating New Sample
Essentially importing `website_sample.csv` for our dataset so we don't have to create a new sample.
```python
n = 1000
dataset = pd.DataFrame.from_csv('website_sample.csv')
dataset
```
/home/ari/.anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: FutureWarning: from_csv is deprecated. Please use read_csv(...) instead. Note that some of the default arguments are different, so please refer to the documentation for from_csv when changing your function calls
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Website Domain</th>
<th>IPv4 Address</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ipos.vn</td>
<td>94.237.76.49</td>
</tr>
<tr>
<th>1</th>
<td>sectorul4news.ro</td>
<td>89.42.219.210</td>
</tr>
<tr>
<th>2</th>
<td>newprint.ca</td>
<td>37.218.253.61</td>
</tr>
<tr>
<th>3</th>
<td>destinationhotels.com</td>
<td>23.100.83.213</td>
</tr>
<tr>
<th>4</th>
<td>flutetoday.com</td>
<td>97.74.55.1</td>
</tr>
<tr>
<th>5</th>
<td>serialcrush.com</td>
<td>62.149.142.158</td>
</tr>
<tr>
<th>6</th>
<td>mybmtc.com</td>
<td>202.71.129.225</td>
</tr>
<tr>
<th>7</th>
<td>hmi.edu</td>
<td>192.249.121.112</td>
</tr>
<tr>
<th>8</th>
<td>flashkit.com</td>
<td>70.42.23.121</td>
</tr>
<tr>
<th>9</th>
<td>fucktubes.xxx</td>
<td>104.28.23.71</td>
</tr>
<tr>
<th>10</th>
<td>thetodaypost.com</td>
<td>104.31.66.246</td>
</tr>
<tr>
<th>11</th>
<td>avpgalaxy.net</td>
<td>162.211.84.48</td>
</tr>
<tr>
<th>12</th>
<td>scwtenor.com</td>
<td>74.208.236.209</td>
</tr>
<tr>
<th>13</th>
<td>ecom.ly</td>
<td>104.18.47.2</td>
</tr>
<tr>
<th>14</th>
<td>redwolfwildernessadventures.com</td>
<td>67.59.136.110</td>
</tr>
<tr>
<th>15</th>
<td>wesounds.com</td>
<td>107.6.153.170</td>
</tr>
<tr>
<th>16</th>
<td>bloganten.ru</td>
<td>92.53.114.3</td>
</tr>
<tr>
<th>17</th>
<td>karinto.in</td>
<td>175.134.120.229</td>
</tr>
<tr>
<th>18</th>
<td>myscopeoutreach.org</td>
<td>182.160.163.245</td>
</tr>
<tr>
<th>19</th>
<td>landsurveyor.blogfa.com</td>
<td>149.56.201.253</td>
</tr>
<tr>
<th>20</th>
<td>finministry.com</td>
<td>104.18.41.100</td>
</tr>
<tr>
<th>21</th>
<td>odeontravel.rs</td>
<td>195.252.107.131</td>
</tr>
<tr>
<th>22</th>
<td>greeningtheblue.org</td>
<td>104.27.147.128</td>
</tr>
<tr>
<th>23</th>
<td>jerusalemperspective.com</td>
<td>104.199.115.212</td>
</tr>
<tr>
<th>24</th>
<td>bazi-oksana.ru</td>
<td>5.101.152.32</td>
</tr>
<tr>
<th>25</th>
<td>leupold.com</td>
<td>52.88.153.55</td>
</tr>
<tr>
<th>26</th>
<td>cloud9.gg</td>
<td>23.227.38.32</td>
</tr>
<tr>
<th>27</th>
<td>goldsgym.com</td>
<td>162.209.117.196</td>
</tr>
<tr>
<th>28</th>
<td>tagbox.in</td>
<td>52.172.54.225</td>
</tr>
<tr>
<th>29</th>
<td>sexmamki.org</td>
<td>151.80.209.25</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>970</th>
<td>jb.com.br</td>
<td>152.199.54.25</td>
</tr>
<tr>
<th>971</th>
<td>vietadsonline.com</td>
<td>171.244.34.197</td>
</tr>
<tr>
<th>972</th>
<td>itftkd.ir</td>
<td>185.128.81.85</td>
</tr>
<tr>
<th>973</th>
<td>rouxroamer.com</td>
<td>104.31.95.99</td>
</tr>
<tr>
<th>974</th>
<td>filejo.co.kr</td>
<td>43.255.255.83</td>
</tr>
<tr>
<th>975</th>
<td>peraichi.com</td>
<td>54.230.89.244</td>
</tr>
<tr>
<th>976</th>
<td>hw3d.net</td>
<td>192.99.14.211</td>
</tr>
<tr>
<th>977</th>
<td>nekoshop.ru</td>
<td>37.140.192.198</td>
</tr>
<tr>
<th>978</th>
<td>maxbestwork.com</td>
<td>66.199.189.51</td>
</tr>
<tr>
<th>979</th>
<td>hindihelpguru.com</td>
<td>199.250.213.223</td>
</tr>
<tr>
<th>980</th>
<td>exertion-fitness.com</td>
<td>23.227.38.32</td>
</tr>
<tr>
<th>981</th>
<td>aktualnacenabytu.sk</td>
<td>212.57.38.25</td>
</tr>
<tr>
<th>982</th>
<td>geoequipos.cl</td>
<td>192.140.57.10</td>
</tr>
<tr>
<th>983</th>
<td>removenotifications.com</td>
<td>192.64.119.93</td>
</tr>
<tr>
<th>984</th>
<td>matbit.net</td>
<td>5.61.47.250</td>
</tr>
<tr>
<th>985</th>
<td>homebasedonlinevehiclemarketing.com</td>
<td>146.66.96.176</td>
</tr>
<tr>
<th>986</th>
<td>xn--v8j5erc590uusnxox.com</td>
<td>183.90.253.8</td>
</tr>
<tr>
<th>987</th>
<td>nflsport.icu</td>
<td>198.54.121.189</td>
</tr>
<tr>
<th>988</th>
<td>izithakazelo.blog</td>
<td>192.0.78.191</td>
</tr>
<tr>
<th>989</th>
<td>irinabiz.ru</td>
<td>138.201.199.38</td>
</tr>
<tr>
<th>990</th>
<td>intercity.pl</td>
<td>46.174.180.162</td>
</tr>
<tr>
<th>991</th>
<td>bongacams3.com</td>
<td>31.192.123.62</td>
</tr>
<tr>
<th>992</th>
<td>twinstrangers.net</td>
<td>52.214.239.109</td>
</tr>
<tr>
<th>993</th>
<td>textgeneratorfont.com</td>
<td>162.241.133.121</td>
</tr>
<tr>
<th>994</th>
<td>silversaints.com</td>
<td>212.188.174.246</td>
</tr>
<tr>
<th>995</th>
<td>evassmat.com</td>
<td>104.28.24.228</td>
</tr>
<tr>
<th>996</th>
<td>mpets.mobi</td>
<td>136.243.25.36</td>
</tr>
<tr>
<th>997</th>
<td>londongateway.com</td>
<td>65.52.130.1</td>
</tr>
<tr>
<th>998</th>
<td>derangler.shop</td>
<td>85.236.56.247</td>
</tr>
<tr>
<th>999</th>
<td>tavirekini.lv</td>
<td>94.100.11.185</td>
</tr>
</tbody>
</table>
<p>1000 rows × 2 columns</p>
</div>
# Determining Proportion of Websites Running AWS
## Proportion of IPv4 addresses owned by AWS
The program takes the list of IPv4 address ranges owned by AWS and compares them to the list of addresses in our sample. AWS does not publish a flat list of individual IPv4 addresses; instead it publishes subnets (CIDR blocks) of the addresses it owns, which means the addresses were purchased in bulk and are grouped together. In order to properly compare an IPv4 address to a subnet, Python offers a library called `ipaddress` that parses subnets and IP addresses into objects that can easily be compared.
If an address appears in Amazon's IPv4 range (their owned addresses), then the domain associated with the IP address is appended to a list. The list of websites is then exported as a CSV file.
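A minimal sketch of the membership test this relies on (the prefix below is only an illustrative example, not necessarily a real AWS range):

```python
import ipaddress

# A CIDR block such as 52.95.0.0/16 groups 65,536 addresses under one prefix.
subnet = ipaddress.ip_network('52.95.0.0/16')   # hypothetical AWS-style prefix
inside = ipaddress.ip_address('52.95.1.20')
outside = ipaddress.ip_address('8.8.8.8')

print(inside in subnet)    # True  - falls within the /16 block
print(outside in subnet)   # False - outside the block
```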
```python
import json, requests, ipaddress
# List of IP Ranges (IPv4 and IPv6) owned by Amazon and used for AWS
# https://ip-ranges.amazonaws.com/ip-ranges.json
aws_ip_ranges = json.loads(requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json').text)
# Determine if the given IP address (ip_input) shows up in the AWS IPv4 range
def check_aws(ip_input):
# Compare given IP to all AWS IP addresses within AWS IPv4 Subnetwork
for i in range(len(aws_ip_ranges['prefixes'])):
# Parse IPv4 address for comparison
site_ip = ipaddress.ip_address(ip_input)
# Parse AWS IPv4 Subnet
aws_subnet = ipaddress.ip_network(aws_ip_ranges['prefixes'][i]['ip_prefix'])
# If IP is within the AWS IPv4 Range, the website is run on AWS
if site_ip in aws_subnet:
return True
# If the website is not within the range, the
    # website operates independently of AWS
return False
# List of websites using AWS
websites_using_aws = []
def get_aws_domains():
# Check every IP within our sample against AWS IPv4 Range
for i in range(len(dataset)):
if check_aws(dataset['IPv4 Address'][i]):
websites_using_aws.append(dataset['Website Domain'][i])
get_aws_domains()
# Save dataset of AWS websites to a CSV file
aws_df = pd.DataFrame({'AWS Websites':websites_using_aws})
aws_df.to_csv('websites_using_aws.csv')
aws_df
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>AWS Websites</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>leupold.com</td>
</tr>
<tr>
<th>1</th>
<td>wanasatime.com</td>
</tr>
<tr>
<th>2</th>
<td>simplesdental.com</td>
</tr>
<tr>
<th>3</th>
<td>monetixwallet.com</td>
</tr>
<tr>
<th>4</th>
<td>10tv.in</td>
</tr>
<tr>
<th>5</th>
<td>rosedalecenter.com</td>
</tr>
<tr>
<th>6</th>
<td>margstacobistro.com</td>
</tr>
<tr>
<th>7</th>
<td>shanghainavi.com</td>
</tr>
<tr>
<th>8</th>
<td>keaweather.net</td>
</tr>
<tr>
<th>9</th>
<td>lion.co.nz</td>
</tr>
<tr>
<th>10</th>
<td>moomii.jp</td>
</tr>
<tr>
<th>11</th>
<td>figleafapp.com</td>
</tr>
<tr>
<th>12</th>
<td>maharajamultiplex.in</td>
</tr>
<tr>
<th>13</th>
<td>conchovalleyhomepage.com</td>
</tr>
<tr>
<th>14</th>
<td>honkmedia.net</td>
</tr>
<tr>
<th>15</th>
<td>willowtreeapps.com</td>
</tr>
<tr>
<th>16</th>
<td>playvod.ma</td>
</tr>
<tr>
<th>17</th>
<td>tigosports.gt</td>
</tr>
<tr>
<th>18</th>
<td>araelium.com</td>
</tr>
<tr>
<th>19</th>
<td>boostnote.io</td>
</tr>
<tr>
<th>20</th>
<td>echofoodshelf.org</td>
</tr>
<tr>
<th>21</th>
<td>obonsai.com.br</td>
</tr>
<tr>
<th>22</th>
<td>atcost.in</td>
</tr>
<tr>
<th>23</th>
<td>profsnhcadmission.in</td>
</tr>
<tr>
<th>24</th>
<td>manalonline.com</td>
</tr>
<tr>
<th>25</th>
<td>nj211.org</td>
</tr>
<tr>
<th>26</th>
<td>conta.no</td>
</tr>
<tr>
<th>27</th>
<td>cldmail.co.uk</td>
</tr>
<tr>
<th>28</th>
<td>obo.se</td>
</tr>
<tr>
<th>29</th>
<td>soft32.es</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
</tr>
<tr>
<th>31</th>
<td>juegosmesa.cl</td>
</tr>
<tr>
<th>32</th>
<td>localone.app</td>
</tr>
<tr>
<th>33</th>
<td>grjapan.jp</td>
</tr>
<tr>
<th>34</th>
<td>peachysnaps.com</td>
</tr>
<tr>
<th>35</th>
<td>knottyladyyarns.com</td>
</tr>
<tr>
<th>36</th>
<td>amormaturo.com</td>
</tr>
<tr>
<th>37</th>
<td>hltmag.co.uk</td>
</tr>
<tr>
<th>38</th>
<td>oldtownportraitgallery.com</td>
</tr>
<tr>
<th>39</th>
<td>matterport.com</td>
</tr>
<tr>
<th>40</th>
<td>parkopedia.co.uk</td>
</tr>
<tr>
<th>41</th>
<td>rehabs.com</td>
</tr>
<tr>
<th>42</th>
<td>diablomedia.com</td>
</tr>
<tr>
<th>43</th>
<td>battersea.org.uk</td>
</tr>
<tr>
<th>44</th>
<td>qmo.org.au</td>
</tr>
<tr>
<th>45</th>
<td>midlandsb.com</td>
</tr>
<tr>
<th>46</th>
<td>kuflink.co.uk</td>
</tr>
<tr>
<th>47</th>
<td>lunns.com</td>
</tr>
<tr>
<th>48</th>
<td>send2sell.com</td>
</tr>
<tr>
<th>49</th>
<td>wvi.org</td>
</tr>
<tr>
<th>50</th>
<td>letsignite.in</td>
</tr>
<tr>
<th>51</th>
<td>kolkt.com</td>
</tr>
<tr>
<th>52</th>
<td>lucasmaier.com.br</td>
</tr>
<tr>
<th>53</th>
<td>ellevatenetwork.com</td>
</tr>
<tr>
<th>54</th>
<td>completefrance.com</td>
</tr>
<tr>
<th>55</th>
<td>bellevie-group.com</td>
</tr>
<tr>
<th>56</th>
<td>via.id</td>
</tr>
<tr>
<th>57</th>
<td>tweentribune.com</td>
</tr>
<tr>
<th>58</th>
<td>filesun.net</td>
</tr>
<tr>
<th>59</th>
<td>peraichi.com</td>
</tr>
<tr>
<th>60</th>
<td>twinstrangers.net</td>
</tr>
</tbody>
</table>
<p>61 rows × 1 columns</p>
</div>
# 1-Proportion Z-Test for Proportion of AWS to non-AWS Websites
## Testing in Python
This is a procedure for completing a one-proportion z-test given the sample dataset and the proportion of websites using AWS. First we declare `p` as the claimed market-share percentage (the Forbes article claimed AWS has a 31% market share). We calculate `q` and proceed with our success/failure condition.
The `assert` lines test the success/failure condition: if np or nq is < 10, the program stops and an exception (error) is raised. Because the index values used when sampling were random, we can treat our sample as random.
Next we calculate our z-score value with a one-proportion z-test:
$$
\begin{align}
z = \frac{\hat{p} - p}{\sigma} && \sigma = \sqrt{\frac{pq}{n}}
\end{align}
$$
The code for calculating the z-score is `z = ((len(websites_using_aws)/n) - p)/sd`, and the standard deviation is computed with `sd = math.sqrt((p*q)/n)`.
Once we calculate our z-score, we can finally get our p-value. The SciPy library provides statistics functions that let us calculate the p-value much as a calculator would. With the p-value in hand, we can do hypothesis testing using a significance level of 5%.
```python
import math
import scipy.stats as st
# Creating initial values from datatset/claim
claimed_marketshare = 0.31
p = claimed_marketshare
q = 1-claimed_marketshare
# Success/Failure Condition, exception raised if np or nq is less than 10
assert n*p >= 10, True
assert n*q >= 10, True
# Calculate Z-Score & P-Value
sd = math.sqrt((p*q)/n)
z = ((len(websites_using_aws)/n) - p)/sd
p_value = st.norm.cdf(z)
print('P: %f\tQ: %f\nNP: %f\tNQ: %f\n\nP-Hat: %f\n\nZ-Score: %f\nP-Value: %f\n'
% (p, q, n*p, n*q, (len(websites_using_aws)/n), z, p_value))
# Hypothesis Testing
significance_level = 0.05
if p_value <= significance_level: print('\033[1mReject H0')
else: print('\033[1mFail to reject H0')
```
P: 0.310000 Q: 0.690000
NP: 310.000000 NQ: 690.000000
P-Hat: 0.061000
Z-Score: -17.025268
P-Value: 0.000000
Reject H0
## Interpretation
We reject the null hypothesis because our p-value is approximately zero. According to this observational study, the claim made in the Forbes article is not supported: the data favor the alternative hypothesis that Amazon holds less than a 31% market share in cloud computing/hosting.
# Confidence Interval for 1-Proportion Sample
## Confidence Interval Based on Z-Test
This is a standard method to produce a confidence interval given the z-score, sample size, claimed population proportions and sample proportions produced by the one-proportion z-test.
We use the following equations to calculate the standard deviation and standard error for our confidence interval:
$$
\begin{align}
\sigma_p = \sqrt{\frac{pq}{n}} && SE_p = \sqrt{\frac{\hat{p}\hat{q}}{n}}
\end{align}
$$
Both $\hat{p}$ and $\hat{q}$ are simply $p$ and $q$ for the sample proportion. Using the z-score we calculated during our hypothesis test as our critical value, we can multiply it by our standard error to produce our margin of error, like so:
$$
ME = Z_c\sqrt{\frac{\hat{p}\hat{q}}{n}}
$$
```python
# Sample p (statistic) and q values
aws_p = len(websites_using_aws)/n
aws_q = 1 - aws_p
# Standard error for the sample proportion
se = math.sqrt((aws_p*aws_q)/n)
# Margin of error
me = z*se
print('Interval: (%f, %f)' % (aws_p + me, aws_p - me))
```
Interval: (-0.067852, 0.189852)
## Interpretation
I am 95% confident that the true proportion of AWS-hosted websites within the top one-million-sites population is between -6.78% and 19%. By 95% confident I mean that if the above procedure were repeated many times with samples of size 1,000, about 95% of the resulting intervals would capture the true proportion of websites that use AWS as their cloud provider. Because zero lies within our interval, the results of this study can be considered insignificant.
|
2
|
structure X :=
( a : ℕ ) ( b : ℕ )
@[reducible] def f ( x : X ) : X := ⟨ x^.b + 1, x^.a ⟩
lemma t (x : ℕ × ℕ) : (x^.fst, x^.snd) = x :=
begin
cases x,
dsimp,
trivial
end |
lemma metric_bound_lemma: "cmod (x - y) \<le> \<bar>Re x - Re y\<bar> + \<bar>Im x - Im y\<bar>" |
-- Issue #2814 reported by tomjack on 2017-10-18
{-# OPTIONS --cubical --rewriting #-}
-- prelude stolen from Saizan/cubical-demo
open import Agda.Primitive.Cubical public
postulate
Path' : ∀ {ℓ} {A : Set ℓ} → A → A → Set ℓ
PathP : ∀ {ℓ} (A : I → Set ℓ) → A i0 → A i1 → Set ℓ
{-# BUILTIN PATHP PathP #-}
infix 4 _≡_
_≡_ : ∀ {ℓ} {A : Set ℓ} → A → A → Set ℓ
_≡_ {A = A} = PathP (λ _ → A)
Path = _≡_
refl : ∀ {ℓ} {A : Set ℓ} {x : A} → x ≡ x
refl {x = x} = λ _ → x
postulate
Rewrite : ∀ {ℓ} {A : Set ℓ} → A → A → Set
{-# BUILTIN REWRITE Rewrite #-}
module Good where
postulate
Unit : Set
unit : Unit
module UnitElim (P : Unit → Set) (unit* : P unit) where
postulate
Unit-elim : (x : Unit) → P x
unit-β : Rewrite (Unit-elim unit) unit*
open UnitElim public
{-# REWRITE unit-β #-}
test₁ : {C : Set} {c₀ : C} → Path {A = C} (Unit-elim (λ _ → C) c₀ unit) c₀
test₁ = refl
test₂ : {C : Set} {c₀ : C} → Path {A = Path c₀ c₀} (λ j → Unit-elim (λ _ → C) c₀ unit) refl
test₂ = refl
-- same code, but with a dummy module parameter
module Bad (Dummy : Set) where
postulate
Unit : Set
unit : Unit
module UnitElim (P : Unit → Set) (unit* : P unit) where
postulate
Unit-elim : (x : Unit) → P x
unit-β : Rewrite (Unit-elim unit) unit*
open UnitElim public
{-# REWRITE unit-β #-}
test₁ : {C : Set} {c₀ : C} → Path {A = C} (Unit-elim (λ _ → C) c₀ unit) c₀
test₁ = refl
test₂ : {C : Set} {c₀ : C} → Path {A = Path c₀ c₀} (λ j → Unit-elim (λ _ → C) c₀ unit) refl
test₂ = refl
-- WAS:
-- Unit-elim (λ _ → .C) .c₀ unit != .c₀ of type .C
-- when checking that the expression refl has type
-- Path (λ j → Unit-elim (λ _ → .C) .c₀ unit) refl
-- SHOULD: succeed
|
import GMLInit.Data.Index.Basic
import GMLInit.Data.Index.ReverseAux
open Sum (inl inr)
namespace Index
variable {α} {xs ys : List α}
@[implemented_by Index.appendTR] -- TODO: use csimp
def append : {xs ys : List α} → Sum (Index xs) (Index ys) → Index (List.append xs ys)
| [], _, inr i => i
| _::_, _, inr i => tail (append (inr i))
| _::_, _, inl head => head
| _::_, _, inl (tail i) => tail (append (inl i))
abbrev append_inl (i : Index xs) : Index (xs ++ ys) := append (inl i)
abbrev append_inr (j : Index ys) : Index (xs ++ ys) := append (inr j)
def unappend : {xs ys : List α} → Index (xs ++ ys) → Sum (Index xs) (Index ys)
| [], _, i => inr i
| x::xs, ys, i =>
match (i : Index (x :: (xs ++ ys))) with
| head => inl head
| tail i =>
match unappend i with
| inl i => inl (tail i)
| inr i => inr i
theorem unappend_append (i : Sum (Index xs) (Index ys)) : unappend (append i) = i := by
induction xs generalizing ys with
| nil =>
match i with
| .inl i => contradiction
| .inr j => rfl
| cons x xs ih =>
match i with
| .inl .head => rfl
| .inl (.tail i) => rw [append, unappend]; clean; rw [ih]
| .inr j => rw [append, unappend]; clean; rw [ih]
theorem append_unappend (k : Index (xs ++ ys)) : append (unappend k) = k := by
induction xs generalizing ys with
| nil => rfl
| cons x xs ih =>
match k with
| .head => rfl
| .tail k =>
simp only [unappend]
split
next h => rw [append, ←h, ih]
next h => rw [append, ←h, ih]
theorem append_eq_iff_eq_unappend (i : Sum (Index xs) (Index ys)) (j : Index (xs ++ ys)) : append i = j ↔ i = unappend j := by
constr
· intro h; rw [←h, unappend_append]
· intro h; rw [h, append_unappend]
theorem unappend_eq_iff_eq_append (i : Index (xs ++ ys)) (j : Sum (Index xs) (Index ys)) : unappend i = j ↔ i = append j := by
constr
· intro h; rw [←h, append_unappend]
· intro h; rw [h, unappend_append]
def appendEquiv (xs ys : List α) : Equiv (Sum (Index xs) (Index ys)) (Index (xs ++ ys)) where
fwd := append
rev := unappend
spec := by
intros
constr
· intro | rfl => exact unappend_append ..
· intro | rfl => exact append_unappend ..
theorem val_append_inl (i : Index xs) : (append_inl (ys:=ys) i).val = i.val := by
induction i with
| head => rfl
| tail _ ih => exact ih
theorem val_append_inr (j : Index ys) : (append_inr (xs:=xs) j).val = j.val := by
induction xs with
| nil => rfl
| cons _ _ ih => exact ih
theorem val_append (i : Sum (Index xs) (Index ys)) : (append i).val = match i with | inl i => i.val | inr j => j.val := by
match i with
| inl _ => exact val_append_inl ..
| inr _ => exact val_append_inr ..
theorem val_unappend (k : Index (xs ++ ys)) : (match unappend k with | inl i => i.val | inr j => j.val) = k.val := by
rw [←append_unappend k, val_append, unappend_append]
end Index
|
State Before: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h : IsMatching M
⊢ Even (Finset.card (Set.toFinset M.verts)) State After: no goals Tactic: classical
rw [isMatching_iff_forall_degree] at h
use M.coe.edgeFinset.card
rw [← two_mul, ← M.coe.sum_degrees_eq_twice_card_edges]
convert_to _ = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v using 3
simp [h, Finset.card_univ] State Before: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h : IsMatching M
⊢ Even (Finset.card (Set.toFinset M.verts)) State After: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Even (Finset.card (Set.toFinset M.verts)) Tactic: rw [isMatching_iff_forall_degree] at h State Before: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Even (Finset.card (Set.toFinset M.verts)) State After: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) =
Finset.card (edgeFinset (Subgraph.coe M)) + Finset.card (edgeFinset (Subgraph.coe M)) Tactic: use M.coe.edgeFinset.card State Before: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) =
Finset.card (edgeFinset (Subgraph.coe M)) + Finset.card (edgeFinset (Subgraph.coe M)) State After: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v Tactic: rw [← two_mul, ← M.coe.sum_degrees_eq_twice_card_edges] State Before: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v State After: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v Tactic: convert_to _ = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v using 3 State Before: V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v State After: no goals Tactic: simp [h, Finset.card_univ] |
From Coqprime Require Import PocklingtonRefl.
Local Open Scope positive_scope.
Lemma primo73:
prime 11881666721436151495252921->
prime 87882427100100997730250576487417.
Proof.
intro H.
apply (Pocklington_refl
(Ell_certif
87882427100100997730250576487417
7396473
((11881666721436151495252921,1)::nil)
0
4823609
98
2401)
((Proof_certif _ H) :: nil)).
native_cast_no_check (refl_equal true).
Time Qed.
|
section \<open>Connecting Nondeterministic Büchi Automata to CAVA Automata Structures\<close>
theory NBA_Graphs
imports
NBA
CAVA_Automata.Automata_Impl
begin
no_notation build (infixr "##" 65)
subsection \<open>Regular Graphs\<close>
definition nba_g :: "('label, 'state) nba \<Rightarrow> 'state graph_rec" where
"nba_g A \<equiv> \<lparr> g_V = UNIV, g_E = E_of_succ (successors A), g_V0 = initial A \<rparr>"
lemma nba_g_graph[simp]: "graph (nba_g A)" unfolding nba_g_def graph_def by simp
lemma nba_g_V0: "g_V0 (nba_g A) = initial A" unfolding nba_g_def by simp
lemma nba_g_E_rtrancl: "(g_E (nba_g A))\<^sup>* = {(p, q). q \<in> reachable A p}"
unfolding nba_g_def graph_rec.simps E_of_succ_def
proof safe
show "(p, q) \<in> {(p, q). q \<in> successors A p}\<^sup>*" if "q \<in> reachable A p" for p q
using that by (induct) (auto intro: rtrancl_into_rtrancl)
show "q \<in> reachable A p" if "(p, q) \<in> {(p, q). q \<in> successors A p}\<^sup>*" for p q
using that by induct auto
qed
lemma nba_g_rtrancl_path: "(g_E (nba_g A))\<^sup>* = {(p, target r p) |r p. NBA.path A r p}"
unfolding nba_g_E_rtrancl by blast
lemma nba_g_trancl_path: "(g_E (nba_g A))\<^sup>+ = {(p, target r p) |r p. NBA.path A r p \<and> r \<noteq> []}"
unfolding nba_g_def graph_rec.simps E_of_succ_def
proof safe
show "\<exists> r p. (x, y) = (p, target r p) \<and> NBA.path A r p \<and> r \<noteq> []"
if "(x, y) \<in> {(p, q). q \<in> successors A p}\<^sup>+" for x y
using that
proof induct
case (base y)
obtain a where 1: "a \<in> alphabet A" "y \<in> transition A a x" using base by auto
show ?case
proof (intro exI conjI)
show "(x, y) = (x, target [(a, y)] x)" by simp
show "NBA.path A [(a, y)] x" using 1 by auto
show "[(a, y)] \<noteq> []" by simp
qed
next
case (step y z)
obtain r where 1: "y = target r x" "NBA.path A r x" "r \<noteq> []" using step(3) by auto
obtain a where 2: "a \<in> alphabet A" "z \<in> transition A a y" using step(2) by auto
show ?case
proof (intro exI conjI)
show "(x, z) = (x, target (r @ [(a, z)]) x)" by simp
show "NBA.path A (r @ [(a, z)]) x" using 1 2 by auto
show "r @ [(a, z)] \<noteq> []" by simp
qed
qed
show "(p, target r p) \<in> {(u, v). v \<in> successors A u}\<^sup>+" if "NBA.path A r p" "r \<noteq> []" for r p
using that by (induct) (fastforce intro: trancl_into_trancl2)+
qed
lemma nba_g_ipath_run:
assumes "ipath (g_E (nba_g A)) r"
obtains w
where "run A (w ||| smap (r \<circ> Suc) nats) (r 0)"
proof -
have 1: "\<exists> a \<in> alphabet A. r (Suc i) \<in> transition A a (r i)" for i
using assms unfolding ipath_def nba_g_def E_of_succ_def by auto
obtain wr where 2: "run A wr (r 0)" "\<And> i. target (stake i wr) (r 0) = r i"
proof (rule nba.invariant_run_index)
show "\<exists> aq. (fst aq \<in> alphabet A \<and> snd aq \<in> transition A (fst aq) p) \<and> snd aq = r (Suc i) \<and> True"
if "p = r i" for i p using that 1 by auto
show "r 0 = r 0" by rule
qed auto
have 3: "smap (r \<circ> Suc) nats = smap snd wr"
proof (rule eqI_snth)
fix i
have "smap (r \<circ> Suc) nats !! i = r (Suc i)" by simp
also have "\<dots> = target (stake (Suc i) wr) (r 0)" unfolding 2(2) by rule
also have "\<dots> = (r 0 ## trace wr (r 0)) !! Suc i" by simp
also have "\<dots> = smap snd wr !! i" unfolding nba.trace_alt_def by simp
finally show "smap (r \<circ> Suc) nats !! i = smap snd wr !! i" by this
qed
show ?thesis
proof
show "run A (smap fst wr ||| smap (r \<circ> Suc) nats) (r 0)" using 2(1) unfolding 3 by auto
qed
qed
lemma nba_g_run_ipath:
assumes "run A (w ||| r) p"
shows "ipath (g_E (nba_g A)) (snth (p ## r))"
proof
fix i
have 1: "w !! i \<in> alphabet A" "r !! i \<in> transition A (w !! i) (target (stake i (w ||| r)) p)"
using assms by (auto dest: nba.run_snth)
have 2: "r !! i \<in> successors A ((p ## r) !! i)"
using 1 unfolding sscan_scons_snth[symmetric] nba.trace_alt_def by auto
show "((p ## r) !! i, (p ## r) !! Suc i) \<in> g_E (nba_g A)"
using 2 unfolding nba_g_def graph_rec.simps E_of_succ_def by simp
qed
subsection \<open>Indexed Generalized Büchi Graphs\<close>
definition nba_igbg :: "('label, 'state) nba \<Rightarrow> 'state igb_graph_rec" where
"nba_igbg A \<equiv> graph_rec.extend (nba_g A)
\<lparr> igbg_num_acc = 1, igbg_acc = \<lambda> p. if accepting A p then {0} else {} \<rparr>"
lemma acc_run_language:
assumes "igb_graph (nba_igbg A)"
shows "Ex (igb_graph.is_acc_run (nba_igbg A)) \<longleftrightarrow> language A \<noteq> {}"
proof
interpret igb_graph "nba_igbg A" using assms by this
have [simp]: "V0 = g_V0 (nba_g A)" "E = g_E (nba_g A)"
"num_acc = 1" "0 \<in> acc p \<longleftrightarrow> accepting A p" for p
unfolding nba_igbg_def graph_rec.defs by simp+
show "language A \<noteq> {}" if run: "Ex is_acc_run"
proof -
obtain r where 1: "is_acc_run r" using run by rule
have 2: "r 0 \<in> V0" "ipath E r" "is_acc r"
using 1 unfolding is_acc_run_def graph_defs.is_run_def by auto
obtain w where 3: "run A (w ||| smap (r \<circ> Suc) nats) (r 0)" using nba_g_ipath_run 2(2) by auto
have 4: "r 0 ## smap (r \<circ> Suc) nats = smap r nats" by (simp) (metis stream.map_comp smap_siterate)
have 5: "infs (accepting A) (r 0 ## smap (r \<circ> Suc) nats)"
using 2(3) unfolding infs_infm is_acc_def 4 by simp
have "w \<in> language A"
proof
show "r 0 \<in> initial A" using nba_g_V0 2(1) by force
show "run A (w ||| smap (r \<circ> Suc) nats) (r 0)" using 3 by this
show "infs (accepting A) (r 0 ## smap (r \<circ> Suc) nats)" using 5 by simp
qed
then show ?thesis by auto
qed
show "Ex is_acc_run" if language: "language A \<noteq> {}"
proof -
obtain w where 1: "w \<in> language A" using language by auto
obtain r p where 2: "p \<in> initial A" "run A (w ||| r) p" "infs (accepting A) (p ## r)" using 1 by rule
have 3: "infs (accepting A) (p ## r)" using 2(3) by simp
have "is_acc_run (snth (p ## r))"
unfolding is_acc_run_def graph_defs.is_run_def
proof safe
show "(p ## r) !! 0 \<in> V0" using nba_g_V0 2(1) by force
show "ipath E (snth (p ## r))" using nba_g_run_ipath 2(2) by force
show "is_acc (snth (p ## r))" using 3 unfolding infs_infm is_acc_def by simp
qed
then show ?thesis by auto
qed
qed
end |
using BenchmarkTools
BenchmarkTools.DEFAULT_PARAMETERS.samples = 100
compute(n::Int)::Int = Int(ceil((log(10) * (n - 1) + 0.5log(5)) / log(0.5(1 + √5))))
compute(3)
compute(1_000)
@benchmark compute(1_000) |
function [ cluster_center, seed ] = cluster_initialize_5 ( dim_num, ...
point_num, cluster_num, point, seed )
%*****************************************************************************80
%
%% CLUSTER_INITIALIZE_5 initializes the cluster centers to random values.
%
% Discussion:
%
% In this case, each cluster center is a random convex combination
% of the data points.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 04 October 2009
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer DIM_NUM, the number of spatial dimensions.
%
% Input, integer POINT_NUM, the number of points.
%
% Input, integer CLUSTER_NUM, the number of clusters.
%
% Input, real POINT(DIM_NUM,POINT_NUM), the coordinates
% of the points.
%
% Input, integer SEED, a seed for the random
% number generator.
%
% Output, real CLUSTER_CENTER(DIM_NUM,CLUSTER_NUM),
% the coordinates of the cluster centers.
%
% Output, integer SEED, a seed for the random
% number generator.
%
%
% Get a PxC block of random factors.
%
[ factor, seed ] = r8mat_uniform_01 ( point_num, cluster_num, seed );
%
% Make each column of factors have unit sum.
%
for j = 1 : cluster_num
column_sum = sum ( factor(1:point_num,j) );
factor(1:point_num,j) = factor(1:point_num,j) / column_sum;
end
%
% Set centers = points * factors.
%
cluster_center(1:dim_num,1:cluster_num) = ...
point(1:dim_num,1:point_num) * factor(1:point_num,1:cluster_num);
return
end
|
[STATEMENT]
lemma Inf_closed_res_nuc:
fixes X :: "'a::quantale set"
assumes "Inf_closed_set X"
and "\<forall>x. \<forall>y \<in> X. x \<rightarrow> y \<in> X"
and "\<forall>x. \<forall>y \<in> X. y \<leftarrow> x \<in> X"
shows "nucleus (\<lambda>y. \<Sqinter>{x \<in> X. y \<le> x})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nucleus (\<lambda>y. \<Sqinter> {x \<in> X. y \<le> x})
[PROOF STEP]
unfolding nucleus_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. clop (\<lambda>y. \<Sqinter> {x \<in> X. y \<le> x}) \<and> (\<forall>x y. \<Sqinter> {xa \<in> X. x \<le> xa} \<cdot> \<Sqinter> {x \<in> X. y \<le> x} \<le> \<Sqinter> {xa \<in> X. x \<cdot> y \<le> xa})
[PROOF STEP]
by (simp add: Inf_closed_clop assms lax_aux) |
Find top food influencers in Bangkok. Identify the most popular Instagram accounts on Heepsy.
Want to discover the 1,077 food influencers we've identified in Bangkok in 2019? |
State Before: m k : Nat
h : k ≤ m
n : Nat
⊢ n + m - k = n + (m - k) State After: case intro
m k : Nat
h : k ≤ m
n w✝ : Nat
h✝ : k + w✝ = m
⊢ n + m - k = n + (m - k) Tactic: cases Nat.le.dest h State Before: case intro
m k : Nat
h : k ≤ m
n w✝ : Nat
h✝ : k + w✝ = m
⊢ n + m - k = n + (m - k) State After: case intro
m k : Nat
h : k ≤ m
n l : Nat
hl : k + l = m
⊢ n + m - k = n + (m - k) Tactic: rename_i l hl State Before: case intro
m k : Nat
h : k ≤ m
n l : Nat
hl : k + l = m
⊢ n + m - k = n + (m - k) State After: no goals Tactic: rw [← hl, Nat.add_sub_cancel_left, Nat.add_comm k, ← Nat.add_assoc, Nat.add_sub_cancel] |
lemma to_fract_eq_iff [simp]: "to_fract x = to_fract y \<longleftrightarrow> x = y" |
```python
from mayavi import mlab
mlab.init_notebook()
mlab.test_plot3d()
```
Notebook initialized with ipy backend.
(Output: a 3D surface plot rendered as an image widget; binary image data omitted.)
<!-- mech systems: horizontal, vertical/hanging -->
<!-- box with mu*M*g*v/|v| friction force, treat nonlinearity with geometric mean -->
<!-- pendulum -->
<!-- elastic pendulum -->
<!-- bouncing ball (just move text from exercise) -->
<!-- bumpy road -->
<!-- moored ship -->
<!-- electrical circuits, see ode2.p.tex -->
<!-- 0D blood flow? -->
<!-- waves: 1D blood flow -->
<!-- general particle laws and velocity verlet, make exercises -->
<!-- see <http://en.wikipedia.org/wiki/Velocity_Verlet> -->
# Applications of vibration models
<div id="vib:app"></div>
The following text derives some of the most well-known physical
problems that lead to second-order ODE models of the type addressed in
this ${DOCUMENT}. We consider a simple spring-mass system; thereafter
extended with nonlinear spring, damping, and external excitation; a
spring-mass system with sliding friction; a simple and a physical
(classical) pendulum; and an elastic pendulum.
## Oscillating mass attached to a spring
<div id="vib:app:mass_spring"></div>
<!-- dom:FIGURE: [fig-vib/oscillator_spring.png, width=500 frac=0.7] Simple oscillating mass. <div id="vib:app:mass_spring:fig"></div> -->
<!-- begin figure -->
<div id="vib:app:mass_spring:fig"></div>
<p>Simple oscillating mass.</p>
<!-- end figure -->
The most fundamental mechanical vibration system is depicted in [Figure](#vib:app:mass_spring:fig). A body with mass $m$ is attached to a
spring and can move horizontally without friction (in the wheels). The
position of the body is given by the vector $\rpos(t) = u(t)\ii$, where
$\ii$ is a unit vector in $x$ direction.
There is
only one force acting on the body: a spring force $\F_s =-ku\ii$, where
$k$ is a constant. The point $x=0$, where $u=0$, must therefore
correspond to the body's position
where the spring is neither extended nor compressed, so the force
vanishes.
The basic physical principle that governs the motion of the body is
Newton's second law of motion: $\F=m\acc$, where
$\F$ is the sum of forces on the body, $m$ is its mass, and $\acc=\ddot\rpos$
is the acceleration. We use the dot for differentiation with respect
to time, which is
usual in mechanics. Newton's second law simplifies here
to $-\F_s=m\ddot u\ii$, which translates to
$$
-ku = m\ddot u\thinspace .
$$
Two initial conditions are needed: $u(0)=I$, $\dot u(0)=V$.
The ODE problem is normally written as
<!-- Equation labels as ordinary links -->
<div id="vib:app:mass_spring:eqx"></div>
$$
\begin{equation}
m\ddot u + ku = 0,\quad u(0)=I,\ \dot u(0)=V\thinspace .
\label{vib:app:mass_spring:eqx} \tag{1}
\end{equation}
$$
It is
not uncommon to divide by $m$
and introduce the frequency $\omega = \sqrt{k/m}$:
<!-- Equation labels as ordinary links -->
<div id="vib:app:mass_spring:equ"></div>
$$
\begin{equation}
\ddot u + \omega^2 u = 0,\quad u(0)=I,\ \dot u(0)=V\thinspace .
\label{vib:app:mass_spring:equ} \tag{2}
\end{equation}
$$
This is the model problem in the first part of this chapter, with the
small difference that we write the time derivative of $u$ with a dot
above, while we used $u^{\prime}$ and $u^{\prime\prime}$ in previous
parts of the document.
Since only one scalar mathematical quantity, $u(t)$, describes the
complete motion, we say that the mechanical system has one degree of freedom
(DOF).
### Scaling
For numerical simulations it is very convenient to scale
([2](#vib:app:mass_spring:equ)) and thereby get rid of the problem of
finding relevant values for all the parameters $m$, $k$, $I$, and $V$.
Since the amplitude of the oscillations are dictated by $I$ and $V$
(or more precisely, $V/\omega$), we scale $u$ by $I$ (or $V/\omega$ if
$I=0$):
$$
\bar u = \frac{u}{I},\quad \bar t = \frac{t}{t_c}\thinspace .
$$
The time scale $t_c$ is normally chosen as the inverse period $2\pi/\omega$ or
angular frequency $1/\omega$, most often as $t_c=1/\omega$.
Inserting the dimensionless quantities $\bar u$ and $\bar t$ in
([2](#vib:app:mass_spring:equ)) results in the scaled problem
$$
\frac{d^2\bar u}{d\bar t^2} + \bar u = 0,\quad \bar u(0)=1,\ \frac{d\bar u}{d\bar t}(0)=\beta = \frac{V}{I\omega},
$$
where $\beta$ is a dimensionless number. Any motion that starts from rest
($V=0$) is free of parameters in the scaled model!
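As a small illustration of this point, the scaled problem can be integrated with a standard central difference scheme. The code below is an added sketch (not part of the original text); the time step and the number of periods are arbitrary choices made only for the example.

```python
import numpy as np

def solve_scaled(beta=0.0, dt=0.05, num_periods=5):
    """Central differences for d^2u/dt^2 + u = 0, u(0)=1, u'(0)=beta."""
    T = num_periods*2*np.pi                   # the scaled period is 2*pi
    Nt = int(round(T/dt))
    t = np.linspace(0, Nt*dt, Nt+1)
    u = np.zeros(Nt+1)
    u[0] = 1                                  # scaled initial displacement
    u[1] = u[0] + dt*beta - 0.5*dt**2*u[0]    # special formula for the first step
    for n in range(1, Nt):
        u[n+1] = 2*u[n] - u[n-1] - dt**2*u[n]
    return u, t

u, t = solve_scaled(beta=0)   # started from rest: no physical parameters needed
```

Starting from rest (`beta=0`), the call needs no physical parameters at all, which is exactly the point of the scaling.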
### The physics
The typical physics of the system in [Figure](#vib:app:mass_spring:fig) can be described as follows. Initially,
we displace the body to some position $I$, say at rest ($V=0$). After
releasing the body, the spring, which is extended, will act with a
force $-kI\ii$ and pull the body to the left. This force causes an
acceleration and therefore increases velocity. The body passes the
point $x=0$, where $u=0$, and the spring will then be compressed and
act with a force $kx\ii$ against the motion and cause retardation. At
some point, the motion stops and the velocity is zero, before the
spring force $kx\ii$ has worked long enough to push the body in
positive direction. The result is that the body accelerates back and
forth. As long as there is no friction forces to damp the motion, the
oscillations will continue forever.
## General mechanical vibrating system
<div id="vib:app:mass_gen"></div>
<!-- dom:FIGURE: [fig-vib/oscillator_general.png, width=500 frac=0.7] General oscillating system. <div id="vib:app:mass_gen:fig"></div> -->
<!-- begin figure -->
<div id="vib:app:mass_gen:fig"></div>
<p>General oscillating system.</p>
<!-- end figure -->
The mechanical system in [Figure](#vib:app:mass_spring:fig) can easily be
extended to the more general system in [Figure](#vib:app:mass_gen:fig),
where the body is attached to a spring and a dashpot, and also subject
to an environmental force $F(t)\ii$. The system has still only one
degree of freedom since the body can only move back and forth parallel to
the $x$ axis. The spring force was linear, $\F_s=-ku\ii$,
in the section [Oscillating mass attached to a spring](#vib:app:mass_spring), but in more general cases it can
depend nonlinearly on the position. We therefore set $\F_s=s(u)\ii$.
The dashpot, which acts
as a damper, results in a force $\F_d$ that depends on the body's
velocity $\dot u$ and that always acts against the motion.
The mathematical model of the force is written $\F_d =f(\dot u)\ii$.
A positive $\dot u$ must result in a force acting in the positive $x$
direction.
Finally, we have the external environmental force $\F_e = F(t)\ii$.
Newton's second law of motion now involves three forces:
$$
F(t)\ii - f(\dot u)\ii - s(u)\ii = m\ddot u \ii\thinspace .
$$
The common mathematical form of the ODE problem is
<!-- Equation labels as ordinary links -->
<div id="vib:app:mass_gen:equ"></div>
$$
\begin{equation}
m\ddot u + f(\dot u) + s(u) = F(t),\quad u(0)=I,\ \dot u(0)=V\thinspace .
\label{vib:app:mass_gen:equ} \tag{3}
\end{equation}
$$
This is the generalized problem treated in the last part of the
present chapter, but with prime denoting the derivative instead of the dot.
The most common models for the spring and dashpot are linear: $f(\dot u)
=b\dot u$ with a constant $b\geq 0$, and $s(u)=ku$ for a constant $k$.
### Scaling
A specific scaling requires specific choices of $f$, $s$, and $F$.
Suppose we have
$$
f(\dot u) = b|\dot u|\dot u,\quad s(u)=ku,\quad F(t)=A\sin(\phi t)\thinspace .
$$
We introduce dimensionless variables as usual, $\bar u = u/u_c$ and
$\bar t = t/t_c$. The scale $u_c$ depends both on the initial conditions
and $F$, but as time grows, the effect of the initial conditions die out
and $F$ will drive the motion. Inserting $\bar u$ and $\bar t$ in the
ODE gives
$$
m\frac{u_c}{t_c^2}\frac{d^2\bar u}{d\bar t^2}
+ b\frac{u_c^2}{t_c^2}\left\vert\frac{d\bar u}{d\bar t}\right\vert
\frac{d\bar u}{d\bar t} + ku_c\bar u = A\sin(\phi t_c\bar t)\thinspace .
$$
We divide by $m u_c/t_c^2$ and demand the coefficients of the
$\bar u$ and the forcing term from $F(t)$ to have unit coefficients.
This leads to the scales
$$
t_c = \sqrt{\frac{m}{k}},\quad u_c = \frac{A}{k}\thinspace .
$$
The scaled ODE becomes
<!-- Equation labels as ordinary links -->
<div id="vib:app:mass_gen:scaled"></div>
$$
\begin{equation}
\frac{d^2\bar u}{d\bar t^2}
+ 2\beta\left\vert\frac{d\bar u}{d\bar t}\right\vert
\frac{d\bar u}{d\bar t} + \bar u = \sin(\gamma\bar t),
\label{vib:app:mass_gen:scaled} \tag{4}
\end{equation}
$$
where there are two dimensionless numbers:
$$
\beta = \frac{Ab}{2mk},\quad\gamma =\phi\sqrt{\frac{m}{k}}\thinspace .
$$
The $\beta$ number measures the size of the damping term (relative to unity)
and is assumed to be small, basically because $b$ is small. The $\gamma$
number is the ratio of the time scale of free vibrations and the time scale
of the forcing.
The scaled initial conditions have two other dimensionless numbers
as values:
$$
\bar u(0) = \frac{Ik}{A},\quad \frac{d\bar u}{d\bar t}(0)=\frac{t_c}{u_c}V = \frac{V}{A}\sqrt{mk}\thinspace .
$$
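To make the scaled model ([4](#vib:app:mass_gen:scaled)) concrete, here is an added sketch (not from the original text) that rewrites it as a first-order system and integrates it with SciPy. The values of $\beta$ and $\gamma$, the initial conditions, and the simulation time are arbitrary illustration choices.

```python
import numpy as np
from scipy.integrate import solve_ivp

beta, gamma = 0.05, 1.2    # illustrative dimensionless damping and frequency ratio

def rhs(t, y):
    u, v = y               # v is the scaled velocity du/dt
    return [v, np.sin(gamma*t) - 2*beta*abs(v)*v - u]

T = 20*2*np.pi             # roughly 20 periods of free vibrations
sol = solve_ivp(rhs, [0, T], [0.1, 0],   # u(0)=0.1, u'(0)=0 (illustration only)
                t_eval=np.linspace(0, T, 2001), rtol=1e-8)
u_bar = sol.y[0]           # scaled displacement as a function of scaled time
```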
## A sliding mass attached to a spring
<div id="vib:app:mass_sliding"></div>
Consider a variant of the oscillating body in the section [Oscillating mass attached to a spring](#vib:app:mass_spring)
and [Figure](#vib:app:mass_spring:fig): the body rests on a flat
surface, and there is sliding friction between the body and the surface.
[Figure](#vib:app:mass_sliding:fig) depicts the problem.
<!-- dom:FIGURE: [fig-vib/oscillator_sliding.png, width=500 frac=0.7] Sketch of a body sliding on a surface. <div id="vib:app:mass_sliding:fig"></div> -->
<!-- begin figure -->
<div id="vib:app:mass_sliding:fig"></div>
<p>Sketch of a body sliding on a surface.</p>
<!-- end figure -->
The body is attached to a spring with spring force $-s(u)\ii$.
The friction force is proportional to the normal force on the surface,
$-mg\jj$, and given by $-f(\dot u)\ii$, where
$$
f(\dot u) = \left\lbrace\begin{array}{ll}
-\mu mg,& \dot u < 0,\\
\mu mg, & \dot u > 0,\\
0, & \dot u=0
\end{array}\right.
$$
Here, $\mu$ is a friction coefficient. With the signum function
$$
\mbox{sign(x)} = \left\lbrace\begin{array}{ll}
-1,& x < 0,\\
1, & x > 0,\\
0, & x=0
\end{array}\right.
$$
we can simply write $f(\dot u) = \mu mg\,\hbox{sign}(\dot u)$
(the sign function is implemented by `numpy.sign`).
The equation of motion becomes
<!-- Equation labels as ordinary links -->
<div id="vib:app:mass_sliding:equ"></div>
$$
\begin{equation}
m\ddot u + \mu mg\hbox{sign}(\dot u) + s(u) = 0,\quad u(0)=I,\ \dot u(0)=V\thinspace .
\label{vib:app:mass_sliding:equ} \tag{5}
\end{equation}
$$
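A minimal added sketch (not part of the original text) of how ([5](#vib:app:mass_sliding:equ)) can be integrated, with a linear spring $s(u)=ku$ and arbitrary illustration values for $m$, $k$, and $\mu$, reads:

```python
import numpy as np
from scipy.integrate import solve_ivp

m, k, g, mu = 1.0, 4.0, 9.81, 0.05   # illustration values only
s = lambda u: k*u                    # linear spring force

def rhs(t, y):
    u, v = y
    return [v, -mu*g*np.sign(v) - s(u)/m]

# A small max_step keeps the solver from stepping over the sign(v) discontinuity
sol = solve_ivp(rhs, [0, 20], [1.0, 0], max_step=0.01)
```

The discontinuity in $\hbox{sign}(\dot u)$ makes the problem harder for standard ODE solvers, so a small maximum step (or a smoothed sign function) is a common workaround.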
## A jumping washing machine
<div id="vib:app:washmach"></div>
A washing machine is placed on four springs with efficient dampers.
If the machine contains just a few clothes, the circular motion of
the machine induces a sinusoidal external force from the floor and the machine will
jump up and down if the frequency of the external force is close to
the natural frequency of the machine and its spring-damper system.
[hpl 1: Not finished. This is a good example on resonance.]
## Motion of a pendulum
<div id="vib:app:pendulum"></div>
### Simple pendulum
A classical problem in mechanics is the motion of a pendulum. We first
consider a [simplified pendulum](https://en.wikipedia.org/wiki/Pendulum) (sometimes also called a
mathematical pendulum): a small body of mass $m$ is
attached to a massless wire and can oscillate back and forth in the
gravity field. [Figure](#vib:app:pendulum:fig_problem) shows a sketch
of the problem.
<!-- dom:FIGURE: [fig-vib/pendulum_problem.png, width=300 frac=0.5] Sketch of a simple pendulum. <div id="vib:app:pendulum:fig_problem"></div> -->
<!-- begin figure -->
<div id="vib:app:pendulum:fig_problem"></div>
<p>Sketch of a simple pendulum.</p>
<!-- end figure -->
The motion is governed by Newton's 2nd law, so we need to find
expressions for the forces and the acceleration. Three forces on the
body are considered: an unknown force $S$ from the wire, the gravity
force $mg$, and an air resistance force, $\frac{1}{2}C_D\varrho A|v|v$,
hereafter called the drag force, directed against the velocity
of the body. Here, $C_D$ is a drag coefficient, $\varrho$ is the
density of air, $A$ is the cross section area of the body, and $v$ is
the magnitude of the velocity.
We introduce a coordinate system with polar coordinates and unit
vectors $\ir$ and $\ith$ as shown in [Figure](#vib:app:pendulum:fig_forces). The position of the center of mass
of the body is
$$
\rpos(t) = x_0\ii + y_0\jj + L\ir,
$$
where $\ii$ and $\jj$ are unit vectors in the corresponding Cartesian
coordinate system in the $x$ and $y$ directions, respectively. We have
that $\ir = \cos\theta\ii +\sin\theta\jj$.
<!-- dom:FIGURE: [fig-vib/pendulum_forces.png, width=500 frac=0.65] Forces acting on a simple pendulum. <div id="vib:app:pendulum:fig_forces"></div> -->
<!-- begin figure -->
<div id="vib:app:pendulum:fig_forces"></div>
<p>Forces acting on a simple pendulum.</p>
<!-- end figure -->
The forces are now expressed as follows.
* Wire force: $-S\ir$
* Gravity force: $-mg\jj = mg(-\sin\theta\,\ith + \cos\theta\,\ir)$
* Drag force: $-\frac{1}{2}C_D\varrho A |v|v\,\ith$
Since a positive velocity means movement in the direction of $\ith$,
the drag force must be directed along $-\ith$ so it works against the
motion. We assume motion in air so that the added mass effect can
be neglected (for a spherical body, the added mass is $\frac{1}{2}\varrho V$,
where $V$ is the volume of the body). Also the buoyancy effect
can be neglected for motion in the air when the density difference
between the fluid and the body is so significant.
The velocity of the body is found from $\rpos$:
$$
\v(t) = \dot\rpos (t) = \frac{d}{d\theta}(x_0\ii + y_0\jj + L\ir)\frac{d\theta}{dt} = L\dot\theta\ith,
$$
since $\frac{d}{d\theta}\ir = \ith$. It follows that $v=|\v|=L\dot\theta$.
The acceleration is
$$
\acc(t) = \dot\v(t) = \frac{d}{dt}(L\dot\theta\ith)
= L\ddot\theta\ith + L\dot\theta\frac{d\ith}{d\theta}\dot\theta
= L\ddot\theta\ith - L\dot\theta^2\ir,
$$
since $\frac{d}{d\theta}\ith = -\ir$.
Newton's 2nd law of motion becomes
$$
-S\ir + mg(-\sin\theta\,\ith + \cos\theta\,\ir) -
\frac{1}{2}C_D\varrho AL^2|\dot\theta|\dot\theta\,\ith
= m(L\ddot\theta\,\ith - L\dot\theta^2\ir),
$$
leading to two component equations
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum:ir"></div>
$$
\begin{equation}
-S + mg\cos\theta = -mL\dot\theta^2,
\label{vib:app:pendulum:ir} \tag{6}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum:ith"></div>
$$
\begin{equation}
-mg\sin\theta - \frac{1}{2}C_D\varrho AL^2|\dot\theta|\dot\theta
= mL\ddot\theta\thinspace .
\label{vib:app:pendulum:ith} \tag{7}
\end{equation}
$$
From ([6](#vib:app:pendulum:ir)) we get an expression for
$S=mg\cos\theta + mL\dot\theta^2$, and from ([7](#vib:app:pendulum:ith))
we get a differential equation for the angle $\theta(t)$. This latter
equation is ordered as
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum:thetaeq"></div>
$$
\begin{equation}
m\ddot\theta + \frac{1}{2}C_D\varrho AL|\dot\theta|\dot\theta
+ \frac{mg}{L}\sin\theta = 0\thinspace .
\label{vib:app:pendulum:thetaeq} \tag{8}
\end{equation}
$$
Two initial conditions are needed: $\theta=\Theta$ and $\dot\theta = \Omega$.
Normally, the pendulum motion is started from rest, which means $\Omega =0$.
Equation ([8](#vib:app:pendulum:thetaeq)) fits the general model
used in ([vib:ode2](#vib:ode2)) in the section [vib:model2](#vib:model2) if we define
$u=\theta$, $f(u^{\prime}) = \frac{1}{2}C_D\varrho AL|\dot u|\dot u$,
$s(u) = L^{-1}mg\sin u$, and $F=0$.
If the body is a sphere with radius $R$, we can take $C_D=0.4$ and $A=\pi R^2$.
[Exercise 4: Simulate a simple pendulum](#vib:exer:pendulum_simple) asks you to scale the equations
and carry out specific simulations with this model.
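As an added sketch of this correspondence (not part of the original text; the sphere data and the initial angle are arbitrary illustration values), equation ([8](#vib:app:pendulum:thetaeq)) can be coded directly with $u=\theta$ and the functions $f$ and $s$ defined above:

```python
import numpy as np
from scipy.integrate import solve_ivp

# Illustration values for a small sphere on a 1 m wire
m, L, g = 0.1, 1.0, 9.81
R = 0.02                   # sphere radius
C_D, rho = 0.4, 1.2        # drag coefficient and air density
A = np.pi*R**2             # cross section area

f = lambda dudt: 0.5*C_D*rho*A*L*abs(dudt)*dudt   # air resistance term
s = lambda u: m*g/L*np.sin(u)                     # restoring term

def rhs(t, y):
    theta, omega = y
    return [omega, -(f(omega) + s(theta))/m]

Theta = np.radians(40)     # initial angle
sol = solve_ivp(rhs, [0, 10], [Theta, 0], max_step=0.01)
```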
### Physical pendulum
The motion of a compound or physical pendulum where the wire is a rod with
mass, can be modeled very similarly. The governing equation is
$I\acc = \boldsymbol{T}$ where $I$ is the moment of inertia of the entire body about
the point $(x_0,y_0)$, and $\boldsymbol{T}$ is the sum of moments of the forces
with respect to $(x_0,y_0)$. The vector equation reads
$$
\rpos\times(-S\ir + mg(-\sin\theta\ith + \cos\theta\ir) -
\frac{1}{2}C_D\varrho AL^2|\dot\theta|\dot\theta\ith)
= I(L\ddot\theta\,\ith - L\dot\theta^2\ir)\thinspace .
$$
The component equation in $\ith$ direction gives the equation of motion
for $\theta(t)$:
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum:thetaeq_physical"></div>
$$
\begin{equation}
I\ddot\theta + \frac{1}{2}C_D\varrho AL^3|\dot\theta|\dot\theta
+ mgL\sin\theta = 0\thinspace .
\label{vib:app:pendulum:thetaeq_physical} \tag{9}
\end{equation}
$$
## Dynamic free body diagram during pendulum motion
<div id="vib:app:pendulum_bodydia"></div>
Usually one plots the mathematical quantities as functions of time to
visualize the solution of ODE models. [Exercise 4: Simulate a simple pendulum](#vib:exer:pendulum_simple) asks you to do this for the motion of a
pendulum in the previous section. However, sometimes it is more
instructive to look at other types of visualizations. For example, we
have the pendulum and the free body diagram in Figures
[vib:app:pendulum:fig_problem](#vib:app:pendulum:fig_problem) and
[vib:app:pendulum:fig_forces](#vib:app:pendulum:fig_forces). We may think of these figures as
animations in time instead. Especially the free body diagram will show both the
motion of the pendulum *and* the size of the forces during the motion.
The present section exemplifies how to make such a dynamic body
diagram.
% if FORMAT == 'pdflatex':
Two typical snapshots of free body diagrams are displayed below
(the drag force is magnified 5 times to become more visual!).
<!-- dom:FIGURE: [fig-vib/pendulum_body_dia.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
% else:
<!-- dom:MOVIE: [https://github.com/hplgit/pysketcher/raw/master/doc/pub/tutorial/mov-tut/pendulum/movie.mp4] The drag force is magnified 5 times! % endif -->
<!-- begin movie -->
```python
from IPython.display import HTML
_s = """
<div>
</div>
<p><em>The drag force is magnified 5 times!</em></p>
<!-- Issue warning if in a Safari browser -->
"""
HTML(_s)
```
<!-- end movie -->
Dynamic physical sketches, coupled to the numerical solution of
differential equations, require a program to produce a sketch for
the situation at each time level.
[Pysketcher](https://github.com/hplgit/pysketcher) is such a tool.
In fact (and not surprising!) Figures [vib:app:pendulum:fig_problem](#vib:app:pendulum:fig_problem) and
[vib:app:pendulum:fig_forces](#vib:app:pendulum:fig_forces) were drawn using Pysketcher.
The details of the drawings are explained in the
[Pysketcher tutorial](http://hplgit.github.io/pysketcher/doc/web/index.html).
Here, we outline how this type of sketch can be used to create an animated
free body diagram during the motion of a pendulum.
Pysketcher is actually a layer of useful abstractions on top of
standard plotting packages. This means that we in fact apply Matplotlib
to make the animated free body diagram, but instead of dealing with a wealth
of detailed Matplotlib commands, we can express the drawing in terms of
more high-level objects, e.g., objects for the wire, angle $\theta$,
body with mass $m$, arrows for forces, etc. When the position of these
objects are given through variables, we can just couple those variables
to the dynamic solution of our ODE and thereby make a unique drawing
for each $\theta$ value in a simulation.
### Writing the solver
Let us start with the most familiar part of the current problem:
writing the solver function. We use Odespy for this purpose.
We also work with dimensionless equations. Since $\theta$ can be
viewed as dimensionless, we only need to introduce a dimensionless time,
here taken as $\bar t = t/\sqrt{L/g}$.
The resulting dimensionless mathematical model for $\theta$,
the dimensionless angular velocity $\omega$, the
dimensionless wire force $\bar S$, and the dimensionless
drag force $\bar D$ is then
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_bodydia:eqth"></div>
$$
\begin{equation}
\frac{d\omega}{d\bar t} = - \alpha|\omega|\omega - \sin\theta,
\label{vib:app:pendulum_bodydia:eqth} \tag{10}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_bodydia:eqomega"></div>
$$
\begin{equation}
\frac{d\theta}{d\bar t} = \omega,
\label{vib:app:pendulum_bodydia:eqomega} \tag{11}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_bodydia:eqS"></div>
$$
\begin{equation}
\bar S = \omega^2 + \cos\theta,
\label{vib:app:pendulum_bodydia:eqS} \tag{12}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_bodydia:eqD"></div>
$$
\begin{equation}
\bar D = -\alpha |\omega|\omega,
\label{vib:app:pendulum_bodydia:eqD} \tag{13}
\end{equation}
$$
with
$$
\alpha = \frac{C_D\varrho\pi R^2L}{2m}\thinspace .
$$
as a dimensionless parameter expressing the ratio of the drag force and
the gravity force. The angular velocity $\omega$ is made dimensionless
by the time scale, so $\omega\sqrt{g/L}$ is the corresponding angular
velocity with dimensions.
<!-- [Exercise 4: Simulate a simple pendulum](#vib:exer:pendulum_simple) asks you to carry out the details of -->
<!-- the scaling. -->
A suitable function for computing
([10](#vib:app:pendulum_bodydia:eqth))-([13](#vib:app:pendulum_bodydia:eqD))
is listed below.
```python
def simulate(alpha, Theta, dt, T):
    import odespy
    from math import sin   # sin is used in the right-hand side function f

    def f(u, t, alpha):
        omega, theta = u
        return [-alpha*omega*abs(omega) - sin(theta),
                omega]

    import numpy as np
    Nt = int(round(T/float(dt)))
    t = np.linspace(0, Nt*dt, Nt+1)

    solver = odespy.RK4(f, f_args=[alpha])
    solver.set_initial_condition([0, Theta])
    u, t = solver.solve(
        t, terminate=lambda u, t, n: abs(u[n,1]) < 1E-3)
    omega = u[:,0]
    theta = u[:,1]
    S = omega**2 + np.cos(theta)
    drag = -alpha*np.abs(omega)*omega
    return t, theta, omega, S, drag
```
### Drawing the free body diagram
The `sketch` function below applies Pysketcher objects to build
a diagram like that in [Figure](#vib:app:pendulum:fig_forces),
except that we have removed the rotation point $(x_0,y_0)$ and
the unit vectors in polar coordinates as these objects are not
important for an animated free body diagram.
```python
import sys
try:
    from pysketcher import *
except ImportError:
    print('Pysketcher must be installed from')
    print('https://github.com/hplgit/pysketcher')
    sys.exit(1)
# Overall dimensions of sketch
H = 15.
W = 17.
drawing_tool.set_coordinate_system(
xmin=0, xmax=W, ymin=0, ymax=H,
axis=False)
def sketch(theta, S, mg, drag, t, time_level):
    """
    Draw pendulum sketch with body forces at a time level
    corresponding to time t. The drag force is in
    drag[time_level], the force in the wire is S[time_level],
    the angle is theta[time_level].
    """
    import math
    a = math.degrees(theta[time_level])  # angle in degrees
    L = 0.4*H         # Length of pendulum
    P = (W/2, 0.8*H)  # Fixed rotation point

    # Assumed reconstruction: the circular path of the mass about P; its
    # end point gives the current mass position (definition was missing)
    path = Arc(P, L, -90, a)
    mass_pt = path.geometric_features()['end']
    rod = Line(P, mass_pt)

    mass = Circle(center=mass_pt, radius=L/20.)
    mass.set_filled_curves(color='blue')
    rod_vec = rod.geometric_features()['end'] - \
              rod.geometric_features()['start']
    unit_rod_vec = unit_vec(rod_vec)
    mass_symbol = Text('$m$', mass_pt + L/10*unit_rod_vec)

    rod_start = rod.geometric_features()['start']  # Point P
    vertical = Line(rod_start, rod_start + point(0,-L/3))

    def set_dashed_thin_blackline(*objects):
        """Set linestyle of objects to dashed, black, width=1."""
        for obj in objects:
            obj.set_linestyle('dashed')
            obj.set_linecolor('black')
            obj.set_linewidth(1)

    set_dashed_thin_blackline(vertical)
    set_dashed_thin_blackline(rod)
    angle = Arc_wText(r'$\theta$', rod_start, L/6, -90, a,
                      text_spacing=1/30.)

    magnitude = 1.2*L/2       # length of a unit force in figure
    force = mg[time_level]    # constant (scaled eq: about 1)
    force *= magnitude
    mg_force = Force(mass_pt, mass_pt + force*point(0,-1),
                     '', text_pos='end')
    force = S[time_level]
    force *= magnitude
    rod_force = Force(mass_pt, mass_pt - force*unit_vec(rod_vec),
                      '', text_pos='end',
                      text_spacing=(0.03, 0.01))
    force = drag[time_level]
    force *= magnitude
    air_force = Force(mass_pt, mass_pt -
                      force*unit_vec((rod_vec[1], -rod_vec[0])),
                      '', text_pos='end',
                      text_spacing=(0.04,0.005))

    body_diagram = Composition(
        {'mg': mg_force, 'S': rod_force, 'air': air_force,
         'rod': rod, 'body': mass,
         'vertical': vertical, 'theta': angle,})

    body_diagram.draw(verbose=0)
    drawing_tool.savefig('tmp_%04d.png' % time_level, crop=False)
    # (No cropping: otherwise movies will be very strange!)
```
### Making the animated free body diagram
It now remains to couple the `simulate` and `sketch` functions.
We first run `simulate`:
```python
from math import pi, radians, degrees
import numpy as np
alpha = 0.4
period = 2*pi # Use small theta approximation
T = 12*period # Simulate for 12 periods
dt = period/40 # 40 time steps per period
a = 70 # Initial amplitude in degrees
Theta = radians(a)
t, theta, omega, S, drag = simulate(alpha, Theta, dt, T)
```
The next step is to run through the time levels in the simulation and
make a sketch at each level:
```python
for time_level, t_ in enumerate(t):
    sketch(theta, S, mg, drag, t_, time_level)
```
The individual sketches are (by the `sketch` function) saved in files
with names `tmp_%04d.png`. These can be combined to videos using
(e.g.) `ffmpeg`. A complete function `animate` for running the
simulation and creating video files is
listed below.
```python
def animate():
    # Clean up old plot files
    import os, glob
    for filename in glob.glob('tmp_*.png') + glob.glob('movie.*'):
        os.remove(filename)

    # Solve problem
    from math import pi, radians, degrees
    import numpy as np
    alpha = 0.4
    period = 2*pi    # Use small theta approximation
    T = 12*period    # Simulate for 12 periods
    dt = period/40   # 40 time steps per period
    a = 70           # Initial amplitude in degrees
    Theta = radians(a)

    t, theta, omega, S, drag = simulate(alpha, Theta, dt, T)

    # Visualize drag force 5 times as large
    drag *= 5
    mg = np.ones(S.size)  # Gravity force (needed in sketch)

    # Draw animation
    import time
    for time_level, t_ in enumerate(t):
        sketch(theta, S, mg, drag, t_, time_level)
        time.sleep(0.2)  # Pause between each frame on the screen

    # Make videos
    prog = 'ffmpeg'
    filename = 'tmp_%04d.png'
    fps = 6
    codecs = {'flv': 'flv', 'mp4': 'libx264',
              'webm': 'libvpx', 'ogg': 'libtheora'}
    for ext in codecs:
        lib = codecs[ext]
        cmd = '%(prog)s -i %(filename)s -r %(fps)s ' % vars()
        cmd += '-vcodec %(lib)s movie.%(ext)s' % vars()
        print(cmd)
        os.system(cmd)
```
## Motion of an elastic pendulum
<div id="vib:app:pendulum_elastic"></div>
Consider a pendulum as in [Figure](#vib:app:pendulum:fig_problem), but
this time the wire is elastic. The length of the wire when it is not
stretched is $L_0$, while $L(t)$ is the stretched
length at time $t$ during the motion.
Stretching the elastic wire a distance $\Delta L$ gives rise to a
spring force $k\Delta L$ in the opposite direction of the
stretching. Let $\boldsymbol{n}$ be a unit normal vector along the wire
from the point $\rpos_0=(x_0,y_0)$ and in the direction of $\ith$, see
[Figure](#vib:app:pendulum:fig_forces) for definition of $(x_0,y_0)$
and $\ith$. Obviously, we have $\boldsymbol{n}=\ith$, but in this modeling
of an elastic pendulum we do not need polar coordinates. Instead, it
is more straightforward to develop the equation in Cartesian
coordinates.
A mathematical expression for $\boldsymbol{n}$ is
$$
\boldsymbol{n} = \frac{\rpos-\rpos_0}{L(t)},
$$
where $L(t)=||\rpos-\rpos_0||$ is the current length of the elastic wire.
The position vector $\rpos$ in Cartesian coordinates reads
$\rpos(t) = x(t)\ii + y(t)\jj$, where $\ii$ and $\jj$ are unit vectors
in the $x$ and $y$ directions, respectively.
It is convenient to introduce the Cartesian components $n_x$ and $n_y$
of the normal vector:
$$
\boldsymbol{n} = \frac{\rpos-\rpos_0}{L(t)} = \frac{x(t)-x_0}{L(t)}\ii + \frac{y(t)-y_0}{L(t)}\jj = n_x\ii + n_y\jj\thinspace .
$$
The stretch $\Delta L$ in the wire is
$$
\Delta L = L(t) - L_0\thinspace .
$$
The force in the wire is then $-S\boldsymbol{n}=-k\Delta L\boldsymbol{n}$.
The other forces are the gravity and the air resistance, just as in
[Figure](#vib:app:pendulum:fig_forces). For motion in air we can
neglect the added mass and buoyancy effects. The main difference is
that we have a *model* for $S$ in terms of the motion (as soon as we
have expressed $\Delta L$ by $\rpos$). For simplicity, we drop the air
resistance term (but [Exercise 6: Simulate an elastic pendulum with air resistance](#vib:exer:pendulum_elastic_drag) asks
you to include it).
Newton's second law of motion applied to the body now results in
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:eq1"></div>
$$
\begin{equation}
m\ddot\rpos = -k(L-L_0)\boldsymbol{n} - mg\jj
\label{vib:app:pendulum_elastic:eq1} \tag{14}
\end{equation}
$$
The two components of
([14](#vib:app:pendulum_elastic:eq1)) are
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
\ddot x = -\frac{k}{m}(L-L_0)n_x,
\label{_auto1} \tag{15}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:eq2a"></div>
$$
\begin{equation}
\label{vib:app:pendulum_elastic:eq2a} \tag{16}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:eq2b"></div>
$$
\begin{equation}
\ddot y = - \frac{k}{m}(L-L_0)n_y - g
\label{vib:app:pendulum_elastic:eq2b} \tag{17}\thinspace .
\end{equation}
$$
### Remarks about an elastic vs a non-elastic pendulum
Note that the derivation of the ODEs for an elastic pendulum is more
straightforward than for a classical, non-elastic pendulum,
since we avoid the details
with polar coordinates, but instead work with Newton's second law
directly in Cartesian coordinates. The reason why we can do this is that
the elastic pendulum undergoes a general two-dimensional motion where
all the forces are known or expressed as functions of $x(t)$ and $y(t)$,
such that we get two ordinary differential equations.
The motion of the non-elastic pendulum, on the other hand, is constrained:
the body has to move along a circular path, and the force $S$ in the
wire is unknown.
The non-elastic pendulum therefore leads to
a *differential-algebraic* equation, i.e., ODEs for $x(t)$ and $y(t)$
combined with an extra constraint $(x-x_0)^2 + (y-y_0)^2 = L^2$
ensuring that the motion takes place along a circular path.
The extra constraint (equation) is compensated by an extra unknown force
$-S\boldsymbol{n}$. Differential-algebraic equations are normally hard
to solve, especially with pen and paper.
Fortunately, for the non-elastic pendulum we can do a
trick: in polar coordinates the unknown force $S$ appears only in the
radial component of Newton's second law, while the unknown
degree of freedom for describing the motion, the angle $\theta(t)$,
is completely governed by the azimuthal component. This allows us to
decouple the unknowns $S$ and $\theta$. But this is a kind of trick and
not a widely applicable method. With an elastic pendulum we use straightforward
reasoning with Newton's 2nd law and arrive at a standard ODE problem that
(after scaling) is easy to solve on a computer.
### Initial conditions
What is the initial position of the body? We imagine that first the
pendulum hangs in equilibrium in its vertical position, and then it is
displaced an angle $\Theta$. The equilibrium position is governed
by the ODEs with the accelerations set to zero.
The $x$ component leads to $x(t)=x_0$, while the $y$ component gives
$$
0 = - \frac{k}{m}(L-L_0)n_y - g = \frac{k}{m}(L(0)-L_0) - g\quad\Rightarrow\quad
L(0) = L_0 + mg/k,
$$
since $n_y=-1$ in this position. The corresponding $y$ value is then
from $n_y=-1$:
$$
y(t) = y_0 - L(0) = y_0 - (L_0 + mg/k)\thinspace .
$$
Let us now choose $(x_0,y_0)$ such that the body is at the origin
in the equilibrium position:
$$
x_0 =0,\quad y_0 = L_0 + mg/k\thinspace .
$$
Displacing the body an angle $\Theta$ to the right leads to the
initial position
$$
x(0)=(L_0+mg/k)\sin\Theta,\quad y(0)=(L_0+mg/k)(1-\cos\Theta)\thinspace .
$$
The initial velocities can be set to zero: $x'(0)=y'(0)=0$.
### The complete ODE problem
We can summarize all the equations as follows:
$$
\begin{align*}
\ddot x &= -\frac{k}{m}(L-L_0)n_x,
\\
\ddot y &= -\frac{k}{m}(L-L_0)n_y - g,
\\
L &= \sqrt{(x-x_0)^2 + (y-y_0)^2},
\\
n_x &= \frac{x-x_0}{L},
\\
n_y &= \frac{y-y_0}{L},
\\
x(0) &= (L_0+mg/k)\sin\Theta,
\\
x'(0) &= 0,
\\
y(0) & =(L_0+mg/k)(1-\cos\Theta),
\\
y'(0) &= 0\thinspace .
\end{align*}
$$
We insert $n_x$ and $n_y$ in the ODEs:
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:x"></div>
$$
\begin{equation}
\ddot x = -\frac{k}{m}\left(1 -\frac{L_0}{L}\right)(x-x_0),
\label{vib:app:pendulum_elastic:x} \tag{18}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:y"></div>
$$
\begin{equation}
\ddot y = -\frac{k}{m}\left(1 -\frac{L_0}{L}\right)(y-y_0) - g,
\label{vib:app:pendulum_elastic:y} \tag{19}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:L"></div>
$$
\begin{equation}
L = \sqrt{(x-x_0)^2 + (y-y_0)^2},
\label{vib:app:pendulum_elastic:L} \tag{20}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:x0"></div>
$$
\begin{equation}
x(0) = (L_0+mg/k)\sin\Theta,
\label{vib:app:pendulum_elastic:x0} \tag{21}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:vx0"></div>
$$
\begin{equation}
x'(0) = 0,
\label{vib:app:pendulum_elastic:vx0} \tag{22}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:y0"></div>
$$
\begin{equation}
y(0) =(L_0+mg/k)(1-\cos\Theta),
\label{vib:app:pendulum_elastic:y0} \tag{23}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:vy0"></div>
$$
\begin{equation}
y'(0) = 0\thinspace .
\label{vib:app:pendulum_elastic:vy0} \tag{24}
\end{equation}
$$
### Scaling
The elastic pendulum model can be used to study both an elastic pendulum
and a classic, non-elastic pendulum. The latter problem is obtained
by letting $k\rightarrow\infty$. Unfortunately,
a serious problem with the ODEs
([18](#vib:app:pendulum_elastic:x))-([19](#vib:app:pendulum_elastic:y)) is that for large $k$, we have a very large factor $k/m$ multiplied by a
very small number $1-L_0/L$, since for large $k$, $L\approx L_0$ (very
small deformations of the wire). The product is subject to
significant round-off errors for many relevant physical values of
the parameters. To circumvent the problem, we introduce a scaling. This
will also remove physical parameters from the problem such that we end
up with only one dimensionless parameter,
closely related to the elasticity of the wire. Simulations can then be
done by setting just this dimensionless parameter.
The characteristic length can be taken such that in equilibrium, the
scaled length is unity, i.e., the characteristic length is $L_0+mg/k$:
$$
\bar x = \frac{x}{L_0+mg/k},\quad \bar y = \frac{y}{L_0+mg/k}\thinspace .
$$
We must then also work with the scaled length $\bar L = L/(L_0+mg/k)$.
Introducing $\bar t=t/t_c$, where $t_c$ is a characteristic time we
have to decide upon later, one gets
$$
\begin{align*}
\frac{d^2\bar x}{d\bar t^2} &=
-t_c^2\frac{k}{m}\left(1 -\frac{L_0}{L_0+mg/k}\frac{1}{\bar L}\right)\bar x,\\
\frac{d^2\bar y}{d\bar t^2} &=
-t_c^2\frac{k}{m}\left(1 -\frac{L_0}{L_0+mg/k}\frac{1}{\bar L}\right)(\bar y-1)
-t_c^2\frac{g}{L_0 + mg/k},\\
\bar L &= \sqrt{\bar x^2 + (\bar y-1)^2},\\
\bar x(0) &= \sin\Theta,\\
\bar x'(0) &= 0,\\
\bar y(0) & = 1 - \cos\Theta,\\
\bar y'(0) &= 0\thinspace .
\end{align*}
$$
For a non-elastic pendulum with small angles, we know that the
frequency of the oscillations is $\omega = \sqrt{g/L}$. It is therefore
natural to choose a similar expression here, either the length in
the equilibrium position,
$$
t_c^2 = \frac{L_0+mg/k}{g}\thinspace .
$$
or simply the unstretched length,
$$
t_c^2 = \frac{L_0}{g}\thinspace .
$$
These quantities are not very different (since the elastic model
is valid only for quite small elongations), so we take the latter as it is
the simplest one.
The ODEs become
$$
\begin{align*}
\frac{d^2\bar x}{d\bar t^2} &=
-\frac{L_0k}{mg}\left(1 -\frac{L_0}{L_0+mg/k}\frac{1}{\bar L}\right)\bar x,\\
\frac{d^2\bar y}{d\bar t^2} &=
-\frac{L_0k}{mg}\left(1 -\frac{L_0}{L_0+mg/k}\frac{1}{\bar L}\right)(\bar y-1)
-\frac{L_0}{L_0 + mg/k},\\
\bar L &= \sqrt{\bar x^2 + (\bar y-1)^2}\thinspace .
\end{align*}
$$
We can now identify a dimensionless number
$$
\beta = \frac{L_0}{L_0 + mg/k} = \frac{1}{1+\frac{mg}{L_0k}},
$$
which is the ratio of the unstretched length and the
stretched length in equilibrium. The non-elastic pendulum will have
$\beta =1$ ($k\rightarrow\infty$).
With $\beta$ the ODEs read
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:x:s"></div>
$$
\begin{equation}
\frac{d^2\bar x}{d\bar t^2} =
-\frac{\beta}{1-\beta}\left(1- \frac{\beta}{\bar L}\right)\bar x,
\label{vib:app:pendulum_elastic:x:s} \tag{25}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:y:s"></div>
$$
\begin{equation}
\frac{d^2\bar y}{d\bar t^2} =
-\frac{\beta}{1-\beta}\left(1- \frac{\beta}{\bar L}\right)(\bar y-1)
-\beta,
\label{vib:app:pendulum_elastic:y:s} \tag{26}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:L:s"></div>
$$
\begin{equation}
\bar L = \sqrt{\bar x^2 + (\bar y-1)^2},
\label{vib:app:pendulum_elastic:L:s} \tag{27}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:x0:s"></div>
$$
\begin{equation}
\bar x(0) = (1+\epsilon)\sin\Theta,
\label{vib:app:pendulum_elastic:x0:s} \tag{28}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:vx0:s"></div>
$$
\begin{equation}
\frac{d\bar x}{d\bar t}(0) = 0,
\label{vib:app:pendulum_elastic:vx0:s} \tag{29}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:y0:s"></div>
$$
\begin{equation}
\bar y(0) = 1 - (1+\epsilon)\cos\Theta,
\label{vib:app:pendulum_elastic:y0:s} \tag{30}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic:vy0:s"></div>
$$
\begin{equation}
\frac{d\bar y}{d\bar t}(0) = 0,
\label{vib:app:pendulum_elastic:vy0:s} \tag{31}
\end{equation}
$$
We have here added a parameter $\epsilon$, which is an additional
downward stretch of the wire at $t=0$. This parameter makes it possible
to do a desired test: vertical oscillations of the pendulum. Without
$\epsilon$, starting the motion from $(0,0)$ with zero velocity will
result in $x=y=0$ for all times (also a good test!), but with
an initial stretch so the body's position is $(0,\epsilon)$, we
will have oscillatory vertical motion with amplitude $\epsilon$ (see
[Exercise 5: Simulate an elastic pendulum](#vib:exer:pendulum_elastic)).
### Remark on the non-elastic limit
We immediately see that as $k\rightarrow\infty$ (i.e., we obtain a non-elastic
pendulum), $\beta\rightarrow 1$, $\bar L\rightarrow 1$, and we have
very small values $1-\beta\bar L^{-1}$ divided by very small values
$1-\beta$ in the ODEs. However, it turns out that we can set $\beta$
very close to one and obtain a path of the body that within the visual
accuracy of a plot does not show any elastic oscillations.
(Should the division of very small values become a problem, one can
study the limit by L'Hospital's rule:
$$
\lim_{\beta\rightarrow 1}\frac{1 - \beta \bar L^{-1}}{1-\beta}
= \frac{1}{\bar L},
$$
and use the limit $\bar L^{-1}$ in the ODEs for $\beta$ values very
close to 1.)
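In code, such a guarded evaluation of the troublesome factor could look like
the following sketch (the helper name and the switching tolerance are choices
made here, not part of the model):

```python
def elasticity_factor(L_bar, beta, tol=1E-12):
    """
    Return (1 - beta/L_bar)/(1 - beta), replaced by its limit
    1/L_bar (L'Hospital's rule) when beta is within tol of 1.
    """
    if 1 - beta < tol:
        return 1.0/L_bar
    return (1 - beta/L_bar)/(1 - beta)
```

A right-hand side function for
([25](#vib:app:pendulum_elastic:x:s))-([26](#vib:app:pendulum_elastic:y:s))
would then use this factor times $-\beta\bar x$ and $-\beta(\bar y - 1)$,
respectively (plus the $-\beta$ gravity term in the $\bar y$ equation).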
## Vehicle on a bumpy road
<div id="vib:app:bumpy"></div>
<!-- dom:FIGURE: [fig-vib/bumpy_sketch.png, width=400 frac=0.6] Sketch of one-wheel vehicle on a bumpy road. <div id="vib:app:bumpy:fig:sketch"></div> -->
<!-- begin figure -->
<div id="vib:app:bumpy:fig:sketch"></div>
<p>Sketch of one-wheel vehicle on a bumpy road.</p>
<!-- end figure -->
We consider a very simplistic vehicle, on one wheel, rolling along a
bumpy road. The oscillatory nature of the road will induce an external
forcing on the spring system in the vehicle and cause vibrations.
[Figure](#vib:app:bumpy:fig:sketch) outlines the situation.
To derive the equation that governs the motion, we must first establish
the position vector of the black mass at the top of the spring.
Suppose the spring has length $L$ without any elongation or compression,
suppose the radius of the wheel is $R$, and suppose the height of the
black mass at the top is $H$. With the aid of the $\rpos_0$ vector
in [Figure](#vib:app:bumpy:fig:sketch), the position $\rpos$ of
the center point of the mass is
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
\rpos = \rpos_0 + 2R\jj + L\jj + u\jj + \frac{1}{2} H\jj,\
\label{_auto2} \tag{32}
\end{equation}
$$
where $u$ is the elongation or compression of the spring, i.e., the (unknown
and to be computed) vertical displacement of the mass relative to the
road. If the vehicle travels
with constant horizontal velocity $v$ and $h(x)$ is the shape of the
road, then the vector $\rpos_0$ is
$$
\rpos_0 = vt\ii + h(vt)\jj,
$$
if the motion starts from $x=0$ at time $t=0$.
The forces on the mass are gravity, the spring force, and an optional
damping force that is proportional to the vertical velocity $\dot u$. Newton's
second law of motion then tells that
$$
m\ddot\rpos = -mg\jj - s(u)\jj - b\dot u\jj\thinspace .
$$
This leads to
$$
m\ddot u = - s(u) - b\dot u - mg - mh''(vt)v^2\thinspace .
$$
To simplify a little bit, we omit the gravity force $mg$ in comparison with
the other terms. Introducing $u'$ for $\dot u$ then gives a standard
damped, vibration equation with external forcing:
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
mu'' + bu' + s(u) = -mh''(vt)v^2\thinspace .
\label{_auto3} \tag{33}
\end{equation}
$$
Since the road is normally known just as a set of array values, $h''$ must
be computed by finite differences. Let $\Delta x$ be the spacing between
measured values $h_i= h(i\Delta x)$ on the road. The discrete second-order
derivative $h''$ reads
$$
q_i = \frac{h_{i-1} - 2h_i + h_{i+1}}{\Delta x^2}, \quad i=1,\ldots,N_x-1\thinspace .
$$
We may for maximum simplicity set
the end points as $q_0=q_1$ and $q_{N_x}=q_{N_x-1}$.
The term $-mh''(vt)v^2$ corresponds to a force with discrete time values
$$
F^n = -mq_n v^2,\quad \Delta t = v^{-1}\Delta x\thinspace .
$$
This force can be directly used in a numerical model
$$
[mD_tD_t u + bD_{2t} u + s(u) = F]^n\thinspace .
$$
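As a small illustration, the discrete force samples can be computed from
measured road data along these lines (the function name and the arguments
`h_data`, `dx`, and the speed `v` are just assumptions for this sketch):

```python
import numpy as np

def bump_force(h_data, dx, v, m):
    """
    Compute F^n = -m*q_n*v**2 from road heights h_data[i] = h(i*dx),
    where q approximates h'' by centered differences.
    """
    h = np.asarray(h_data, dtype=float)
    q = np.zeros_like(h)
    q[1:-1] = (h[:-2] - 2*h[1:-1] + h[2:])/dx**2
    q[0] = q[1]        # simple end conditions
    q[-1] = q[-2]
    return -m*q*v**2   # force samples, spaced dt = dx/v in time
```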
Software for computing $u$ and also making an animated sketch of
the motion like we did in the section [Dynamic free body diagram during pendulum motion](#vib:app:pendulum_bodydia)
is found in a separate project on the web:
<https://github.com/hplgit/bumpy>. You may start looking at the
"tutorial":
% if FORMAT == 'pdflatex':
"http://hplgit.github.io/bumpy/doc/pub/bumpy.pdf".
% else:
"http://hplgit.github.io/bumpy/doc/pub/bumpy.html".
% endif
## Bouncing ball
<div id="vib:app:bouncing_ball"></div>
A bouncing ball is a ball in free vertical fall until it impacts the
ground, but during the impact, some kinetic energy is lost, and a new
motion upwards with reduced velocity starts. After the motion is retarded,
a new free fall starts, and the process is repeated. At some point the
velocity close to the ground is so small that the ball is considered
to be finally at rest.
The motion of the ball falling in air is governed by Newton's second
law $F=ma$, where $a$ is the acceleration of the body, $m$ is the mass,
and $F$ is the sum of all forces. Here, we neglect the air resistance
so that gravity $-mg$ is the only force. The height of the ball is
denoted by $h$ and $v$ is the velocity. The relations between $h$, $v$, and
$a$,
$$
h'(t)= v(t),\quad v'(t) = a(t),
$$
combined with Newton's second law gives the ODE model
<!-- Equation labels as ordinary links -->
<div id="vib:app:bouncing:ball:h2eq"></div>
$$
\begin{equation}
h^{\prime\prime}(t) = -g,
\label{vib:app:bouncing:ball:h2eq} \tag{34}
\end{equation}
$$
or expressed alternatively as a system of first-order equations:
<!-- Equation labels as ordinary links -->
<div id="vib:app:bouncing:ball:veq"></div>
$$
\begin{equation}
v'(t) = -g,
\label{vib:app:bouncing:ball:veq} \tag{35}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:bouncing:ball:heq"></div>
$$
\begin{equation}
h'(t) = v(t)\thinspace .
\label{vib:app:bouncing:ball:heq} \tag{36}
\end{equation}
$$
These equations govern the motion as long as the ball is away from
the ground by a small distance $\epsilon_h > 0$. When $h<\epsilon_h$,
we have two cases.
1. The ball impacts the ground, recognized by a sufficiently large negative
velocity ($v<-\epsilon_v$). The velocity then changes sign and is
reduced by a factor $C_R$, known as the [coefficient of restitution](http://en.wikipedia.org/wiki/Coefficient_of_restitution).
For plotting purposes, one may set $h=0$.
2. The motion stops, recognized by a sufficiently small velocity
($|v|<\epsilon_v$) close to the ground.
## Two-body gravitational problem
<div id="vib:app:gravitation"></div>
Consider two astronomical objects $A$ and $B$ that attract each other
by gravitational forces. $A$ and $B$ could be two stars in a binary
system, a planet orbiting a star, or a moon orbiting a planet.
Each object is acted upon by the
gravitational force due to the other object. Consider motion in a plane
(for simplicity) and let $(x_A,y_A)$ and $(x_B,y_B)$ be the
positions of object $A$ and $B$, respectively.
### The governing equations
Newton's second law of motion applied to each object is all we need
to set up a mathematical model for this physical problem:
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
m_A\ddot\x_A = \F,
\label{_auto4} \tag{37}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
m_B\ddot\x_B = -\F,
\label{_auto5} \tag{38}
\end{equation}
$$
where $\F$ is the gravitational force
$$
\F = \frac{Gm_Am_B}{||\rpos||^3}\rpos,
$$
where
$$
\rpos(t) = \x_B(t) - \x_A(t),
$$
and $G$ is the gravitational constant:
$G=6.674\cdot 10^{-11}\hbox{ Nm}^2/\hbox{kg}^2$.
### Scaling
A problem with these equations is that the parameters are very large
($m_A$, $m_B$, $||\rpos||$) or very small ($G$). The rotation time
for binary stars can be very small or very large as well. It is therefore
advantageous to scale the equations.
A natural length scale could be the initial distance between the objects:
$L=||\rpos(0)||$. We write the dimensionless quantities as
$$
\bar\x_A = \frac{\x_A}{L},\quad\bar\x_B = \frac{\x_B}{L},\quad
\bar t = \frac{t}{t_c}\thinspace .
$$
The gravity force is transformed to
$$
\F = \frac{Gm_Am_B}{L^2||\bar\rpos||^3}\bar\rpos,\quad \bar\rpos = \bar\x_B - \bar\x_A,
$$
so the first ODE for $\x_A$ becomes
$$
\frac{d^2 \bar\x_A}{d\bar t^2} =
\frac{Gm_Bt_c^2}{L^3}\frac{\bar\rpos}{||\bar\rpos||^3}\thinspace .
$$
Assuming that quantities with a bar and their derivatives are around unity
in size, it is natural to choose $t_c$ such that the fraction $Gm_Bt_c^2/L^3=1$:
$$
t_c = \sqrt{\frac{L^3}{Gm_B}}\thinspace .
$$
From the other equation for $\x_B$ we get another candidate for $t_c$ with
$m_A$ instead of $m_B$. Which mass we choose plays a role if $m_A\ll m_B$ or
$m_B\ll m_A$. One solution is to use the sum of the masses:
$$
t_c = \sqrt{\frac{L^3}{G(m_A+m_B)}}\thinspace .
$$
A look at [Kepler's laws](https://en.wikipedia.org/wiki/Kepler%27s_laws_of_planetary_motion) of planetary motion shows that the orbital period of a planet around the star equals the $t_c$ above times $2\pi$;
in other words, $t_c^{-1}$ is the angular frequency of the motion.
Our characteristic time $t_c$ is therefore highly relevant.
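Spelled out, the orbital period of a circular orbit is
$$
P = 2\pi t_c = 2\pi\sqrt{\frac{L^3}{G(m_A+m_B)}},
$$
which is Kepler's third law for the two-body problem.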
Introducing the dimensionless number
$$
\alpha = \frac{m_A}{m_B},
$$
we can write the dimensionless ODE as
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
\frac{d^2 \bar\x_A}{d\bar t^2} =
\frac{1}{1+\alpha}\frac{\bar\rpos}{||\bar\rpos||^3},
\label{_auto6} \tag{39}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
\frac{d^2 \bar\x_B}{d\bar t^2} =
-\frac{1}{1+\alpha^{-1}}\frac{\bar\rpos}{||\bar\rpos||^3}\thinspace .
\label{_auto7} \tag{40}
\end{equation}
$$
In the limit $m_A\ll m_B$, i.e., $\alpha\ll 1$,
object B stands still, say $\bar\x_B=0$, and object
A orbits according to
$$
\frac{d^2 \bar\x_A}{d\bar t^2} = -\frac{\bar\x_A}{||\bar \x_A||^3}\thinspace .
$$
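For reference, a right-hand-side function for the scaled two-body system
([39](#_auto6))-([40](#_auto7)), written for a standard first-order ODE
solver, may be sketched as follows (the ordering of the unknowns is a choice
made here):

```python
def two_body_rhs(u, t, alpha):
    """
    Right-hand side for the scaled two-body problem (39)-(40).
    u = [xA, yA, vxA, vyA, xB, yB, vxB, vyB] (all dimensionless).
    Assumes alpha = m_A/m_B > 0.
    """
    xA, yA, vxA, vyA, xB, yB, vxB, vyB = u
    rx, ry = xB - xA, yB - yA
    r3 = (rx**2 + ry**2)**1.5
    cA = 1.0/(1 + alpha)        # factor in (39)
    cB = 1.0/(1 + 1.0/alpha)    # factor in (40)
    return [vxA, vyA,  cA*rx/r3,  cA*ry/r3,
            vxB, vyB, -cB*rx/r3, -cB*ry/r3]
```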
### Solution in a special case: planet orbiting a star
To better see the motion, and that our scaling is reasonable,
we introduce polar coordinates $r$ and $\theta$:
$$
\bar\x_A = r\cos\theta\ii + r\sin\theta\jj,
$$
which means $\bar\x_A$ can be written as $\bar\x_A =r\ir$. Since
$$
\frac{d}{dt}\ir = \dot\theta\ith,\quad \frac{d}{dt}\ith = -\dot\theta\ir,
$$
we have
$$
\frac{d^2 \bar\x_A}{d\bar t^2} =
(\ddot r - r\dot\theta^2)\ir + (r\ddot\theta + 2\dot r\dot\theta)\ith\thinspace .
$$
The equation of motion for mass A is then
$$
\begin{align*}
\ddot r - r\dot\theta^2 &= -\frac{1}{r^2},\\
r\ddot\theta + 2\dot r\dot\theta &= 0\thinspace .
\end{align*}
$$
The special case of circular motion, $r=1$, fulfills the equations, since
the latter equation then gives $\dot\theta =\hbox{const}$ and
the former then gives $\dot\theta = 1$, i.e., the motion is
$r(t)=1$, $\theta(t)=t$, with unit angular frequency and period $2\pi$,
as expected.
## Electric circuits
Although the term "mechanical vibrations" is used in the present
book, we must mention that the same type of equations arise
when modeling electric circuits.
The current $I(t)$ in a
circuit with an inductor with inductance $L$, a capacitor with
capacitance $C$, and overall resistance $R$, is governed by
<!-- Equation labels as ordinary links -->
<div id="_auto8"></div>
$$
\begin{equation}
\ddot I + \frac{R}{L}\dot I + \frac{1}{LC}I = \dot V(t),
\label{_auto8} \tag{41}
\end{equation}
$$
where $V(t)$ is the voltage source powering the circuit.
This equation has the same form as the general model considered in
the section [vib:model2](#vib:model2) if we set $u=I$, $f(u^{\prime})=bu^{\prime}$
and define $b=R/L$, $s(u) = L^{-1}C^{-1}u$, and $F(t)=\dot V(t)$.
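For instance, assuming the `solver` interface from `vib.py` used in the
exercises below, a circuit simulation could be set up along these lines
(the circuit values and the voltage source are chosen here just for
illustration):

```python
import numpy as np
from vib import solver   # centered-difference solver from vib.py

R, Lind, C = 1.0, 1.0, 0.25       # illustrative R, L, C values
dVdt = lambda t: 2*np.cos(2*t)    # F(t) = V'(t) for V(t) = sin(2*t)

# u plays the role of the current I(t); the arguments I and V below
# are the initial current and its time derivative
u, t = solver(I=0, V=0, m=1, b=R/Lind,
              s=lambda u: u/(Lind*C),
              F=dVdt, dt=0.01, T=20, damping='linear')
```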
# Exercises
<!-- --- begin exercise --- -->
## Exercise 1: Simulate resonance
<div id="vib:exer:resonance"></div>
We consider the scaled ODE model
([4](#vib:app:mass_gen:scaled)) from the section [General mechanical vibrating system](#vib:app:mass_gen).
After scaling, the amplitude of $u$ will have a size about unity
as time grows and the effect of the initial conditions die out due
to damping. However, as $\gamma\rightarrow 1$, the amplitude of $u$
increases, especially if $\beta$ is small. This effect is called
*resonance*. The purpose of this exercise is to explore resonance.
**a)**
Figure out how the `solver` function in `vib.py` can be called
for the scaled ODE ([4](#vib:app:mass_gen:scaled)).
<!-- --- begin solution of exercise --- -->
**Solution.**
Comparing the scaled ODE ([4](#vib:app:mass_gen:scaled))
with the ODE ([3](#vib:app:mass_gen:equ)) with dimensions, we
realize that the parameters in the latter must be set as
* $m=1$
* $f(\dot u) = 2\beta |\dot u|\dot u$
* $s(u)=u$
* $F(t)=\sin(\gamma t)$
* $I=Ik/A$
* $V=\sqrt{mk}V/A$
The expected period is $2\pi$, so simulating for $N$ periods means
$T=2\pi N$. Having $m$ time steps per period means $\Delta t = 2\pi/m$.
Suppose we just choose $I=1$ and $V=0$. Simulating for 20 periods with
60 time steps per period, implies the following
`solver` call to run the scaled model:
```python
u, t = solver(I=1, V=0, m=1, b=2*beta, s=lambda u: u,
              F=lambda t: sin(gamma*t), dt=2*pi/60,
              T=2*pi*20, damping='quadratic')
```
<!-- --- end solution of exercise --- -->
**b)**
Run $\gamma =5, 1.5, 1.1, 1$ for $\beta=0.005, 0.05, 0.2$.
For each $\beta$ value, present an image with plots of $u(t)$ for
the four $\gamma$ values.
<!-- --- begin solution of exercise --- -->
**Solution.**
An appropriate program is
```python
import os
from vib import solver, visualize, plt
from math import pi, sin
import numpy as np

beta_values = [0.005, 0.05, 0.2]
#beta_values = [0.00005]   # extra experiment: very small damping
gamma_values = [5, 1.5, 1.1, 1]

for i, beta in enumerate(beta_values):
    for gamma in gamma_values:
        u, t = solver(I=1, V=0, m=1, b=2*beta, s=lambda u: u,
                      F=lambda t: sin(gamma*t), dt=2*pi/60,
                      T=2*pi*20, damping='quadratic')
        visualize(u, t, title='gamma=%g' % gamma,
                  filename='tmp_%s' % gamma)
        print gamma, 'max u amplitude:', np.abs(u).max()
    for ext in 'png', 'pdf':
        cmd = 'doconce combine_images '
        cmd += ' '.join(['tmp_%s.' % gamma + ext
                         for gamma in gamma_values])
        cmd += ' resonance%d.' % (i+1) + ext
        os.system(cmd)
    raw_input()
```
For $\beta = 0.2$ we see that the amplitude is not far from unity:
<!-- dom:FIGURE: [fig-vib/resonance3.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
For $\beta =0.05$ we see that as $\gamma\rightarrow 1$, the amplitude grows:
<!-- dom:FIGURE: [fig-vib/resonance2.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
Finally, small damping ($\beta = 0.005$) increases the amplitude significantly (by a factor of 10) for $\gamma=1$:
<!-- dom:FIGURE: [fig-vib/resonance1.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
For a very small $\beta=0.00005$, the amplitude grows linearly up to
about 60 for $\bar t\in [0,120]$.
<!-- --- end solution of exercise --- -->
Filename: `resonance`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 2: Simulate oscillations of a sliding box
<div id="vib:exer:sliding_box"></div>
Consider a sliding box on a flat surface as modeled in the section [A sliding mass attached to a spring](#vib:app:mass_sliding). As spring force we choose the nonlinear
formula
$$
s(u) = \frac{k}{\alpha}\tanh(\alpha u) = ku - \frac{1}{3}\alpha^2 ku^3 + \frac{2}{15}\alpha^4 k u^5 + \Oof{u^7}\thinspace .
$$
**a)**
Plot $g(u)=\alpha^{-1}\tanh(\alpha u)$ for various values of $\alpha$.
Assume $u\in [-1,1]$.
<!-- --- begin solution of exercise --- -->
**Solution.**
Here is a function that does the plotting:
```python
%matplotlib inline
import scitools.std as plt
import numpy as np

def plot_spring():
    alpha_values = [1, 2, 3, 10]
    s = lambda u: 1.0/alpha*np.tanh(alpha*u)
    u = np.linspace(-1, 1, 1001)
    for alpha in alpha_values:
        print alpha, s(u)
        plt.plot(u, s(u))
        plt.hold('on')
    plt.legend([r'$\alpha=%g$' % alpha for alpha in alpha_values])
    plt.xlabel('u'); plt.ylabel('Spring response $s(u)$')
    plt.savefig('tmp_s.png'); plt.savefig('tmp_s.pdf')
```
<!-- dom:FIGURE: [fig-vib/tanh_spring.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
**b)**
Scale the equations using $I$ as scale for $u$ and $\sqrt{m/k}$ as
time scale.
<!-- --- begin solution of exercise --- -->
**Solution.**
Inserting the dimensionless dependent and independent variables,
$$
\bar u = \frac{u}{I},\quad \bar t = \frac{t}{\sqrt{m/k}},
$$
in the problem
$$
m\ddot u + \mu mg\hbox{sign}(\dot u) + s(u) = 0,\quad u(0)=I,\ \dot u(0)=V,
$$
gives
$$
\frac{d^2\bar u}{d\bar t^2} + \frac{\mu mg}{kI}\hbox{sign}\left(
\frac{d\bar u}{d\bar t}\right) + \frac{1}{\alpha I}\tanh(\alpha I\bar u)
= 0,\quad \bar u(0)=1,\ \frac{d\bar u}{d\bar t}(0)=\frac{V\sqrt{mk}}{kI}\thinspace .
$$
We can now identify three dimensionless parameters,
$$
\beta = \frac{\mu mg}{kI},\quad
\gamma = \alpha I,\quad \delta = \frac{V\sqrt{mk}}{kI}\thinspace .
$$
The scaled problem can then be written
$$
\frac{d^2\bar u}{d\bar t^2} + \beta\hbox{sign}\left(
\frac{d\bar u}{d\bar t}\right) + \gamma^{-1}\tanh(\gamma \bar u)
= 0,\quad \bar u(0)=1,\ \frac{d\bar u}{d\bar t}(0)=\delta\thinspace .
$$
The initial set of 7 parameters $(\mu, m, g, k, \alpha, I, V)$ are
reduced to 3 dimensionless combinations.
<!-- --- end solution of exercise --- -->
**c)**
Implement the scaled model in b). Run it for some values of
the dimensionless parameters.
<!-- --- begin solution of exercise --- -->
**Solution.**
We use Odespy to solve the ODE, which requires rewriting the ODE as a
system of two first-order ODEs:
$$
\begin{align*}
v' &= - \beta\hbox{sign}(v) - \gamma^{-1}\tanh(\gamma \bar u),\\
u' &= v,
\end{align*}
$$
with initial conditions $v(0)=\delta$ and $u(0)=1$. Here, $u(t)$ corresponds
to the previous $\bar u(\bar t)$, while $v(t)$ corresponds to
$d\bar u/d\bar t (\bar t)$. The code can be like this:
```python
import numpy as np

def simulate(beta, gamma, delta=0,
             num_periods=8, time_steps_per_period=60):
    # Use oscillations without friction to set dt and T
    P = 2*np.pi
    dt = P/time_steps_per_period
    T = num_periods*P
    t = np.linspace(0, T, time_steps_per_period*num_periods+1)

    import odespy

    def f(u, t, beta, gamma):
        # Note the sequence of unknowns: v, u (v=du/dt)
        v, u = u
        return [-beta*np.sign(v) - 1.0/gamma*np.tanh(gamma*u), v]
        #return [-beta*np.sign(v) - u, v]

    solver = odespy.RK4(f, f_args=(beta, gamma))
    solver.set_initial_condition([delta, 1])  # sequence must match f
    uv, t = solver.solve(t)
    u = uv[:,1]  # recall sequence in f: v, u
    v = uv[:,0]
    return u, t
```
We simulate for an almost linear spring in the regime of $\bar u$ (recall
that $\bar u\in [0,1]$ since $u$ is scaled with $I$), which corresponds
to $\alpha = 1$ in a) and therefore $\gamma =1$. Then we can try a
spring whose force quickly flattens out like $\alpha=5$ in a), which
corresponds to $\gamma = 5$ in the scaled model. A third option is
to have a truly linear spring, e.g., $\gamma =0.1$. After some
experimentation we realize that $\beta=0,0.05, 0.1$ are relevant values.
<!-- dom:FIGURE: [fig-vib/sliding_box_gamma0_1.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig-vib/sliding_box_gamma1.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig-vib/sliding_box_gamma5.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
Filename: `sliding_box`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 3: Simulate a bouncing ball
<div id="vib:exer:bouncing:ball"></div>
The section [Bouncing ball](#vib:app:bouncing_ball) presents a model for a bouncing
ball.
Choose one of the two ODE formulation, ([34](#vib:app:bouncing:ball:h2eq)) or
([35](#vib:app:bouncing:ball:veq))-([36](#vib:app:bouncing:ball:heq)),
and simulate the motion of a bouncing ball. Plot $h(t)$. Think about how to
plot $v(t)$.
<!-- --- begin hint in exercise --- -->
**Hint.**
A naive implementation may get stuck in repeated impacts for large time
step sizes. To avoid this situation, one can introduce a state
variable that holds the mode of the motion: free fall, impact, or rest.
Two consecutive impacts imply that the motion has stopped.
<!-- --- end hint in exercise --- -->
<!-- --- begin solution of exercise --- -->
**Solution.**
A tailored `solver` function and some plotting statements go like
```python
import numpy as np

def solver(H, C_R, dt, T, eps_v=0.01, eps_h=0.01):
    """
    Simulate bouncing ball until it comes to rest. Time step dt.
    h(0)=H (initial height). T: maximum simulation time.
    Method: Euler-Cromer.
    """
    dt = float(dt)
    Nt = int(round(T/dt))
    h = np.zeros(Nt+1)
    v = np.zeros(Nt+1)
    t = np.linspace(0, Nt*dt, Nt+1)
    g = 9.81
    v[0] = 0
    h[0] = H
    mode = 'free fall'
    for n in range(Nt):
        v[n+1] = v[n] - dt*g
        h[n+1] = h[n] + dt*v[n+1]

        if h[n+1] < eps_h:
            #if abs(v[n+1]) > eps_v:   # handles large dt, but is wrong
            if v[n+1] < -eps_v:
                # Impact
                v[n+1] = -C_R*v[n+1]
                h[n+1] = 0
                if mode == 'impact':
                    # impact twice
                    return h[:n+2], v[:n+2], t[:n+2]
                mode = 'impact'
            elif abs(v[n+1]) < eps_v:
                mode = 'rest'
                v[n+1] = 0
                h[n+1] = 0
                return h[:n+2], v[:n+2], t[:n+2]
            else:
                mode = 'free fall'
        else:
            mode = 'free fall'
        print '%4d v=%8.5f h=%8.5f %s' % (n, v[n+1], h[n+1], mode)
    raise ValueError('T=%g is too short simulation time' % T)

import matplotlib.pyplot as plt
h, v, t = solver(
    H=1, C_R=0.8, T=100, dt=0.0001, eps_v=0.01, eps_h=0.01)
plt.plot(t, h)
plt.legend('h')
plt.savefig('tmp_h.png'); plt.savefig('tmp_h.pdf')
plt.figure()
plt.plot(t, v)
plt.legend('v')
plt.savefig('tmp_v.png'); plt.savefig('tmp_v.pdf')
plt.show()
```
<!-- dom:FIGURE: [fig-vib/bouncing_ball.png, width=800 frac=1] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
Filename: `bouncing_ball`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 4: Simulate a simple pendulum
<div id="vib:exer:pendulum_simple"></div>
Simulation of simple pendulum can be carried out by using
the mathematical model derived in the section [Motion of a pendulum](#vib:app:pendulum)
and calling up functionality in the [`vib.py`](${src_vib}/vib.py)
file (i.e., solve the second-order ODE by centered finite differences).
**a)**
Scale the model. Set up the dimensionless governing equation for $\theta$
and expressions for dimensionless drag and wire forces.
<!-- --- begin solution of exercise --- -->
**Solution.**
The angle is measured in radians so we may think of this quantity as
dimensionless, or we may scale it by the initial condition to obtain
a primary unknown that lies in $[-1,1]$. We go for the former strategy here.
Dimensionless time $\bar t$ is introduced as $t/t_c$ for some suitable
time scale $t_c$.
<!-- We may also introduce a force scale, called $F_c$, for scaling the forces. -->
Inserted in the two governing equations
([8](#vib:app:pendulum:thetaeq)) and ([6](#vib:app:pendulum:ir)),
for the
two unknowns $\theta$ and $S$, respectively, we achieve
$$
\begin{align*}
-S + mg\cos\theta &= -\frac{m}{t_c^2}L\left(\frac{d\theta}{d\bar t}\right)^2,\\
\frac{1}{t_c^2}m\frac{d^2\theta}{d\bar t^2} +
\frac{1}{2}C_D\varrho AL \frac{1}{t_c^2}\left\vert
\frac{d\theta}{d\bar t}\right\vert
\frac{d\theta}{d\bar t}
+ \frac{mg}{L}\sin\theta &= 0\thinspace .
\end{align*}
$$
We multiply the latter equation by $t_c^2/m$ to make each term
dimensionless:
$$
\frac{d^2\theta}{d\bar t^2} +
\frac{1}{2m}C_D\varrho AL \left\vert
\frac{d\theta}{d\bar t}\right\vert
\frac{d\theta}{d\bar t}
+ \frac{t_c^2g}{L}\sin\theta = 0\thinspace .
$$
Assuming that the acceleration term and the
gravity term are the dominating terms, these should balance, so
$t_c^2g/L=1$, giving $t_c = \sqrt{L/g}$. With $A=\pi R^2$ we get the
dimensionless ODEs
<!-- Equation labels as ordinary links -->
<div id="vib:exer:pendulum_simple:eq:ith:s"></div>
$$
\begin{equation}
\frac{d^2\theta}{d\bar t^2} +
\alpha\left\vert\frac{d\theta}{d\bar t}\right\vert\frac{d\theta}{d\bar t} +
\sin\theta = 0,
\label{vib:exer:pendulum_simple:eq:ith:s} \tag{42}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:exer:pendulum_simple:eq:ir:s"></div>
$$
\begin{equation}
\frac{S}{mg} = \left(\frac{d\theta}{d\bar t}\right)^2 + \cos\theta,
\label{vib:exer:pendulum_simple:eq:ir:s} \tag{43}
\end{equation}
$$
where $\alpha$ is a dimensionless drag coefficient
$$
\alpha = \frac{C_D\varrho\pi R^2L}{2m}\thinspace .
$$
Note that in ([43](#vib:exer:pendulum_simple:eq:ir:s)) we have divided by
$mg$, which is in fact a force scale, making the gravity force unity
and also $S/mg=1$ in the equilibrium position $\theta=0$. We may
introduce
$$
\bar S = S/mg
$$
as a dimensionless drag force.
The parameter $\alpha$ is about
the ratio of the drag force and the gravity force:
$$
\frac{|\frac{1}{2} C_D\varrho \pi R^2 |v|v|}{|mg|}\sim
\frac{C_D\varrho \pi R^2 L^2 t_c^{-2}}{mg}
\left|\frac{d\bar\theta}{d\bar t}\right|\frac{d\bar\theta}{d\bar t}
\sim \frac{C_D\varrho \pi R^2 L}{2m}\Theta^2 = \alpha \Theta^2\thinspace .
$$
(We have that $\theta(t)/\Theta$ is in $[-1,1]$, so we expect
$\Theta^{-1}d\bar\theta/d\bar t$ to be around unity as well. Here,
$\Theta=\theta(0)$.)
Let us introduce $\omega$ for the dimensionless angular velocity,
$$
\omega = \frac{d\theta}{d\bar t}\thinspace .
$$
When $\theta$ is computed, the dimensionless wire and drag forces
are computed by
$$
\begin{align*}
\bar S &= \omega^2 + \cos\theta,\\
\bar D &= -\alpha |\omega|\omega\thinspace .
\end{align*}
$$
<!-- --- end solution of exercise --- -->
**b)**
Write a function for computing
$\theta$ and the dimensionless drag force and the force in the wire,
using the `solver` function in
the `vib.py` file. Plot these three quantities
below each other (in subplots) so the graphs can be compared.
Run two cases, first one in the limit of $\Theta$ small and
no drag, and then a second one with $\Theta=40$ degrees and $\alpha=0.8$.
<!-- --- begin solution of exercise --- -->
**Solution.**
The first step is to realize how to utilize the `solver` function for
our dimensionless model. Introducing `Theta` for $\Theta$, the
arguments to `solver` must be set as
```python
I = Theta
V = 0
m = 1
b = alpha
s = lambda u: sin(u)
F = lambda t: 0
damping = 'quadratic'
```
After computing $\theta$, we need to find $\omega$ by finite differences:
$$
\omega^n = \frac{\theta^{n+1}-\theta^{n-1}}{2\Delta t},
\ n=1,\ldots,N_t-1,\quad \omega^0=\frac{\theta^1-\theta^0}{\Delta t},
\ \omega^{N_t}=\frac{\theta^{N_t}-\theta^{N_t-1}}{\Delta t}\thinspace .
$$
The duration of the simulation and the time step can be computed on
basis of the analytical insight we have for small $\theta$
($\theta\approx \Theta\cos(t)$). A complete function then reads
```python
def simulate(Theta, alpha, num_periods=10):
    # Dimensionless model requires the following parameters:
    from math import sin, pi
    I = Theta
    V = 0
    m = 1
    b = alpha
    s = lambda u: sin(u)
    F = lambda t: 0
    damping = 'quadratic'

    # Estimate T and dt from the small angle solution
    P = 2*pi   # One period (theta small, no drag)
    dt = P/40  # 40 intervals per period
    T = num_periods*P

    theta, t = solver(I, V, m, b, s, F, dt, T, damping)

    omega = np.zeros(theta.size)
    omega[1:-1] = (theta[2:] - theta[:-2])/(2*dt)
    omega[0] = (theta[1] - theta[0])/dt
    omega[-1] = (theta[-1] - theta[-2])/dt

    S = omega**2 + np.cos(theta)
    D = alpha*np.abs(omega)*omega
    return t, theta, S, D
```
Assuming imports like
```python
import numpy as np
import matplotlib.pyplot as plt
```
the following function visualizes $\theta$, $\bar S$, and $\bar D$
with three subplots:
```python
def visualize(t, theta, S, D, filename='tmp'):
    f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=False)
    ax1.plot(t, theta)
    ax1.set_title(r'$\theta(t)$')
    ax2.plot(t, S)
    ax2.set_title(r'Dimensionless force in the wire')
    ax3.plot(t, D)
    ax3.set_title(r'Dimensionless drag force')
    plt.savefig('%s.png' % filename)
    plt.savefig('%s.pdf' % filename)
```
A suitable main program is
```python
import math
# Rough verification that small theta and no drag gives cos(t)
Theta = 1.0
alpha = 0
t, theta, S, D = simulate(Theta, alpha, num_periods=4)
# Scale theta by Theta (easier to compare with cos(t))
theta /= Theta
visualize(t, theta, S, D, filename='pendulum_verify')
Theta = math.radians(40)
alpha = 0.8
t, theta, S, D = simulate(Theta, alpha)
visualize(t, theta, S, D, filename='pendulum_alpha0.8_Theta40')
plt.show()
```
The "verification" case looks good (at least when the `solver` function
has been thoroughly verified in other circumstances):
<!-- dom:FIGURE: [fig-vib/pendulum_verify.png, width=500 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
The "real case" shows how quickly the drag force is reduced, even when
we set $\alpha$ to a significant value (0.8):
<!-- dom:FIGURE: [fig-vib/pendulum_alpha08_Theta40.png, width=500 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
Filename: `simple_pendulum`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 5: Simulate an elastic pendulum
<div id="vib:exer:pendulum_elastic"></div>
The section [Motion of an elastic pendulum](#vib:app:pendulum_elastic) describes a model for an elastic
pendulum, resulting in a system of two ODEs. The purpose of this
exercise is to implement the scaled model, test the software, and
generalize the model.
**a)**
Write a function `simulate`
that can simulate an elastic pendulum using the scaled model.
The function should have the following arguments:
```python
def simulate(
    beta=0.9,                 # dimensionless parameter
    Theta=30,                 # initial angle in degrees
    epsilon=0,                # initial stretch of wire
    num_periods=6,            # simulate for num_periods
    time_steps_per_period=60, # time step resolution
    plot=True,                # make plots or not
    ):
```
To set the total simulation time and the time step, we
use our knowledge of the scaled, classical, non-elastic pendulum:
$u^{\prime\prime} + u = 0$, with solution
$u = \Theta\cos \bar t$.
The period of these oscillations is $P=2\pi$
and the frequency is unity. The time
for simulation is taken as `num_periods` times $P$. The time step
is set as $P$ divided by `time_steps_per_period`.
The `simulate` function should return the arrays of
$x$, $y$, $\theta$, and $t$, where $\theta = \tan^{-1}(x/(1-y))$ is
the angular displacement of the elastic pendulum corresponding to the
position $(x,y)$.
If `plot` is `True`, make a plot of $\bar y(\bar t)$
versus $\bar x(\bar t)$, i.e., the physical motion
of the mass at $(\bar x,\bar y)$. Use equal aspect ratio on the axes
such that we get a physically correct picture of the motion. Also
make a plot of $\theta(\bar t)$, where $\theta$ is measured in degrees.
If $\Theta < 10$ degrees, add a plot that compares the solutions of
the scaled, classical, non-elastic pendulum and the elastic pendulum
($\theta(t)$).
Although the mathematics here employs a bar over scaled quantities, the
code should feature plain names `x` for $\bar x$, `y` for $\bar y$, and
`t` for $\bar t$ (rather than `x_bar`, etc.). These variable names make
the code easier to read and compare with the mathematics.
<!-- --- begin hint in exercise --- -->
**Hint 1.**
Equal aspect ratio is set by `plt.gca().set_aspect('equal')` in
Matplotlib (`import matplotlib.pyplot as plt`)
and in SciTools by the command
`plt.plot(..., daspect=[1,1,1], daspectmode='equal')`
(provided you have done `import scitools.std as plt`).
<!-- --- end hint in exercise --- -->
<!-- --- begin hint in exercise --- -->
**Hint 2.**
If you want to use Odespy to solve the equations, order the ODEs
like $\dot{\bar x}, \bar x, \dot{\bar y},\bar y$ such that
`odespy.EulerCromer` can be applied.
<!-- --- end hint in exercise --- -->
<!-- --- begin solution of exercise --- -->
**Solution.**
Here is a suggested `simulate` function:
```python
import odespy
import numpy as np
import scitools.std as plt

def simulate(
    beta=0.9,                 # dimensionless parameter
    Theta=30,                 # initial angle in degrees
    epsilon=0,                # initial stretch of wire
    num_periods=6,            # simulate for num_periods
    time_steps_per_period=60, # time step resolution
    plot=True,                # make plots or not
    ):
    from math import sin, cos, pi
    Theta = Theta*np.pi/180   # convert to radians
    # Initial position and velocity
    # (we order the equations such that Euler-Cromer in odespy
    # can be used, i.e., vx, x, vy, y)
    ic = [0,                              # x'=vx
          (1 + epsilon)*sin(Theta),       # x
          0,                              # y'=vy
          1 - (1 + epsilon)*cos(Theta),   # y
          ]

    def f(u, t, beta):
        vx, x, vy, y = u
        L = np.sqrt(x**2 + (y-1)**2)
        h = beta/(1-beta)*(1 - beta/L)    # help factor
        return [-h*x, vx, -h*(y-1) - beta, vy]

    # Non-elastic pendulum (scaled similarly in the limit beta=1)
    # solution Theta*cos(t)
    P = 2*pi
    dt = P/time_steps_per_period
    T = num_periods*P
    omega = 2*pi/P
    time_points = np.linspace(
        0, T, num_periods*time_steps_per_period+1)

    solver = odespy.EulerCromer(f, f_args=(beta,))
    solver.set_initial_condition(ic)
    u, t = solver.solve(time_points)
    x = u[:,1]
    y = u[:,3]
    theta = np.arctan(x/(1-y))

    if plot:
        plt.figure()
        plt.plot(x, y, 'b-', title='Pendulum motion',
                 daspect=[1,1,1], daspectmode='equal',
                 axis=[x.min(), x.max(), 1.3*y.min(), 1])
        plt.savefig('tmp_xy.png')
        plt.savefig('tmp_xy.pdf')
        # Plot theta in degrees
        plt.figure()
        plt.plot(t, theta*180/np.pi, 'b-',
                 title='Angular displacement in degrees')
        plt.savefig('tmp_theta.png')
        plt.savefig('tmp_theta.pdf')
        if abs(Theta) < 10*pi/180:
            # Compare theta and theta_e for small angles (<10 degrees)
            theta_e = Theta*np.cos(omega*t)  # non-elastic scaled sol.
            plt.figure()
            plt.plot(t, theta, t, theta_e,
                     legend=['theta elastic', 'theta non-elastic'],
                     title='Elastic vs non-elastic pendulum, '\
                           'beta=%g' % beta)
            plt.savefig('tmp_compare.png')
            plt.savefig('tmp_compare.pdf')
    # Plot y vs x (the real physical motion)
    return x, y, theta, t
```
<!-- --- end solution of exercise --- -->
**b)**
Write a test function for testing that $\Theta=0$ and $\epsilon=0$
gives $x=y=0$ for all times.
<!-- --- begin solution of exercise --- -->
**Solution.**
Here is the code:
```python
def test_equilibrium():
    """Test that starting from rest makes x=y=theta=0."""
    x, y, theta, t = simulate(
        beta=0.9, Theta=0, epsilon=0,
        num_periods=6, time_steps_per_period=10, plot=False)
    tol = 1E-14
    assert np.abs(x.max()) < tol
    assert np.abs(y.max()) < tol
    assert np.abs(theta.max()) < tol
```
<!-- --- end solution of exercise --- -->
**c)**
Write another test function for checking that the pure vertical
motion of the elastic pendulum is correct.
Start with simplifying the ODEs for pure vertical motion and show that
$\bar y(\bar t)$ fulfills a vibration equation with
frequency $\sqrt{\beta/(1-\beta)}$. Set up the exact solution.
Write a test function that
uses this special case to verify the `simulate` function. There will
be numerical approximation errors present in the results from
`simulate` so you have to believe in correct results and set a
(low) tolerance that corresponds to the computed maximum error.
Use a small $\Delta t$ to obtain a small numerical approximation error.
<!-- --- begin solution of exercise --- -->
**Solution.**
For purely vertical motion, the ODEs reduce to $\ddot x = 0$ and
$$
\frac{d^2\bar y}{d\bar t^2} = -\frac{\beta}{1-\beta}(1-\beta\frac{1}{\sqrt{(\bar y - 1)^2}})(\bar y-1) - \beta = -\frac{\beta}{1-\beta}(\bar y-1 + \beta) - \beta\thinspace .
$$
We have here used that $(\bar y -1)/\sqrt{(\bar y -1)^2}=-1$ since
$\bar y$ cannot exceed 1 (the pendulum's wire is fixed at the scaled
point $(0,1)$). In fact, $\bar y$ will be around zero.
(As a consistency check, we realize that in equilibrium, $\ddot{\bar y} =0$,
and multiplying by $(1-\beta)/\beta$ leads to the expected $\bar y=0$.)
Further calculations easily lead to
$$
\frac{d^2\bar y}{d\bar t^2} = -\frac{\beta}{1-\beta}\bar y = -\omega^2\bar y,
$$
where we have introduced the frequency
$\omega = \sqrt{\beta/(1-\beta)}$.
Solving this standard ODE, with an initial stretching $\bar y(0)=\epsilon$
and no velocity, results in
$$
\bar y(\bar t) = \epsilon\cos(\omega\bar t)\thinspace .
$$
Note that the oscillations we describe here are very different from
the oscillations used to set the period and time step in function
`simulate`. The latter type of oscillations are due to gravity when
a classical, non-elastic pendulum oscillates back and forth, while
$\bar y(\bar t)$ above refers to vertical *elastic* oscillations in the wire
around the equilibrium point in the gravity field. The angular frequency
of the vertical oscillations are given by $\omega$ and the corresponding
period is $\hat P = 2\pi/\omega$. Suppose we want to simulate for
$T=N\hat P = N2\pi/\omega$ and use $n$ time steps per period,
$\Delta\bar t = \hat P/n$. The `simulate` function operates with
a simulation time of `num_periods` times $2\pi$. This means that we must set
`num_periods=N/omega` if we want to simulate to time $T=N\hat P$.
The parameter `time_steps_per_period` must be set to $\omega n$
since `simulate` has $\Delta t$ as $2\pi$ divided by `time_steps_per_period`
and we want $\Delta t = 2\pi\omega^{-1}n^{-1}$.
The corresponding test function can be written as follows.
```python
def test_vertical_motion():
    beta = 0.9
    omega = np.sqrt(beta/(1-beta))
    # Find num_periods. Recall that P=2*pi for scaled pendulum
    # oscillations, while here we don't have gravity driven
    # oscillations, but elastic oscillations with frequency omega.
    period = 2*np.pi/omega
    # We want T = N*period
    N = 5
    # simulate function has T = 2*pi*num_periods
    num_periods = N/omega
    n = 600
    time_steps_per_period = omega*n

    y_exact = lambda t: -0.1*np.cos(omega*t)  # y(0) = -epsilon

    x, y, theta, t = simulate(
        beta=beta, Theta=0, epsilon=0.1,
        num_periods=num_periods,
        time_steps_per_period=time_steps_per_period,
        plot=False)

    tol = 0.00055  # ok tolerance for the above resolution
    # No motion in x direction is expected
    assert np.abs(x.max()) < tol
    # Check motion in y direction
    y_e = y_exact(t)
    diff = np.abs(y_e - y).max()
    if diff > tol:  # plot
        plt.plot(t, y, t, y_e, legend=['y', 'exact'])
        raw_input('Error in test_vertical_motion; type CR:')
    assert diff < tol, 'diff=%g' % diff
```
<!-- --- end solution of exercise --- -->
**d)**
Make a function `demo(beta, Theta)` for simulating an elastic pendulum with a
given $\beta$ parameter and initial angle $\Theta$. Use 600 time steps
per period to get very accurate results, and simulate for 3 periods.
<!-- --- begin solution of exercise --- -->
**Solution.**
The `demo` function is just
```python
def demo(beta=0.999, Theta=40, num_periods=3):
    x, y, theta, t = simulate(
        beta=beta, Theta=Theta, epsilon=0,
        num_periods=num_periods, time_steps_per_period=600,
        plot=True)
```
Below are plots corresponding to $\beta = 0.999$ (3 periods) and
$\beta = 0.93$ (one period):
<!-- dom:FIGURE: [fig-vib/elastic_pendulum_xy.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig-vib/elastic_pendulum_theta.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig-vib/elastic_pendulum_xy2.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- dom:FIGURE: [fig-vib/elastic_pendulum_theta2.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
<!-- --- end solution of exercise --- -->
Filename: `elastic_pendulum`.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 6: Simulate an elastic pendulum with air resistance
<div id="vib:exer:pendulum_elastic_drag"></div>
This is a continuation [Exercise 5: Simulate an elastic pendulum](#vib:exer:pendulum_elastic).
Air resistance on the body with mass $m$ can be modeled by the
force $-\frac{1}{2}\varrho C_D A|\v|\v$,
where $C_D$ is a drag coefficient (0.2 for a sphere), $\varrho$
is the density of air (1.2 $\hbox{kg }\,{\hbox{m}}^{-3}$), $A$ is the
cross section area ($A=\pi R^2$ for a sphere, where $R$ is the radius),
and $\v$ is the velocity of the body.
Include air resistance in the original model, scale the model,
write a function `simulate_drag` that is a copy of the `simulate`
function from [Exercise 5: Simulate an elastic pendulum](#vib:exer:pendulum_elastic), but with the
new ODEs included, and show plots of how air resistance
influences the motion.
<!-- --- begin solution of exercise --- -->
**Solution.**
We start with the model
([18](#vib:app:pendulum_elastic:x))-([24](#vib:app:pendulum_elastic:vy0)).
Since $\v = \dot x\ii + \dot y\jj$, the air resistance term
can be written
$$
-q(\dot x\ii + \dot y\jj),\quad q=\frac{1}{2}\varrho C_D A\sqrt{\dot x^2 + \dot y^2}\thinspace .
$$
Note that for positive velocities, the pendulum is moving to the right
and the air resistance works against the motion, i.e., in the direction of
$-\v = -\dot x\ii - \dot y\jj$.
We can easily include the terms in the ODEs:
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:x"></div>
$$
\begin{equation}
\ddot x = -\frac{q}{m}\dot x -\frac{k}{m}\left(1 -\frac{L_0}{L}\right)(x-x_0),
\label{vib:app:pendulum_elastic_drag:x} \tag{44}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:y"></div>
$$
\begin{equation}
\ddot y = -\frac{q}{m}\dot y -\frac{k}{m}\left(1 -\frac{L_0}{L}\right)(y-y_0) - g,
\label{vib:app:pendulum_elastic_drag:y} \tag{45}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:L"></div>
$$
\begin{equation}
L = \sqrt{(x-x_0)^2 + (y-y_0)^2},
\label{vib:app:pendulum_elastic_drag:L} \tag{46}
\end{equation}
$$
The initial conditions are not affected.
The next step is to scale the model. We use the same scales as in
[Exercise 5: Simulate an elastic pendulum](#vib:exer:pendulum_elastic), introduce $\beta$, and $A=\pi R^2$
to simplify the $-q\dot x/m$ term to
$$
\frac{L_0}{2m}\varrho C_D\pi R^2\beta^{-1}
\sqrt{\left(\frac{d\bar x}{d\bar t}\right)^2 +
\left(\frac{d\bar y}{d\bar t}\right)^2}
= \gamma \beta^{-1}
\sqrt{\left(\frac{d\bar x}{d\bar t}\right)^2 +
\left(\frac{d\bar y}{d\bar t}\right)^2},
$$
where $\gamma$ is a second dimensionless parameter:
$$
\gamma = \frac{L_0}{2m}\varrho C_D\pi R^2\thinspace .
$$
The final set of scaled equations is then
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:x:s"></div>
$$
\begin{equation}
\frac{d^2\bar x}{d\bar t^2} = -\gamma\beta^{-1}
\sqrt{\left(\frac{d\bar x}{d\bar t}\right)^2 +
\left(\frac{d\bar y}{d\bar t}\right)^2}\frac{d\bar x}{d\bar t}
-\frac{\beta}{1-\beta}\left(1- \frac{\beta}{\bar L}\right)\bar x,
\label{vib:app:pendulum_elastic_drag:x:s} \tag{48}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:y:s"></div>
$$
\begin{equation}
\frac{d^2\bar y}{d\bar t^2} =
-\gamma\beta^{-1}
\sqrt{\left(\frac{d\bar x}{d\bar t}\right)^2 +
\left(\frac{d\bar y}{d\bar t}\right)^2}\frac{d\bar y}{d\bar t}
-\frac{\beta}{1-\beta}\left(1- \frac{\beta}{\bar L}\right)(\bar y-1)
-\beta,
\label{vib:app:pendulum_elastic_drag:y:s} \tag{49}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:L:s"></div>
$$
\begin{equation}
\bar L = \sqrt{\bar x^2 + (\bar y-1)^2},
\label{vib:app:pendulum_elastic_drag:L:s} \tag{50}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:x0:s"></div>
$$
\begin{equation}
\bar x(0) = (1+\epsilon)\sin\Theta,
\label{vib:app:pendulum_elastic_drag:x0:s} \tag{51}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:vx0:s"></div>
$$
\begin{equation}
\frac{d\bar x}{d\bar t}(0) = 0,
\label{vib:app:pendulum_elastic_drag:vx0:s} \tag{52}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:y0:s"></div>
$$
\begin{equation}
\bar y(0) = 1 - (1+\epsilon)\cos\Theta,
\label{vib:app:pendulum_elastic_drag:y0:s} \tag{53}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="vib:app:pendulum_elastic_drag:vy0:s"></div>
$$
\begin{equation}
\frac{d\bar y}{d\bar t}(0) = 0,
\label{vib:app:pendulum_elastic_drag:vy0:s} \tag{54}
\end{equation}
$$
The new `simulate_drag` function is implemented below.
```python
def simulate_drag(
    beta=0.9,                 # dimensionless elasticity parameter
    gamma=0,                  # dimensionless drag parameter
    Theta=30,                 # initial angle in degrees
    epsilon=0,                # initial stretch of wire
    num_periods=6,            # simulate for num_periods
    time_steps_per_period=60, # time step resolution
    plot=True,                # make plots or not
    ):
    from math import sin, cos, pi
    Theta = Theta*np.pi/180   # convert to radians
    # Initial position and velocity
    # (we order the equations such that Euler-Cromer in odespy
    # can be used, i.e., vx, x, vy, y)
    ic = [0,                              # x'=vx
          (1 + epsilon)*sin(Theta),       # x
          0,                              # y'=vy
          1 - (1 + epsilon)*cos(Theta),   # y
          ]

    def f(u, t, beta, gamma):
        vx, x, vy, y = u
        L = np.sqrt(x**2 + (y-1)**2)
        v = np.sqrt(vx**2 + vy**2)
        h1 = beta/(1-beta)*(1 - beta/L)   # help factor
        h2 = gamma/beta*v
        return [-h2*vx - h1*x, vx, -h2*vy - h1*(y-1) - beta, vy]

    # Non-elastic pendulum (scaled similarly in the limit beta=1)
    # solution Theta*cos(t)
    P = 2*pi
    dt = P/time_steps_per_period
    T = num_periods*P
    omega = 2*pi/P
    time_points = np.linspace(
        0, T, num_periods*time_steps_per_period+1)

    solver = odespy.EulerCromer(f, f_args=(beta, gamma))
    solver.set_initial_condition(ic)
    u, t = solver.solve(time_points)
    x = u[:,1]
    y = u[:,3]
    theta = np.arctan(x/(1-y))

    if plot:
        plt.figure()
        plt.plot(x, y, 'b-', title='Pendulum motion',
                 daspect=[1,1,1], daspectmode='equal',
                 axis=[x.min(), x.max(), 1.3*y.min(), 1])
        plt.savefig('tmp_xy.png')
        plt.savefig('tmp_xy.pdf')
        # Plot theta in degrees
        plt.figure()
        plt.plot(t, theta*180/np.pi, 'b-',
                 title='Angular displacement in degrees')
        plt.savefig('tmp_theta.png')
        plt.savefig('tmp_theta.pdf')
        if abs(Theta) < 10*pi/180:
            # Compare theta and theta_e for small angles (<10 degrees)
            theta_e = Theta*np.cos(omega*t)  # non-elastic scaled sol.
            plt.figure()
            plt.plot(t, theta, t, theta_e,
                     legend=['theta elastic', 'theta non-elastic'],
                     title='Elastic vs non-elastic pendulum, '\
                           'beta=%g' % beta)
            plt.savefig('tmp_compare.png')
            plt.savefig('tmp_compare.pdf')
    # Plot y vs x (the real physical motion)
    return x, y, theta, t
```
The plot of $\theta$ shows the damping ($\beta = 0.999$):
<!-- dom:FIGURE: [fig-vib/elastic_pendulum_drag_theta.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
Test functions for equilibrium and vertical motion are also included. These
are as in [Exercise 5: Simulate an elastic pendulum](#vib:exer:pendulum_elastic), except that
they call `simulate_drag` instead of `simulate`.
<!-- --- end solution of exercise --- -->
Filename: `elastic_pendulum_drag`.
<!-- Closing remarks for this Exercise -->
### Remarks
Test functions are challenging to construct for the problem with
air resistance. You can reuse the tests from
[Exercise 5: Simulate an elastic pendulum](#vib:exer:pendulum_elastic) for `simulate_drag`,
but these tests do not verify the new terms arising from air
resistance.
<!-- --- end exercise --- -->
<!-- --- begin exercise --- -->
## Exercise 7: Implement the PEFRL algorithm
<div id="vib:exer:gen:PEFRL"></div>
We consider the motion of a planet around a star (the section [Two-body gravitational problem](#vib:app:gravitation)).
The simplified case where one
mass is very much bigger than the other and one object is at rest,
results in the scaled ODE model
$$
\begin{align*}
\ddot x + (x^2 + y^2)^{-3/2}x & = 0,\\
\ddot y + (x^2 + y^2)^{-3/2}y & = 0\thinspace .
\end{align*}
$$
**a)**
It is easy to show that $x(t)$ and $y(t)$ go like sine and cosine
functions. Use this idea to derive the exact solution.
<!-- --- begin solution of exercise --- -->
**Solution.**
We may assume $x=C_x\cos(\omega t)$ and $y=C_y\sin(\omega t)$ for
constants $C_x$, $C_y$, and $\omega$. Inserted in the equations, we
see that $\omega =1$. The initial conditions determine the other
constants, which we may choose as $C_x=C_y=1$ (the object starts
at $(1,0)$ with a velocity $(0,1)$). The motion is a perfect circle,
which should last forever.
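A quick symbolic check of this reasoning (using `sympy`, which is not
required by the exercise) might look like:

```python
import sympy as sym

t = sym.symbols('t')
x = sym.cos(t)
y = sym.sin(t)
r3 = (x**2 + y**2)**sym.Rational(3, 2)
# Both residuals of the scaled ODEs should simplify to zero
print sym.simplify(sym.diff(x, t, 2) + x/r3)
print sym.simplify(sym.diff(y, t, 2) + y/r3)
```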
<!-- --- end solution of exercise --- -->
**b)**
One believes that a planet may orbit a star for billions of years.
We are now interested
in how accurate methods we actually need for such calculations.
A first task is to determine what the time interval of interest is in
scaled units. Take the earth and sun as typical objects and find
the characteristic time used in the scaling of the equations
($t_c = \sqrt{L^3/(mG)}$), where $m$ is the mass of the sun, $L$ is the
distance between the sun and the earth, and $G$ is the gravitational
constant. Find the scaled time interval corresponding to one billion years.
<!-- --- begin solution of exercise --- -->
**Solution.**
According to [Wikipedia](https://en.wikipedia.org/wiki/Solar_mass),
the mass of the sun is approximately $2\cdot 10^{30}$ kg. This
is 332946 times the mass of the earth, implying that the
dimensionless constant $\alpha \approx 3\cdot 10^{-6}$. With
$G=6.674\cdot 10^{-11}\hbox{ Nm}^2/\hbox{kg}^2$, and the
[sun-earth distance](https://en.wikipedia.org/wiki/Astronomical_unit)
as (approximately) 150 million km, we have $t_c \approx 5 028 388$ s.
This is about 58 days, which is the characteristic time; its inverse is the
angular frequency of the oscillations. To get the period of one orbit we must therefore multiply $t_c$ by $2\pi$. This gives about 1 year (and demonstrates the
fact mentioned about the scaling: the natural time scale is consistent with
Kepler's law about the period).
Thus, one billion years correspond to 62,715,924,070 time units (dividing
one billion years by $t_c$), which corresponds to about 2000
"time unit years".
<!-- --- end solution of exercise --- -->
**c)**
Solve the equations using 4th-order Runge-Kutta and the Euler-Cromer
methods. You may benefit from applying Odespy for this purpose. With
each solver, simulate 10000 orbits and print the maximum position
error and CPU time as a function of time step. Note that the maximum
position error does not necessarily occur at the end of the
simulation. The position error achieved with each solver will depend
heavily on the size of the time step. Let the time step correspond to
200, 400, 800 and 1600 steps per orbit, respectively. Are the results
as expected? Explain briefly. When you develop your program, have in
mind that it will be extended with an implementation of the other
algorithms (as requested in d) and e) later) and experiments with this
algorithm as well.
<!-- --- begin solution of exercise --- -->
**Solution.**
The first task is to implement the right-hand side function for the
system of ODEs such that we can call up Odespy solvers (or make use of
other types of ODE software, e.g., from SciPy). The $2\times 2$ system of
second-order ODEs must be expressed as a $4\times 4$ system of first-order
ODEs. We have three different cases of right-hand sides:
1. Common numbering of unknowns: $x$, $v_x$, $y$, $v_y$
2. Numbering required by Euler-Cromer: $v_x$, $x$, $v_y$, $y$
3. Numbering required by the PEFRL method: same as Euler-Cromer
Most Odespy solvers can handle any convention for numbering of the unknowns.
The important point is that initial conditions and new values at the end of
the time step are filled in the right positions of a one-dimensional array
containing the unknowns.
Using Odespy to solve the system by the Euler-Cromer method, however, requires
the unknowns to appear as velocity 1st degree-of-freedom, displacement
1st degree-of-freedom, velocity 2nd degree-of-freedom, displacement
2nd degree-of-freedom, and so forth. Two alternative right-hand side
functions `f(u, t)` for Odespy solvers are then
```python
def f_EC(u, t):
    '''
    Return derivatives for the 1st order system as
    required by Euler-Cromer.
    '''
    vx, x, vy, y = u   # u: array holding vx, x, vy, y
    d = -(x**2 + y**2)**(-3.0/2)
    return [d*x, vx, d*y, vy]

def f_RK4(u, t):
    '''
    Return derivatives for the 1st order system as
    required by ordinary solvers in Odespy.
    '''
    x, vx, y, vy = u   # u: array holding x, vx, y, vy
    d = -(x**2 + y**2)**(-3.0/2)
    return [vx, d*x, vy, d*y]
```
In addition, we shall later in d) implement the PEFRL method, where we just
give the $g$ function as input to a system of the form $\dot v_x = g_x$,
$\dot v_y = g_y$, i.e., $g$ is the vector $(g_x,g_y)$. The code below
collects a small convergence test of the PEFRL solver on a simple
oscillator, a (currently unused) class wrapper for Odespy, and the
functions that compute orbits, errors, and CPU times for each method:
```python
def g(u, v):
return np.array([-u])
def u_exact(t):
return np.array([3*np.cos(t)]).transpose()
I = u_exact(0)
V = np.array([0])
print 'V:', V, 'I:', I
# Numerical parameters
w = 1
P = 2*np.pi/w
dt_values = [P/20, P/40, P/80, P/160, P/320]
T = 8*P
error_vs_dt = []
for n, dt in enumerate(dt_values):
u, v, t = solver_PEFRL(I, V, g, dt, T)
error = np.abs(u - u_exact(t)).max()
print 'error:', error
if n > 0:
error_vs_dt.append(error/dt**4)
for i in range(1, len(error_vs_dt)):
#print abs(error_vs_dt[i]- error_vs_dt[0])
assert abs(error_vs_dt[i]-
error_vs_dt[0]) < 0.1
class PEFRL(odespy.Solver):
"""Class wrapper for Odespy.""" # Not used!
quick_description = "Explicit 4th-order method for v'=-f, u=v."
def advance(self):
u, f, n, t = self.u, self.f, self.n, self.t
dt = t[n+1] - t[n]
I = np.array([u[1], u[3]])
V = np.array([u[0], u[2]])
u, v, t = solver_PEFRL(I, V, f, dt, t+dt)
return np.array([v[-1], u[-1]])
def compute_orbit_and_error(
f,
solver_ID,
timesteps_per_period=20,
N_orbit_groups=1000,
orbit_group_size=10):
'''
For one particular solver:
Calculte the orbits for a multiple of grouped orbits, i.e.
number of orbits = orbit_group_size*N_orbit_groups.
Returns: time step dt, and, for each N_orbit_groups cycle,
the 2D position error and cpu time (as lists).
'''
def u_exact(t):
return np.array([np.cos(t), np.sin(t)])
w = 1
P = 2*np.pi/w # scaled period (1 year becomes 2*pi)
dt = P/timesteps_per_period
Nt = orbit_group_size*N_orbit_groups*timesteps_per_period
T = Nt*dt
t_mesh = np.linspace(0, T, Nt+1)
E_orbit = []
#print ' dt:', dt
T_interval = P*orbit_group_size
N = int(round(T_interval/dt))
# set initial conditions
if solver_ID == 'EC':
A = [0,1,1,0]
elif solver_ID == 'PEFRL':
I = np.array([1, 0])
V = np.array([0, 1])
else:
A = [1,0,0,1]
t1 = time.clock()
for i in range(N_orbit_groups):
time_points = np.linspace(i*T_interval, (i+1)*T_interval,N+1)
u_e = u_exact(time_points).transpose()
if solver_ID == 'EC':
solver = odespy.EulerCromer(f)
solver.set_initial_condition(A)
ui, ti = solver.solve(time_points)
# Find error (correct final pos: x=1, y=0)
orbit_error = np.sqrt(
(ui[:,1]-u_e[:,0])**2 + (ui[:,3]-u_e[:,1])**2).max()
elif solver_ID == 'PEFRL':
# Note: every T_inverval is here counted from time 0
ui, vi, ti = solver_PEFRL(I, V, f, dt, T_interval)
# Find error (correct final pos: x=1, y=0)
orbit_error = np.sqrt(
(ui[:,0]-u_e[:,0])**2 + (ui[:,1]-u_e[:,1])**2).max()
else:
solver = eval('odespy.' + solver_ID(f)
solver.set_initial_condition(A)
ui, ti = solver.solve(time_points)
# Find error (correct final pos: x=1, y=0)
orbit_error = np.sqrt(
(ui[:,0]-u_e[:,0])**2 + (ui[:,2]-u_e[:,1])**2).max()
print ' Orbit no. %d, max error (per cent): %g' % \
((i+1)*orbit_group_size, orbit_error)
E_orbit.append(orbit_error)
# set init. cond. for next time interval
if solver_ID == 'EC':
A = [ui[-1,0], ui[-1,1], ui[-1,2], ui[-1,3]]
elif solver_ID == 'PEFRL':
I = [ui[-1,0], ui[-1,1]]
V = [vi[-1,0], vi[-1,1]]
else: # RK4, adaptive rules, etc.
A = [ui[-1,0], ui[-1,1], ui[-1,2], ui[-1,3]]
t2 = time.clock()
CPU_time = (t2 - t1)/(60.0*60.0) # in hours
return dt, E_orbit, CPU_time
orbit_error_vs_dt(
f_EC, f_RK4, g, solvers,
N_orbit_groups=1000,
orbit_group_size=10):
'''
With each solver in list "solvers": Simulate
orbit_group_size*N_orbit_groups orbits with different dt values.
Collect final 2D position error for each dt and plot all errors.
'''
for solver_ID in solvers:
print 'Computing orbit with solver:', solver_ID
E_values = []
dt_values = []
cpu_values = []
for timesteps_per_period in 200, 400, 800, 1600:
print '.......time steps per period: ', \
timesteps_per_period
if solver_ID == 'EC':
dt, E, cpu_time = compute_orbit_and_error(
f_EC,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
elif solver_ID == 'PEFRL':
dt, E, cpu_time = compute_orbit_and_error(
g,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
else:
dt, E, cpu_time = compute_orbit_and_error(
f_RK4,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
dt_values.append(dt)
E_values.append(np.array(E).max())
cpu_values.append(cpu_time)
print 'dt_values:', dt_values
print 'E max with dt...:', E_values
print 'cpu_values with dt...:', cpu_values
orbit_error_vs_years(
f_EC, f_RK4, g, solvers,
N_orbit_groups=1000,
orbit_group_size=100,
N_time_steps = 1000):
'''
For each solver in the list solvers:
simulate orbit_group_size*N_orbit_groups orbits with a fixed
dt corresponding to N_time_steps steps per year.
Collect max 2D position errors for each N_time_steps'th run,
plot these errors and CPU. Finally, make an empirical
formula for error and CPU as functions of a number
of cycles.
'''
timesteps_per_period = N_time_steps # fixed for all runs
for solver_ID in solvers:
print 'Computing orbit with solver:', solver_ID
if solver_ID == 'EC':
dt, E, cpu_time = compute_orbit_and_error(
f_EC,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
elif solver_ID == 'PEFRL':
dt, E, cpu_time = compute_orbit_and_error(
g,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
else:
dt, E, cpu_time = compute_orbit_and_error(
f_RK4,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
# E and cpu_time are for every N_orbit_groups cycle
print 'E_values (fixed dt, changing no of years):', E
print 'CPU (hours):', cpu_time
years = np.arange(
0,
N_orbit_groups*orbit_group_size,
orbit_group_size)
# Now make empirical formula
def E_of_years(x, *coeff):
return sum(coeff[i]*x**float((len(coeff)-1)-i) \
for i in range(len(coeff)))
E = np.array(E)
degree = 4
# note index: polyfit finds p[0]*x**4 + p[1]*x**3 ...etc.
p = np.polyfit(years, E, degree)
p_str = map(str, p)
formula = ' + '.join([p_str[i] + '*x**' + \
str(degree-i) for i in range(degree+1)])
print 'Empirical formula (error with years): ', formula
plt.figure()
plt.plot(years,
E, 'b-',
years,
E_of_years(years, *p), 'r--')
plt.xlabel('Number of years')
plt.ylabel('Orbit error')
plt.title(solver_ID)
filename = solver_ID + 'tmp_E_with_years'
plt.savefig(filename + '.png')
plt.savefig(filename + '.pdf')
plt.show()
print 'Predicted CPU time in hours (1 billion years):', \
cpu_time*10000
print 'Predicted max error (1 billion years):', \
E_of_years(1E9, *p)
compute_orbit_error_and_CPU():
'''
Orbit error and associated CPU times are computed with
solvers: RK4, Euler-Cromer, PEFRL.'''
def f_EC(u, t):
'''
Return derivatives for the 1st order system as
required by Euler-Cromer.
'''
vx, x, vy, y = u # u: array holding vx, x, vy, y
d = -(x**2 + y**2)**(-3.0/2)
return [d*x, vx, d*y, vy ]
def f_RK4(u, t):
'''
Return derivatives for the 1st order system as
required by ordinary solvers in Odespy.
'''
x, vx, y, vy = u # u: array holding x, vx, y, vy
d = -(x**2 + y**2)**(-3.0/2)
return [vx, d*x, vy, d*y ]
def g(u, v):
'''
Return derivatives for the 1st order system as
required by PEFRL.
'''
d = -(u[0]**2 + u[1]**2)**(-3.0/2)
return np.array([d*u[0], d*u[1]])
```
The standard way of solving the ODE by Odespy is then
```python
def u_exact(t):
"""Return exact solution at time t."""
return np.array([np.cos(t), np.sin(t)])
u_e = u_exact(time_points).transpose()
solver = odespy.RK4(f_RK4)
solver.set_initial_condition(A)
ui, ti = solver.solve(time_points)
# Find error (correct final pos: x=1, y=0)
orbit_error = np.sqrt(
(ui[:,0]-u_e[:,0])**2 + (ui[:,2]-u_e[:,1])**2).max()
```
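The snippet above assumes that an initial state `A` and a `time_points` array have already been constructed. A minimal sketch of such a setup, with the ordering $x$, $v_x$, $y$, $v_y$ and purely illustrative resolution parameters, could be:

```python
import numpy as np

w = 1
P = 2*np.pi/w                    # scaled orbit period
timesteps_per_period = 200       # illustrative resolution
N_orbits = 10                    # illustrative number of orbits
dt = P/timesteps_per_period
Nt = N_orbits*timesteps_per_period
time_points = np.linspace(0, Nt*dt, Nt+1)

A = [1, 0, 0, 1]                 # x=1, vx=0, y=0, vy=1
```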
We develop functions for computing errors and plotting results so that we
can compare the different methods. These functions are shown in the complete
program listed in the solution to item f).
Running the code, the time step sizes become
```
dt_values: [0.031415926535897934, 0.015707963267948967,
            0.007853981633974483, 0.003926990816987242]
```
The corresponding maximum errors (per cent) and CPU values (hours) for the 4th-order Runge-Kutta method are given in the table below.
<table border="1">
<thead>
<tr><th align="left"> Quantity </th> <th align="center">$\Delta t_1$</th> <th align="center">$\Delta t_2$</th> <th align="center">$\Delta t_3$</th> <th align="center">$\Delta t_4$</th> </tr>
</thead>
<tbody>
<tr><td align="left"> $\Delta t$ </td> <td align="left"> 0.03 </td> <td align="left"> 0.02 </td> <td align="left"> 0.008 </td> <td align="left"> 0.004 </td> </tr>
<tr><td align="left"> Error </td> <td align="left"> 1.9039 </td> <td align="left"> 0.0787 </td> <td align="left"> 0.0025 </td> <td align="left"> 7.7e-05 </td> </tr>
<tr><td align="left"> CPU (h) </td> <td align="left"> 0.03 </td> <td align="left"> 0.06 </td> <td align="left"> 0.12 </td> <td align="left"> 0.23 </td> </tr>
</tbody>
</table>
For Euler-Cromer we get these results:
<table border="1">
<thead>
<tr><th align="left"> Quantity </th> <th align="center">$\Delta t_1$</th> <th align="center">$\Delta t_2$</th> <th align="center">$\Delta t_3$</th> <th align="center">$\Delta t_4$</th> </tr>
</thead>
<tbody>
<tr><td align="left"> $\Delta t$ </td> <td align="left"> 0.03 </td> <td align="left"> 0.02 </td> <td align="left"> 0.008 </td> <td align="left"> 0.004 </td> </tr>
<tr><td align="left"> Error </td> <td align="left"> 2.0162 </td> <td align="left"> 2.0078 </td> <td align="left"> 1.9634 </td> <td align="left"> 0.6730 </td> </tr>
<tr><td align="left"> CPU (h) </td> <td align="left"> 0.01 </td> <td align="left"> 0.02 </td> <td align="left"> 0.05 </td> <td align="left"> 0.09 </td> </tr>
</tbody>
</table>
These results are as expected. The Runge-Kutta implementation is much more accurate than Euler-Cromer, but it also requires more computations per step and hence more CPU time. For both methods, the accuracy improves and the CPU time grows as
the step size is reduced, but the improvement in accuracy is far more
pronounced for the 4th-order Runge-Kutta method.
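As a quick sanity check of these trends, observed convergence rates can be estimated directly from the tabulated values (copied below), using the standard rate formula $r_i = \ln(E_{i-1}/E_i)/\ln(\Delta t_{i-1}/\Delta t_i)$:

```python
import numpy as np

dt_values = [0.031415926535897934, 0.015707963267948967,
             0.007853981633974483, 0.003926990816987242]
E_RK4 = [1.9039, 0.0787, 0.0025, 7.7e-05]   # from the RK4 table
E_EC  = [2.0162, 2.0078, 1.9634, 0.6730]    # from the Euler-Cromer table

def rates(dt, E):
    # r_i = ln(E_{i-1}/E_i)/ln(dt_{i-1}/dt_i)
    return [np.log(E[i-1]/E[i])/np.log(dt[i-1]/dt[i])
            for i in range(1, len(E))]

print 'RK4 rates:', rates(dt_values, E_RK4)
print 'EC rates: ', rates(dt_values, E_EC)
```

The rates for the Runge-Kutta method are far higher than for Euler-Cromer, which quantifies the difference in accuracy discussed above.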
<!-- --- end solution of exercise --- -->
**d)**
Implement a solver based on the PEFRL method from
the section [vib:ode2:PEFRL](#vib:ode2:PEFRL). Verify its 4th-order convergence
using the equation $u'' + u = 0$.
<!-- --- begin solution of exercise --- -->
**Solution.**
Here is a solver function:
```python
import numpy as np
import time
def solver_PEFRL(I, V, g, dt, T):
"""
    Solve v' = g(u,v), u'=v for t in (0,T], u(0)=I and v(0)=V,
by the PEFRL method.
"""
dt = float(dt)
Nt = int(round(T/dt))
u = np.zeros((Nt+1, len(I)))
v = np.zeros((Nt+1, len(I)))
t = np.linspace(0, Nt*dt, Nt+1)
# these values are from eq (20), ref to paper below
xi = 0.1786178958448091
lambda_ = -0.2123418310626054
chi = -0.06626458266981849
v[0] = V
u[0] = I
# Compare with eq 22 in http://arxiv.org/pdf/cond-mat/0110585.pdf
for n in range(0, Nt):
u_ = u[n] + xi*dt*v[n]
v_ = v[n] + 0.5*(1-2*lambda_)*dt*g(u_, v[n])
u_ = u_ + chi*dt*v_
v_ = v_ + lambda_*dt*g(u_, v_)
u_ = u_ + (1-2*(chi+xi))*dt*v_
v_ = v_ + lambda_*dt*g(u_, v_)
u_ = u_ + chi*dt*v_
v[n+1] = v_ + 0.5*(1-2*lambda_)*dt*g(u_, v_)
u[n+1] = u_ + xi*dt*v[n+1]
#print 'v[%d]=%g, u[%d]=%g' % (n+1,v[n+1],n+1,u[n+1])
return u, v, t
```
A proper test function for verification reads
```python
def test_solver_PEFRL():
"""Check 4th order convergence rate, using u'' + u = 0,
I = 3.0, V = 0, which has the exact solution u_e = 3*cos(t)"""
def g(u, v):
return np.array([-u])
def u_exact(t):
return np.array([3*np.cos(t)]).transpose()
I = u_exact(0)
V = np.array([0])
print 'V:', V, 'I:', I
# Numerical parameters
w = 1
P = 2*np.pi/w
dt_values = [P/20, P/40, P/80, P/160, P/320]
T = 8*P
error_vs_dt = []
for n, dt in enumerate(dt_values):
u, v, t = solver_PEFRL(I, V, g, dt, T)
error = np.abs(u - u_exact(t)).max()
print 'error:', error
if n > 0:
error_vs_dt.append(error/dt**4)
for i in range(1, len(error_vs_dt)):
#print abs(error_vs_dt[i]- error_vs_dt[0])
assert abs(error_vs_dt[i]-
error_vs_dt[0]) < 0.1
```
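Since the function follows the `test_*` naming convention, it can be picked up by a test framework such as nose or pytest, or simply be called directly:

```python
if __name__ == '__main__':
    test_solver_PEFRL()
```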
<!-- --- end solution of exercise --- -->
**e)**
The simulations done previously with the 4th-order Runge-Kutta and
Euler-Cromer are now to be repeated with the PEFRL solver, so the
code must be extended accordingly. Then run the simulations and comment
on the performance of PEFRL compared to the other two.
<!-- --- begin solution of exercise --- -->
**Solution.**
With the PEFRL algorithm, we get
```
E max with dt...: [0.0010452575786173163, 6.5310955829464402e-05,
                   4.0475768394248492e-06, 2.9391302503251016e-07]
cpu_values with dt...: [0.01873611111111106, 0.037422222222222294,
                        0.07511666666666655, 0.14985]
```
<table border="1">
<thead>
<tr><th align="left"> Quantity </th> <th align="center">$\Delta t_1$</th> <th align="center">$\Delta t_2$</th> <th align="center">$\Delta t_3$</th> <th align="center">$\Delta t_4$</th> </tr>
</thead>
<tbody>
<tr><td align="left"> $\Delta t$ </td> <td align="left"> 0.03 </td> <td align="left"> 0.02 </td> <td align="left"> 0.008 </td> <td align="left"> 0.004 </td> </tr>
<tr><td align="left"> Error </td> <td align="left"> 1.04E-3 </td> <td align="left"> 6.53E-05 </td> <td align="left"> 4.05E-6 </td> <td align="left"> 2.94E-7 </td> </tr>
<tr><td align="left"> CPU (h) </td> <td align="left"> 0.02 </td> <td align="left"> 0.04 </td> <td align="left"> 0.08 </td> <td align="left"> 0.15 </td> </tr>
</tbody>
</table>
The accuracy is now dramatically improved compared to 4th-order Runge-Kutta (and Euler-Cromer).
With 1600 steps per orbit, the PEFRL maximum error is just below $3.0e-07$ per cent, while
the corresponding error with Runge-Kutta was about $7.7e-05$ per cent! This is striking,
considering the fact that the 4th-order Runge-Kutta and the PEFRL schemes are both 4th-order accurate.
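With the complete program listed under item f), a corresponding PEFRL-only study of the error versus $\Delta t$ can be reproduced by a call of the following form (function and variable names as in that listing):

```python
# assumes f_EC, f_RK4, g and orbit_error_vs_dt from the program in item f)
solvers = ['PEFRL']
orbit_error_vs_dt(f_EC, f_RK4, g, solvers,
                  N_orbit_groups=1, orbit_group_size=10000)
```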
<!-- --- end solution of exercise --- -->
**f)**
Use the PEFRL solver to simulate 100000 orbits with a fixed time step
corresponding to 1600 steps per period. Record the maximum error
within each subsequent group of 1000 orbits. Plot these errors and fit
(least squares) a mathematical function to the data. Print also the
total CPU time spent for all 100000 orbits.
Now, predict the error and required CPU time for a simulation of 1
billion years (orbits). Is it feasible on today's computers to
simulate the planetary motion for one billion years?
<!-- --- begin solution of exercise --- -->
**Solution.**
The complete code (which also produces the printouts given previously) reads:
```python
import scitools.std as plt
import sys
import odespy
import numpy as np
import time
def solver_PEFRL(I, V, g, dt, T):
"""
    Solve v' = g(u,v), u'=v for t in (0,T], u(0)=I and v(0)=V,
by the PEFRL method.
"""
dt = float(dt)
Nt = int(round(T/dt))
u = np.zeros((Nt+1, len(I)))
v = np.zeros((Nt+1, len(I)))
t = np.linspace(0, Nt*dt, Nt+1)
# these values are from eq (20), ref to paper below
xi = 0.1786178958448091
lambda_ = -0.2123418310626054
chi = -0.06626458266981849
v[0] = V
u[0] = I
# Compare with eq 22 in http://arxiv.org/pdf/cond-mat/0110585.pdf
for n in range(0, Nt):
u_ = u[n] + xi*dt*v[n]
v_ = v[n] + 0.5*(1-2*lambda_)*dt*g(u_, v[n])
u_ = u_ + chi*dt*v_
v_ = v_ + lambda_*dt*g(u_, v_)
u_ = u_ + (1-2*(chi+xi))*dt*v_
v_ = v_ + lambda_*dt*g(u_, v_)
u_ = u_ + chi*dt*v_
v[n+1] = v_ + 0.5*(1-2*lambda_)*dt*g(u_, v_)
u[n+1] = u_ + xi*dt*v[n+1]
#print 'v[%d]=%g, u[%d]=%g' % (n+1,v[n+1],n+1,u[n+1])
return u, v, t
def test_solver_PEFRL():
"""Check 4th order convergence rate, using u'' + u = 0,
I = 3.0, V = 0, which has the exact solution u_e = 3*cos(t)"""
def g(u, v):
return np.array([-u])
def u_exact(t):
return np.array([3*np.cos(t)]).transpose()
I = u_exact(0)
V = np.array([0])
print 'V:', V, 'I:', I
# Numerical parameters
w = 1
P = 2*np.pi/w
dt_values = [P/20, P/40, P/80, P/160, P/320]
T = 8*P
error_vs_dt = []
for n, dt in enumerate(dt_values):
u, v, t = solver_PEFRL(I, V, g, dt, T)
error = np.abs(u - u_exact(t)).max()
print 'error:', error
if n > 0:
error_vs_dt.append(error/dt**4)
for i in range(1, len(error_vs_dt)):
#print abs(error_vs_dt[i]- error_vs_dt[0])
assert abs(error_vs_dt[i]-
error_vs_dt[0]) < 0.1
class PEFRL(odespy.Solver):
"""Class wrapper for Odespy.""" # Not used!
    quick_description = "Explicit 4th-order method for v'=f(u,v), u'=v."
def advance(self):
u, f, n, t = self.u, self.f, self.n, self.t
dt = t[n+1] - t[n]
I = np.array([u[1], u[3]])
V = np.array([u[0], u[2]])
        u, v, t = solver_PEFRL(I, V, f, dt, t+dt)
return np.array([v[-1], u[-1]])
def compute_orbit_and_error(
f,
solver_ID,
timesteps_per_period=20,
N_orbit_groups=1000,
orbit_group_size=10):
'''
For one particular solver:
    Calculate the orbits for a multiple of grouped orbits, i.e.
number of orbits = orbit_group_size*N_orbit_groups.
    Returns: time step dt, a list with the max 2D position error
    for each orbit group, and the total CPU time in hours.
'''
def u_exact(t):
return np.array([np.cos(t), np.sin(t)])
w = 1
P = 2*np.pi/w # scaled period (1 year becomes 2*pi)
dt = P/timesteps_per_period
Nt = orbit_group_size*N_orbit_groups*timesteps_per_period
T = Nt*dt
t_mesh = np.linspace(0, T, Nt+1)
E_orbit = []
#print ' dt:', dt
T_interval = P*orbit_group_size
N = int(round(T_interval/dt))
# set initial conditions
if solver_ID == 'EC':
A = [0,1,1,0]
elif solver_ID == 'PEFRL':
I = np.array([1, 0])
V = np.array([0, 1])
else:
A = [1,0,0,1]
t1 = time.clock()
for i in range(N_orbit_groups):
time_points = np.linspace(i*T_interval, (i+1)*T_interval,N+1)
u_e = u_exact(time_points).transpose()
if solver_ID == 'EC':
solver = odespy.EulerCromer(f)
solver.set_initial_condition(A)
ui, ti = solver.solve(time_points)
# Find error (correct final pos: x=1, y=0)
orbit_error = np.sqrt(
(ui[:,1]-u_e[:,0])**2 + (ui[:,3]-u_e[:,1])**2).max()
elif solver_ID == 'PEFRL':
            # Note: every T_interval is here counted from time 0
ui, vi, ti = solver_PEFRL(I, V, f, dt, T_interval)
# Find error (correct final pos: x=1, y=0)
orbit_error = np.sqrt(
(ui[:,0]-u_e[:,0])**2 + (ui[:,1]-u_e[:,1])**2).max()
else:
            solver = eval('odespy.' + solver_ID)(f)
solver.set_initial_condition(A)
ui, ti = solver.solve(time_points)
# Find error (correct final pos: x=1, y=0)
orbit_error = np.sqrt(
(ui[:,0]-u_e[:,0])**2 + (ui[:,2]-u_e[:,1])**2).max()
print ' Orbit no. %d, max error (per cent): %g' % \
((i+1)*orbit_group_size, orbit_error)
E_orbit.append(orbit_error)
# set init. cond. for next time interval
if solver_ID == 'EC':
A = [ui[-1,0], ui[-1,1], ui[-1,2], ui[-1,3]]
elif solver_ID == 'PEFRL':
I = [ui[-1,0], ui[-1,1]]
V = [vi[-1,0], vi[-1,1]]
else: # RK4, adaptive rules, etc.
A = [ui[-1,0], ui[-1,1], ui[-1,2], ui[-1,3]]
t2 = time.clock()
CPU_time = (t2 - t1)/(60.0*60.0) # in hours
return dt, E_orbit, CPU_time
def orbit_error_vs_dt(
f_EC, f_RK4, g, solvers,
N_orbit_groups=1000,
orbit_group_size=10):
'''
With each solver in list "solvers": Simulate
orbit_group_size*N_orbit_groups orbits with different dt values.
Collect final 2D position error for each dt and plot all errors.
'''
for solver_ID in solvers:
print 'Computing orbit with solver:', solver_ID
E_values = []
dt_values = []
cpu_values = []
for timesteps_per_period in 200, 400, 800, 1600:
print '.......time steps per period: ', \
timesteps_per_period
if solver_ID == 'EC':
dt, E, cpu_time = compute_orbit_and_error(
f_EC,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
elif solver_ID == 'PEFRL':
dt, E, cpu_time = compute_orbit_and_error(
g,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
else:
dt, E, cpu_time = compute_orbit_and_error(
f_RK4,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
dt_values.append(dt)
E_values.append(np.array(E).max())
cpu_values.append(cpu_time)
print 'dt_values:', dt_values
print 'E max with dt...:', E_values
print 'cpu_values with dt...:', cpu_values
def orbit_error_vs_years(
f_EC, f_RK4, g, solvers,
N_orbit_groups=1000,
orbit_group_size=100,
N_time_steps = 1000):
'''
For each solver in the list solvers:
simulate orbit_group_size*N_orbit_groups orbits with a fixed
dt corresponding to N_time_steps steps per year.
Collect max 2D position errors for each N_time_steps'th run,
plot these errors and CPU. Finally, make an empirical
formula for error and CPU as functions of a number
of cycles.
'''
timesteps_per_period = N_time_steps # fixed for all runs
for solver_ID in solvers:
print 'Computing orbit with solver:', solver_ID
if solver_ID == 'EC':
dt, E, cpu_time = compute_orbit_and_error(
f_EC,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
elif solver_ID == 'PEFRL':
dt, E, cpu_time = compute_orbit_and_error(
g,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
else:
dt, E, cpu_time = compute_orbit_and_error(
f_RK4,
solver_ID,
timesteps_per_period,
N_orbit_groups,
orbit_group_size)
# E and cpu_time are for every N_orbit_groups cycle
print 'E_values (fixed dt, changing no of years):', E
print 'CPU (hours):', cpu_time
years = np.arange(
0,
N_orbit_groups*orbit_group_size,
orbit_group_size)
# Now make empirical formula
def E_of_years(x, *coeff):
return sum(coeff[i]*x**float((len(coeff)-1)-i) \
for i in range(len(coeff)))
E = np.array(E)
degree = 4
# note index: polyfit finds p[0]*x**4 + p[1]*x**3 ...etc.
p = np.polyfit(years, E, degree)
p_str = map(str, p)
formula = ' + '.join([p_str[i] + '*x**' + \
str(degree-i) for i in range(degree+1)])
print 'Empirical formula (error with years): ', formula
plt.figure()
plt.plot(years,
E, 'b-',
years,
E_of_years(years, *p), 'r--')
plt.xlabel('Number of years')
plt.ylabel('Orbit error')
plt.title(solver_ID)
filename = solver_ID + 'tmp_E_with_years'
plt.savefig(filename + '.png')
plt.savefig(filename + '.pdf')
plt.show()
print 'Predicted CPU time in hours (1 billion years):', \
cpu_time*10000
print 'Predicted max error (1 billion years):', \
E_of_years(1E9, *p)
def compute_orbit_error_and_CPU():
'''
Orbit error and associated CPU times are computed with
solvers: RK4, Euler-Cromer, PEFRL.'''
def f_EC(u, t):
'''
Return derivatives for the 1st order system as
required by Euler-Cromer.
'''
vx, x, vy, y = u # u: array holding vx, x, vy, y
d = -(x**2 + y**2)**(-3.0/2)
return [d*x, vx, d*y, vy ]
def f_RK4(u, t):
'''
Return derivatives for the 1st order system as
required by ordinary solvers in Odespy.
'''
x, vx, y, vy = u # u: array holding x, vx, y, vy
d = -(x**2 + y**2)**(-3.0/2)
return [vx, d*x, vy, d*y ]
def g(u, v):
'''
Return derivatives for the 1st order system as
required by PEFRL.
'''
d = -(u[0]**2 + u[1]**2)**(-3.0/2)
return np.array([d*u[0], d*u[1]])
print 'Find orbit error as fu. of dt...(10000 orbits)'
solvers = ['RK4', 'EC', 'PEFRL']
N_orbit_groups=1
orbit_group_size=10000
orbit_error_vs_dt(
f_EC, f_RK4, g, solvers,
N_orbit_groups=N_orbit_groups,
orbit_group_size=orbit_group_size)
print 'Compute orbit error as fu. of no of years (fixed dt)...'
solvers = ['PEFRL']
N_orbit_groups=100
orbit_group_size=1000
N_time_steps = 1600 # no of steps per orbit cycle
orbit_error_vs_years(
f_EC, f_RK4, g, solvers,
N_orbit_groups=N_orbit_groups,
orbit_group_size=orbit_group_size,
N_time_steps = N_time_steps)
if __name__ == '__main__':
test_solver_PEFRL()
compute_orbit_error_and_CPU()
```
The maximum error develops with number of orbits as seen in the following plot,
where the red dashed curve is from the mathematical model:
<!-- dom:FIGURE: [fig-vib/PEFRL_E_with_years.png, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<!-- end figure -->
We note that the maximum error achieved during the first 100000 orbits is only
about $1.2e-06$ per cent. Not bad!
For the printed CPU and empirical formula, we get:
```
CPU (hours): 1.51591388889
Empirical formula (E with years):
    3.15992325978e-26*x**4 + -6.1772567063e-21*x**3 +
    1.87983349496e-16*x**2 + 2.32924158693e-11*x**1 +
    5.46989368301e-08*x**0
```
Since the CPU time grows linearly with the number of orbits, the CPU time for 100000 orbits can just be multiplied by 10000 to get the
estimated CPU time required for 1 billion years. This gives 15159 CPU hours (631 days), which is also printed.
With the derived empirical formula, the estimated orbit error after 1 billion years becomes 31593055529 per cent.
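Both predictions follow from the printed output by simple arithmetic, e.g. (coefficients copied from the empirical formula above):

```python
import numpy as np

cpu_100000_orbits = 1.51591388889         # hours, from the printout
print 'CPU, 1 billion years (hours):', cpu_100000_orbits*1E9/1E5

p = [3.15992325978e-26, -6.1772567063e-21, 1.87983349496e-16,
     2.32924158693e-11, 5.46989368301e-08]
print 'Max error, 1 billion years (per cent):', np.polyval(p, 1E9)
```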
[sl 2: Can we really use the plot and the function to predict max E during 1 billion years? Seems hard.]
<!-- --- end solution of exercise --- -->
Filename: `vib_PEFRL`.
<!-- Closing remarks for this Exercise -->
### Remarks
This exercise investigates whether it is feasible to predict
planetary motion for the life time of a solar system.
[hpl 3: Is it???]
<!-- --- end exercise --- -->
|
Carlo Francesco <unk> (November 26, 1753) – Cardinal-Priest of SS. IV <unk>; archbishop of Pavia
|
theory Finiteness
imports Main "HOL-Eisbach.Eisbach_Tools"
begin
section \<open>Two Eisbach proof methods for finiteness of sets\<close>
text \<open>
The first method is intended to act more conservatively (think \<open>safe\<close>), leaving subgoals
for the user where it couldn't proceed any further.
The second method is more powerful, acting more in a succeed-or-die manner,
similarly to \<open>force\<close> and friends.
The examples in the second section should give a good impression of where these methods
can help.
\<close>
text \<open>This slot is intended to provide more \<open>intro\<close> theorems for finite sets.\<close>
named_theorems finite
(* Trick from Dan Matichuk on isabelle-users *)
method add_finite_Collect_simproc methods m =
match termI in H[simproc add: finite_Collect]:_ \<Rightarrow> m
(* Trick from Dan Matichuk on isabelle-users.
Turns a structured method into a simple one.
*)
method_setup simple_method =
\<open>Method.text_closure >> (fn m => fn ctxt =>
let
val facts = Method.get_facts ctxt
val insert' = Method.Basic (K (Method.insert facts))
val m' = Method.Combinator (Method.no_combinator_info, Method.Then, [insert', m])
in Method.evaluate m' ctxt end)\<close>
method finite_tup =
match conclusion in
"finite (_ \<times> _)" \<Rightarrow> \<open>rule finite_cartesian_product; finite_tup\<close> \<bar>
"finite S" for S :: "(_ * _) set" \<Rightarrow>
\<open>print_term S, (rule finite_subset[where A = S and B = "fst ` S \<times> snd ` S"]; finite_tup?
| (rule finite_subset; assumption?; fastforce))\<close> \<bar>
"finite X" for X \<Rightarrow>
\<open>print_term X, (simp add: image_def, finite_tup?)?,
(solves \<open>(rule finite_subset; assumption?; fastforce)\<close>)?\<close> \<bar>
_ \<Rightarrow> \<open>fastforce simp: image_def\<close>
method finite_search =
match conclusion in
"finite (_ \<times> _)" \<Rightarrow> \<open>rule finite_cartesian_product; finite_search\<close> \<bar>
"finite (_ ` _)" \<Rightarrow> \<open>simp; finite_search | rule finite_imageI; finite_search\<close> \<bar>
"finite S" for S :: "(_ * _) set" \<Rightarrow>
\<open>print_term S, (solves \<open>rule finite_subset; auto\<close>
| rule finite_subset[where A = S and B = "fst ` S \<times> snd ` S"]; finite_tup?)\<close> \<bar>
"finite (Collect f)" for f \<Rightarrow>
\<open>print_term f, (add_finite_Collect_simproc simp)?;
(solves \<open>auto intro: finite\<close>
| print_term v, simp?, rule finite; (assumption | finite_search)
| rule finite_imageI; finite_search
| rule finite_vimageI; finite_search
| print_term x, rule finite_subset; assumption?; fastforce)\<close> \<bar>
"finite X" for X \<Rightarrow>
\<open>print_term X,
(rule finite; (assumption | finite_search)
|(simp add: image_def, finite_search?)?,
(solves \<open>(rule finite_subset; assumption?; fastforce)\<close>)?)\<close> \<bar>
_ \<Rightarrow> \<open>fastforce simp: image_def\<close>
method finite = simple_method finite_search
section \<open>Tests\<close>
subsection \<open>Counterexamples\<close>
lemma inj_finite_single:
assumes "inj f"
shows "finite {y. x = f y}"
using assms Collect_mem_eq Collect_mono_iff infinite_iff_countable_subset inj_eq not_finite_existsD
rangeI
by fastforce
lemmas inj_finite_single[finite]
text \<open>It's hard to guess the right set\<close>
lemma inj_finite_single':
assumes "inj f"
shows "finite {z. f z = x}"
apply (rule finite_subset[of _ "{z. x = f z}"])
apply blast
using assms by finite
(* Due to Lars Hupel *)
definition select :: "('a \<rightharpoonup> 'b) \<Rightarrow> 'a set \<Rightarrow> 'b set" where
"select f S = {z | z. \<exists>x \<in> S. f x = Some z}"
lemma select_finite:
assumes "finite S"
shows "finite (select f S)"
using assms unfolding select_def by finite
lemmas inj_finite_single'[finite]
subsection \<open>Working Examples\<close>
lemma
assumes "finite A"
shows "finite {x. x \<in> A \<and> P x}"
using assms by finite_search
lemma collect_pair_finite'[finite]:
assumes "finite {(x, y). P x y}"
shows "finite {(x, y) . P x y \<and> R x y}"
using assms by - finite
text \<open>This is what we actually need in this theory\<close>
lemma collect_pair_finite''[finite]:
assumes "finite {(x, y). P x \<and> Q y}"
shows "finite {(x, y) . P x \<and> Q y \<and> R x y}"
using assms by - finite
lemma
assumes "finite (A \<times> B)"
shows "finite {(x, y) | x y. x \<in> A \<and> y \<in> B \<and> R x y}"
using assms by - finite
lemma finite_imageI'':
assumes "finite (A \<times> B)"
shows "finite {f x y | x y. x \<in> A \<and> y \<in> B \<and> R x y}"
using assms by - finite
text \<open>\<open>finite_Collect\<close> can also rewrite to \<open>vimage\<close>\<close>
lemma
assumes "inj f" "finite S"
shows "finite {y. \<exists> x \<in> S. x = f y}"
using assms by - finite
lemma
assumes "inj f" "finite S"
shows "finite {y. \<exists> x \<in> S. f y = x}"
using assms by - finite
text \<open>Another counter-example\<close>
lemma
assumes "finite (A \<times> B)"
shows "finite {f x y | x y. x \<in> A \<and> y \<in> B \<and> R x y \<and> Q x y \<and> T x \<and> TT y}" (is "finite ?S")
proof -
have "?S = (\<lambda> (x, y). f x y) ` {(x, y). x \<in> A \<and> y \<in> B \<and> R x y \<and> Q x y \<and> T x \<and> TT y}"
by auto
also have "finite \<dots>" using assms by - finite
ultimately show ?thesis by simp
qed
text \<open>
Easier proof. The problem for our method is that the simproc fails to turn ?S into the form used
in the proof above.
Note that the declaration of the \<open>finite\<close> attribute below is the only one that is \<^emph>\<open>necessary\<close> in
this theory.
\<close>
lemma
notes finite_imageI''[finite]
assumes "finite (A \<times> B)"
shows "finite {f x y | x y. x \<in> A \<and> y \<in> B \<and> R x y \<and> Q x y \<and> T x \<and> TT y}" (is "finite ?S")
using assms by finite
lemma
assumes "finite A" "finite B"
shows "finite {(x, y) | x y. x \<in> A \<and> y \<in> B \<and> R y \<and> S x}"
using assms by - finite
lemma
fixes P Q R :: "'a \<Rightarrow> bool"
assumes "finite {x. P x \<and> R x}"
shows "finite {x. P x \<and> Q x \<and> R x}"
using assms by - finite
lemma R:
assumes "finite A" "A = B"
shows "finite B"
using assms by finite
lemma pairwise_finiteI:
assumes "finite {b. \<exists>a. P a b}" (is "finite ?B")
assumes "finite {a. \<exists>b. P a b}"
shows "finite {(a,b). P a b}" (is "finite ?C")
using assms by - finite
lemma pairwise_finiteI3:
assumes "finite {b. \<exists>a c. P a b c}"
assumes "finite {a. \<exists>b c. P a b c}"
assumes "finite {c. \<exists>a b. P a b c}"
shows "finite {(a,b,c). P a b c}" (is "finite ?C")
using assms by - finite
lemma pairwise_finiteI4:
assumes "finite {b. \<exists>a c d. P a b c d}"
assumes "finite {a. \<exists>b c d. P a b c d}"
assumes "finite {c. \<exists>a b d. P a b c d}"
assumes "finite {d. \<exists>a b c. P a b c d}"
shows "finite {(a,b,c,d). P a b c d}" (is "finite ?C")
using assms by - finite
lemma finite_ex_and1:
assumes "finite {b. \<exists>a. P a b}" (is "finite ?A")
shows "finite {b. \<exists>a. P a b \<and> Q a b}" (is "finite ?B")
using assms by - finite
lemma finite_ex_and2:
assumes "finite {b. \<exists>a. Q a b}" (is "finite ?A")
shows "finite {b. \<exists>a. P a b \<and> Q a b}" (is "finite ?B")
using assms by - finite
text \<open>
This is the only lemma where our methods cannot help us so far due to the fairly
complex argument that is used in the interactive proof.
\<close>
end
|
lemma cis_inverse [simp]: "inverse (cis a) = cis (- a)" |
integer, dimension (10, 10, 10) :: a
|