theory Iterator
imports
It_to_It
SetIteratorOperations
SetIteratorGA
Proper_Iterator
Gen_Iterator
Idx_Iterator
begin
text {* Folding over a list created by a proper iterator can be replaced
by a single iteration *}
lemma proper_it_to_list_opt[refine_transfer_post_subst]:
assumes PR: "proper_it' it it'"
shows "foldli o it_to_list it \<equiv> it'"
proof (rule eq_reflection, intro ext)
fix s c f \<sigma>
obtain l where "it s = foldli l" and "it' s = foldli l"
by (rule proper_itE[OF PR[THEN proper_it'D[where s=s]]])
thus "(foldli o it_to_list it) s c f \<sigma> = it' s c f \<sigma>"
by (simp add: comp_def it_to_list_def)
qed
lemma iterator_cnv_to_comp[refine_transfer_post_simp]:
"foldli (it_to_list it x) = (foldli o it_to_list it) x"
by auto
declare idx_iteratei_eq_foldli[autoref_rules]
end
|
/-
Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov, Sébastien Gouëzel
-/
import order.bounds.basic
/-!
# Monotonicity on intervals
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we prove that a function is (strictly) monotone (or antitone) on a linear order `α`
provided that it is (strictly) monotone on `(-∞, a]` and on `[a, +∞)`. This is a special case
of a more general statement where one deduces monotonicity on a union from monotonicity on each
set.
-/
open set
variables {α β : Type*} [linear_order α] [preorder β] {a : α} {f : α → β}
/-- If `f` is strictly monotone both on `s` and `t`, with `s` to the left of `t` and the center
point belonging to both `s` and `t`, then `f` is strictly monotone on `s ∪ t` -/
protected lemma strict_mono_on.union {s t : set α} {c : α} (h₁ : strict_mono_on f s)
  (h₂ : strict_mono_on f t) (hs : is_greatest s c) (ht : is_least t c) :
  strict_mono_on f (s ∪ t) :=
begin
  have A : ∀ x, x ∈ s ∪ t → x ≤ c → x ∈ s,
  { assume x hx hxc,
    cases hx, { exact hx },
    rcases eq_or_lt_of_le hxc with rfl|h'x, { exact hs.1 },
    exact (lt_irrefl _ (h'x.trans_le (ht.2 hx))).elim },
  have B : ∀ x, x ∈ s ∪ t → c ≤ x → x ∈ t,
  { assume x hx hxc,
    cases hx, swap, { exact hx },
    rcases eq_or_lt_of_le hxc with rfl|h'x, { exact ht.1 },
    exact (lt_irrefl _ (h'x.trans_le (hs.2 hx))).elim },
  assume x hx y hy hxy,
  rcases lt_or_le x c with hxc|hcx,
  { have xs : x ∈ s, from A _ hx hxc.le,
    rcases lt_or_le y c with hyc|hcy,
    { exact h₁ xs (A _ hy hyc.le) hxy },
    { exact (h₁ xs hs.1 hxc).trans_le (h₂.monotone_on ht.1 (B _ hy hcy) hcy) } },
  { have xt : x ∈ t, from B _ hx hcx,
    have yt : y ∈ t, from B _ hy (hcx.trans hxy.le),
    exact h₂ xt yt hxy }
end
/-- If `f` is strictly monotone both on `(-∞, a]` and `[a, ∞)`, then it is strictly monotone on the
whole line. -/
protected lemma strict_mono_on.Iic_union_Ici (h₁ : strict_mono_on f (Iic a))
  (h₂ : strict_mono_on f (Ici a)) : strict_mono f :=
begin
  rw [← strict_mono_on_univ, ← @Iic_union_Ici _ _ a],
  exact strict_mono_on.union h₁ h₂ is_greatest_Iic is_least_Ici,
end
/-- If `f` is strictly antitone both on `s` and `t`, with `s` to the left of `t` and the center
point belonging to both `s` and `t`, then `f` is strictly antitone on `s ∪ t` -/
protected lemma strict_anti_on.union {s t : set α} {c : α} (h₁ : strict_anti_on f s)
  (h₂ : strict_anti_on f t) (hs : is_greatest s c) (ht : is_least t c) :
  strict_anti_on f (s ∪ t) :=
(h₁.dual_right.union h₂.dual_right hs ht).dual_right
/-- If `f` is strictly antitone both on `(-∞, a]` and `[a, ∞)`, then it is strictly antitone on the
whole line. -/
protected lemma strict_anti_on.Iic_union_Ici (h₁ : strict_anti_on f (Iic a))
  (h₂ : strict_anti_on f (Ici a)) : strict_anti f :=
(h₁.dual_right.Iic_union_Ici h₂.dual_right).dual_right
/-- If `f` is monotone both on `s` and `t`, with `s` to the left of `t` and the center
point belonging to both `s` and `t`, then `f` is monotone on `s ∪ t` -/
protected lemma monotone_on.union_right {s t : set α} {c : α} (h₁ : monotone_on f s)
  (h₂ : monotone_on f t) (hs : is_greatest s c) (ht : is_least t c) :
  monotone_on f (s ∪ t) :=
begin
  have A : ∀ x, x ∈ s ∪ t → x ≤ c → x ∈ s,
  { assume x hx hxc,
    cases hx, { exact hx },
    rcases eq_or_lt_of_le hxc with rfl|h'x, { exact hs.1 },
    exact (lt_irrefl _ (h'x.trans_le (ht.2 hx))).elim },
  have B : ∀ x, x ∈ s ∪ t → c ≤ x → x ∈ t,
  { assume x hx hxc,
    cases hx, swap, { exact hx },
    rcases eq_or_lt_of_le hxc with rfl|h'x, { exact ht.1 },
    exact (lt_irrefl _ (h'x.trans_le (hs.2 hx))).elim },
  assume x hx y hy hxy,
  rcases lt_or_le x c with hxc|hcx,
  { have xs : x ∈ s, from A _ hx hxc.le,
    rcases lt_or_le y c with hyc|hcy,
    { exact h₁ xs (A _ hy hyc.le) hxy },
    { exact (h₁ xs hs.1 hxc.le).trans (h₂ ht.1 (B _ hy hcy) hcy) } },
  { have xt : x ∈ t, from B _ hx hcx,
    have yt : y ∈ t, from B _ hy (hcx.trans hxy),
    exact h₂ xt yt hxy }
end
/-- If `f` is monotone both on `(-∞, a]` and `[a, ∞)`, then it is monotone on the whole line. -/
protected lemma monotone_on.Iic_union_Ici (h₁ : monotone_on f (Iic a))
  (h₂ : monotone_on f (Ici a)) : monotone f :=
begin
  rw [← monotone_on_univ, ← @Iic_union_Ici _ _ a],
  exact monotone_on.union_right h₁ h₂ is_greatest_Iic is_least_Ici
end
/-- If `f` is antitone both on `s` and `t`, with `s` to the left of `t` and the center
point belonging to both `s` and `t`, then `f` is antitone on `s ∪ t` -/
protected lemma antitone_on.union_right {s t : set α} {c : α} (h₁ : antitone_on f s)
  (h₂ : antitone_on f t) (hs : is_greatest s c) (ht : is_least t c) :
  antitone_on f (s ∪ t) :=
(h₁.dual_right.union_right h₂.dual_right hs ht).dual_right
/-- If `f` is antitone both on `(-∞, a]` and `[a, ∞)`, then it is antitone on the whole line. -/
protected lemma antitone_on.Iic_union_Ici (h₁ : antitone_on f (Iic a))
  (h₂ : antitone_on f (Ici a)) : antitone f :=
(h₁.dual_right.Iic_union_Ici h₂.dual_right).dual_right
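/- A minimal usage sketch (not part of the original file), assuming the `variables`
declared above: the two half-line lemmas glue together by direct application. -/
example (h₁ : strict_mono_on f (Iic a)) (h₂ : strict_mono_on f (Ici a)) :
  strict_mono f :=
h₁.Iic_union_Ici h₂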
|
/**
*
* @generated c Tue Jan 7 11:45:26 2014
*
**/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cblas.h>
#include <lapacke.h>
#include <plasma.h>
#include <core_blas.h>
#include "auxiliary.h"
/*-------------------------------------------------------------------
* Check the orthogonality of Q
*/
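/* The check overwrites an identity matrix with Q^H*Q - Id (Q*Q^H - Id when M < N)
   via cherk and reports ||.||_oo / (min(M,N) * eps); NaN or a ratio above 10 is
   flagged as suspicious. */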
int c_check_orthogonality(int M, int N, int LDQ, PLASMA_Complex32_t *Q)
{
float alpha, beta;
float normQ;
int info_ortho;
int i;
int minMN = min(M, N);
float eps;
float *work = (float *)malloc(minMN*sizeof(float));
eps = LAPACKE_slamch_work('e');
alpha = 1.0;
beta = -1.0;
/* Build the identity matrix USE DLASET? */
PLASMA_Complex32_t *Id = (PLASMA_Complex32_t *) malloc(minMN*minMN*sizeof(PLASMA_Complex32_t));
memset((void*)Id, 0, minMN*minMN*sizeof(PLASMA_Complex32_t));
for (i = 0; i < minMN; i++)
Id[i*minMN+i] = (PLASMA_Complex32_t)1.0;
/* Perform Id - Q'Q */
if (M >= N)
cblas_cherk(CblasColMajor, CblasUpper, CblasConjTrans, N, M, alpha, Q, LDQ, beta, Id, N);
else
cblas_cherk(CblasColMajor, CblasUpper, CblasNoTrans, M, N, alpha, Q, LDQ, beta, Id, M);
normQ = LAPACKE_clansy_work(LAPACK_COL_MAJOR, 'i', 'u', minMN, Id, minMN, work);
printf("============\n");
printf("Checking the orthogonality of Q \n");
printf("||Id-Q'*Q||_oo / (N*eps) = %e \n",normQ/(minMN*eps));
if ( isnan(normQ / (minMN * eps)) || (normQ / (minMN * eps) > 10.0) ) {
printf("-- Orthogonality is suspicious ! \n");
info_ortho=1;
}
else {
printf("-- Orthogonality is CORRECT ! \n");
info_ortho=0;
}
free(work); free(Id);
return info_ortho;
}
/*------------------------------------------------------------
* Check the factorization QR
*/
int c_check_QRfactorization(int M, int N, PLASMA_Complex32_t *A1, PLASMA_Complex32_t *A2, int LDA, PLASMA_Complex32_t *Q)
{
float Anorm, Rnorm;
PLASMA_Complex32_t alpha, beta;
int info_factorization;
int i,j;
float eps;
eps = LAPACKE_slamch_work('e');
PLASMA_Complex32_t *Ql = (PLASMA_Complex32_t *)malloc(M*N*sizeof(PLASMA_Complex32_t));
PLASMA_Complex32_t *Residual = (PLASMA_Complex32_t *)malloc(M*N*sizeof(PLASMA_Complex32_t));
float *work = (float *)malloc(max(M,N)*sizeof(float));
alpha=1.0;
beta=0.0;
if (M >= N) {
/* Extract the R */
PLASMA_Complex32_t *R = (PLASMA_Complex32_t *)malloc(N*N*sizeof(PLASMA_Complex32_t));
memset((void*)R, 0, N*N*sizeof(PLASMA_Complex32_t));
LAPACKE_clacpy_work(LAPACK_COL_MAJOR,'u', M, N, A2, LDA, R, N);
/* Perform Ql=Q*R */
memset((void*)Ql, 0, M*N*sizeof(PLASMA_Complex32_t));
cblas_cgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, N, N, CBLAS_SADDR(alpha), Q, LDA, R, N, CBLAS_SADDR(beta), Ql, M);
free(R);
}
else {
/* Extract the L */
PLASMA_Complex32_t *L = (PLASMA_Complex32_t *)malloc(M*M*sizeof(PLASMA_Complex32_t));
memset((void*)L, 0, M*M*sizeof(PLASMA_Complex32_t));
LAPACKE_clacpy_work(LAPACK_COL_MAJOR,'l', M, N, A2, LDA, L, M);
/* Perform Ql=LQ */
memset((void*)Ql, 0, M*N*sizeof(PLASMA_Complex32_t));
cblas_cgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, N, M, CBLAS_SADDR(alpha), L, M, Q, LDA, CBLAS_SADDR(beta), Ql, M);
free(L);
}
/* Compute the Residual */
for (i = 0; i < M; i++)
for (j = 0 ; j < N; j++)
Residual[j*M+i] = A1[j*LDA+i]-Ql[j*M+i];
Rnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, N, Residual, M, work);
Anorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, N, A2, LDA, work);
if (M >= N) {
printf("============\n");
printf("Checking the QR Factorization \n");
printf("-- ||A-QR||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps));
}
else {
printf("============\n");
printf("Checking the LQ Factorization \n");
printf("-- ||A-LQ||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps));
}
if (isnan(Rnorm / (Anorm * N *eps)) || (Rnorm / (Anorm * N * eps) > 10.0) ) {
printf("-- Factorization is suspicious ! \n");
info_factorization = 1;
}
else {
printf("-- Factorization is CORRECT ! \n");
info_factorization = 0;
}
free(work); free(Ql); free(Residual);
return info_factorization;
}
/*------------------------------------------------------------------------
* Check the factorization of the matrix A2
*/
int c_check_LLTfactorization(int N, PLASMA_Complex32_t *A1, PLASMA_Complex32_t *A2, int LDA, int uplo)
{
float Anorm, Rnorm;
PLASMA_Complex32_t alpha;
int info_factorization;
int i,j;
float eps;
eps = LAPACKE_slamch_work('e');
PLASMA_Complex32_t *Residual = (PLASMA_Complex32_t *)malloc(N*N*sizeof(PLASMA_Complex32_t));
PLASMA_Complex32_t *L1 = (PLASMA_Complex32_t *)malloc(N*N*sizeof(PLASMA_Complex32_t));
PLASMA_Complex32_t *L2 = (PLASMA_Complex32_t *)malloc(N*N*sizeof(PLASMA_Complex32_t));
float *work = (float *)malloc(N*sizeof(float));
memset((void*)L1, 0, N*N*sizeof(PLASMA_Complex32_t));
memset((void*)L2, 0, N*N*sizeof(PLASMA_Complex32_t));
alpha= 1.0;
LAPACKE_clacpy_work(LAPACK_COL_MAJOR,' ', N, N, A1, LDA, Residual, N);
/* Dealing with L'L or U'U */
if (uplo == PlasmaUpper){
LAPACKE_clacpy_work(LAPACK_COL_MAJOR,'u', N, N, A2, LDA, L1, N);
LAPACKE_clacpy_work(LAPACK_COL_MAJOR,'u', N, N, A2, LDA, L2, N);
cblas_ctrmm(CblasColMajor, CblasLeft, CblasUpper, CblasConjTrans, CblasNonUnit, N, N, CBLAS_SADDR(alpha), L1, N, L2, N);
}
else{
LAPACKE_clacpy_work(LAPACK_COL_MAJOR,'l', N, N, A2, LDA, L1, N);
LAPACKE_clacpy_work(LAPACK_COL_MAJOR,'l', N, N, A2, LDA, L2, N);
cblas_ctrmm(CblasColMajor, CblasRight, CblasLower, CblasConjTrans, CblasNonUnit, N, N, CBLAS_SADDR(alpha), L1, N, L2, N);
}
/* Compute the Residual || A -L'L|| */
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
Residual[j*N+i] = L2[j*N+i] - Residual[j*N+i];
Rnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', N, N, Residual, N, work);
Anorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', N, N, A1, LDA, work);
printf("============\n");
printf("Checking the Cholesky Factorization \n");
printf("-- ||L'L-A||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps));
if ( isnan(Rnorm/(Anorm*N*eps)) || (Rnorm/(Anorm*N*eps) > 10.0) ){
printf("-- Factorization is suspicious ! \n");
info_factorization = 1;
}
else{
printf("-- Factorization is CORRECT ! \n");
info_factorization = 0;
}
free(Residual); free(L1); free(L2); free(work);
return info_factorization;
}
/*--------------------------------------------------------------
* Check the gemm
*/
float c_check_gemm(PLASMA_enum transA, PLASMA_enum transB, int M, int N, int K,
PLASMA_Complex32_t alpha, PLASMA_Complex32_t *A, int LDA,
PLASMA_Complex32_t *B, int LDB,
PLASMA_Complex32_t beta, PLASMA_Complex32_t *Cplasma,
PLASMA_Complex32_t *Cref, int LDC,
float *Cinitnorm, float *Cplasmanorm, float *Clapacknorm )
{
PLASMA_Complex32_t beta_const = -1.0;
float Rnorm;
float *work = (float *)malloc(max(K,max(M, N))* sizeof(float));
*Cinitnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, N, Cref, LDC, work);
*Cplasmanorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, N, Cplasma, LDC, work);
cblas_cgemm(CblasColMajor, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K,
CBLAS_SADDR(alpha), A, LDA, B, LDB, CBLAS_SADDR(beta), Cref, LDC);
*Clapacknorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, N, Cref, LDC, work);
cblas_caxpy(LDC * N, CBLAS_SADDR(beta_const), Cplasma, 1, Cref, 1);
Rnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, N, Cref, LDC, work);
free(work);
return Rnorm;
}
/*--------------------------------------------------------------
* Check the trsm
*/
float c_check_trsm(PLASMA_enum side, PLASMA_enum uplo, PLASMA_enum trans, PLASMA_enum diag,
int M, int NRHS, PLASMA_Complex32_t alpha,
PLASMA_Complex32_t *A, int LDA,
PLASMA_Complex32_t *Bplasma, PLASMA_Complex32_t *Bref, int LDB,
float *Binitnorm, float *Bplasmanorm, float *Blapacknorm )
{
PLASMA_Complex32_t beta_const = -1.0;
float Rnorm;
float *work = (float *)malloc(max(M, NRHS)* sizeof(float));
/*float eps = LAPACKE_slamch_work('e');*/
*Binitnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, NRHS, Bref, LDB, work);
*Bplasmanorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'm', M, NRHS, Bplasma, LDB, work);
cblas_ctrsm(CblasColMajor, (CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
(CBLAS_TRANSPOSE)trans, (CBLAS_DIAG)diag, M, NRHS,
CBLAS_SADDR(alpha), A, LDA, Bref, LDB);
*Blapacknorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'm', M, NRHS, Bref, LDB, work);
cblas_caxpy(LDB * NRHS, CBLAS_SADDR(beta_const), Bplasma, 1, Bref, 1);
Rnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'm', M, NRHS, Bref, LDB, work);
Rnorm = Rnorm / *Blapacknorm;
/* max(M,NRHS) * eps);*/
free(work);
return Rnorm;
}
/*--------------------------------------------------------------
* Check the solution
*/
float c_check_solution(int M, int N, int NRHS, PLASMA_Complex32_t *A, int LDA,
PLASMA_Complex32_t *B, PLASMA_Complex32_t *X, int LDB,
float *anorm, float *bnorm, float *xnorm )
{
/* int info_solution; */
float Rnorm = -1.00;
PLASMA_Complex32_t zone = 1.0;
PLASMA_Complex32_t mzone = -1.0;
float *work = (float *)malloc(max(M, N)* sizeof(float));
*anorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, N, A, LDA, work);
*xnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', M, NRHS, X, LDB, work);
*bnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', N, NRHS, B, LDB, work);
cblas_cgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, NRHS, N, CBLAS_SADDR(zone), A, LDA, X, LDB, CBLAS_SADDR(mzone), B, LDB);
Rnorm = LAPACKE_clange_work(LAPACK_COL_MAJOR, 'i', N, NRHS, B, LDB, work);
free(work);
return Rnorm;
}
|
This mindlessness is connected to the context in which Brooks was writing. He declared: "at this point we're pretty much living in an irrational time", full of human suffering and lacking reason or logic. When asked in a subsequent interview about how he would compare terrorists with zombies, Brooks said:
|
Require Export Program.Basics. Open Scope program_scope.
From Paco Require Import paco.
From Paco Require Import paconotation_internal paco_internal pacotac_internal.
From Paco Require Export paconotation.
From Fairness Require Import pind_internal.
Set Implicit Arguments.
Section PIND3.
Variable T0 : Type.
Variable T1 : forall (x0: @T0), Type.
Variable T2 : forall (x0: @T0) (x1: @T1 x0), Type.
(** ** Predicates of Arity 3
*)
Definition pind3(gf : rel3 T0 T1 T2 -> rel3 T0 T1 T2)(r: rel3 T0 T1 T2) : rel3 T0 T1 T2 :=
@curry3 T0 T1 T2 (pind (fun R0 => @uncurry3 T0 T1 T2 (gf (@curry3 T0 T1 T2 R0))) (@uncurry3 T0 T1 T2 r)).
Definition upind3(gf : rel3 T0 T1 T2 -> rel3 T0 T1 T2)(r: rel3 T0 T1 T2) := pind3 gf r /3\ r.
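(* Note (not in the original file): pind3 instantiates parameterized induction at
   arity 3 by transporting the 3-ary generating function gf along uncurry3/curry3
   to the unary pind from the paco library; upind3 additionally intersects the
   result with the parameter relation r. *)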
Arguments pind3 : clear implicits.
Arguments upind3 : clear implicits.
#[local] Hint Unfold upind3 : core.
Lemma monotone3_inter (gf gf': rel3 T0 T1 T2 -> rel3 T0 T1 T2)
(MON1: monotone3 gf)
(MON2: monotone3 gf'):
monotone3 (gf /4\ gf').
Proof.
red; intros. destruct IN. split; eauto.
Qed.
Lemma _pind3_mon_gen (gf gf': rel3 T0 T1 T2 -> rel3 T0 T1 T2) r r'
(LEgf: gf <4= gf')
(LEr: r <3= r'):
pind3 gf r <3== pind3 gf' r'.
Proof.
apply curry_map3. red; intros. eapply pind_mon_gen. apply PR.
- intros. apply LEgf, PR0.
- intros. apply LEr, PR0.
Qed.
Lemma pind3_mon_gen (gf gf': rel3 T0 T1 T2 -> rel3 T0 T1 T2) r r' x0 x1 x2
(REL: pind3 gf r x0 x1 x2)
(LEgf: gf <4= gf')
(LEr: r <3= r'):
pind3 gf' r' x0 x1 x2.
Proof.
eapply _pind3_mon_gen; [apply LEgf | apply LEr | apply REL].
Qed.
Lemma pind3_mon_bot (gf gf': rel3 T0 T1 T2 -> rel3 T0 T1 T2) r' x0 x1 x2
(REL: pind3 gf bot3 x0 x1 x2)
(LEgf: gf <4= gf'):
pind3 gf' r' x0 x1 x2.
Proof.
eapply pind3_mon_gen; [apply REL | apply LEgf | intros; contradiction PR].
Qed.
Definition top3 { T0 T1 T2} (x0: T0) (x1: T1 x0) (x2: T2 x0 x1) := True.
Lemma pind3_mon_top (gf gf': rel3 T0 T1 T2 -> rel3 T0 T1 T2) r x0 x1 x2
(REL: pind3 gf r x0 x1 x2)
(LEgf: gf <4= gf'):
pind3 gf' top3 x0 x1 x2.
Proof.
eapply pind3_mon_gen; eauto. red. auto.
Qed.
Lemma upind3_mon_gen (gf gf': rel3 T0 T1 T2 -> rel3 T0 T1 T2) r r' x0 x1 x2
(REL: upind3 gf r x0 x1 x2)
(LEgf: gf <4= gf')
(LEr: r <3= r'):
upind3 gf' r' x0 x1 x2.
Proof.
destruct REL. split; eauto.
eapply pind3_mon_gen; [apply H | apply LEgf | apply LEr].
Qed.
Lemma upind3_mon_bot (gf gf': rel3 T0 T1 T2 -> rel3 T0 T1 T2) r' x0 x1 x2
(REL: upind3 gf bot3 x0 x1 x2)
(LEgf: gf <4= gf'):
upind3 gf' r' x0 x1 x2.
Proof.
eapply upind3_mon_gen; [apply REL | apply LEgf | intros; contradiction PR].
Qed.
Lemma upind3mon_top (gf gf': rel3 T0 T1 T2 -> rel3 T0 T1 T2) r x0 x1 x2
(REL: upind3 gf r x0 x1 x2)
(LEgf: gf <4= gf'):
upind3 gf' top3 x0 x1 x2.
Proof.
eapply upind3_mon_gen; eauto. red. auto.
Qed.
Section Arg3.
Variable gf : rel3 T0 T1 T2 -> rel3 T0 T1 T2.
Arguments gf : clear implicits.
Theorem _pind3_mon: _monotone3 (pind3 gf).
Proof.
red; intros. eapply curry_map3, _pind_mon; apply uncurry_map3; assumption.
Qed.
Theorem _pind3_acc: forall
l r (OBG: forall rr (DEC: rr <3== r) (IH: rr <3== l), pind3 gf rr <3== l),
pind3 gf r <3== l.
Proof.
intros. apply curry_adjoint2_3.
eapply _pind_acc. intros.
apply curry_adjoint2_3 in DEC. apply curry_adjoint2_3 in IH.
apply curry_adjoint1_3.
eapply le3_trans. 2: eapply (OBG _ DEC IH).
apply curry_map3.
apply _pind_mon; try apply le1_refl; apply curry_bij2_3.
Qed.
Theorem _pind3_mult_strong: forall r,
pind3 gf r <3== pind3 gf (upind3 gf r).
Proof.
intros. apply curry_map3.
eapply le1_trans; [eapply _pind_mult_strong |].
apply _pind_mon; intros [] H. apply H.
Qed.
Theorem _pind3_fold: forall r,
gf (upind3 gf r) <3== pind3 gf r.
Proof.
intros. apply uncurry_adjoint1_3.
eapply le1_trans; [| apply _pind_fold]. apply le1_refl.
Qed.
Theorem _pind3_unfold: forall (MON: _monotone3 gf) r,
pind3 gf r <3== gf (upind3 gf r).
Proof.
intros. apply curry_adjoint2_3.
eapply _pind_unfold; apply monotone3_map; assumption.
Qed.
Theorem pind3_acc: forall
l r (OBG: forall rr (DEC: rr <3= r) (IH: rr <3= l), pind3 gf rr <3= l),
pind3 gf r <3= l.
Proof.
apply _pind3_acc.
Qed.
Theorem pind3_mon: monotone3 (pind3 gf).
Proof.
apply monotone3_eq.
apply _pind3_mon.
Qed.
Theorem upind3_mon: monotone3 (upind3 gf).
Proof.
red; intros.
destruct IN. split; eauto.
eapply pind3_mon. apply H. apply LE.
Qed.
Theorem pind3_mult_strong: forall r,
pind3 gf r <3= pind3 gf (upind3 gf r).
Proof.
apply _pind3_mult_strong.
Qed.
Corollary pind3_mult: forall r,
pind3 gf r <3= pind3 gf (pind3 gf r).
Proof. intros; eapply pind3_mult_strong in PR. eapply pind3_mon; eauto. intros. destruct PR0. eauto. Qed.
Theorem pind3_fold: forall r,
gf (upind3 gf r) <3= pind3 gf r.
Proof.
apply _pind3_fold.
Qed.
Theorem pind3_unfold: forall (MON: monotone3 gf) r,
pind3 gf r <3= gf (upind3 gf r).
Proof.
intro. eapply _pind3_unfold; apply monotone3_eq; assumption.
Qed.
End Arg3.
Arguments pind3_acc : clear implicits.
Arguments pind3_mon : clear implicits.
Arguments upind3_mon : clear implicits.
Arguments pind3_mult_strong : clear implicits.
Arguments pind3_mult : clear implicits.
Arguments pind3_fold : clear implicits.
Arguments pind3_unfold : clear implicits.
End PIND3.
Global Opaque pind3.
#[export] Hint Unfold upind3 : core.
#[export] Hint Resolve pind3_fold : core.
#[export] Hint Unfold monotone3 : core.
|
------------------------------------------------------------------------------
-- Well-founded induction on the relation LTC
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOTC.Data.List.WF-Relation.LT-Cons.Induction.Acc.WF-I where
open import FOTC.Base
open import FOTC.Data.List
open import FOTC.Data.List.WF-Relation.LT-Cons
open import FOTC.Data.List.WF-Relation.LT-Cons.PropertiesI
open import FOTC.Data.List.WF-Relation.LT-Length
open import FOTC.Data.List.WF-Relation.LT-Length.Induction.Acc.WF-I
open import FOTC.Induction.WF
-- Parametrized modules
open module S = FOTC.Induction.WF.Subrelation {List} {LTC} LTC→LTL
------------------------------------------------------------------------------
-- The relation LTC is well-founded (using the subrelation combinator).
LTC-wf : WellFounded LTC
LTC-wf Lxs = well-founded LTL-wf Lxs
-- Well-founded induction on the relation LTC.
LTC-wfind : (A : D → Set) →
            (∀ {xs} → List xs → (∀ {ys} → List ys → LTC ys xs → A ys) → A xs) →
            ∀ {xs} → List xs → A xs
LTC-wfind A = WellFoundedInduction LTC-wf
|
[STATEMENT]
lemma "A * true * \<up>(b \<and> c) * true * B \<Longrightarrow>\<^sub>A \<up>b * \<up>c * true *A * B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A * true * \<up> (b \<and> c) * true * B \<Longrightarrow>\<^sub>A \<up> b * \<up> c * true * A * B
[PROOF STEP]
by simp |
inductive Vector (α : Type u) : Nat → Type u where
  | nil : Vector α 0
  | cons (head : α) (tail : Vector α n) : Vector α (n+1)
namespace Vector
def nth : ∀ {n}, Vector α n → Fin n → α
  | n+1, cons x xs, ⟨0, _⟩   => x
  | n+1, cons x xs, ⟨k+1, h⟩ => xs.nth ⟨k, sorry⟩
def snoc : ∀ {n : Nat} (xs : Vector α n) (x : α), Vector α (n+1)
  | _, nil, x'       => cons x' nil
  | _, cons x xs, x' => cons x (snoc xs x')
theorem nth_snoc_eq (k : Fin (n+1)) (v : Vector α n)
    (h : k.val = n) :
    (v.snoc x).nth k = x := by
  cases k; rename_i k hk
  induction v generalizing k <;> subst h
  · simp only [nth]
  · simp! [*]
theorem nth_snoc_eq_works (k : Fin (n+1)) (v : Vector α n)
    (h : k.val = n) :
    (v.snoc x).nth k = x := by
  cases k; rename_i k hk
  induction v generalizing k <;> subst h
  · simp only [nth]
  · simp [*, nth]
end Vector
|
-- Written by P. Hausmann
module Vec where
open import IO
open import Data.Vec
open import Data.Nat
open import Data.Nat.Show
open import Level using (0ℓ)
Matrix : Set -> ℕ -> ℕ -> Set
Matrix a n m = Vec (Vec a m) n
madd : {n m : ℕ} -> Matrix ℕ m n -> Matrix ℕ m n -> Matrix ℕ m n
madd a b = map (λ x → \y -> map _+_ x ⊛ y) a ⊛ b
idMatrix : {n : ℕ} -> Matrix ℕ n n
idMatrix {zero} = []
idMatrix {suc n} = (1 ∷ (replicate zero)) ∷ (map (λ x → zero ∷ x) idMatrix)
transposeM : {n m : ℕ} {a : Set} -> Matrix a m n -> Matrix a n m
transposeM {zero} {zero} a₁ = []
transposeM {zero} {suc m} {a} x = []
transposeM {suc n} {zero} a₁ = replicate []
transposeM {suc n} {suc m} {a} (_∷_ x₁ x₂) with map head (x₁ ∷ x₂)
... | vm = vm ∷ (map _∷_ (tail x₁) ⊛ transposeM (map tail x₂))
-- We use quite small numbers right now, as with big number the computation
-- gets very slow (at least in MAlonzo)
-- correct result : 109
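-- Worked check of that value (not in the original file): g = m + (m + idMatrix)ᵀ,
-- i.e. rows 7 17 20 / 17 1 9 / 20 9 9, whose entries sum to 109.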
compute : ℕ
compute = sum (map sum g)
  where m : Matrix ℕ 3 3
        m = (3 ∷ 5 ∷ 9 ∷ []) ∷
            (12 ∷ 0 ∷ 7 ∷ []) ∷ (11 ∷ 2 ∷ 4 ∷ []) ∷ []
        g : Matrix ℕ 3 3
        g = madd (transposeM (transposeM m)) (transposeM (madd m idMatrix))
main = run {0ℓ} (putStrLn (show compute))
|
function readinput(filename)
map(value -> parse(Int, value), readlines(filename))
end
lines = readinput("d01.in")
n = size(lines, 1)    # number of expense entries
target = 2020         # renamed from `sum`/`length` to avoid shadowing Base functions
P1, P2 = 0, 0
for i in 1:n
    for j in i+1:n
        if lines[i] + lines[j] == target
            global P1 = lines[i] * lines[j]
        end
        for k in j+1:n
            if lines[i] + lines[j] + lines[k] == target
                global P2 = lines[i] * lines[j] * lines[k]
            end
        end
    end
end
println("P1: ", P1)
println("P2: ", P2)
|
data Bool : Set where
true false : Bool
postulate
  and : Bool → Bool → Bool
-- WAS: splitting on y removes the x@ as-pattern
test : Bool → Bool → Bool
test x@true y = {!y!}
test x@false y = and x {!y!} -- x will go out of scope if we split on y
-- Multiple as-patterns on the same pattern should be preserved in the
-- right order.
test₁ : Bool → Bool → Bool
test₁ x@y@true z = {!z!}
test₁ x@y z = {!!}
open import Agda.Builtin.String
-- As bindings on literals should also be preserved
test₂ : String → Bool → Bool
test₂ x@"foo" z = {!z!}
test₂ _ _ = {!!}
|
module advection
integer, parameter :: NADV = 2, NADVS=1 ! add'l boundary points
end module advection
|
function stroud_test19 ( )
%*****************************************************************************80
%
%% TEST19 tests CUBE_UNIT_3D, QMULT_3D, RECTANGLE_3D.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 06 April 2009
%
% Author:
%
% John Burkardt
%
global FUNC_3D_INDEX;
a1 = -1.0;
b1 = +1.0;
a(1) = -1.0;
a(2) = -1.0;
a(3) = -1.0;
b(1) = 1.0;
b(2) = 1.0;
b(3) = 1.0;
fprintf ( 1, '\n' );
fprintf ( 1, '\n' );
fprintf ( 1, 'TEST19\n' );
fprintf ( 1, ' CUBE_UNIT_3D approximates integrals\n' );
fprintf ( 1, ' in the unit cube in 3D.\n' );
fprintf ( 1, ' QMULT_3D approximates triple integrals.\n' );
fprintf ( 1, ' RECTANGLE_3D approximates integrals\n' );
fprintf ( 1, ' in a rectangular block.\n' );
fprintf ( 1, '\n' );
fprintf ( 1, ...
' F(X) CUBE_UNIT_3D QMULT_3D RECTANGLE_3D\n' );
fprintf ( 1, '\n' );
num = function_3d_num ( );
for i = 1 : num
FUNC_3D_INDEX = i;
result1 = cube_unit_3d ( 'function_3d' );
result2 = qmult_3d ( 'function_3d', a1, b1, 'fu18', 'fl18', 'fu28', 'fl28' );
result3 = rectangle_3d ( 'function_3d', a, b );
fname = function_3d_name ( i );
fprintf ( 1, ' %s %12f %12f %12f\n', ...
fname, result1, result2, result3 );
end
return
end
|
= = = Criticism = = =
|
import pygame
import numpy as np
from typing import Tuple
from wireframe import Wireframe, load_models_from_folder
import matrices
def translate_3d_to_2d(point_3d: np.ndarray, view_width: float, view_height: float, focal: float) -> Tuple[float, float]:
    """Map a 3D point to the 2D screen position where it should be drawn."""
    from_focal = focal / point_3d[1]
    x = from_focal * point_3d[0] + view_width / 2
    y = view_height / 2 - from_focal * point_3d[2]
    return x, y
def is_point_visible(point_3d: np.ndarray, focal: float) -> bool:
    return point_3d[1] > focal
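# A quick worked example of the projection above (illustrative values, not part of
# the original script): a point straight ahead of the camera at depth 600 with
# focal length 300 lands in the middle of a 500x500 view, since 300 / 600 = 0.5.
_cx, _cy = translate_3d_to_2d(np.array([0, 600, 0]), 500, 500, 300)
assert (_cx, _cy) == (250.0, 250.0)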
screen_size = (500, 500)
focal = 300
node_color = (255, 255, 255)
node_size = 3
edge_color = node_color
edge_width = 2
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption('3D models :D')
wireframes = load_models_from_folder('models')
# transformation = matrices.translation_matrix(-200, 0, -300)
# wireframes[0].transform(transformation)
FOCAL_LIMITS = 20., 500.
FOCAL_STEP = 2.
TRANSLATION_STEP = 10.
ROTATION_STEP = np.radians(0.8)
left_translation = matrices.translation_matrix(TRANSLATION_STEP, 0, 0)
right_translation = matrices.translation_matrix(-TRANSLATION_STEP, 0, 0)
forward_translation = matrices.translation_matrix(0, -TRANSLATION_STEP, 0)
backward_translation = matrices.translation_matrix(0, +TRANSLATION_STEP, 0)
up_translation = matrices.translation_matrix(0, 0, -TRANSLATION_STEP)
down_translation = matrices.translation_matrix(0, 0, +TRANSLATION_STEP)
counter_clockwise_rotation = matrices.rotation_matrix(ROTATION_STEP, 'z')
clockwise_rotation = matrices.rotation_matrix(-ROTATION_STEP, 'z')
up_rotation = matrices.rotation_matrix(ROTATION_STEP, 'x')
down_rotation = matrices.rotation_matrix(-ROTATION_STEP, 'x')
left_rotation = matrices.rotation_matrix(ROTATION_STEP * 2, 'y')
right_rotation = matrices.rotation_matrix(-ROTATION_STEP * 2, 'y')
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
keys = pygame.key.get_pressed()
if keys[pygame.K_MINUS]:
if focal-FOCAL_STEP > FOCAL_LIMITS[0]:
focal -= FOCAL_STEP
if keys[pygame.K_EQUALS]:
if focal+FOCAL_STEP < FOCAL_LIMITS[1]:
focal += FOCAL_STEP
if keys[pygame.K_LEFT] or keys[pygame.K_a]:
for wireframe in wireframes:
wireframe.transform(left_translation)
if keys[pygame.K_RIGHT] or keys[pygame.K_d]:
for wireframe in wireframes:
wireframe.transform(right_translation)
if keys[pygame.K_UP] or keys[pygame.K_w]:
for wireframe in wireframes:
            wireframe.transform(forward_translation)
if keys[pygame.K_DOWN] or keys[pygame.K_s]:
for wireframe in wireframes:
wireframe.transform(backward_translation)
if keys[pygame.K_SPACE]:
for wireframe in wireframes:
wireframe.transform(up_translation)
if keys[pygame.K_LSHIFT] or keys[pygame.K_RSHIFT]:
for wireframe in wireframes:
wireframe.transform(down_translation)
if keys[pygame.K_j]:
for wireframe in wireframes:
            wireframe.transform(counter_clockwise_rotation)
if keys[pygame.K_l]:
for wireframe in wireframes:
            wireframe.transform(clockwise_rotation)
if keys[pygame.K_i]:
for wireframe in wireframes:
wireframe.transform(up_rotation)
if keys[pygame.K_k]:
for wireframe in wireframes:
wireframe.transform(down_rotation)
if keys[pygame.K_u]:
for wireframe in wireframes:
wireframe.transform(left_rotation)
if keys[pygame.K_o]:
for wireframe in wireframes:
wireframe.transform(right_rotation)
# Draw
screen.fill((0, 0, 0))
for wireframe in wireframes:
for node in wireframe.nodes:
if is_point_visible(node, focal):
center = translate_3d_to_2d(node, *screen_size, focal)
pygame.draw.circle(screen, node_color, center, node_size)
for edge in wireframe.edges:
a, b = wireframe.nodes[edge[0]], wireframe.nodes[edge[1]]
if is_point_visible(a, focal) and is_point_visible(b, focal):
a = translate_3d_to_2d(a, *screen_size, focal)
b = translate_3d_to_2d(b, *screen_size, focal)
                pygame.draw.line(screen, edge_color, a, b, edge_width)
pygame.display.flip()
|
Artistic Director, Trevor Schmidt, was also nominated for Best New Fringe Play for writing "Flora and Fauna's Field Trip (with Fleurette!)" with Darrin Hagen.
Artistic Associate, Ellen Chorley, was also nominated for Outstanding Production for Young Audiences and Outstanding New Play (award to playwright) for writing and producing her children's play "Birdie on the Wrong Bus."
Congratulations to all of the nominees and the entire Edmonton theatre community for an incredible season of theatre! We are honored to be recognized.
For all of the nominees and coverage of the event, click here. |
/-
Project: Euclidean Geometry.
Contributors: Adarsh, Anurag, Nikhesh, Sai Niranjan.
-/
import Lake
import Init
import Mathlib.Data.Real.Basic
set_option autoImplicit false
/-!
We define basic geometric objects and state Euclid's 5 postulates.
Convention:
* Begin variable/theorem names with capital letters.
* Use underscore between "words" in the variable names.
* Small letters for Points, Lines, etc.
TODO:
* Prove equivalence of 5th Postulate and Playfair's axiom.
* Prove transitivity of `IsParallel` using Playfair's axiom.
* Show that interior of `Symm_angles` is the same.
-/
structure IncidenceGeometry where
Point : Type
Line : Type
/-- The point lies on the given line.-/
Lies_on : Point β Line β Prop
/-- ```In_between a b c``` means "`b` is in between `a` & `c`"-/
In_between : Point β Point β Point β Prop -- is this supposed to be here?
-- properties of In_between
/--`Between_refl_left a b` means "`a` is in between `a` & `b`"-/
Between_refl_left (a b : Point) : In_between a a b
/--`Between_refl_right a b` means "`b` is in between `a` & `b`"-/
Between_refl_right (a b : Point) : In_between a b b
-- Enter some description here.
structure EuclidGeometry extends IncidenceGeometry where
-- defining Distance
Distance (a b : Point) : β
-- Distance axioms
/--Distance between two points is non-negative.-/
Dist_is_not_neg (a b : Point): Distance a b β₯ 0
/--Distance from a point to itself is 0.-/
Dist_same_point (a : Point) : Distance a a = 0
/--Distance between two distinct points is strictly positive.-/
Dist_geq_0 (a b : Point) : a β b β Distance a b > 0
/--Distance from `a` to `b` = Distance from `b` to `a`-/
Dist_is_symm (a b : Point) : Distance a b = Distance b a
/--Triangle Inequality: `Distance a b + Distance b c β₯ Distance a c`-/
Dist_tri_ineq (a b c : Point) : Distance a b + Distance b c β₯ Distance a c
/--Distance between collinear points `a`, `b`, `c`: `Distance a b + Distance b c = Distance a c`.-/
Dist_in_between (a b c : Point) (h : In_between a b c) :
Distance a b + Distance b c = Distance a c
-- Postulate 1
-- Between two points there is an unique line passing through them
/--Function that takes two distinct points `a` & `b` and gives a line.-/
Line_of_two_points (a b : Point) (h : a β b): Line
/--The line `Line_of_two_points a b` contains the points `a` & `b`.-/
Point_contain (a b : Point) (h : a β b) :
have l : Line := Line_of_two_points a b h
Lies_on a l β§ Lies_on b l
/--A unique line passes through two distinct points `a` & `b`.-/
Line_unique (A B: Point) (h : A β B) (l1 l2 : Line):
(Lies_on A l1 β§ Lies_on B l1) β§ (Lies_on A l2 β§ Lies_on B l2) β l1 = l2
/--Definition of Collinear points `a`, `b`, `c`.-/
Collinear_point (A B C : Point) (h : A β B):
In_between A B C β¨ In_between A C B β¨ In_between C A B
β Lies_on C (Line_of_two_points A B h)
/--A line segment is determined by its endpoints.
Segment is a structure that consisting of two endpoints: `p1` & `p2`.-/
structure Segment (geom : IncidenceGeometry) where
p1 : geom.Point
p2 : geom.Point
instance : Coe EuclidGeometry IncidenceGeometry where
coe geom := { Point := geom.Point, Line := geom.Line, Lies_on := geom.Lies_on, In_between := geom.In_between, Between_refl_left := geom.Between_refl_left, Between_refl_right := geom.Between_refl_right}
/--Function gives the length of the segment `seg`.-/
def EuclidGeometry.length (geom : EuclidGeometry) (seg : Segment geom) : β :=
geom.Distance seg.p1 seg.p2
variable (geom : EuclidGeometry)
/--`Lies_on_segment p seg` means that point `p` lies on segment `seg`.-/
def EuclidGeometry.Lies_on_segment (p : geom.Point) (seg : Segment geom) : Prop :=
geom.In_between seg.p1 p seg.p2
-- this defines when a segment lies on a line (use with CAUTION)
/--`Segment_in_line seg l` says that all the points of the segment `seg` lie on the line `l`.-/
def EuclidGeometry.Segment_in_line (geom: EuclidGeometry)
(seg: Segment geom)(l: geom.Line) : Prop :=
β p: geom.Point, geom.In_between seg.p1 p seg.p2 β geom.Lies_on p l
-- this theorem says that the line is unique when the length of the segment is non-zero
/--There is a unique line that contains a line segment of non-zero length.-/
theorem EuclidGeometry.Unique_line_from_segment
(seg : Segment geom) (h : Β¬ seg.p1 = seg.p2) (l1 l2 : geom.Line) :
geom.Segment_in_line seg l1 β§ geom.Segment_in_line seg l2
β l1 = l2
:= by
intro h1
let β¨h2, h3β© := h1
apply geom.Line_unique seg.p1 seg.p2 h l1 l2
apply And.intro
case left =>
apply And.intro
case left =>
have lem : geom.In_between seg.p1 seg.p1 seg.p2 :=
geom.Between_refl_left seg.p1 seg.p2
simp [Segment_in_line] at h2
have lem' : geom.In_between seg.p1 seg.p1 seg.p2 β
geom.Lies_on seg.p1 l1 := by
simp [h2, lem]
apply lem'
assumption
case right =>
have lem : geom.In_between seg.p1 seg.p2 seg.p2 :=
geom.Between_refl_right seg.p1 seg.p2
simp [Segment_in_line] at h2
have lem' : geom.In_between seg.p1 seg.p2 seg.p2 β
geom.Lies_on seg.p2 l1 := by
simp [h2, lem]
apply lem'
assumption
case right =>
apply And.intro
case left =>
have lem : geom.In_between seg.p1 seg.p1 seg.p2 :=
geom.Between_refl_left seg.p1 seg.p2
simp [Segment_in_line] at h3
have lem' : geom.In_between seg.p1 seg.p1 seg.p2 β
geom.Lies_on seg.p1 l2 := by
simp [h3, lem]
apply lem'
assumption
case right =>
have lem : geom.In_between seg.p1 seg.p2 seg.p2 :=
geom.Between_refl_right seg.p1 seg.p2
simp [Segment_in_line] at h3
have lem' : geom.In_between seg.p1 seg.p2 seg.p2 β
geom.Lies_on seg.p2 l2 := by
simp [h3, lem]
apply lem'
assumption
/--Definition of Parallel Lines-/
def EuclidGeometry.IsParallel (l1 l2 : geom.Line) : Prop :=
(l1 = l2) β¨ (β a : geom.Point, geom.Lies_on a l1 β Β¬ geom.Lies_on a l2)
-- IsParallel is an equivalence relation on Lines
/--`IsParallel` is reflexive.-/
theorem EuclidGeometry.Parallel_refl (l : geom.Line) :
IsParallel geom l l
:= by
simp [IsParallel]
/-- `IsParallel` is symmetric.-/
theorem EuclidGeometry.Parallel_symm (l1 l2 : geom.Line) :
IsParallel geom l1 l2 β IsParallel geom l2 l1
:= by
intro h
simp [IsParallel]
simp [IsParallel] at h
apply Or.elim h
case left =>
intro h1
apply Or.inl
rw [h1]
case right =>
intro h2
apply Or.inr
intro p hp
have lem : geom.Lies_on p l1 β Β¬ geom.Lies_on p l2 := by
apply of_eq_true
apply eq_true (h2 p)
by_contra h3
simp [h3] at lem
contradiction
-- transitivity : for this we need Playfair's theorem.
-- Transitivity has not been used in any definition/theorem,
-- so we shall prove it later.
/--`IsParallel` is transitive.-/
theorem EuclidGeometry.Parallel_trans (l1 l2 l3: geom.Line) :
IsParallel geom l1 l2 β IsParallel geom l2 l3 β IsParallel geom l1 l3
:= by
intro h1 h2
have h1' : IsParallel geom l1 l2 := h1
have h2' : IsParallel geom l2 l3 := h2
simp [IsParallel]
simp [IsParallel] at h1
simp [IsParallel] at h2
apply Or.elim h1
case left =>
intro h12eq
apply Or.elim h2
case left =>
intro h23eq
apply Or.inl
rw [<- h23eq, h12eq]
case right =>
intro hk
rw [<-h12eq] at h2'
simp [IsParallel] at h2'
assumption
case right =>
intro hk
apply Or.elim h2
case left =>
intro h23eq
rw [h23eq] at h1'
simp [IsParallel] at h1'
assumption
case right =>
intro hk1
by_cases l1 = l3
case pos =>
apply Or.inl
assumption
case neg =>
apply Or.inr
intro p hp
by_contra hk'
sorry -- need to use Playfair's axiom here for this case.
/--Definition of intersection of lines. Here, we just say: `Β¬ IsParallel`. -/
def EuclidGeometry.Lines_intersect (l1 l2 : geom.Line) : Prop
:= Β¬ IsParallel geom l1 l2
/-- Existence of intersection point for two intersecting lines.-/
theorem EuclidGeometry.Lines_intersect_Point_Exist
(l1 l2 : geom.Line) (h : Lines_intersect geom l1 l2) :
β c : geom.Point, geom.Lies_on c l1 β§ geom.Lies_on c l2
:= by
simp [Lines_intersect, IsParallel] at h
rw [not_or] at h
let β¨_, h2β© := h
simp at h2
assumption
/--`Lines_intersect2 l1 l2 p` states that the lines `l1, l2`
intersect at the point `p`.-/
def EuclidGeometry.Lines_intersect2
(l1 l2 : geom.Line) (p : geom.Point) :
Prop
:= geom.Lies_on p l1 β§ geom.Lies_on p l2
/--Uniqueness of intersection point for two intersecting lines.-/
theorem EuclidGeometry.Lines_intersect_point_unique
(l1 l2 : geom.Line) (h : Lines_intersect geom l1 l2) (A B : geom.Point) :
Lines_intersect2 geom l1 l2 A β
Lines_intersect2 geom l1 l2 B β
A = B
:= by
intro h1 h2
by_contra hab
simp [Lines_intersect2] at h1
let β¨h1l, h1rβ© := h1
simp [Lines_intersect2] at h2
let β¨h2l, h2rβ© := h2
have lem1 : l1 = l2 := by
apply geom.Line_unique A B hab
simp [h1l, h1r, h2l, h2r]
simp [Lines_intersect, IsParallel] at h
rw [not_or] at h
let β¨hk, _β© := h
contradiction
/--Existence of intersection point of a line and a segment -/
def EuclidGeometry.Line_intersect_segment
(seg : Segment geom) (l : geom.Line) : Prop :=
β p : geom.Point, geom.Lies_on p l β§ geom.Lies_on_segment p seg
/--`Line_intersect_segment2 seg l p` states that
line `l` intersects the segment `seg` at point `p`-/
def EuclidGeometry.Line_intersect_segment2
(seg : Segment geom) (l : geom.Line) (p : geom.Point): Prop :=
geom.Lies_on p l β§ geom.Lies_on_segment p seg
-- postulate 3
-- A circle can be constructed with any centre and any radius.
-- this is just the definition of a circle.
/-- Circle is a structure with a point `centre` and positive real `radius`.-/
structure Circle (geom : EuclidGeometry) where
centre : geom.Point
radius : β
/--`On_circle p circ` states that the point `p` lies on the circle `circ`.-/
def EuclidGeometry.On_circle (p : geom.Point) (circ : Circle geom) : Prop
:= geom.Distance p circ.centre = circ.radius
-- postulate 4
/--Angle is a structure with three points.
`p1 = a`, `Pivot = o`, `p2 = b` denotes the angle (a o b).-/
structure Angle (geom : EuclidGeometry) where
p1 : geom.Point
Pivot : geom.Point
p2 : geom.Point
-- p1 and p2 need to be different from the Pivot
variable (reflexAngle : Angle geom β Angle geom)
--measure of an angle
variable (mAngle : Angle geom β β)
/--`Int_point_angle a A` states that point `a` is inside angle `A`.-/
def EuclidGeometry.Int_point_angle (a : geom.Point) (A : Angle geom) : Prop :=
let A1 : Angle geom := Angle.mk a A.Pivot A.p1
let A2 : Angle geom := Angle.mk a A.Pivot A.p2
(mAngle (A1) < mAngle A) β§ (mAngle (A2) < mAngle A) -- strict inequality because of 120
-- we need to write that Int_point_angle for angles AOB and BOA are the same
-- Postulate 4 says all right angles are equal.
-- We are assigning it a value of 90
/--Define a property called `Is_right_angle A` which states that
`A` is a right angle and its measure is 90.-/
def EuclidGeometry.Is_right_angle (A : Angle geom): Prop := mAngle A = 90
-- Postulate 5
/--`Opp_sided_points p1 p2 l` states that the two points `p1, p2`
are on opposite sides of line `l`.-/
def EuclidGeometry.Opp_sided_points (p1 p2 : geom.Point) (l : geom.Line) : Prop :=
geom.Line_intersect_segment (Segment.mk p1 p2) l
/--`Same_sided_points p1 p2 l` states that the two points `p1, p2`
are on the same side of line `l`. Here, it is: `Β¬ Opp_sided_points p1 p2 l`-/
def EuclidGeometry.Same_sided_points (p1 p2 : geom.Point) (l : geom.Line) : Prop :=
Β¬ geom.Opp_sided_points p1 p2 l
-- List of Axioms and Euclid's Postulates
structure Axioms where
/--Euclid Postulate #2: every segment lies on a line.-/
Post2 (geom : EuclidGeometry) :
β s : Segment geom, β l: geom.Line, geom.Segment_in_line s l
/--Line can be obtained from a segment.-/
Line_from_segment : Segment geom β geom.Line
-- property of Line_from_segment
/---/
Line_from_segment_contains_segment (seg : Segment geom) :
geom.Segment_in_line seg (Line_from_segment seg)
-- properties of mAngle
/--Measure of any angle is non-negative.-/
mAngle_non_neg (a b c : geom.Point) :
mAngle (Angle.mk a b c) β₯ 0
/--Definition of Zero Angle.-/
ZeroAngle (a b c : geom.Point) (_ : geom.In_between a c b):
mAngle (Angle.mk a b c) = 0
/--If not a zero angle, then the measure of the angle is positive.-/
mAngle_postive (a b c : geom.Point) :
Β¬ geom.In_between a c b β
Β¬ geom.In_between c a b β
mAngle (Angle.mk a b c) > 0
/--`mReflexAngle A` returns the measure of the reflex angle of angle `A`.-/
mReflexAngle (A : Angle geom) :
mAngle (reflexAngle A) = 360 - mAngle A
/-- Angle `A` as sum of its constituents.
For `mAngle_add a A h`, we have:
`have A1 : Angle geom := Angle.mk a A.Pivot A.p1`
`have A2 : Angle geom := Angle.mk a A.Pivot A.p2`
then
`mAngle A = mAngle A1 + mAngle A2`-/
mAngle_add (a : geom.Point) (A : Angle geom)
(_ : EuclidGeometry.Int_point_angle geom mAngle a A) :
have A1 : Angle geom := Angle.mk a A.Pivot A.p1
have A2 : Angle geom := Angle.mk a A.Pivot A.p2
mAngle A = mAngle A1 + mAngle A2
/--The measure of a straight angle is 180.-/
StraightAngle (a b c : geom.Point) (_ : geom.In_between a b c) :
mAngle (Angle.mk a b c) = 180
-- equality of measure of "symmetric" angles: e.g. Angle AOB = Angle BOA
-- can't write directly that they are equal as they are different structures "entrywise".
-- we need to also mention that their interior points are equal.
/--`Symm_angles (a o b)` states that:
`mAngle (Angle (a o b)) = mAngle (Angle (b o a))`.-/
Symm_angles (a o b : geom.Point):
mAngle (Angle.mk a o b) = mAngle (Angle.mk b o a)
-- Statement of Postulate 5:
-- Let line segment AB and CD be intersected by
-- line l at points p1 and p2 respectively.
-- Let A and C be same-sided wrt l.
-- Let mAngle(A p1 p2) + mAngle(C p2 p1) < 180.
-- Then there exists a point p such that
-- line from AB and CD intersect at p
-- and p is same-sided as A wrt to line l.
/--Euclid's Postulate #5.-/
Post5 (a b c d p1 p2 : geom.Point) (l : geom.Line)
(hab : Β¬ a = b) (hcd : Β¬ c = d) :
geom.Line_intersect_segment2 (Segment.mk a b) l p1
β
geom.Line_intersect_segment2 (Segment.mk c d) l p2
β
geom.Same_sided_points a c l
β
mAngle (Angle.mk a p1 p2) + mAngle (Angle.mk c p2 p1) < 180
β
β p : geom.Point,
geom.Lines_intersect2
(geom.Line_of_two_points a b hab) (geom.Line_of_two_points c d hcd) p
β§
geom.Same_sided_points p a l
/--Vertically opposite angles are equal.-/
theorem VOAequal (a b c d o : geom.Point) (SELF : Axioms geom reflexAngle mAngle)
(h1 : Β¬a = b)
(h2 : Β¬c = d)
(h3 : geom.In_between a o b β§ geom.In_between c o d)
(_ : Β¬geom.IsParallel
(geom.Line_of_two_points a b h1)
(geom.Line_of_two_points c d h2))
(h5: geom.Int_point_angle mAngle c (Angle.mk a o b))
(h6: geom.Int_point_angle mAngle a (Angle.mk c o d)):
let COB := Angle.mk c o b
let AOD := Angle.mk a o d
mAngle (COB) = mAngle (AOD)
:= by
let COA := Angle.mk c o a
let AOC := Angle.mk a o c
let AOD := Angle.mk a o d
let AOB := Angle.mk a o b
let COD := Angle.mk c o d
let COB := Angle.mk c o b
have lem1 : mAngle (AOB) = 180 := Axioms.StraightAngle SELF a o b (And.left h3)
have lem2 : mAngle (COD) = 180 := Axioms.StraightAngle SELF c o d (And.right h3)
have lem3 : mAngle (AOB) = mAngle (COA) + mAngle (COB) :=
Axioms.mAngle_add SELF c AOB h5
have lem4 : mAngle (COD) = mAngle (AOC) + mAngle (AOD) :=
Axioms.mAngle_add SELF a COD h6
have lem5 : mAngle COA = mAngle AOC := by
apply Axioms.Symm_angles SELF
have lem6 : mAngle (COA) + mAngle (COB) = mAngle (AOC) + mAngle (AOD) := by
rw [<-lem3, <-lem4, lem1, lem2]
rw [lem5] at lem6
simp [add_left_cancel] at lem6
assumption
|
" No , No , No Part 2 "
|
/-
Copyright (c) 2019 Lucas Allen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Lucas Allen, Scott Morrison
-/
import algebra.big_operators.basic
import data.finsupp.basic
import tactic.converter.apply_congr
import tactic.interactive
example (f g : ℤ → ℤ) (S : finset ℤ) (h : ∀ m ∈ S, f m = g m) :
finset.sum S f = finset.sum S g :=
begin
conv_lhs {
-- If we just call `congr` here, in the second goal we're helpless,
-- because we are only given the opportunity to rewrite `f`.
-- However `apply_congr` uses the appropriate `@[congr]` lemma,
-- so we get to rewrite `f x`, in the presence of the crucial `H : x β S` hypothesis.
apply_congr,
skip,
simp [h, H],
}
end
-- Again, with some `guard` statements.
example (f g : ℤ → ℤ) (S : finset ℤ) (h : ∀ m ∈ S, f m = g m) :
finset.sum S f = finset.sum S g :=
begin
conv_lhs {
apply_congr finset.sum_congr,
-- (See the note about get_goals/set_goals inside apply_congr)
(do ng β tactic.num_goals, guard $ ng = 2),
guard_target S,
skip,
guard_target f x,
simp [h, H]
}
end
-- Verify we can `rw` as well as `simp`.
example (f g : ℤ → ℤ) (S : finset ℤ) (h : ∀ m ∈ S, f m = g m) :
finset.sum S f = finset.sum S g :=
by conv_lhs { apply_congr, skip, rw h x H, }
-- Check that the appropriate `@[congr]` lemma is automatically selected.
example (f g : ℤ → ℤ) (S : finset ℤ) (h : ∀ m ∈ S, f m = g m) :
finset.prod S f = finset.prod S g :=
by conv_lhs { apply_congr, skip, simp [h, H], }
example (f g : ℤ → ℤ) (S : finset ℤ) (h : ∀ m ∈ S, f m = g m) :
finset.fold (+) 0 f S = finset.fold (+) 0 g S :=
begin
-- This time, the automatically selected congruence lemma is "too good"!
-- `finset.sum_congr` matches, and so the `conv` block actually
-- rewrites the left hand side into a `finset.sum`.
conv_lhs { apply_congr, skip, simp [h, H], },
-- So we need a `refl` to identify that we're done.
refl,
end
-- This can be avoided by selecting the congruence lemma by hand.
example (f g : ℤ → ℤ) (S : finset ℤ) (h : ∀ m ∈ S, f m = g m) :
finset.fold (+) 0 f S = finset.fold (+) 0 g S :=
begin
conv_lhs { apply_congr finset.fold_congr, simp [h, H], },
end
example (f : ℤ → ℤ) (S : finset ℤ) (h : ∀ m ∈ S, f m = 0) :
finset.sum S f = 0 :=
begin
conv_lhs { apply_congr, skip, simp [h, H], },
simp,
end
-- An example using `finsupp.sum`
open_locale classical
example {k G : Type} [semiring k] [group G]
  (g : G →₀ k) (a₁ x : G) (b₁ : k)
  (t : ∀ (a₂ : G), a₁ * a₂ = x ↔ a₁⁻¹ * x = a₂) :
  g.sum (λ (a₂ : G) (b₂ : k), ite (a₁ * a₂ = x) (b₁ * b₂) 0) = b₁ * g (a₁⁻¹ * x) :=
begin
-- In fact, `congr` works fine here, because our rewrite works globally.
conv_lhs { apply_congr, skip, dsimp, rw t, },
rw finset.sum_ite_eq g.support, -- it's a pity we can't just use `simp` here.
split_ifs,
{ refl, },
{ simp [finsupp.not_mem_support_iff.1 h], },
end
example : true :=
begin
success_if_fail { conv { apply_congr, }, },
trivial
end
|
= = Interviewees = =
|
(*
File: Group_Relations.thy
Author: Joseph Thommes, TU MΓΌnchen
*)
section \<open>Group relations\<close>
theory Group_Relations
imports Finite_Product_Extend
begin
text \<open>We introduce the notion of a relation of a set of elements: a way to express the neutral
element by using only powers of said elements. The following predicate describes the set of all the
relations that one can construct from a set of elements.\<close>
definition (in comm_group) relations :: "'a set \<Rightarrow> ('a \<Rightarrow> int) set" where
"relations A = {f. finprod G (\<lambda>a. a [^] f a) A = \<one>} \<inter> extensional A"
text \<open>Now some basic lemmas about relations.\<close>
lemma (in comm_group) in_relationsI[intro]:
assumes "finprod G (\<lambda>a. a [^] f a) A = \<one>" "f \<in> extensional A"
shows "f \<in> relations A"
unfolding relations_def using assms by blast
lemma (in comm_group) triv_rel:
"restrict (\<lambda>_. 0::int) A \<in> relations A"
proof
show "(\<Otimes>a\<in>A. a [^] (\<lambda>_\<in>A. 0::int) a) = \<one>" by (intro finprod_one_eqI, simp)
qed simp
lemma (in comm_group) not_triv_relI:
assumes "a \<in> A" "f a \<noteq> (0::int)"
shows "f \<noteq> (\<lambda>_\<in>A. 0::int)"
using assms by auto
lemma (in comm_group) rel_in_carr:
assumes "A \<subseteq> carrier G" "r \<in> relations A"
shows "(\<lambda>a. a [^] r a) \<in> A \<rightarrow> carrier G"
by (meson Pi_I assms(1) int_pow_closed subsetD)
text \<open>The following lemmas are of importance when proving the fundamental theorem of finitely
generated abelian groups in the case that there is just the trivial relation between a set of
generators. They all build up to the last lemma that then is actually used in the proof.\<close>
lemma (in comm_group) relations_zero_imp_pow_not_one:
assumes "a \<in> A" "\<forall>f\<in>(relations A). f a = 0"
shows "\<forall>z::int \<noteq> 0. a [^] z \<noteq> \<one>"
proof (rule ccontr; safe)
fix z::int
assume z: "z \<noteq> 0" "a [^] z = \<one>"
have "restrict ((\<lambda>x. 0)(a := z)) A \<in> relations A"
by (intro in_relationsI finprod_one_eqI, use z in auto)
thus False using z assms by auto
qed
lemma (in comm_group) relations_zero_imp_ord_zero:
assumes "a \<in> A" "\<forall>f\<in>(relations A). f a = 0"
and "a \<in> carrier G"
shows "ord a = 0"
using assms relations_zero_imp_pow_not_one[OF assms(1, 2)]
by (meson finite_cyclic_subgroup_int infinite_cyclic_subgroup_order)
lemma (in comm_group) finprod_relations_triv_harder_better_stronger:
assumes "A \<subseteq> carrier G" "relations A = {(\<lambda>_\<in>A. 0::int)}"
shows "\<forall>f \<in> Pi\<^sub>E A (\<lambda>a. generate G {a}). finprod G f A = \<one> \<longrightarrow> (\<forall>a\<in>A. f a = \<one>)"
proof(rule, rule)
fix f
assume f: "f \<in> (\<Pi>\<^sub>E a\<in>A. generate G {a})" "finprod G f A = \<one>"
with generate_pow assms(1) have "\<forall>a\<in>A. \<exists>k::int. f a = a [^] k" by blast
then obtain r::"'a \<Rightarrow> int" where r: "\<forall>a\<in>A. f a = a [^] r a" by metis
have "restrict r A \<in> relations A"
proof(intro in_relationsI)
have "(\<Otimes>a\<in>A. a [^] restrict r A a) = finprod G f A"
by (intro finprod_cong, use assms r in auto)
thus "(\<Otimes>a\<in>A. a [^] restrict r A a) = \<one>" using f by simp
qed simp
with assms(2) have z: "restrict r A = (\<lambda>_\<in>A. 0)" by blast
have "(restrict r A) a = r a" if "a \<in> A" for a using that by auto
with r z show "\<forall>a\<in>A. f a = \<one>" by auto
qed
lemma (in comm_group) stronger_PiE_finprod_imp:
assumes "A \<subseteq> carrier G" "\<forall>f \<in> Pi\<^sub>E A (\<lambda>a. generate G {a}). finprod G f A = \<one> \<longrightarrow> (\<forall>a\<in>A. f a = \<one>)"
shows "\<forall>f \<in> Pi\<^sub>E ((\<lambda>a. generate G {a}) ` A) id.
finprod G f ((\<lambda>a. generate G {a}) ` A) = \<one> \<longrightarrow> (\<forall>H\<in> (\<lambda>a. generate G {a}) ` A. f H = \<one>)"
proof(rule, rule)
fix f
assume f: "f \<in> Pi\<^sub>E ((\<lambda>a. generate G {a}) ` A) id" "finprod G f ((\<lambda>a. generate G {a}) ` A) = \<one>"
define B where "B = inv_into A (\<lambda>a. generate G {a}) ` ((\<lambda>a. generate G {a}) ` A)"
have Bs: "B \<subseteq> A"
proof
fix x
assume x: "x \<in> B"
then obtain C where C: "C \<in> ((\<lambda>a. generate G {a}) ` A)" "x = inv_into A (\<lambda>a. generate G {a}) C"
unfolding B_def by blast
then obtain c where c: "C = generate G {c}" "c \<in> A" by blast
with C someI_ex[of "\<lambda>y. y \<in> A \<and> generate G {y} = C"] show "x \<in> A"
unfolding inv_into_def by blast
qed
have sI: "(\<lambda>x. generate G {x}) ` B = (\<lambda>x. generate G {x}) ` A"
proof
show "(\<lambda>x. generate G {x}) ` B \<subseteq> (\<lambda>x. generate G {x}) ` A" using Bs by blast
show "(\<lambda>x. generate G {x}) ` A \<subseteq> (\<lambda>x. generate G {x}) ` B"
proof
fix C
assume C: "C \<in> (\<lambda>x. generate G {x}) ` A"
then obtain x where x: "x = inv_into A (\<lambda>a. generate G {a}) C" unfolding B_def by blast
then obtain c where c: "C = generate G {c}" "c \<in> A" using C by blast
with C x someI_ex[of "\<lambda>y. y \<in> A \<and> generate G {y} = C"] have "generate G {x} = C"
unfolding inv_into_def by blast
with x C show "C \<in> (\<lambda>x. generate G {x}) ` B" unfolding B_def by blast
qed
qed
have fBc: "f (generate G {b}) \<in> carrier G" if "b \<in> B" for b
proof -
have "f (generate G {b}) \<in> generate G {b}" using f(1)
by (subst (asm) sI[symmetric], use that in fastforce)
moreover have "generate G {b} \<subseteq> carrier G" using assms(1) that Bs generate_incl by blast
ultimately show ?thesis by blast
qed
let ?r = "restrict (\<lambda>a. if a\<in>B then f (generate G {a}) else \<one>) A"
have "?r \<in> Pi\<^sub>E A (\<lambda>a. generate G {a})"
proof
show "?r x = undefined" if "x \<notin> A" for x using that by simp
show "?r x \<in> generate G {x}" if "x \<in> A" for x using that generate.one B_def f(1) by auto
qed
moreover have "finprod G ?r A = \<one>"
proof (cases "finite A")
case True
have "A = B \<union> (A - B)" using Bs by auto
then have "finprod G ?r A = finprod G ?r (B\<union>(A-B))" by auto
moreover have "\<dots> = finprod G ?r B \<otimes> finprod G ?r (A - B)"
proof(intro finprod_Un_disjoint)
from True Bs finite_subset show "finite B" "finite (A - B)" "B \<inter> (A - B) = {}" by auto
show "(\<lambda>a\<in>A. if a \<in> B then f (generate G {a}) else \<one>) \<in> A - B \<rightarrow> carrier G" using Bs by simp
from fBc show "(\<lambda>a\<in>A. if a \<in> B then f (generate G {a}) else \<one>) \<in> B \<rightarrow> carrier G"
using Bs by auto
qed
moreover have "finprod G ?r B = \<one>"
proof -
have "finprod G ?r B = finprod G (f \<circ> (\<lambda>a. generate G {a})) B"
proof(intro finprod_cong')
show "?r b = (f \<circ> (\<lambda>a. generate G {a})) b" if "b \<in> B" for b using that Bs by auto
show "f \<circ> (\<lambda>a. generate G {a}) \<in> B \<rightarrow> carrier G" using fBc by simp
qed simp
also have "\<dots> = finprod G f ((\<lambda>a. generate G {a}) ` B)"
proof(intro finprod_comp[symmetric])
show "(f \<circ> (\<lambda>a. generate G {a})) ` B \<subseteq> carrier G" using fBc by auto
show "inj_on (\<lambda>a. generate G {a}) B"
by (intro inj_onI, unfold B_def, metis (no_types, lifting) f_inv_into_f inv_into_into)
qed
also have "\<dots> = finprod G f ((\<lambda>a. generate G {a}) ` A)" using sI by argo
finally show ?thesis using f(2) by argo
qed
moreover have "finprod G ?r (A - B) = \<one>" by(intro finprod_one_eqI, simp)
ultimately show ?thesis by fastforce
next
case False
then show ?thesis unfolding finprod_def by simp
qed
ultimately have a: "\<forall>a\<in>A. ?r a = \<one>" using assms(2) by blast
then have BA: "\<forall>a\<in>B\<inter>A. ?r a = \<one>" by blast
from Bs sI have "\<forall>a\<in>A. (generate G {a}) \<in> ((\<lambda>x. generate G {x}) ` B)" by simp
then have "\<forall>a\<in>A. \<exists>b\<in>B. f (generate G {a}) = f (generate G {b})" by force
thus "\<forall>H\<in>(\<lambda>a. generate G {a}) ` A. f H = \<one>" using a BA Bs by fastforce
qed
lemma (in comm_group) finprod_relations_triv:
assumes "A \<subseteq> carrier G" "relations A = {(\<lambda>_\<in>A. 0::int)}"
shows "\<forall>f \<in> Pi\<^sub>E ((\<lambda>a. generate G {a}) ` A) id.
finprod G f ((\<lambda>a. generate G {a}) ` A) = \<one> \<longrightarrow> (\<forall>H\<in> (\<lambda>a. generate G {a}) ` A. f H = \<one>)"
using assms finprod_relations_triv_harder_better_stronger stronger_PiE_finprod_imp by presburger
lemma (in comm_group) ord_zero_strong_imp_rel_triv:
assumes "A \<subseteq> carrier G" "\<forall>a \<in> A. ord a = 0"
and "\<forall>f \<in> Pi\<^sub>E A (\<lambda>a. generate G {a}). finprod G f A = \<one> \<longrightarrow> (\<forall>a\<in>A. f a = \<one>)"
shows "relations A = {(\<lambda>_\<in>A. 0::int)}"
proof -
have "\<And>r. r \<in> relations A \<Longrightarrow> r = (\<lambda>_\<in>A. 0::int)"
proof
fix r x
assume r: "r \<in> relations A"
show "r x = (\<lambda>_\<in>A. 0::int) x"
proof (cases "x \<in> A")
case True
let ?r = "restrict (\<lambda>a. a [^] r a) A"
have rp: "?r \<in> Pi\<^sub>E A (\<lambda>a. generate G {a})"
proof -
have "?r \<in> extensional A" by blast
moreover have "?r \<in> Pi A (\<lambda>a. generate G {a})"
proof
fix a
assume a: "a \<in> A"
then have sga: "subgroup (generate G {a}) G" using generate_is_subgroup assms(1) by auto
show "a [^] r a \<in> generate G {a}"
using generate.incl[of a "{a}" G] subgroup_int_pow_closed[OF sga] by simp
qed
ultimately show ?thesis unfolding PiE_def by blast
qed
have "finprod G ?r A = (\<Otimes>a\<in>A. a [^] r a)" by(intro finprod_cong, use assms(1) in auto)
with r have "finprod G ?r A = \<one>" unfolding relations_def by simp
with assms(3) rp have "\<forall>a\<in>A. ?r a = \<one>" by fast
then have "\<forall>a\<in>A. a [^] r a = \<one>" by simp
with assms(1, 2) True have "r x = 0"
using finite_cyclic_subgroup_int infinite_cyclic_subgroup_order by blast
thus ?thesis using True by simp
next
case False
thus ?thesis using r unfolding relations_def extensional_def by simp
qed
qed
thus ?thesis using triv_rel by blast
qed
lemma (in comm_group) compl_fam_iff_relations_triv:
assumes "finite gs" "gs \<subseteq> carrier G" "\<forall>g\<in>gs. ord g = 0"
shows "relations gs = {(\<lambda>_\<in>gs. 0::int)} \<longleftrightarrow> compl_fam (\<lambda>g. generate G {g}) gs"
using triv_finprod_iff_compl_fam_PiE[of _ "\<lambda>g. generate G {g}", OF assms(1) generate_is_subgroup]
ord_zero_strong_imp_rel_triv[OF assms(2, 3)]
finprod_relations_triv_harder_better_stronger[OF assms(2)] assms by blast
end
|
---
pandoc-minted:
language: idris
---
= Bathroom Security
[Link](https://adventofcode.com/2016/day/2)
You arrive at *Easter Bunny Headquarters* under cover of darkness. However, you
left in such a rush that you forgot to use the bathroom! Fancy office buildings
like this one usually have keypad locks on their bathrooms, so you search the
front desk for the code.
"In order to improve security," the document you find says, "bathroom codes will
no longer be written down. Instead, please memorize and follow the procedure
below to access the bathrooms."
The document goes on to explain that each button to be pressed can be found by
starting on the previous button and moving to adjacent buttons on the keypad:
`U` moves up, `D` moves down, `L` moves left, and `R` moves right. Each line of
instructions corresponds to one button, starting at the previous button (or, for
the first line, *the "5" button*); press whatever button you're on at the end of
each line. If a move doesn't lead to a button, ignore it.
You can't hold it much longer, so you decide to figure out the code as you walk
to the bathroom. You picture a keypad like this:
```text
1 2 3
4 5 6
7 8 9
```
Suppose your instructions are:
```text
ULL
RRDDD
LURDL
UUUUD
```
- You start at "5" and move up (to "2"), left (to "1"), and left (you can't, and
stay on "1"), so the first button is `1`.
- Starting from the previous button ("1"), you move right twice (to "3") and
then down three times (stopping at "9" after two moves and ignoring the
third), ending up with `9`.
- Continuing from "9", you move left, up, right, down, and left, ending with
`8`.
- Finally, you move up four times (stopping at "2"), then down once, ending with
`5`.
So, in this example, the bathroom code is `1985`.
Your puzzle input is the instructions from the document you found at the front
desk.
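
Before diving into the Idris below, here is a small, throwaway Python sketch of
the walk just described (all names here are invented for illustration; the
actual solution is the literate Idris code in the rest of this module):

```python
# Part One rule: moves that would leave the 3x3 grid are simply ignored.
KEYPAD = [["1", "2", "3"],
          ["4", "5", "6"],
          ["7", "8", "9"]]

def clamp(i):
    # Stay on the keypad by clamping indices to the range 0..2.
    return max(0, min(2, i))

def bathroom_code(lines, x=1, y=1):
    # (x, y) = (1, 1) is the "5" button; each line of moves yields one digit.
    digits = []
    for line in lines:
        for move in line:
            if move == "U":
                y = clamp(y - 1)
            elif move == "D":
                y = clamp(y + 1)
            elif move == "L":
                x = clamp(x - 1)
            elif move == "R":
                x = clamp(x + 1)
        digits.append(KEYPAD[y][x])
    return "".join(digits)

print(bathroom_code(["ULL", "RRDDD", "LURDL", "UUUUD"]))  # prints "1985"
```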
== Module Declaration and Imports
> ||| Day 2: Bathroom Security
> module Data.Advent.Day02
>
> import public Data.Advent.Day
> import public Data.Ix
>
> import Data.Vect
>
> import public Lightyear
> import public Lightyear.Char
> import public Lightyear.Strings
== Data Types
> %access public export
>
> ||| Up, down, left or right.
> data Instruction = ||| Up
> U
> | ||| Down
> D
> | ||| Left
> L
> | ||| Right
> R
>
> ||| A single digit, i.e. a number strictly less than ten.
> Digit : Type
> Digit = Fin 10
>
> implementation Show Digit where
> show = show . finToInteger
>
> implementation [showDigits] Show (List Digit) where
> show = concatMap show
>
> ||| A pair of coordinates on the keypad, `(x, y)`.
> Coordinates : Type
> Coordinates = (Fin 3, Fin 3)
\newpage
== Parsers
> %access export
>
> up : Parser Instruction
> up = char 'U' *> pure U <?> "up"
>
> down : Parser Instruction
> down = char 'D' *> pure D <?> "down"
>
> left : Parser Instruction
> left = char 'L' *> pure L <?> "left"
>
> right : Parser Instruction
> right = char 'R' *> pure R <?> "right"
>
> instruction : Parser Instruction
> instruction = up <|> down <|> left <|> right <?> "up, down, left or right"
>
> partial instructions : Parser (List Instruction)
> instructions = some instruction <* (skip endOfLine <|> eof)
== Part One
\begin{quote}
What is the bathroom code?
\end{quote}
> namespace PartOne
>
> ||| A keypad like this:
> |||
> ||| ```
> ||| 1 2 3
> ||| 4 5 6
> ||| 7 8 9
> ||| ```
> keypad : Vect 3 (Vect 3 Digit)
> keypad = [ [1, 2, 3],
> [4, 5, 6],
> [7, 8, 9] ]
>
> move : Coordinates -> Instruction -> Coordinates
> move (x, y) U = (x, pred y)
> move (x, y) D = (x, succ y)
> move (x, y) L = (pred x, y)
> move (x, y) R = (succ x, y)
\newpage
> button : Coordinates -> List Instruction -> (Coordinates, Digit)
> button loc@(x, y) [] = (loc, index x (index y keypad))
> button loc (i :: is) = button (move loc i) is
>
> partial partOne : List (List Instruction) -> String
> partOne = show @{showDigits} . go ((1,1), [])
> where
> go : (Coordinates, List Digit) -> List (List Instruction) -> List Digit
> go (_, ds) [] = reverse ds
> go (loc, ds) (is :: iis) = let (loc', d) = PartOne.button loc is in
> go (loc', d :: ds) iis
>
> namespace PartOne
>
> ||| ```idris example
> ||| example
> ||| ```
> partial example : String
> example = fromEither $ partOne <$>
> parse (some instructions) "ULL\nRRDDD\nLURDL\nUUUUD"
== Part Two
You finally arrive at the bathroom (it's a several minute walk from the lobby so
visitors can behold the many fancy conference rooms and water coolers on this
floor) and go to punch in the code. Much to your bladder's dismay, the keypad is
not at all like you imagined it. Instead, you are confronted with the result of
hundreds of man-hours of bathroom-keypad-design meetings:
```text
1
2 3 4
5 6 7 8 9
A B C
D
```
You still start at "5" and stop when you're at an edge, but given the same
instructions as above, the outcome is very different:
- You start at "5" and don't move at all (up and left are both edges), ending at
`5`.
- Continuing from "5", you move right twice and down three times (through "6",
"7", "B", "D", "D"), ending at `D`.
- Then, from "D", you move five more times (through "D", "B", "C", "C", "B"),
ending at `B`.
- Finally, after five more moves, you end at `3`.
\newpage
So, given the actual keypad layout, the code would be `5DB3`.
\begin{quote}
Using the same instructions in your puzzle input, what is the correct
\textit{bathroom code}?
\end{quote}
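
As with Part One, a quick, throwaway Python sketch (invented names, not part of
the Idris solution below) makes the "stop at an edge" rule on the diamond keypad
concrete:

```python
# Part Two rule: the keypad is a diamond; moves that step off it are ignored.
KEYPAD = {
    (2, 0): "1",
    (1, 1): "2", (2, 1): "3", (3, 1): "4",
    (0, 2): "5", (1, 2): "6", (2, 2): "7", (3, 2): "8", (4, 2): "9",
    (1, 3): "A", (2, 3): "B", (3, 3): "C",
    (2, 4): "D",
}
MOVES = {"U": (0, -1), "D": (0, 1), "L": (-1, 0), "R": (1, 0)}

def bathroom_code(lines, pos=(0, 2)):
    # pos = (0, 2) is the "5" button; a move off the keypad leaves pos unchanged.
    keys = []
    for line in lines:
        for move in line:
            dx, dy = MOVES[move]
            nxt = (pos[0] + dx, pos[1] + dy)
            if nxt in KEYPAD:
                pos = nxt
        keys.append(KEYPAD[pos])
    return "".join(keys)

print(bathroom_code(["ULL", "RRDDD", "LURDL", "UUUUD"]))  # prints "5DB3"
```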
> namespace PartTwo
>
> keypad : Vect 5 (n ** Vect n Char)
> keypad = [ (1 ** ['1'])
> , (3 ** ['2', '3', '4'])
> , (5 ** ['5', '6', '7', '8', '9'])
> , (3 ** ['A', 'B', 'C'])
> , (1 ** ['D'])
> ]
>
> -- NOTE: This will wrap at the bounds, which might be unexpected.
> partial convert : (n : Nat) -> Fin m -> Fin n
> convert (S j) fm {m} =
> let delta = half $ if S j > m
> then S j `minus` m
> else m `minus` S j in
> the (Fin (S j)) $ fromNat $ finToNat fm `f` delta
> where
> f : Nat -> Nat -> Nat
> f = if S j > m then plus else minus
> partial half : Nat -> Nat
> half = flip div 2
>
> canMoveVertically : (Fin (S k), Fin 5) -> Instruction -> Bool
> canMoveVertically (x, y) i with ((finToNat x, finToNat y))
> canMoveVertically (x, y) U | (col, row) =
> case row of
> Z => False
> S Z => col == 1
> S (S Z) => inRange (1,3) col
> _ => True
> canMoveVertically (x, y) D | (col, row) =
> case row of
> S (S Z) => inRange (1,3) col
> S (S (S Z)) => col == 1
> S (S (S (S Z))) => False
> _ => True
> canMoveVertically _ _ | _ = True
\newpage
> partial move : (Fin (S k), Fin 5) -> Instruction ->
> ((n ** Fin n), Fin 5)
> move (x, y) U = if canMoveVertically (x, y) U
> then let n = fst (index (pred y) keypad) in
> ((n ** convert n x), pred y)
> else ((_ ** x), y)
> move (x, y) D = if canMoveVertically (x, y) D
> then let n = fst (index (succ y) keypad) in
> ((n ** convert n x), succ y)
> else ((_ ** x), y)
> move (x, y) L = let n = fst (index y keypad) in
> ((n ** convert n (pred x)), y)
> move (x, y) R = let n = fst (index y keypad) in
> ((n ** convert n (succ x)), y)
>
> partial button : (Fin (S k), Fin 5) -> List Instruction ->
> (((n ** Fin n), Fin 5), Char)
> button loc@(x, y) [] =
> let (n ** row) = index y PartTwo.keypad
> xx = convert n x in
> (((n ** xx), y), index xx row)
> button loc (i :: is) =
> let ((S _ ** x), y) = move loc i in
> button (x, y) is
>
> partial partTwo : List (List Instruction) -> String
> partTwo = go (((5 ** 0),2), [])
> where
> partial go : (((n ** Fin n), Fin 5), List Char) ->
> List (List Instruction) -> String
> go (_, cs) [] = pack $ reverse cs
> go (loc, cs) (is :: iis) =
> let ((S k ** xx), y) = loc
> (loc', c) = PartTwo.button (xx, y) {k=k} is in
> go (loc', c :: cs) iis
>
> namespace PartTwo
>
> ||| ```idris example
> ||| PartTwo.example
> ||| ```
> partial example : String
> example = fromEither $ partTwo <$>
> parse (some instructions) "ULL\nRRDDD\nLURDL\nUUUUD"
== Main
> namespace Main
>
> partial main : IO ()
> main = runDay $ MkDay 2 (some instructions)
> (pure . partOne)
> (pure . partTwo)
|
universe u v
structure InjectiveFunction (α : Type u) (β : Type v) where
  fn : α → β
  inj : ∀ a b, fn a = fn b → a = b
def add1 : InjectiveFunction Nat Nat where
fn a := a + 1
inj a b h := by injection h
instance : CoeFun (InjectiveFunction α β) (fun _ => α → β) where
coe s := s.fn
#eval add1 10
def mapAdd1 (xs : List Nat) : List Nat :=
xs.map add1
#eval mapAdd1 [1, 2]
def foo : InjectiveFunction Bool (Nat → Nat) where
fn
| true, a => a + 1
| false, a => a
inj a b h := by
cases a
cases b; rfl; injection (congrFun h 0)
cases b; injection (congrFun h 0); rfl
theorem ex1 (x : Nat) : foo true x = x + 1 :=
rfl
theorem ex2 (x : Nat) : foo false x = x :=
rfl
#eval foo true 10
#eval foo false 20
#eval [1, 2, 3].map (foo true)
|
Antonio Colonna <unk> (September 26, 1766) – Cardinal-Priest of S. Maria in Via
|
import numpy as _onp
import casadi as _cas
from aerosandbox.numpy.determine_type import is_casadi_type
def diff(a, n=1, axis=-1):
"""
Calculate the n-th discrete difference along the given axis.
See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.diff.html
"""
if not is_casadi_type(a):
return _onp.diff(a, n=n, axis=axis)
else:
if axis != -1:
raise NotImplementedError("This could be implemented, but haven't had the need yet.")
result = a
for i in range(n):
            result = _cas.diff(result)  # apply the first difference n times in total
return result
def trapz(x, modify_endpoints=False): # TODO unify with NumPy trapz, this is different
"""
Computes each piece of the approximate integral of `x` via the trapezoidal method with unit spacing.
Can be viewed as the opposite of diff().
    Args:
        x: The vector-like object (1D np.ndarray, cas.MX) to be integrated.

        modify_endpoints: If True, the first and last pieces are augmented by half of the first and last samples,
            respectively, so that the pieces sum to the plain sum of x rather than to its trapezoidal integral.

    Returns: A vector of length N-1 with each piece corresponding to the mean value of the function on the interval
        starting at index i.
"""
integral = (
x[1:] + x[:-1]
) / 2
if modify_endpoints:
integral[0] = integral[0] + x[0] * 0.5
integral[-1] = integral[-1] + x[-1] * 0.5
return integral
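

if __name__ == "__main__":
    # Illustrative usage only (not part of the aerosandbox API): exercise the two
    # helpers above with a plain NumPy array, so the _onp branches are taken.
    x = _onp.array([0.0, 1.0, 4.0, 9.0])
    print(diff(x))   # [1. 3. 5.], same as np.diff(x)
    print(trapz(x))  # [0.5 2.5 6.5], the mean of each adjacent pair of samples
    # Summing the pieces reproduces the unit-spacing trapezoidal integral of x.
    print(trapz(x).sum(), _onp.trapz(x))  # 9.5 9.5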
|
[STATEMENT]
lemma prevVertex_nextVertex[simp]:
"\<lbrakk> distinct(vertices f); v \<in> \<V> f \<rbrakk>
\<Longrightarrow> f\<^bsup>-1\<^esup> \<bullet> (f \<bullet> v) = v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>distinct (vertices f); v \<in> \<V> f\<rbrakk> \<Longrightarrow> f\<^bsup>-1\<^esup> \<bullet> (f \<bullet> v) = v
[PROOF STEP]
by(simp add:prevVertex_def nextVertex_def prevElem_nextElem) |
[STATEMENT]
lemma tObsC_abs_tLast[simp]:
"(u, v) \<in> tObsC_abs t \<Longrightarrow> envObsC (es v) = envObsC (es (tLast t))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (u, v) \<in> tObsC_abs t \<Longrightarrow> envObsC (es v) = envObsC (es (tLast t))
[PROOF STEP]
unfolding tObsC_abs_def tObsC_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (u, v) \<in> rel_ext (\<lambda>uu_. \<exists>t'. uu_ = (tFirst t', tLast t') \<and> t' \<in> SPR.jkbpC \<and> tMap (envObsC \<circ> es) t' = tMap (envObsC \<circ> es) t) \<Longrightarrow> envObsC (es v) = envObsC (es (tLast t))
[PROOF STEP]
by (auto iff: o_def elim: tMap_tLast_inv) |
module TyTTP.Core.Error
public export
interface Error e where
message : e -> String
export
Error String where
message = id
|
In early 2002, researchers at the Monterey Bay Aquarium Research Institute (MBARI) were using the remotely operated vehicle (ROV) Tiburon to investigate deep-sea clams off Monterey Canyon. While there, they came across the skeleton of a whale that had apparently been picked clean by what seemed to be a carpet of tiny, worm-like creatures. "They were growing like crazy," said MBARI biologist Robert Vrijenhoek in a news release about the discovery, "carpeting the remaining whale bones." The worms had short trunks topped by red plumes, and were about an inch or two in height. When researchers had a chance to examine the creatures more closely, some weren't convinced they were even worms. All did agree they were very strange, but very few probably could have imagined just how strange they'd turn out to be.
DNA analysis confirmed these creatures were indeed annelids, related to other species of tubeworms that live around hydrothermal vents. Like the vent worms, the whale worms had bright red plumes that acted as gills, collecting oxygen from seawater. Unlike the vent worms, however, these worms had green, root-like structures that penetrated the whale bones and branched out in the marrow cavity. These roots, in fact, turned out to be the most important part of the worm's body, containing specialized bacteria allowing for the breakdown of the oils and proteins in the bones.
But things began to get strange when researchers couldn't seem to find any males among the specimens brought up to the surface. Things went from strange to utterly bizarre when they discovered that males did exist, but that, in this species of worms, the males never passed the larval stage. They instead lived in groups of 30 to 100 inside a female's body. In short, males don't feed like the females do but instead live off the yolk of the egg that produced them, all the while producing sperm for the further propagation of the species. Why such an odd method for keeping the species going? Dr. Vrijenhoek suggests the worms are like the ecological equivalent of dandelions: a weedy species that grows rapidly, makes lots of eggs, and disperses far and wide. The strategy makes sense when you consider what depending upon dead whales as a source of food would entail. Once the worms had picked the whale clean, they'd run out of food. Releasing as many tiny larvae as they could and having the ocean current carry them away would increase the chances of them landing on another whale carcass.
Over the next three years, MBARI scientists sank whale carcasses at depths ranging from 400 to 1,800 meters and discovered as many as 12 species of these strange, bone-eating worms in Monterey Bay alone. Later, additional species were found off the coasts of Sweden and Japan. The scientists gave the worms the name Osedax, which is Latin for "bone-eating." They found that different species of the worms appeared at various depths and attacked the whale carcass at different stages of decomposition.
In less than a decade, this previously unknown species of worm has gone from anonymity to something of a cultural phenomenon, inspiring things such as children's hand puppets and the name of a rock band. Regardless of their roles, it seemed that these worms had one thing in common: they fed on mammals. Such a strict diet, however, would have implications for when the Osedax could have developed. If only able to feast on mammalian bones, the Osedax could not have evolved during the Cretaceous period, well before the dawn of marine mammals, as many seemed to believe. To solve the puzzle, researchers from the Scripps Institution of Oceanography at the University of California-San Diego decided to set some bait for the worms. Again using ROVs, the scientists sank tuna and wahoo bones, as well as shark cartilage, to a depth of 1,000 meters off Monterey Canyon. Collecting the bones five months later, they found three distinct species of Osedax growing on them. (The shark cartilage apparently fell prey to other organisms.) This experiment proved that the Osedax weren't whale bone specialists but bone-eating generalists.
The Scripps and MBARI researchers plan to continue their studies of the nutritional limits of the Osedax, in particular concentrating on how they might be able to use shark cartilage. The researchers are also interested in coming to a better understanding of exactly how the Osedax feed off the bone. If past findings are any indication of what to expect, undoubtedly they will find many new species of these strange ocean-dwelling worms.
Whale-eating worms? What next? Shark-devouring bacteria? Get some more chemistry on this site!!! |
[STATEMENT]
lemma R2Reflc:
"eval_rr2_rel \<F> Rs (R2Reflc r) = eval_rr2_rel \<F> Rs r \<union> Id_on (\<T>\<^sub>G \<F>)"
"eval_rr2_rel \<F> Rs (R2Reflc r) = Restr ((eval_rr2_rel \<F> Rs r)\<^sup>=) (\<T>\<^sub>G \<F>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. eval_rr2_rel \<F> Rs (R2Reflc r) = eval_rr2_rel \<F> Rs r \<union> Id_on (\<T>\<^sub>G \<F>) &&& eval_rr2_rel \<F> Rs (R2Reflc r) = Restr ((eval_rr2_rel \<F> Rs r)\<^sup>=) (\<T>\<^sub>G \<F>)
[PROOF STEP]
using eval_rr12_rel_sig(2)[of \<F> Rs "R2Reflc r"]
[PROOF STATE]
proof (prove)
using this:
eval_rr2_rel \<F> Rs (R2Reflc r) \<subseteq> \<T>\<^sub>G \<F> \<times> \<T>\<^sub>G \<F>
goal (1 subgoal):
1. eval_rr2_rel \<F> Rs (R2Reflc r) = eval_rr2_rel \<F> Rs r \<union> Id_on (\<T>\<^sub>G \<F>) &&& eval_rr2_rel \<F> Rs (R2Reflc r) = Restr ((eval_rr2_rel \<F> Rs r)\<^sup>=) (\<T>\<^sub>G \<F>)
[PROOF STEP]
by (auto simp: R2Reflc_def R2Eq) |
The engraving was unusually minute and required very close and incessant labor for several weeks. I made the original dies and hubs for making the working dies twice over, to secure their perfect adaptation to the coining machinery. I had a wish to execute this work single-handed, that I might thus silently reply to those who had questioned my ability for the work. The result, I believe, was satisfactory.
|
import .subgraph
noncomputable theory
open_locale classical
universes u
variables {V : Type u}
namespace simple_graph
variables (G : simple_graph V)
include G
variables [fintype V]
-- want a lemma that 2 * card_edges equals the card of a specific finset.
-- lemma 2 * G.card_edges = finset.card G.E_finset
@[elab_as_eliminator]
lemma induction_on
  (P : simple_graph V → Prop)
(P_empty : P empty)
  (P_inductive : ∀ G ≠ empty, ∃ (H : simple_graph V),
    H.is_subgraph G ∧
    H.card_edges < G.card_edges ∧
    (P H → P G) ) : P G :=
begin
by_cases h : G = empty, { rwa h },
  suffices : ∀ H : simple_graph V, H.card_edges < G.card_edges → P H,
{ have := P_inductive G h, tauto },
induction G.card_edges using nat.strong_induction_on with k hk,
intros H hHk,
by_cases H_card : H = empty, { cc },
  rcases P_inductive H H_card with ⟨K, K_sub, K_card, hKH⟩,
apply hKH, exact hk _ hHk _ K_card,
end
-- for every graph, there exists an edge so that P (G.erase e) → P G
def erase (e : G.E) : simple_graph V :=
{ adj := λ u v, if u ∈ e ∧ v ∈ e then false else G.adj u v,
sym := by { unfold symmetric, intros, simp_rw [edge_symm, and_comm], cc } }
@[simp] lemma erase_adj_iff (e : G.E) (u v : V) :
  (G.erase e).adj u v ↔ G.adj u v ∧ (u ∉ e ∨ v ∉ e) :=
by { simp [erase]; tauto, }
lemma erase_is_subgraph (e : G.E) : (G.erase e).is_subgraph G := by tidy
-- writing this down in a way that avoids nat subtraction
lemma card_edges_erase (e : G.E) : (G.erase e).card_edges + 1 = G.card_edges :=
begin
sorry
end
@[elab_as_eliminator]
lemma induction_on_erase
  (P : simple_graph V → Prop)
(P_empty : P empty)
  (P_inductive : ∀ G : simple_graph V, G ≠ empty →
    ∃ e : G.E, P (G.erase e) → P G)
: P G :=
begin
apply G.induction_on, assumption,
  intros G₀ hG₀, cases P_inductive G₀ hG₀ with e he,
  use G₀.erase e,
split, { apply erase_is_subgraph },
  split, linarith [G₀.card_edges_erase e],
assumption,
end
end simple_graph |
# -*- coding: utf-8 -*-
import numpy as np
from pypytorch.functions.function import Function
from pypytorch import utils
class Linear(Function):
def forward(self, *args):
a, b, c = utils.fetch_args(args, 3)
if c is None:
return a @ b
return a @ b + c
def backward_0(self, grad):
a, b, c = utils.fetch_args(self.inputs, 3)
return grad @ b.T
def backward_1(self, grad):
a, b, c = utils.fetch_args(self.inputs, 3)
return a.T @ grad
def backward_2(self, grad):
a, b, c = self.inputs
        # The bias c enters additively, so its gradient is just the upstream gradient.
return grad
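

if __name__ == "__main__":
    # Illustrative sanity check of the matrix-calculus identities used in the
    # backward methods above, written directly against NumPy so it does not
    # depend on the rest of pypytorch.
    rng = np.random.RandomState(0)
    a = rng.randn(4, 3)      # "input" matrix
    b = rng.randn(3, 2)      # "weight" matrix
    grad = np.ones((4, 2))   # upstream gradient of sum(a @ b)

    def loss(a_, b_):
        return (a_ @ b_).sum()

    grad_a = grad @ b.T      # matches backward_0
    grad_b = a.T @ grad      # matches backward_1 (checked the same way; omitted for brevity)

    # Central finite differences w.r.t. a confirm grad_a.
    eps = 1e-6
    num_grad_a = np.zeros_like(a)
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            da = np.zeros_like(a)
            da[i, j] = eps
            num_grad_a[i, j] = (loss(a + da, b) - loss(a - da, b)) / (2 * eps)
    print(np.allclose(grad_a, num_grad_a))  # True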
|
theory Hnr_Fallback
imports Hnr_Base
begin
lemma hnr_fallback:
assumes
"\<And>h. h \<Turnstile> \<Gamma> \<Longrightarrow> c = ci"
shows
"hnr \<Gamma> (return ci) (\<lambda>r ri. \<Gamma> * id_assn r ri) (Some c)"
apply(rule hnrI)
apply(rule hoare_triple_preI)
using assms
by(sep_auto simp: id_rel_def)
method extract_pre uses rule =
(determ\<open>elim mod_starE rule[elim_format]\<close>)?
lemma models_id_assn:"h \<Turnstile> id_assn x xi \<Longrightarrow> x = xi"
by(simp add: id_rel_def)
(* TODO: This just works for non-refined fallbacks *)
method hnr_fallback =
rule hnr_fallback,
extract_pre rule: models_id_assn,
((hypsubst, rule refl) | (simp(no_asm_simp) only: ; fail))
end
|
postulate
A : Set
  I : (@erased _ : A) → Set
  R : A → Set
  f : ∀ (@erased x : A) (r : R x) → I x
-- can now be used here ^
|
At Cosmetic Dentists of Houston, our dental crowns and onlays are metal-free and fabricated from durable, highest-quality US-made materials. For more information about crown technology, the quality of our materials and what to expect from the dental crown procedure, please see Dental Crowns, Onlays and Bridges.
The average cost of a porcelain dental crown at Cosmetic Dentists of Houston ranges from $1,800 to $2,500. Each crown is custom designed by world-class ceramists in our laboratories in Idaho, using the highest quality US-made, non-metal materials.
You may find less expensive crowns, but we promise you that you will never find higher quality work anywhere else.
Because porcelain onlays are more difficult than crowns and require more precision to create, many dentists do not even offer them. Dr. Canto, however, is an expert in the art of porcelain onlays and will recommend them to her patients as needed.
A well-done dental onlay conserves healthy tooth structure and is gentler on the gums than a crown. Generally the cost of an onlay is about the same as the cost of a porcelain crown.
If you are in the Houston, Texas area, please contact us for a free, no-risk porcelain crown and onlay consultation to get a more exact estimate.
Dr. Canto strongly believes in personalized education for each of her patients and will be happy to consult with you to help you decide if dental crowns, onlays or bridges are right for you.
Call 713-622-1977 or come visit our Houston Galleria office for a Porcelain Crown or Onlay Consultation.
Learn more about your dental options and get all your cosmetic dentistry questions answered by Dr. Amanda Canto, DDS. |
[STATEMENT]
lemma confTs_map [iff]:
"\<And>vs. (P,h \<turnstile> vs [:\<le>\<^sub>\<top>] map OK Ts) = (P,h \<turnstile> vs [:\<le>] Ts)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>vs. (P,h \<turnstile> vs [:\<le>\<^sub>\<top>] map OK Ts) = (P,h \<turnstile> vs [:\<le>] Ts)
[PROOF STEP]
by (induct Ts) (auto simp: list_all2_Cons2) |
(* Title: HOL/Analysis/Cartesian_Space.thy
Author: Amine Chaieb, University of Cambridge
Author: Jose DivasΓ³n <jose.divasonm at unirioja.es>
Author: JesΓΊs Aransay <jesus-maria.aransay at unirioja.es>
Author: Johannes HΓΆlzl, VU Amsterdam
Author: Fabian Immler, TUM
*)
section "Linear Algebra on Finite Cartesian Products"
theory Cartesian_Space
imports
"HOL-Combinatorics.Transposition"
Finite_Cartesian_Product
Linear_Algebra
begin
subsection\<^marker>\<open>tag unimportant\<close> \<open>Type @{typ \<open>'a ^ 'n\<close>} and fields as vector spaces\<close> (*much of the following
is really basic linear algebra, check for overlap? rename subsection? *)
definition "cart_basis = {axis i 1 | i. i\<in>UNIV}"
lemma finite_cart_basis: "finite (cart_basis)" unfolding cart_basis_def
using finite_Atleast_Atmost_nat by fastforce
lemma card_cart_basis: "card (cart_basis::('a::zero_neq_one^'i) set) = CARD('i)"
unfolding cart_basis_def Setcompr_eq_image
by (rule card_image) (auto simp: inj_on_def axis_eq_axis)
interpretation vec: vector_space "(*s) "
by unfold_locales (vector algebra_simps)+
lemma independent_cart_basis:
"vec.independent (cart_basis)"
proof (rule vec.independent_if_scalars_zero)
show "finite (cart_basis)" using finite_cart_basis .
fix f::"('a, 'b) vec \<Rightarrow> 'a" and x::"('a, 'b) vec"
assume eq_0: "(\<Sum>x\<in>cart_basis. f x *s x) = 0" and x_in: "x \<in> cart_basis"
obtain i where x: "x = axis i 1" using x_in unfolding cart_basis_def by auto
have sum_eq_0: "(\<Sum>x\<in>(cart_basis) - {x}. f x * (x $ i)) = 0"
proof (rule sum.neutral, rule ballI)
fix xa assume xa: "xa \<in> cart_basis - {x}"
obtain a where a: "xa = axis a 1" and a_not_i: "a \<noteq> i"
using xa x unfolding cart_basis_def by auto
have "xa $ i = 0" unfolding a axis_def using a_not_i by auto
thus "f xa * xa $ i = 0" by simp
qed
have "0 = (\<Sum>x\<in>cart_basis. f x *s x) $ i" using eq_0 by simp
also have "... = (\<Sum>x\<in>cart_basis. (f x *s x) $ i)" unfolding sum_component ..
also have "... = (\<Sum>x\<in>cart_basis. f x * (x $ i))" unfolding vector_smult_component ..
also have "... = f x * (x $ i) + (\<Sum>x\<in>(cart_basis) - {x}. f x * (x $ i))"
by (rule sum.remove[OF finite_cart_basis x_in])
also have "... = f x * (x $ i)" unfolding sum_eq_0 by simp
also have "... = f x" unfolding x axis_def by auto
finally show "f x = 0" ..
qed
lemma span_cart_basis:
"vec.span (cart_basis) = UNIV"
proof (auto)
fix x::"('a, 'b) vec"
let ?f="\<lambda>v. x $ (THE i. v = axis i 1)"
show "x \<in> vec.span (cart_basis)"
apply (unfold vec.span_finite[OF finite_cart_basis])
apply (rule image_eqI[of _ _ ?f])
apply (subst vec_eq_iff)
apply clarify
proof -
fix i::'b
let ?w = "axis i (1::'a)"
have the_eq_i: "(THE a. ?w = axis a 1) = i"
by (rule the_equality, auto simp: axis_eq_axis)
have sum_eq_0: "(\<Sum>v\<in>(cart_basis) - {?w}. x $ (THE i. v = axis i 1) * v $ i) = 0"
proof (rule sum.neutral, rule ballI)
fix xa::"('a, 'b) vec"
assume xa: "xa \<in> cart_basis - {?w}"
obtain j where j: "xa = axis j 1" and i_not_j: "i \<noteq> j" using xa unfolding cart_basis_def by auto
have the_eq_j: "(THE i. xa = axis i 1) = j"
proof (rule the_equality)
show "xa = axis j 1" using j .
show "\<And>i. xa = axis i 1 \<Longrightarrow> i = j" by (metis axis_eq_axis j zero_neq_one)
qed
show "x $ (THE i. xa = axis i 1) * xa $ i = 0"
apply (subst (2) j)
unfolding the_eq_j unfolding axis_def using i_not_j by simp
qed
have "(\<Sum>v\<in>cart_basis. x $ (THE i. v = axis i 1) *s v) $ i =
(\<Sum>v\<in>cart_basis. (x $ (THE i. v = axis i 1) *s v) $ i)" unfolding sum_component ..
also have "... = (\<Sum>v\<in>cart_basis. x $ (THE i. v = axis i 1) * v $ i)"
unfolding vector_smult_component ..
also have "... = x $ (THE a. ?w = axis a 1) * ?w $ i + (\<Sum>v\<in>(cart_basis) - {?w}. x $ (THE i. v = axis i 1) * v $ i)"
by (rule sum.remove[OF finite_cart_basis], auto simp add: cart_basis_def)
also have "... = x $ (THE a. ?w = axis a 1) * ?w $ i" unfolding sum_eq_0 by simp
also have "... = x $ i" unfolding the_eq_i unfolding axis_def by auto
finally show "x $ i = (\<Sum>v\<in>cart_basis. x $ (THE i. v = axis i 1) *s v) $ i" by simp
qed simp
qed
(*Some interpretations:*)
interpretation vec: finite_dimensional_vector_space "(*s)" "cart_basis"
by (unfold_locales, auto simp add: finite_cart_basis independent_cart_basis span_cart_basis)
lemma matrix_vector_mul_linear_gen[intro, simp]:
"Vector_Spaces.linear (*s) (*s) ((*v) A)"
by unfold_locales
(vector matrix_vector_mult_def sum.distrib algebra_simps)+
lemma span_vec_eq: "vec.span X = span X"
and dim_vec_eq: "vec.dim X = dim X"
and dependent_vec_eq: "vec.dependent X = dependent X"
and subspace_vec_eq: "vec.subspace X = subspace X"
for X::"(real^'n) set"
unfolding span_raw_def dim_raw_def dependent_raw_def subspace_raw_def
by (auto simp: scalar_mult_eq_scaleR)
lemma linear_componentwise:
fixes f:: "'a::field ^'m \<Rightarrow> 'a ^ 'n"
assumes lf: "Vector_Spaces.linear (*s) (*s) f"
shows "(f x)$j = sum (\<lambda>i. (x$i) * (f (axis i 1)$j)) (UNIV :: 'm set)" (is "?lhs = ?rhs")
proof -
interpret lf: Vector_Spaces.linear "(*s)" "(*s)" f
using lf .
let ?M = "(UNIV :: 'm set)"
let ?N = "(UNIV :: 'n set)"
have fM: "finite ?M" by simp
have "?rhs = (sum (\<lambda>i. (x$i) *s (f (axis i 1))) ?M)$j"
unfolding sum_component by simp
then show ?thesis
unfolding lf.sum[symmetric] lf.scale[symmetric]
unfolding basis_expansion by auto
qed
interpretation vec: Vector_Spaces.linear "(*s)" "(*s)" "(*v) A"
using matrix_vector_mul_linear_gen.
interpretation vec: finite_dimensional_vector_space_pair "(*s)" cart_basis "(*s)" cart_basis ..
lemma matrix_works:
assumes lf: "Vector_Spaces.linear (*s) (*s) f"
shows "matrix f *v x = f (x::'a::field ^ 'n)"
apply (simp add: matrix_def matrix_vector_mult_def vec_eq_iff mult.commute)
apply clarify
apply (rule linear_componentwise[OF lf, symmetric])
done
lemma matrix_of_matrix_vector_mul[simp]: "matrix(\<lambda>x. A *v (x :: 'a::field ^ 'n)) = A"
by (simp add: matrix_eq matrix_works)
lemma matrix_compose_gen:
assumes lf: "Vector_Spaces.linear (*s) (*s) (f::'a::{field}^'n \<Rightarrow> 'a^'m)"
and lg: "Vector_Spaces.linear (*s) (*s) (g::'a^'m \<Rightarrow> 'a^_)"
shows "matrix (g o f) = matrix g ** matrix f"
using lf lg Vector_Spaces.linear_compose[OF lf lg] matrix_works[OF Vector_Spaces.linear_compose[OF lf lg]]
by (simp add: matrix_eq matrix_works matrix_vector_mul_assoc[symmetric] o_def)
lemma matrix_compose:
assumes "linear (f::real^'n \<Rightarrow> real^'m)" "linear (g::real^'m \<Rightarrow> real^_)"
shows "matrix (g o f) = matrix g ** matrix f"
using matrix_compose_gen[of f g] assms
by (simp add: linear_def scalar_mult_eq_scaleR)
lemma left_invertible_transpose:
"(\<exists>(B). B ** transpose (A) = mat (1::'a::comm_semiring_1)) \<longleftrightarrow> (\<exists>(B). A ** B = mat 1)"
by (metis matrix_transpose_mul transpose_mat transpose_transpose)
lemma right_invertible_transpose:
"(\<exists>(B). transpose (A) ** B = mat (1::'a::comm_semiring_1)) \<longleftrightarrow> (\<exists>(B). B ** A = mat 1)"
by (metis matrix_transpose_mul transpose_mat transpose_transpose)
lemma linear_matrix_vector_mul_eq:
"Vector_Spaces.linear (*s) (*s) f \<longleftrightarrow> linear (f :: real^'n \<Rightarrow> real ^'m)"
by (simp add: scalar_mult_eq_scaleR linear_def)
lemma matrix_vector_mul[simp]:
"Vector_Spaces.linear (*s) (*s) g \<Longrightarrow> (\<lambda>y. matrix g *v y) = g"
"linear f \<Longrightarrow> (\<lambda>x. matrix f *v x) = f"
"bounded_linear f \<Longrightarrow> (\<lambda>x. matrix f *v x) = f"
for f :: "real^'n \<Rightarrow> real ^'m"
by (simp_all add: ext matrix_works linear_matrix_vector_mul_eq linear_linear)
lemma matrix_left_invertible_injective:
fixes A :: "'a::field^'n^'m"
shows "(\<exists>B. B ** A = mat 1) \<longleftrightarrow> inj ((*v) A)"
proof safe
fix B
assume B: "B ** A = mat 1"
show "inj ((*v) A)"
unfolding inj_on_def
by (metis B matrix_vector_mul_assoc matrix_vector_mul_lid)
next
assume "inj ((*v) A)"
from vec.linear_injective_left_inverse[OF matrix_vector_mul_linear_gen this]
obtain g where "Vector_Spaces.linear (*s) (*s) g" and g: "g \<circ> (*v) A = id"
by blast
have "matrix g ** A = mat 1"
by (metis matrix_vector_mul_linear_gen \<open>Vector_Spaces.linear (*s) (*s) g\<close> g matrix_compose_gen
matrix_eq matrix_id_mat_1 matrix_vector_mul(1))
then show "\<exists>B. B ** A = mat 1"
by metis
qed
lemma matrix_left_invertible_ker:
"(\<exists>B. (B::'a::{field} ^'m^'n) ** (A::'a::{field}^'n^'m) = mat 1) \<longleftrightarrow> (\<forall>x. A *v x = 0 \<longrightarrow> x = 0)"
unfolding matrix_left_invertible_injective
using vec.inj_on_iff_eq_0[OF vec.subspace_UNIV, of A]
by (simp add: inj_on_def)
lemma matrix_right_invertible_surjective:
"(\<exists>B. (A::'a::field^'n^'m) ** (B::'a::field^'m^'n) = mat 1) \<longleftrightarrow> surj (\<lambda>x. A *v x)"
proof -
{ fix B :: "'a ^'m^'n"
assume AB: "A ** B = mat 1"
{ fix x :: "'a ^ 'm"
have "A *v (B *v x) = x"
by (simp add: matrix_vector_mul_assoc AB) }
hence "surj ((*v) A)" unfolding surj_def by metis }
moreover
{ assume sf: "surj ((*v) A)"
from vec.linear_surjective_right_inverse[OF _ this]
obtain g:: "'a ^'m \<Rightarrow> 'a ^'n" where g: "Vector_Spaces.linear (*s) (*s) g" "(*v) A \<circ> g = id"
by blast
have "A ** (matrix g) = mat 1"
unfolding matrix_eq matrix_vector_mul_lid
matrix_vector_mul_assoc[symmetric] matrix_works[OF g(1)]
using g(2) unfolding o_def fun_eq_iff id_def
.
hence "\<exists>B. A ** (B::'a^'m^'n) = mat 1" by blast
}
ultimately show ?thesis unfolding surj_def by blast
qed
lemma matrix_left_invertible_independent_columns:
fixes A :: "'a::{field}^'n^'m"
shows "(\<exists>(B::'a ^'m^'n). B ** A = mat 1) \<longleftrightarrow>
(\<forall>c. sum (\<lambda>i. c i *s column i A) (UNIV :: 'n set) = 0 \<longrightarrow> (\<forall>i. c i = 0))"
(is "?lhs \<longleftrightarrow> ?rhs")
proof -
let ?U = "UNIV :: 'n set"
{ assume k: "\<forall>x. A *v x = 0 \<longrightarrow> x = 0"
{ fix c i
assume c: "sum (\<lambda>i. c i *s column i A) ?U = 0" and i: "i \<in> ?U"
let ?x = "\<chi> i. c i"
have th0:"A *v ?x = 0"
using c
by (vector matrix_mult_sum)
from k[rule_format, OF th0] i
have "c i = 0" by (vector vec_eq_iff)}
hence ?rhs by blast }
moreover
{ assume H: ?rhs
{ fix x assume x: "A *v x = 0"
let ?c = "\<lambda>i. ((x$i ):: 'a)"
from H[rule_format, of ?c, unfolded matrix_mult_sum[symmetric], OF x]
have "x = 0" by vector }
}
ultimately show ?thesis unfolding matrix_left_invertible_ker by auto
qed
lemma matrix_right_invertible_independent_rows:
fixes A :: "'a::{field}^'n^'m"
shows "(\<exists>(B::'a^'m^'n). A ** B = mat 1) \<longleftrightarrow>
(\<forall>c. sum (\<lambda>i. c i *s row i A) (UNIV :: 'm set) = 0 \<longrightarrow> (\<forall>i. c i = 0))"
unfolding left_invertible_transpose[symmetric]
matrix_left_invertible_independent_columns
by (simp add:)
lemma matrix_right_invertible_span_columns:
"(\<exists>(B::'a::field ^'n^'m). (A::'a ^'m^'n) ** B = mat 1) \<longleftrightarrow>
vec.span (columns A) = UNIV" (is "?lhs = ?rhs")
proof -
let ?U = "UNIV :: 'm set"
have fU: "finite ?U" by simp
have lhseq: "?lhs \<longleftrightarrow> (\<forall>y. \<exists>(x::'a^'m). sum (\<lambda>i. (x$i) *s column i A) ?U = y)"
unfolding matrix_right_invertible_surjective matrix_mult_sum surj_def
by (simp add: eq_commute)
have rhseq: "?rhs \<longleftrightarrow> (\<forall>x. x \<in> vec.span (columns A))" by blast
{ assume h: ?lhs
{ fix x:: "'a ^'n"
from h[unfolded lhseq, rule_format, of x] obtain y :: "'a ^'m"
where y: "sum (\<lambda>i. (y$i) *s column i A) ?U = x" by blast
have "x \<in> vec.span (columns A)"
unfolding y[symmetric] scalar_mult_eq_scaleR
proof (rule vec.span_sum [OF vec.span_scale])
show "column i A \<in> vec.span (columns A)" for i
using columns_def vec.span_superset by auto
qed
}
then have ?rhs unfolding rhseq by blast }
moreover
{ assume h:?rhs
let ?P = "\<lambda>(y::'a ^'n). \<exists>(x::'a^'m). sum (\<lambda>i. (x$i) *s column i A) ?U = y"
{ fix y
have "y \<in> vec.span (columns A)"
unfolding h by blast
then have "?P y"
proof (induction rule: vec.span_induct_alt)
case base
then show ?case
by (metis (full_types) matrix_mult_sum matrix_vector_mult_0_right)
next
case (step c y1 y2)
from step obtain i where i: "i \<in> ?U" "y1 = column i A"
unfolding columns_def by blast
obtain x:: "'a ^'m" where x: "sum (\<lambda>i. (x$i) *s column i A) ?U = y2"
using step by blast
let ?x = "(\<chi> j. if j = i then c + (x$i) else (x$j))::'a^'m"
show ?case
proof (rule exI[where x= "?x"], vector, auto simp add: i x[symmetric] if_distrib distrib_left if_distribR cong del: if_weak_cong)
fix j
have th: "\<forall>xa \<in> ?U. (if xa = i then (c + (x$i)) * ((column xa A)$j)
else (x$xa) * ((column xa A$j))) = (if xa = i then c * ((column i A)$j) else 0) + ((x$xa) * ((column xa A)$j))"
using i(1) by (simp add: field_simps)
have "sum (\<lambda>xa. if xa = i then (c + (x$i)) * ((column xa A)$j)
else (x$xa) * ((column xa A$j))) ?U = sum (\<lambda>xa. (if xa = i then c * ((column i A)$j) else 0) + ((x$xa) * ((column xa A)$j))) ?U"
by (rule sum.cong[OF refl]) (use th in blast)
also have "\<dots> = sum (\<lambda>xa. if xa = i then c * ((column i A)$j) else 0) ?U + sum (\<lambda>xa. ((x$xa) * ((column xa A)$j))) ?U"
by (simp add: sum.distrib)
also have "\<dots> = c * ((column i A)$j) + sum (\<lambda>xa. ((x$xa) * ((column xa A)$j))) ?U"
unfolding sum.delta[OF fU]
using i(1) by simp
finally show "sum (\<lambda>xa. if xa = i then (c + (x$i)) * ((column xa A)$j)
else (x$xa) * ((column xa A$j))) ?U = c * ((column i A)$j) + sum (\<lambda>xa. ((x$xa) * ((column xa A)$j))) ?U" .
qed
qed
}
then have ?lhs unfolding lhseq ..
}
ultimately show ?thesis by blast
qed
lemma matrix_left_invertible_span_rows_gen:
"(\<exists>(B::'a^'m^'n). B ** (A::'a::field^'n^'m) = mat 1) \<longleftrightarrow> vec.span (rows A) = UNIV"
unfolding right_invertible_transpose[symmetric]
unfolding columns_transpose[symmetric]
unfolding matrix_right_invertible_span_columns
..
lemma matrix_left_invertible_span_rows:
"(\<exists>(B::real^'m^'n). B ** (A::real^'n^'m) = mat 1) \<longleftrightarrow> span (rows A) = UNIV"
using matrix_left_invertible_span_rows_gen[of A] by (simp add: span_vec_eq)
lemma matrix_left_right_inverse:
fixes A A' :: "'a::{field}^'n^'n"
shows "A ** A' = mat 1 \<longleftrightarrow> A' ** A = mat 1"
proof -
{ fix A A' :: "'a ^'n^'n"
assume AA': "A ** A' = mat 1"
have sA: "surj ((*v) A)"
using AA' matrix_right_invertible_surjective by auto
from vec.linear_surjective_isomorphism[OF matrix_vector_mul_linear_gen sA]
obtain f' :: "'a ^'n \<Rightarrow> 'a ^'n"
where f': "Vector_Spaces.linear (*s) (*s) f'" "\<forall>x. f' (A *v x) = x" "\<forall>x. A *v f' x = x" by blast
have th: "matrix f' ** A = mat 1"
by (simp add: matrix_eq matrix_works[OF f'(1)]
matrix_vector_mul_assoc[symmetric] f'(2)[rule_format])
hence "(matrix f' ** A) ** A' = mat 1 ** A'" by simp
hence "matrix f' = A'"
by (simp add: matrix_mul_assoc[symmetric] AA')
hence "matrix f' ** A = A' ** A" by simp
hence "A' ** A = mat 1" by (simp add: th)
}
then show ?thesis by blast
qed
lemma invertible_left_inverse:
fixes A :: "'a::{field}^'n^'n"
shows "invertible A \<longleftrightarrow> (\<exists>(B::'a^'n^'n). B ** A = mat 1)"
by (metis invertible_def matrix_left_right_inverse)
lemma invertible_right_inverse:
fixes A :: "'a::{field}^'n^'n"
shows "invertible A \<longleftrightarrow> (\<exists>(B::'a^'n^'n). A** B = mat 1)"
by (metis invertible_def matrix_left_right_inverse)
lemma invertible_mult:
assumes inv_A: "invertible A"
and inv_B: "invertible B"
shows "invertible (A**B)"
proof -
obtain A' where AA': "A ** A' = mat 1" and A'A: "A' ** A = mat 1"
using inv_A unfolding invertible_def by blast
obtain B' where BB': "B ** B' = mat 1" and B'B: "B' ** B = mat 1"
using inv_B unfolding invertible_def by blast
show ?thesis
proof (unfold invertible_def, rule exI[of _ "B'**A'"], rule conjI)
have "A ** B ** (B' ** A') = A ** (B ** (B' ** A'))"
using matrix_mul_assoc[of A B "(B' ** A')", symmetric] .
also have "... = A ** (B ** B' ** A')" unfolding matrix_mul_assoc[of B "B'" "A'"] ..
also have "... = A ** (mat 1 ** A')" unfolding BB' ..
also have "... = A ** A'" unfolding matrix_mul_lid ..
also have "... = mat 1" unfolding AA' ..
finally show "A ** B ** (B' ** A') = mat (1::'a)" .
have "B' ** A' ** (A ** B) = B' ** (A' ** (A ** B))" using matrix_mul_assoc[of B' A' "(A ** B)", symmetric] .
also have "... = B' ** (A' ** A ** B)" unfolding matrix_mul_assoc[of A' A B] ..
also have "... = B' ** (mat 1 ** B)" unfolding A'A ..
also have "... = B' ** B" unfolding matrix_mul_lid ..
also have "... = mat 1" unfolding B'B ..
finally show "B' ** A' ** (A ** B) = mat 1" .
qed
qed
lemma transpose_invertible:
fixes A :: "real^'n^'n"
assumes "invertible A"
shows "invertible (transpose A)"
by (meson assms invertible_def matrix_left_right_inverse right_invertible_transpose)
lemma vector_matrix_mul_assoc:
fixes v :: "('a::comm_semiring_1)^'n"
shows "(v v* M) v* N = v v* (M ** N)"
proof -
from matrix_vector_mul_assoc
have "transpose N *v (transpose M *v v) = (transpose N ** transpose M) *v v" by fast
thus "(v v* M) v* N = v v* (M ** N)"
by (simp add: matrix_transpose_mul [symmetric])
qed
lemma matrix_scaleR_vector_ac:
fixes A :: "real^('m::finite)^'n"
shows "A *v (k *\<^sub>R v) = k *\<^sub>R A *v v"
by (metis matrix_vector_mult_scaleR transpose_scalar vector_scaleR_matrix_ac vector_transpose_matrix)
lemma scaleR_matrix_vector_assoc:
fixes A :: "real^('m::finite)^'n"
shows "k *\<^sub>R (A *v v) = k *\<^sub>R A *v v"
by (metis matrix_scaleR_vector_ac matrix_vector_mult_scaleR)
(*Finally, some interesting theorems and interpretations that don't appear in any file of the
library.*)
locale linear_first_finite_dimensional_vector_space =
l?: Vector_Spaces.linear scaleB scaleC f +
B?: finite_dimensional_vector_space scaleB BasisB
for scaleB :: "('a::field => 'b::ab_group_add => 'b)" (infixr "*b" 75)
and scaleC :: "('a => 'c::ab_group_add => 'c)" (infixr "*c" 75)
and BasisB :: "('b set)"
and f :: "('b=>'c)"
lemma vec_dim_card: "vec.dim (UNIV::('a::{field}^'n) set) = CARD ('n)"
proof -
let ?f="\<lambda>i::'n. axis i (1::'a)"
have "vec.dim (UNIV::('a::{field}^'n) set) = card (cart_basis::('a^'n) set)"
unfolding vec.dim_UNIV ..
also have "... = card ({i. i\<in> UNIV}::('n) set)"
proof (rule bij_betw_same_card[of ?f, symmetric], unfold bij_betw_def, auto)
show "inj (\<lambda>i::'n. axis i (1::'a))" by (simp add: inj_on_def axis_eq_axis)
fix i::'n
show "axis i 1 \<in> cart_basis" unfolding cart_basis_def by auto
fix x::"'a^'n"
assume "x \<in> cart_basis"
thus "x \<in> range (\<lambda>i. axis i 1)" unfolding cart_basis_def by auto
qed
also have "... = CARD('n)" by auto
finally show ?thesis .
qed
interpretation vector_space_over_itself: vector_space "(*) :: 'a::field \<Rightarrow> 'a \<Rightarrow> 'a"
by unfold_locales (simp_all add: algebra_simps)
lemmas [simp del] = vector_space_over_itself.scale_scale
interpretation vector_space_over_itself: finite_dimensional_vector_space
"(*) :: 'a::field => 'a => 'a" "{1}"
by unfold_locales (auto simp: vector_space_over_itself.span_singleton)
lemma dimension_eq_1[code_unfold]: "vector_space_over_itself.dimension TYPE('a::field)= 1"
unfolding vector_space_over_itself.dimension_def by simp
lemma dim_subset_UNIV_cart_gen:
fixes S :: "('a::field^'n) set"
shows "vec.dim S \<le> CARD('n)"
by (metis vec.dim_eq_full vec.dim_subset_UNIV vec.span_UNIV vec_dim_card)
lemma dim_subset_UNIV_cart:
fixes S :: "(real^'n) set"
shows "dim S \<le> CARD('n)"
using dim_subset_UNIV_cart_gen[of S] by (simp add: dim_vec_eq)
text\<open>Two sometimes fruitful ways of looking at matrix-vector multiplication.\<close>
lemma matrix_mult_dot: "A *v x = (\<chi> i. inner (A$i) x)"
by (simp add: matrix_vector_mult_def inner_vec_def)
lemma adjoint_matrix: "adjoint(\<lambda>x. (A::real^'n^'m) *v x) = (\<lambda>x. transpose A *v x)"
apply (rule adjoint_unique)
apply (simp add: transpose_def inner_vec_def matrix_vector_mult_def
sum_distrib_right sum_distrib_left)
apply (subst sum.swap)
apply (simp add: ac_simps)
done
lemma matrix_adjoint: assumes lf: "linear (f :: real^'n \<Rightarrow> real ^'m)"
shows "matrix(adjoint f) = transpose(matrix f)"
proof -
have "matrix(adjoint f) = matrix(adjoint ((*v) (matrix f)))"
by (simp add: lf)
also have "\<dots> = transpose(matrix f)"
unfolding adjoint_matrix matrix_of_matrix_vector_mul
apply rule
done
finally show ?thesis .
qed
subsection\<open> Rank of a matrix\<close>
text\<open>Equivalence of row and column rank is taken from George Mackiw's paper, Mathematics Magazine 1995, p. 285.\<close>
lemma matrix_vector_mult_in_columnspace_gen:
fixes A :: "'a::field^'n^'m"
shows "(A *v x) \<in> vec.span(columns A)"
apply (simp add: matrix_vector_column columns_def transpose_def column_def)
apply (intro vec.span_sum vec.span_scale)
apply (force intro: vec.span_base)
done
lemma matrix_vector_mult_in_columnspace:
fixes A :: "real^'n^'m"
shows "(A *v x) \<in> span(columns A)"
using matrix_vector_mult_in_columnspace_gen[of A x] by (simp add: span_vec_eq)
lemma subspace_orthogonal_to_vector: "subspace {y. orthogonal x y}"
by (simp add: subspace_def orthogonal_clauses)
lemma orthogonal_nullspace_rowspace:
fixes A :: "real^'n^'m"
assumes 0: "A *v x = 0" and y: "y \<in> span(rows A)"
shows "orthogonal x y"
using y
proof (induction rule: span_induct)
case base
then show ?case
by (simp add: subspace_orthogonal_to_vector)
next
case (step v)
then obtain i where "v = row i A"
by (auto simp: rows_def)
with 0 show ?case
unfolding orthogonal_def inner_vec_def matrix_vector_mult_def row_def
by (simp add: mult.commute) (metis (no_types) vec_lambda_beta zero_index)
qed
lemma nullspace_inter_rowspace:
fixes A :: "real^'n^'m"
shows "A *v x = 0 \<and> x \<in> span(rows A) \<longleftrightarrow> x = 0"
using orthogonal_nullspace_rowspace orthogonal_self span_zero matrix_vector_mult_0_right
by blast
lemma matrix_vector_mul_injective_on_rowspace:
fixes A :: "real^'n^'m"
shows "\<lbrakk>A *v x = A *v y; x \<in> span(rows A); y \<in> span(rows A)\<rbrakk> \<Longrightarrow> x = y"
using nullspace_inter_rowspace [of A "x-y"]
by (metis diff_eq_diff_eq diff_self matrix_vector_mult_diff_distrib span_diff)
definition\<^marker>\<open>tag important\<close> rank :: "'a::field^'n^'m=>nat"
where row_rank_def_gen: "rank A \<equiv> vec.dim(rows A)"
lemma row_rank_def: "rank A = dim (rows A)" for A::"real^'n^'m"
by (auto simp: row_rank_def_gen dim_vec_eq)
lemma dim_rows_le_dim_columns:
fixes A :: "real^'n^'m"
shows "dim(rows A) \<le> dim(columns A)"
proof -
have "dim (span (rows A)) \<le> dim (span (columns A))"
proof -
obtain B where "independent B" "span(rows A) \<subseteq> span B"
and B: "B \<subseteq> span(rows A)""card B = dim (span(rows A))"
using basis_exists [of "span(rows A)"] by metis
with span_subspace have eq: "span B = span(rows A)"
by auto
then have inj: "inj_on ((*v) A) (span B)"
by (simp add: inj_on_def matrix_vector_mul_injective_on_rowspace)
then have ind: "independent ((*v) A ` B)"
by (rule linear_independent_injective_image [OF Finite_Cartesian_Product.matrix_vector_mul_linear \<open>independent B\<close>])
have "dim (span (rows A)) \<le> card ((*v) A ` B)"
unfolding B(2)[symmetric]
using inj
by (auto simp: card_image inj_on_subset span_superset)
also have "\<dots> \<le> dim (span (columns A))"
using _ ind
by (rule independent_card_le_dim) (auto intro!: matrix_vector_mult_in_columnspace)
finally show ?thesis .
qed
then show ?thesis
by (simp)
qed
lemma column_rank_def:
fixes A :: "real^'n^'m"
shows "rank A = dim(columns A)"
unfolding row_rank_def
by (metis columns_transpose dim_rows_le_dim_columns le_antisym rows_transpose)
lemma rank_transpose:
fixes A :: "real^'n^'m"
shows "rank(transpose A) = rank A"
by (metis column_rank_def row_rank_def rows_transpose)
lemma matrix_vector_mult_basis:
fixes A :: "real^'n^'m"
shows "A *v (axis k 1) = column k A"
by (simp add: cart_eq_inner_axis column_def matrix_mult_dot)
lemma columns_image_basis:
fixes A :: "real^'n^'m"
shows "columns A = (*v) A ` (range (\<lambda>i. axis i 1))"
by (force simp: columns_def matrix_vector_mult_basis [symmetric])
lemma rank_dim_range:
fixes A :: "real^'n^'m"
shows "rank A = dim(range (\<lambda>x. A *v x))"
unfolding column_rank_def
proof (rule span_eq_dim)
have "span (columns A) \<subseteq> span (range ((*v) A))" (is "?l \<subseteq> ?r")
by (simp add: columns_image_basis image_subsetI span_mono)
then show "?l = ?r"
by (metis (no_types, lifting) image_subset_iff matrix_vector_mult_in_columnspace
span_eq span_span)
qed
lemma rank_bound:
fixes A :: "real^'n^'m"
shows "rank A \<le> min CARD('m) (CARD('n))"
by (metis (mono_tags, lifting) dim_subset_UNIV_cart min.bounded_iff
column_rank_def row_rank_def)
lemma full_rank_injective:
fixes A :: "real^'n^'m"
shows "rank A = CARD('n) \<longleftrightarrow> inj ((*v) A)"
by (simp add: matrix_left_invertible_injective [symmetric] matrix_left_invertible_span_rows row_rank_def
dim_eq_full [symmetric] card_cart_basis vec.dimension_def)
lemma full_rank_surjective:
fixes A :: "real^'n^'m"
shows "rank A = CARD('m) \<longleftrightarrow> surj ((*v) A)"
by (simp add: matrix_right_invertible_surjective [symmetric] left_invertible_transpose [symmetric]
matrix_left_invertible_injective full_rank_injective [symmetric] rank_transpose)
lemma rank_I: "rank(mat 1::real^'n^'n) = CARD('n)"
by (simp add: full_rank_injective inj_on_def)
lemma less_rank_noninjective:
fixes A :: "real^'n^'m"
shows "rank A < CARD('n) \<longleftrightarrow> \<not> inj ((*v) A)"
using less_le rank_bound by (auto simp: full_rank_injective [symmetric])
lemma matrix_nonfull_linear_equations_eq:
fixes A :: "real^'n^'m"
shows "(\<exists>x. (x \<noteq> 0) \<and> A *v x = 0) \<longleftrightarrow> rank A \<noteq> CARD('n)"
by (meson matrix_left_invertible_injective full_rank_injective matrix_left_invertible_ker)
lemma rank_eq_0: "rank A = 0 \<longleftrightarrow> A = 0" and rank_0 [simp]: "rank (0::real^'n^'m) = 0"
for A :: "real^'n^'m"
by (auto simp: rank_dim_range matrix_eq)
lemma rank_mul_le_right:
fixes A :: "real^'n^'m" and B :: "real^'p^'n"
shows "rank(A ** B) \<le> rank B"
proof -
have "rank(A ** B) \<le> dim ((*v) A ` range ((*v) B))"
by (auto simp: rank_dim_range image_comp o_def matrix_vector_mul_assoc)
also have "\<dots> \<le> rank B"
by (simp add: rank_dim_range dim_image_le)
finally show ?thesis .
qed
lemma rank_mul_le_left:
fixes A :: "real^'n^'m" and B :: "real^'p^'n"
shows "rank(A ** B) \<le> rank A"
by (metis matrix_transpose_mul rank_mul_le_right rank_transpose)
subsection\<^marker>\<open>tag unimportant\<close> \<open>Lemmas for working on \<open>real^1/2/3/4\<close>\<close>
lemma exhaust_2:
fixes x :: 2
shows "x = 1 \<or> x = 2"
proof (induct x)
case (of_int z)
then have "0 \<le> z" and "z < 2" by simp_all
then have "z = 0 | z = 1" by arith
then show ?case by auto
qed
lemma forall_2: "(\<forall>i::2. P i) \<longleftrightarrow> P 1 \<and> P 2"
by (metis exhaust_2)
lemma exhaust_3:
fixes x :: 3
shows "x = 1 \<or> x = 2 \<or> x = 3"
proof (induct x)
case (of_int z)
then have "0 \<le> z" and "z < 3" by simp_all
then have "z = 0 \<or> z = 1 \<or> z = 2" by arith
then show ?case by auto
qed
lemma forall_3: "(\<forall>i::3. P i) \<longleftrightarrow> P 1 \<and> P 2 \<and> P 3"
by (metis exhaust_3)
lemma exhaust_4:
fixes x :: 4
shows "x = 1 \<or> x = 2 \<or> x = 3 \<or> x = 4"
proof (induct x)
case (of_int z)
then have "0 \<le> z" and "z < 4" by simp_all
then have "z = 0 \<or> z = 1 \<or> z = 2 \<or> z = 3" by arith
then show ?case by auto
qed
lemma forall_4: "(\<forall>i::4. P i) \<longleftrightarrow> P 1 \<and> P 2 \<and> P 3 \<and> P 4"
by (metis exhaust_4)
lemma UNIV_1 [simp]: "UNIV = {1::1}"
by (auto simp add: num1_eq_iff)
lemma UNIV_2: "UNIV = {1::2, 2::2}"
using exhaust_2 by auto
lemma UNIV_3: "UNIV = {1::3, 2::3, 3::3}"
using exhaust_3 by auto
lemma UNIV_4: "UNIV = {1::4, 2::4, 3::4, 4::4}"
using exhaust_4 by auto
lemma sum_1: "sum f (UNIV::1 set) = f 1"
unfolding UNIV_1 by simp
lemma sum_2: "sum f (UNIV::2 set) = f 1 + f 2"
unfolding UNIV_2 by simp
lemma sum_3: "sum f (UNIV::3 set) = f 1 + f 2 + f 3"
unfolding UNIV_3 by (simp add: ac_simps)
lemma sum_4: "sum f (UNIV::4 set) = f 1 + f 2 + f 3 + f 4"
unfolding UNIV_4 by (simp add: ac_simps)
subsection\<^marker>\<open>tag unimportant\<close>\<open>The collapse of the general concepts to dimension one\<close>
lemma vector_one: "(x::'a ^1) = (\<chi> i. (x$1))"
by (simp add: vec_eq_iff)
lemma forall_one: "(\<forall>(x::'a ^1). P x) \<longleftrightarrow> (\<forall>x. P(\<chi> i. x))"
apply auto
apply (erule_tac x= "x$1" in allE)
apply (simp only: vector_one[symmetric])
done
lemma norm_vector_1: "norm (x :: _^1) = norm (x$1)"
by (simp add: norm_vec_def)
lemma dist_vector_1:
fixes x :: "'a::real_normed_vector^1"
shows "dist x y = dist (x$1) (y$1)"
by (simp add: dist_norm norm_vector_1)
lemma norm_real: "norm(x::real ^ 1) = \<bar>x$1\<bar>"
by (simp add: norm_vector_1)
lemma dist_real: "dist(x::real ^ 1) y = \<bar>(x$1) - (y$1)\<bar>"
by (auto simp add: norm_real dist_norm)
subsection\<^marker>\<open>tag unimportant\<close>\<open>Routine results connecting the types \<^typ>\<open>real^1\<close> and \<^typ>\<open>real\<close>\<close>
lemma vector_one_nth [simp]:
fixes x :: "'a^1" shows "vec (x $ 1) = x"
by (metis vec_def vector_one)
lemma tendsto_at_within_vector_1:
fixes S :: "'a :: metric_space set"
assumes "(f \<longlongrightarrow> fx) (at x within S)"
shows "((\<lambda>y::'a^1. \<chi> i. f (y $ 1)) \<longlongrightarrow> (vec fx::'a^1)) (at (vec x) within vec ` S)"
proof (rule topological_tendstoI)
fix T :: "('a^1) set"
assume "open T" "vec fx \<in> T"
have "\<forall>\<^sub>F x in at x within S. f x \<in> (\<lambda>x. x $ 1) ` T"
using \<open>open T\<close> \<open>vec fx \<in> T\<close> assms open_image_vec_nth tendsto_def by fastforce
then show "\<forall>\<^sub>F x::'a^1 in at (vec x) within vec ` S. (\<chi> i. f (x $ 1)) \<in> T"
unfolding eventually_at dist_norm [symmetric]
by (rule ex_forward)
(use \<open>open T\<close> in
\<open>fastforce simp: dist_norm dist_vec_def L2_set_def image_iff vector_one open_vec_def\<close>)
qed
lemma has_derivative_vector_1:
assumes der_g: "(g has_derivative (\<lambda>x. x * g' a)) (at a within S)"
shows "((\<lambda>x. vec (g (x $ 1))) has_derivative (*\<^sub>R) (g' a))
(at ((vec a)::real^1) within vec ` S)"
using der_g
apply (auto simp: Deriv.has_derivative_within bounded_linear_scaleR_right norm_vector_1)
apply (drule tendsto_at_within_vector_1, vector)
apply (auto simp: algebra_simps eventually_at tendsto_def)
done
subsection\<^marker>\<open>tag unimportant\<close>\<open>Explicit vector construction from lists\<close>
definition "vector l = (\<chi> i. foldr (\<lambda>x f n. fun_upd (f (n+1)) n x) l (\<lambda>n x. 0) 1 i)"
lemma vector_1 [simp]: "(vector[x]) $1 = x"
unfolding vector_def by simp
lemma vector_2 [simp]: "(vector[x,y]) $1 = x" "(vector[x,y] :: 'a^2)$2 = (y::'a::zero)"
unfolding vector_def by simp_all
lemma vector_3 [simp]:
"(vector [x,y,z] ::('a::zero)^3)$1 = x"
"(vector [x,y,z] ::('a::zero)^3)$2 = y"
"(vector [x,y,z] ::('a::zero)^3)$3 = z"
unfolding vector_def by simp_all
lemma forall_vector_1: "(\<forall>v::'a::zero^1. P v) \<longleftrightarrow> (\<forall>x. P(vector[x]))"
by (metis vector_1 vector_one)
lemma forall_vector_2: "(\<forall>v::'a::zero^2. P v) \<longleftrightarrow> (\<forall>x y. P(vector[x, y]))"
apply auto
apply (erule_tac x="v$1" in allE)
apply (erule_tac x="v$2" in allE)
apply (subgoal_tac "vector [v$1, v$2] = v")
apply simp
apply (vector vector_def)
apply (simp add: forall_2)
done
lemma forall_vector_3: "(\<forall>v::'a::zero^3. P v) \<longleftrightarrow> (\<forall>x y z. P(vector[x, y, z]))"
apply auto
apply (erule_tac x="v$1" in allE)
apply (erule_tac x="v$2" in allE)
apply (erule_tac x="v$3" in allE)
apply (subgoal_tac "vector [v$1, v$2, v$3] = v")
apply simp
apply (vector vector_def)
apply (simp add: forall_3)
done
subsection\<^marker>\<open>tag unimportant\<close> \<open>lambda skolemization on cartesian products\<close>
lemma lambda_skolem: "(\<forall>i. \<exists>x. P i x) \<longleftrightarrow>
(\<exists>x::'a ^ 'n. \<forall>i. P i (x $ i))" (is "?lhs \<longleftrightarrow> ?rhs")
proof -
let ?S = "(UNIV :: 'n set)"
{ assume H: "?rhs"
then have ?lhs by auto }
moreover
{ assume H: "?lhs"
then obtain f where f:"\<forall>i. P i (f i)" unfolding choice_iff by metis
let ?x = "(\<chi> i. (f i)) :: 'a ^ 'n"
{ fix i
from f have "P i (f i)" by metis
then have "P i (?x $ i)" by auto
}
hence "\<forall>i. P i (?x$i)" by metis
hence ?rhs by metis }
ultimately show ?thesis by metis
qed
text \<open>The same result in terms of square matrices.\<close>
text \<open>Considering an n-element vector as an n-by-1 or 1-by-n matrix.\<close>
definition "rowvector v = (\<chi> i j. (v$j))"
definition "columnvector v = (\<chi> i j. (v$i))"
lemma transpose_columnvector: "transpose(columnvector v) = rowvector v"
by (simp add: transpose_def rowvector_def columnvector_def vec_eq_iff)
lemma transpose_rowvector: "transpose(rowvector v) = columnvector v"
by (simp add: transpose_def columnvector_def rowvector_def vec_eq_iff)
lemma dot_rowvector_columnvector: "columnvector (A *v v) = A ** columnvector v"
by (vector columnvector_def matrix_matrix_mult_def matrix_vector_mult_def)
lemma dot_matrix_product:
"(x::real^'n) \<bullet> y = (((rowvector x ::real^'n^1) ** (columnvector y :: real^1^'n))$1)$1"
by (vector matrix_matrix_mult_def rowvector_def columnvector_def inner_vec_def)
lemma dot_matrix_vector_mul:
fixes A B :: "real ^'n ^'n" and x y :: "real ^'n"
shows "(A *v x) \<bullet> (B *v y) =
(((rowvector x :: real^'n^1) ** ((transpose A ** B) ** (columnvector y :: real ^1^'n)))$1)$1"
unfolding dot_matrix_product transpose_columnvector[symmetric]
dot_rowvector_columnvector matrix_transpose_mul matrix_mul_assoc ..
lemma dim_substandard_cart: "vec.dim {x::'a::field^'n. \<forall>i. i \<notin> d \<longrightarrow> x$i = 0} = card d"
(is "vec.dim ?A = _")
proof (rule vec.dim_unique)
let ?B = "((\<lambda>x. axis x 1) ` d)"
have subset_basis: "?B \<subseteq> cart_basis"
by (auto simp: cart_basis_def)
show "?B \<subseteq> ?A"
by (auto simp: axis_def)
show "vec.independent ((\<lambda>x. axis x 1) ` d)"
using subset_basis
by (rule vec.independent_mono[OF vec.independent_Basis])
have "x \<in> vec.span ?B" if "\<forall>i. i \<notin> d \<longrightarrow> x $ i = 0" for x::"'a^'n"
proof -
have "finite ?B"
using subset_basis finite_cart_basis
by (rule finite_subset)
have "x = (\<Sum>i\<in>UNIV. x $ i *s axis i 1)"
by (rule basis_expansion[symmetric])
also have "\<dots> = (\<Sum>i\<in>d. (x $ i) *s axis i 1)"
by (rule sum.mono_neutral_cong_right) (auto simp: that)
also have "\<dots> \<in> vec.span ?B"
by (simp add: vec.span_sum vec.span_clauses)
finally show "x \<in> vec.span ?B" .
qed
then show "?A \<subseteq> vec.span ?B" by auto
qed (simp add: card_image inj_on_def axis_eq_axis)
lemma affinity_inverses:
assumes m0: "m \<noteq> (0::'a::field)"
shows "(\<lambda>x. m *s x + c) \<circ> (\<lambda>x. inverse(m) *s x + (-(inverse(m) *s c))) = id"
"(\<lambda>x. inverse(m) *s x + (-(inverse(m) *s c))) \<circ> (\<lambda>x. m *s x + c) = id"
using m0
by (auto simp add: fun_eq_iff vector_add_ldistrib diff_conv_add_uminus simp del: add_uminus_conv_diff)
lemma vector_affinity_eq:
assumes m0: "(m::'a::field) \<noteq> 0"
shows "m *s x + c = y \<longleftrightarrow> x = inverse m *s y + -(inverse m *s c)"
proof
assume h: "m *s x + c = y"
hence "m *s x = y - c" by (simp add: field_simps)
hence "inverse m *s (m *s x) = inverse m *s (y - c)" by simp
then show "x = inverse m *s y + - (inverse m *s c)"
using m0 by (simp add: vector_smult_assoc vector_ssub_ldistrib)
next
assume h: "x = inverse m *s y + - (inverse m *s c)"
show "m *s x + c = y" unfolding h
using m0 by (simp add: vector_smult_assoc vector_ssub_ldistrib)
qed
lemma vector_eq_affinity:
"(m::'a::field) \<noteq> 0 ==> (y = m *s x + c \<longleftrightarrow> inverse(m) *s y + -(inverse(m) *s c) = x)"
using vector_affinity_eq[where m=m and x=x and y=y and c=c]
by metis
lemma vector_cart:
fixes f :: "real^'n \<Rightarrow> real"
shows "(\<chi> i. f (axis i 1)) = (\<Sum>i\<in>Basis. f i *\<^sub>R i)"
unfolding euclidean_eq_iff[where 'a="real^'n"]
by simp (simp add: Basis_vec_def inner_axis)
lemma const_vector_cart:"((\<chi> i. d)::real^'n) = (\<Sum>i\<in>Basis. d *\<^sub>R i)"
by (rule vector_cart)
subsection\<^marker>\<open>tag unimportant\<close> \<open>Explicit formulas for low dimensions\<close>
lemma prod_neutral_const: "prod f {(1::nat)..1} = f 1"
by simp
lemma prod_2: "prod f {(1::nat)..2} = f 1 * f 2"
by (simp add: eval_nat_numeral atLeastAtMostSuc_conv mult.commute)
lemma prod_3: "prod f {(1::nat)..3} = f 1 * f 2 * f 3"
by (simp add: eval_nat_numeral atLeastAtMostSuc_conv mult.commute)
subsection \<open>Orthogonality of a matrix\<close>
definition\<^marker>\<open>tag important\<close> "orthogonal_matrix (Q::'a::semiring_1^'n^'n) \<longleftrightarrow>
transpose Q ** Q = mat 1 \<and> Q ** transpose Q = mat 1"
lemma orthogonal_matrix: "orthogonal_matrix (Q:: real ^'n^'n) \<longleftrightarrow> transpose Q ** Q = mat 1"
by (metis matrix_left_right_inverse orthogonal_matrix_def)
lemma orthogonal_matrix_id: "orthogonal_matrix (mat 1 :: _^'n^'n)"
by (simp add: orthogonal_matrix_def)
proposition orthogonal_matrix_mul:
fixes A :: "real ^'n^'n"
assumes "orthogonal_matrix A" "orthogonal_matrix B"
shows "orthogonal_matrix(A ** B)"
using assms
by (simp add: orthogonal_matrix matrix_transpose_mul matrix_left_right_inverse matrix_mul_assoc)
proposition orthogonal_transformation_matrix:
fixes f:: "real^'n \<Rightarrow> real^'n"
shows "orthogonal_transformation f \<longleftrightarrow> linear f \<and> orthogonal_matrix(matrix f)"
(is "?lhs \<longleftrightarrow> ?rhs")
proof -
let ?mf = "matrix f"
let ?ot = "orthogonal_transformation f"
let ?U = "UNIV :: 'n set"
have fU: "finite ?U" by simp
let ?m1 = "mat 1 :: real ^'n^'n"
{
assume ot: ?ot
from ot have lf: "Vector_Spaces.linear (*s) (*s) f" and fd: "\<And>v w. f v \<bullet> f w = v \<bullet> w"
unfolding orthogonal_transformation_def orthogonal_matrix linear_def scalar_mult_eq_scaleR
by blast+
{
fix i j
let ?A = "transpose ?mf ** ?mf"
have th0: "\<And>b (x::'a::comm_ring_1). (if b then 1 else 0)*x = (if b then x else 0)"
"\<And>b (x::'a::comm_ring_1). x*(if b then 1 else 0) = (if b then x else 0)"
by simp_all
from fd[of "axis i 1" "axis j 1",
simplified matrix_works[OF lf, symmetric] dot_matrix_vector_mul]
have "?A$i$j = ?m1 $ i $ j"
by (simp add: inner_vec_def matrix_matrix_mult_def columnvector_def rowvector_def
th0 sum.delta[OF fU] mat_def axis_def)
}
then have "orthogonal_matrix ?mf"
unfolding orthogonal_matrix
by vector
with lf have ?rhs
unfolding linear_def scalar_mult_eq_scaleR
by blast
}
moreover
{
assume lf: "Vector_Spaces.linear (*s) (*s) f" and om: "orthogonal_matrix ?mf"
from lf om have ?lhs
unfolding orthogonal_matrix_def norm_eq orthogonal_transformation
apply (simp only: matrix_works[OF lf, symmetric] dot_matrix_vector_mul)
apply (simp add: dot_matrix_product linear_def scalar_mult_eq_scaleR)
done
}
ultimately show ?thesis
by (auto simp: linear_def scalar_mult_eq_scaleR)
qed
subsection \<open>Finding an Orthogonal Matrix\<close>
text \<open>We can find an orthogonal matrix taking any unit vector to any other.\<close>
lemma orthogonal_matrix_transpose [simp]:
"orthogonal_matrix(transpose A) \<longleftrightarrow> orthogonal_matrix A"
by (auto simp: orthogonal_matrix_def)
lemma orthogonal_matrix_orthonormal_columns:
fixes A :: "real^'n^'n"
shows "orthogonal_matrix A \<longleftrightarrow>
(\<forall>i. norm(column i A) = 1) \<and>
(\<forall>i j. i \<noteq> j \<longrightarrow> orthogonal (column i A) (column j A))"
by (auto simp: orthogonal_matrix matrix_mult_transpose_dot_column vec_eq_iff mat_def norm_eq_1 orthogonal_def)
lemma orthogonal_matrix_orthonormal_rows:
fixes A :: "real^'n^'n"
shows "orthogonal_matrix A \<longleftrightarrow>
(\<forall>i. norm(row i A) = 1) \<and>
(\<forall>i j. i \<noteq> j \<longrightarrow> orthogonal (row i A) (row j A))"
using orthogonal_matrix_orthonormal_columns [of "transpose A"] by simp
proposition orthogonal_matrix_exists_basis:
fixes a :: "real^'n"
assumes "norm a = 1"
obtains A where "orthogonal_matrix A" "A *v (axis k 1) = a"
proof -
obtain S where "a \<in> S" "pairwise orthogonal S" and noS: "\<And>x. x \<in> S \<Longrightarrow> norm x = 1"
and "independent S" "card S = CARD('n)" "span S = UNIV"
using vector_in_orthonormal_basis assms by force
then obtain f0 where "bij_betw f0 (UNIV::'n set) S"
by (metis finite_class.finite_UNIV finite_same_card_bij finiteI_independent)
then obtain f where f: "bij_betw f (UNIV::'n set) S" and a: "a = f k"
using bij_swap_iff [of f0 k "inv f0 a"]
by (metis UNIV_I \<open>a \<in> S\<close> bij_betw_inv_into_right bij_betw_swap_iff swap_apply(1))
show thesis
proof
have [simp]: "\<And>i. norm (f i) = 1"
using bij_betwE [OF \<open>bij_betw f UNIV S\<close>] by (blast intro: noS)
have [simp]: "\<And>i j. i \<noteq> j \<Longrightarrow> orthogonal (f i) (f j)"
using \<open>pairwise orthogonal S\<close> \<open>bij_betw f UNIV S\<close>
by (auto simp: pairwise_def bij_betw_def inj_on_def)
show "orthogonal_matrix (\<chi> i j. f j $ i)"
by (simp add: orthogonal_matrix_orthonormal_columns column_def)
show "(\<chi> i j. f j $ i) *v axis k 1 = a"
by (simp add: matrix_vector_mult_def axis_def a if_distrib cong: if_cong)
qed
qed
lemma orthogonal_transformation_exists_1:
fixes a b :: "real^'n"
assumes "norm a = 1" "norm b = 1"
obtains f where "orthogonal_transformation f" "f a = b"
proof -
obtain k::'n where True
by simp
obtain A B where AB: "orthogonal_matrix A" "orthogonal_matrix B" and eq: "A *v (axis k 1) = a" "B *v (axis k 1) = b"
using orthogonal_matrix_exists_basis assms by metis
let ?f = "\<lambda>x. (B ** transpose A) *v x"
show thesis
proof
show "orthogonal_transformation ?f"
by (subst orthogonal_transformation_matrix)
(auto simp: AB orthogonal_matrix_mul)
next
show "?f a = b"
using \<open>orthogonal_matrix A\<close> unfolding orthogonal_matrix_def
by (metis eq matrix_mul_rid matrix_vector_mul_assoc)
qed
qed
proposition orthogonal_transformation_exists:
fixes a b :: "real^'n"
assumes "norm a = norm b"
obtains f where "orthogonal_transformation f" "f a = b"
proof (cases "a = 0 \<or> b = 0")
case True
with assms show ?thesis
using that by force
next
case False
then obtain f where f: "orthogonal_transformation f" and eq: "f (a /\<^sub>R norm a) = (b /\<^sub>R norm b)"
by (auto intro: orthogonal_transformation_exists_1 [of "a /\<^sub>R norm a" "b /\<^sub>R norm b"])
show ?thesis
proof
interpret linear f
using f by (simp add: orthogonal_transformation_linear)
have "f a /\<^sub>R norm a = f (a /\<^sub>R norm a)"
by (simp add: scale)
also have "\<dots> = b /\<^sub>R norm a"
by (simp add: eq assms [symmetric])
finally show "f a = b"
using False by auto
qed (use f in auto)
qed
subsection \<open>Scaling and isometry\<close>
proposition scaling_linear:
fixes f :: "'a::real_inner \<Rightarrow> 'a::real_inner"
assumes f0: "f 0 = 0"
and fd: "\<forall>x y. dist (f x) (f y) = c * dist x y"
shows "linear f"
proof -
{
fix v w
have "norm (f x) = c * norm x" for x
by (metis dist_0_norm f0 fd)
then have "f v \<bullet> f w = c\<^sup>2 * (v \<bullet> w)"
unfolding dot_norm_neg dist_norm[symmetric]
by (simp add: fd power2_eq_square field_simps)
}
then show ?thesis
unfolding linear_iff vector_eq[where 'a="'a"] scalar_mult_eq_scaleR
by (simp add: inner_add field_simps)
qed
lemma isometry_linear:
"f (0::'a::real_inner) = (0::'a) \<Longrightarrow> \<forall>x y. dist(f x) (f y) = dist x y \<Longrightarrow> linear f"
by (rule scaling_linear[where c=1]) simp_all
text \<open>Hence another formulation of orthogonal transformation\<close>
proposition orthogonal_transformation_isometry:
"orthogonal_transformation f \<longleftrightarrow> f(0::'a::real_inner) = (0::'a) \<and> (\<forall>x y. dist(f x) (f y) = dist x y)"
unfolding orthogonal_transformation
apply (auto simp: linear_0 isometry_linear)
apply (metis (no_types, opaque_lifting) dist_norm linear_diff)
by (metis dist_0_norm)
text \<open>Can extend an isometry from unit sphere:\<close>
lemma isometry_sphere_extend:
fixes f:: "'a::real_inner \<Rightarrow> 'a"
assumes f1: "\<And>x. norm x = 1 \<Longrightarrow> norm (f x) = 1"
and fd1: "\<And>x y. \<lbrakk>norm x = 1; norm y = 1\<rbrakk> \<Longrightarrow> dist (f x) (f y) = dist x y"
shows "\<exists>g. orthogonal_transformation g \<and> (\<forall>x. norm x = 1 \<longrightarrow> g x = f x)"
proof -
{
fix x y x' y' u v u' v' :: "'a"
assume H: "x = norm x *\<^sub>R u" "y = norm y *\<^sub>R v"
"x' = norm x *\<^sub>R u'" "y' = norm y *\<^sub>R v'"
and J: "norm u = 1" "norm u' = 1" "norm v = 1" "norm v' = 1" "norm(u' - v') = norm(u - v)"
then have *: "u \<bullet> v = u' \<bullet> v' + v' \<bullet> u' - v \<bullet> u "
by (simp add: norm_eq norm_eq_1 inner_add inner_diff)
have "norm (norm x *\<^sub>R u' - norm y *\<^sub>R v') = norm (norm x *\<^sub>R u - norm y *\<^sub>R v)"
using J by (simp add: norm_eq norm_eq_1 inner_diff * field_simps)
then have "norm(x' - y') = norm(x - y)"
using H by metis
}
note norm_eq = this
let ?g = "\<lambda>x. if x = 0 then 0 else norm x *\<^sub>R f (x /\<^sub>R norm x)"
have thfg: "?g x = f x" if "norm x = 1" for x
using that by auto
have thd: "dist (?g x) (?g y) = dist x y" for x y
proof (cases "x=0 \<or> y=0")
case False
show "dist (?g x) (?g y) = dist x y"
unfolding dist_norm
proof (rule norm_eq)
show "x = norm x *\<^sub>R (x /\<^sub>R norm x)" "y = norm y *\<^sub>R (y /\<^sub>R norm y)"
"norm (f (x /\<^sub>R norm x)) = 1" "norm (f (y /\<^sub>R norm y)) = 1"
using False f1 by auto
qed (use False in \<open>auto simp: field_simps intro: f1 fd1[unfolded dist_norm]\<close>)
qed (auto simp: f1)
show ?thesis
unfolding orthogonal_transformation_isometry
by (rule exI[where x= ?g]) (metis thfg thd)
qed
subsection\<open>Induction on matrix row operations\<close>
lemma induct_matrix_row_operations:
fixes P :: "real^'n^'n \<Rightarrow> bool"
assumes zero_row: "\<And>A i. row i A = 0 \<Longrightarrow> P A"
and diagonal: "\<And>A. (\<And>i j. i \<noteq> j \<Longrightarrow> A$i$j = 0) \<Longrightarrow> P A"
and swap_cols: "\<And>A m n. \<lbrakk>P A; m \<noteq> n\<rbrakk> \<Longrightarrow> P(\<chi> i j. A $ i $ Transposition.transpose m n j)"
and row_op: "\<And>A m n c. \<lbrakk>P A; m \<noteq> n\<rbrakk>
\<Longrightarrow> P(\<chi> i. if i = m then row m A + c *\<^sub>R row n A else row i A)"
shows "P A"
proof -
have "P A" if "(\<And>i j. \<lbrakk>j \<in> -K; i \<noteq> j\<rbrakk> \<Longrightarrow> A$i$j = 0)" for A K
proof -
have "finite K"
by simp
then show ?thesis using that
proof (induction arbitrary: A rule: finite_induct)
case empty
with diagonal show ?case
by simp
next
case (insert k K)
note insertK = insert
have "P A" if kk: "A$k$k \<noteq> 0"
and 0: "\<And>i j. \<lbrakk>j \<in> - insert k K; i \<noteq> j\<rbrakk> \<Longrightarrow> A$i$j = 0"
"\<And>i. \<lbrakk>i \<in> -L; i \<noteq> k\<rbrakk> \<Longrightarrow> A$i$k = 0" for A L
proof -
have "finite L"
by simp
then show ?thesis using 0 kk
proof (induction arbitrary: A rule: finite_induct)
case (empty B)
show ?case
proof (rule insertK)
fix i j
assume "i \<in> - K" "j \<noteq> i"
show "B $ j $ i = 0"
using \<open>j \<noteq> i\<close> \<open>i \<in> - K\<close> empty
by (metis ComplD ComplI Compl_eq_Diff_UNIV Diff_empty UNIV_I insert_iff)
qed
next
case (insert l L B)
show ?case
proof (cases "k = l")
case True
with insert show ?thesis
by auto
next
case False
let ?C = "\<chi> i. if i = l then row l B - (B $ l $ k / B $ k $ k) *\<^sub>R row k B else row i B"
have 1: "\<lbrakk>j \<in> - insert k K; i \<noteq> j\<rbrakk> \<Longrightarrow> ?C $ i $ j = 0" for j i
by (auto simp: insert.prems(1) row_def)
have 2: "?C $ i $ k = 0"
if "i \<in> - L" "i \<noteq> k" for i
proof (cases "i=l")
case True
with that insert.prems show ?thesis
by (simp add: row_def)
next
case False
with that show ?thesis
by (simp add: insert.prems(2) row_def)
qed
have 3: "?C $ k $ k \<noteq> 0"
by (auto simp: insert.prems row_def \<open>k \<noteq> l\<close>)
have PC: "P ?C"
using insert.IH [OF 1 2 3] by auto
have eqB: "(\<chi> i. if i = l then row l ?C + (B $ l $ k / B $ k $ k) *\<^sub>R row k ?C else row i ?C) = B"
using \<open>k \<noteq> l\<close> by (simp add: vec_eq_iff row_def)
show ?thesis
using row_op [OF PC, of l k, where c = "B$l$k / B$k$k"] eqB \<open>k \<noteq> l\<close>
by (simp add: cong: if_cong)
qed
qed
qed
then have nonzero_hyp: "P A"
if kk: "A$k$k \<noteq> 0" and zeroes: "\<And>i j. j \<in> - insert k K \<and> i\<noteq>j \<Longrightarrow> A$i$j = 0" for A
by (auto simp: intro!: kk zeroes)
show ?case
proof (cases "row k A = 0")
case True
with zero_row show ?thesis by auto
next
case False
then obtain l where l: "A$k$l \<noteq> 0"
by (auto simp: row_def zero_vec_def vec_eq_iff)
show ?thesis
proof (cases "k = l")
case True
with l nonzero_hyp insert.prems show ?thesis
by blast
next
case False
have *: "A $ i $ Transposition.transpose k l j = 0" if "j \<noteq> k" "j \<notin> K" "i \<noteq> j" for i j
using False l insert.prems that
by (auto simp add: Transposition.transpose_def)
have "P (\<chi> i j. (\<chi> i j. A $ i $ Transposition.transpose k l j) $ i $ Transposition.transpose k l j)"
by (rule swap_cols [OF nonzero_hyp False]) (auto simp: l *)
moreover
have "(\<chi> i j. (\<chi> i j. A $ i $ Transposition.transpose k l j) $ i $ Transposition.transpose k l j) = A"
by simp
ultimately show ?thesis
by simp
qed
qed
qed
qed
then show ?thesis
by blast
qed
lemma induct_matrix_elementary:
fixes P :: "real^'n^'n \<Rightarrow> bool"
assumes mult: "\<And>A B. \<lbrakk>P A; P B\<rbrakk> \<Longrightarrow> P(A ** B)"
and zero_row: "\<And>A i. row i A = 0 \<Longrightarrow> P A"
and diagonal: "\<And>A. (\<And>i j. i \<noteq> j \<Longrightarrow> A$i$j = 0) \<Longrightarrow> P A"
and swap1: "\<And>m n. m \<noteq> n \<Longrightarrow> P(\<chi> i j. mat 1 $ i $ Transposition.transpose m n j)"
and idplus: "\<And>m n c. m \<noteq> n \<Longrightarrow> P(\<chi> i j. if i = m \<and> j = n then c else of_bool (i = j))"
shows "P A"
proof -
have swap: "P (\<chi> i j. A $ i $ Transposition.transpose m n j)" (is "P ?C")
if "P A" "m \<noteq> n" for A m n
proof -
have "A ** (\<chi> i j. mat 1 $ i $ Transposition.transpose m n j) = ?C"
by (simp add: matrix_matrix_mult_def mat_def vec_eq_iff if_distrib sum.delta_remove)
then show ?thesis
using mult swap1 that by metis
qed
have row: "P (\<chi> i. if i = m then row m A + c *\<^sub>R row n A else row i A)" (is "P ?C")
if "P A" "m \<noteq> n" for A m n c
proof -
let ?B = "\<chi> i j. if i = m \<and> j = n then c else of_bool (i = j)"
have "?B ** A = ?C"
using \<open>m \<noteq> n\<close> unfolding matrix_matrix_mult_def row_def of_bool_def
by (auto simp: vec_eq_iff if_distrib [of "\<lambda>x. x * y" for y] sum.remove cong: if_cong)
then show ?thesis
by (rule subst) (auto simp: that mult idplus)
qed
show ?thesis
by (rule induct_matrix_row_operations [OF zero_row diagonal swap row])
qed
lemma induct_matrix_elementary_alt:
fixes P :: "real^'n^'n \<Rightarrow> bool"
assumes mult: "\<And>A B. \<lbrakk>P A; P B\<rbrakk> \<Longrightarrow> P(A ** B)"
and zero_row: "\<And>A i. row i A = 0 \<Longrightarrow> P A"
and diagonal: "\<And>A. (\<And>i j. i \<noteq> j \<Longrightarrow> A$i$j = 0) \<Longrightarrow> P A"
and swap1: "\<And>m n. m \<noteq> n \<Longrightarrow> P(\<chi> i j. mat 1 $ i $ Transposition.transpose m n j)"
and idplus: "\<And>m n. m \<noteq> n \<Longrightarrow> P(\<chi> i j. of_bool (i = m \<and> j = n \<or> i = j))"
shows "P A"
proof -
have *: "P (\<chi> i j. if i = m \<and> j = n then c else of_bool (i = j))"
if "m \<noteq> n" for m n c
proof (cases "c = 0")
case True
with diagonal show ?thesis by auto
next
case False
then have eq: "(\<chi> i j. if i = m \<and> j = n then c else of_bool (i = j)) =
(\<chi> i j. if i = j then (if j = n then inverse c else 1) else 0) **
(\<chi> i j. of_bool (i = m \<and> j = n \<or> i = j)) **
(\<chi> i j. if i = j then if j = n then c else 1 else 0)"
using \<open>m \<noteq> n\<close>
apply (simp add: matrix_matrix_mult_def vec_eq_iff of_bool_def if_distrib [of "\<lambda>x. y * x" for y] cong: if_cong)
apply (simp add: if_if_eq_conj sum.neutral conj_commute cong: conj_cong)
done
show ?thesis
apply (subst eq)
apply (intro mult idplus that)
apply (auto intro: diagonal)
done
qed
show ?thesis
by (rule induct_matrix_elementary) (auto intro: assms *)
qed
lemma matrix_vector_mult_matrix_matrix_mult_compose:
"(*v) (A ** B) = (*v) A \<circ> (*v) B"
by (auto simp: matrix_vector_mul_assoc)
lemma induct_linear_elementary:
fixes f :: "real^'n \<Rightarrow> real^'n"
assumes "linear f"
and comp: "\<And>f g. \<lbrakk>linear f; linear g; P f; P g\<rbrakk> \<Longrightarrow> P(f \<circ> g)"
and zeroes: "\<And>f i. \<lbrakk>linear f; \<And>x. (f x) $ i = 0\<rbrakk> \<Longrightarrow> P f"
and const: "\<And>c. P(\<lambda>x. \<chi> i. c i * x$i)"
and swap: "\<And>m n::'n. m \<noteq> n \<Longrightarrow> P(\<lambda>x. \<chi> i. x $ Transposition.transpose m n i)"
and idplus: "\<And>m n::'n. m \<noteq> n \<Longrightarrow> P(\<lambda>x. \<chi> i. if i = m then x$m + x$n else x$i)"
shows "P f"
proof -
have "P ((*v) A)" for A
proof (rule induct_matrix_elementary_alt)
fix A B
assume "P ((*v) A)" and "P ((*v) B)"
then show "P ((*v) (A ** B))"
by (auto simp add: matrix_vector_mult_matrix_matrix_mult_compose intro!: comp)
next
fix A :: "real^'n^'n" and i
assume "row i A = 0"
show "P ((*v) A)"
using matrix_vector_mul_linear
by (rule zeroes[where i=i])
(metis \<open>row i A = 0\<close> inner_zero_left matrix_vector_mul_component row_def vec_lambda_eta)
next
fix A :: "real^'n^'n"
assume 0: "\<And>i j. i \<noteq> j \<Longrightarrow> A $ i $ j = 0"
have "A $ i $ i * x $ i = (\<Sum>j\<in>UNIV. A $ i $ j * x $ j)" for x and i :: "'n"
by (simp add: 0 comm_monoid_add_class.sum.remove [where x=i])
then have "(\<lambda>x. \<chi> i. A $ i $ i * x $ i) = ((*v) A)"
by (auto simp: 0 matrix_vector_mult_def)
then show "P ((*v) A)"
using const [of "\<lambda>i. A $ i $ i"] by simp
next
fix m n :: "'n"
assume "m \<noteq> n"
have eq: "(\<Sum>j\<in>UNIV. if i = Transposition.transpose m n j then x $ j else 0) =
(\<Sum>j\<in>UNIV. if j = Transposition.transpose m n i then x $ j else 0)"
for i and x :: "real^'n"
by (rule sum.cong) (auto simp add: swap_id_eq)
have "(\<lambda>x::real^'n. \<chi> i. x $ Transposition.transpose m n i) = ((*v) (\<chi> i j. if i = Transposition.transpose m n j then 1 else 0))"
by (auto simp: mat_def matrix_vector_mult_def eq if_distrib [of "\<lambda>x. x * y" for y] cong: if_cong)
with swap [OF \<open>m \<noteq> n\<close>] show "P ((*v) (\<chi> i j. mat 1 $ i $ Transposition.transpose m n j))"
by (simp add: mat_def matrix_vector_mult_def)
next
fix m n :: "'n"
assume "m \<noteq> n"
then have "x $ m + x $ n = (\<Sum>j\<in>UNIV. of_bool (j = n \<or> m = j) * x $ j)" for x :: "real^'n"
by (auto simp: of_bool_def if_distrib [of "\<lambda>x. x * y" for y] sum.remove cong: if_cong)
then have "(\<lambda>x::real^'n. \<chi> i. if i = m then x $ m + x $ n else x $ i) =
((*v) (\<chi> i j. of_bool (i = m \<and> j = n \<or> i = j)))"
unfolding matrix_vector_mult_def of_bool_def
by (auto simp: vec_eq_iff if_distrib [of "\<lambda>x. x * y" for y] cong: if_cong)
then show "P ((*v) (\<chi> i j. of_bool (i = m \<and> j = n \<or> i = j)))"
using idplus [OF \<open>m \<noteq> n\<close>] by simp
qed
then show ?thesis
by (metis \<open>linear f\<close> matrix_vector_mul(2))
qed
end |
import algebra.camera.basic
universe u
@[simp] lemma option.none_eq_at {Ξ± : Type u} [ofe Ξ±] {n : β} {a : option Ξ±} :
none =[n] a β a = none :=
begin
split,
{ intro h,
cases h,
refl, },
{ rintro rfl,
refl, },
end
@[simp] lemma option.eq_at_none {Ξ± : Type u} [ofe Ξ±] {n : β} {a : option Ξ±} :
a =[n] none β a = none :=
begin
rw β option.none_eq_at,
symmetry,
end
lemma option.some_eq_at {Ξ± : Type u} [ofe Ξ±] {n : β} {a : Ξ±} {b : option Ξ±} :
some a =[n] b β β b', b = some b' :=
begin
intro h,
cases b,
cases h,
exact β¨b, rflβ©,
end
lemma option.eq_at_some {Ξ± : Type u} [ofe Ξ±] {n : β} {a : option Ξ±} {b : Ξ±} :
a =[n] some b β β a', a = some a' :=
begin
intro h,
symmetry' at h,
exact option.some_eq_at h,
end
@[simp] lemma option.some_eq_at_some {Ξ± : Type u} [ofe Ξ±] {n : β} {a b : Ξ±} :
some a =[n] some b β a =[n] b :=
begin
split,
intro h, cases h, assumption,
intro h, exact option.eq_at_prop.some h,
end
@[simp] lemma option.some_eq_at_some_mul_some {Ξ± : Type u} [camera Ξ±]
{n : β} {a b c : Ξ±} : some a =[n] some b * some c β a =[n] b * c :=
by rw [some_mul_some, option.some_eq_at_some]
@[simp] lemma option.some_eq_at_some_mul_none {Ξ± : Type u} [camera Ξ±]
{n : β} {a b : Ξ±} : some a =[n] some b * none β a =[n] b :=
by rw [mul_none, option.some_eq_at_some]
@[simp] lemma option.some_eq_at_none_mul_some {Ξ± : Type u} [camera Ξ±]
{n : β} {a b : Ξ±} : some a =[n] none * some b β a =[n] b :=
by rw [none_mul, option.some_eq_at_some]
lemma option.map_nonexpansive {Ξ± Ξ² : Type u} [ofe Ξ±] [ofe Ξ²] (f : Ξ± β Ξ²) (hf : is_nonexpansive f) :
is_nonexpansive (option.map f) :=
begin
intros n a b h,
cases h,
{ refine option.eq_at_prop.some _,
refine hf _, assumption, },
{ refl, },
end
lemma option.map_eq_at_map {Ξ± Ξ² Ξ³ : Type u} [ofe Ξ±] [ofe Ξ²] [ofe Ξ³] {n : β}
{f : Ξ± β Ξ²} {a b : option Ξ±} :
is_nonexpansive f β a =[n] b β f <$> a =[n] f <$> b:=
begin
intros hf hac,
cases a,
simpa only [option.map_eq_map, option.map_none', option.none_eq_at, option.map_eq_none'] using hac,
cases b,
cases hac,
simp only [option.map_eq_map, option.map_some', option.some_eq_at_some] at hac β’,
exact hf hac,
end
lemma option.seq_eq_at_seq {Ξ± Ξ² Ξ³ : Type u} [ofe Ξ±] [ofe Ξ²] [ofe Ξ³] {n : β}
{f : Ξ± β Ξ² β Ξ³} {a b : option Ξ±} {c d : option Ξ²} :
is_nonexpansive (function.uncurry f) β
a =[n] b β c =[n] d β f <$> a <*> c =[n] f <$> b <*> d :=
begin
intros hf hac hbd,
cases a,
{ rw option.none_eq_at at hac,
rw hac,
refl, },
cases b,
{ cases hac, },
cases c,
{ rw option.none_eq_at at hbd,
rw hbd,
refl, },
cases d,
{ cases hbd, },
simp only [option.map_eq_map, option.map_some', option.seq_some, option.some_eq_at_some],
rw option.some_eq_at_some at hac hbd,
exact hf.uncurry_apply_eq_at hac hbd,
end
@[simp] lemma option.not_none_is_some {Ξ± : Type*} : (none : option Ξ±).is_some β false :=
by finish
@[simp] lemma option.some_is_some {Ξ± : Type*} {a : Ξ±} : (some a).is_some :=
by solve_by_elim
@[simp] lemma option.not_some_seq_none_is_some {Ξ± Ξ² : Type*} {f : Ξ± β Ξ²} :
(some f <*> none).is_some β false :=
by finish
@[simp] lemma option.not_none_seq_some_is_some {Ξ± Ξ² : Type*} {a : Ξ±} :
((none : option (Ξ± β Ξ²)) <*> some a).is_some β false :=
by finish
@[simp] lemma option.not_none_seq_none_is_some {Ξ± Ξ² : Type*} :
((none : option (Ξ± β Ξ²)) <*> none).is_some β false :=
by finish
@[simp] lemma option.map_is_some_iff {Ξ± Ξ² : Type*} {f : Ξ± β Ξ²} {a : option Ξ±} :
(f <$> a).is_some β a.is_some :=
by cases a; refl
@[simp] lemma option.seq_is_some_iff {Ξ± Ξ² : Type*} {f : option (Ξ± β Ξ²)} {a : option Ξ±} :
(f <*> a).is_some β f.is_some β§ a.is_some :=
begin
cases f; cases a;
simp only [option.not_none_is_some, option.some_is_some, option.seq_some,
option.not_some_seq_none_is_some, option.not_none_seq_some_is_some,
option.not_none_seq_none_is_some, and_self, false_and, and_false],
end
def option.extend {Ξ± : Type u} [camera Ξ±] (n : β) :
Ξ {a bβ bβ : option Ξ±} (hβ : β a', a = some a' β β[n] a')
(hβ : a =[n] (bβ * bβ)), option Ξ± Γ option Ξ±
| (some a) (some bβ) (some bβ) hβ hβ :=
(some (extend (hβ a rfl) (option.some_eq_at_some_mul_some.mp hβ)).1,
some (extend (hβ a rfl) (option.some_eq_at_some_mul_some.mp hβ)).2)
| (some a) (some bβ) none hβ hβ := (some a, none)
| (some a) none (some bβ) hβ hβ := (none, some a)
| _ _ _ _ _ := (none, none)
private lemma option.camera.mul_is_nonexpansive {Ξ± : Type u} [camera Ξ±] :
is_nonexpansive (function.uncurry ((*) : option Ξ± β option Ξ± β option Ξ±)) :=
begin
rintros n β¨aβ, aββ© β¨bβ, bββ© β¨hβ, hββ©,
cases hβ,
{ cases hβ,
{ simp only [function.uncurry_apply_pair, some_mul_some,
option.some_eq_at_some],
refine camera.mul_eq_at _ _; assumption, },
simp only [function.uncurry_apply_pair, mul_none,
option.some_eq_at_some],
assumption, },
{ cases hβ,
{ simp only [function.uncurry_apply_pair, none_mul,
option.some_eq_at_some],
assumption, },
{ refl, }, },
end
private lemma option.camera.core_mul_self {Ξ± : Type u} [camera Ξ±]
(a : option Ξ±) {ca : option Ξ±} : some (option.elim none (Ξ» a, core a) a) = some ca β
ca * a = a :=
begin
intro h,
rw option.some_inj at h,
rw β h,
cases a,
{ refl, },
{ simp only [option.elim],
have := camera.core_mul_self a,
revert this,
induction core a,
{ intro h, refl, },
{ intro h,
rw some_mul_some,
rw h rfl, }, },
end
private lemma option.camera.core_core {Ξ± : Type u} [camera Ξ±]
(a : option Ξ±) {ca : option Ξ±} : some (option.elim none (Ξ» a, core a) a) = some ca β
some (option.elim none (Ξ» a, core a) ca) = some ca :=
begin
intro h,
rw option.some_inj at h β’,
cases a,
{ cases h,
refl, },
cases ca,
{ simp only [option.elim] at h,
simp_rw h,
refl, },
simp only [option.elim] at h β’,
exact camera.core_core a h,
end
private lemma option.camera.core_mono_some {Ξ± : Type u} [camera Ξ±]
(a b : option Ξ±) {ca : option Ξ±} : some (option.elim none (Ξ» a, core a) a) = some ca β a βΌ b β
β cb : option Ξ±, some (option.elim none (Ξ» a, core a) b) = some cb :=
begin
intros hβ hβ,
simp_rw exists_eq',
end
private lemma option.camera.core_mono {Ξ± : Type u} [camera Ξ±]
(a b : option Ξ±) {ca : option Ξ±} : some (option.elim none (Ξ» a, core a) a) = some ca β
a βΌ b β some (option.elim none (Ξ» a, core a) a) βΌ some (option.elim none (Ξ» a, core a) b) :=
begin
intros hβ hβ,
cases a,
{ refine β¨some (option.elim none (Ξ» a, core a) b), _β©,
simp only [option.elim, some_mul_some, none_mul], },
obtain β¨c, hcβ© := hβ,
rw β hc,
cases c,
{ rw mul_none at hc,
refine β¨none, _β©,
rw [mul_none, mul_none], },
simp only [option.elim] at hβ,
cases ca,
{ simp only [option.elim, some_mul_some],
rw hβ,
refine β¨some (core (a * c)), _β©,
simp only [some_mul_some, none_mul], },
obtain β¨d, hdβ© := camera.core_mono a (a * c) hβ β¨c, rflβ©,
refine β¨some d, _β©,
simp only [option.elim, some_mul_some],
exact hd,
end
private lemma option.camera.extend_mul_eq {Ξ± : Type u} [camera Ξ±] (n : β)
(a bβ bβ : option Ξ±) (ha : β b, a = some b β β[n] b) (hb : a =[n] bβ * bβ) :
a = (option.extend n ha hb).1 * (option.extend n ha hb).2 :=
begin
cases a,
{ simp only [option.none_eq_at] at hb,
cases bβ,
{ rw none_mul at hb,
cases hb,
unfold option.extend,
refl, },
{ cases bβ; cases hb, }, },
cases bβ,
{ rw none_mul at hb,
obtain β¨bβ, rflβ© := option.some_eq_at hb,
unfold option.extend,
rw none_mul, },
cases bβ,
{ rw mul_none at hb,
unfold option.extend,
rw mul_none, },
unfold option.extend,
rw some_mul_some,
simp only [some_mul_some, option.some_eq_at_some] at hb,
rw β camera.extend_mul_eq (ha a rfl) hb,
end
private lemma option.camera.extend_eq_at_left {Ξ± : Type u} [camera Ξ±] (n : β)
(a bβ bβ : option Ξ±) (ha : β b, a = some b β β[n] b) (hb : a =[n] bβ * bβ) :
(option.extend n ha hb).1 =[n] bβ :=
begin
cases bβ,
{ rw none_mul at hb,
cases bβ,
{ rw option.eq_at_none at hb,
cases hb,
refl, },
obtain β¨a, rflβ© := option.some_eq_at (ofe.eq_at_symmetric n hb),
refl, },
cases bβ,
{ rw mul_none at hb,
obtain β¨a, rflβ© := option.some_eq_at (ofe.eq_at_symmetric n hb),
exact hb, },
rw [some_mul_some, eq_at_symm_iff] at hb,
obtain β¨a, rflβ© := option.some_eq_at hb,
unfold option.extend,
rw option.some_eq_at_some,
exact camera.extend_eq_at_left _ _,
end
private lemma option.camera.extend_eq_at_right {Ξ± : Type u} [camera Ξ±] (n : β)
(a bβ bβ : option Ξ±) (ha : β b, a = some b β β[n] b) (hb : a =[n] bβ * bβ) :
(option.extend n ha hb).2 =[n] bβ :=
begin
cases bβ,
{ rw none_mul at hb,
cases bβ,
{ rw option.eq_at_none at hb,
cases hb,
refl, },
obtain β¨a, rflβ© := option.some_eq_at (ofe.eq_at_symmetric n hb),
exact hb, },
cases bβ,
{ rw mul_none at hb,
obtain β¨a, rflβ© := option.some_eq_at (ofe.eq_at_symmetric n hb),
refl, },
rw [some_mul_some, eq_at_symm_iff] at hb,
obtain β¨a, rflβ© := option.some_eq_at hb,
unfold option.extend,
rw option.some_eq_at_some,
exact camera.extend_eq_at_right _ _,
end
instance option.camera {Ξ± : Type u} [camera Ξ±] : camera (option Ξ±) := {
validn := β¨Ξ» a, β¨Ξ» n, β b, a = some b β β[n] b,
Ξ» m n hmn h b hb, (camera.validn b).mono hmn (h b hb)β©,
begin
intros n a b h m hmn,
simp only [option.mem_def, sprop.coe_fn_mk],
split; rintros ha c rfl,
{ obtain β¨d, rflβ© := option.eq_at_some h,
rw option.some_eq_at_some at h,
exact camera.validn_of_eq_at (eq_at_mono hmn h) (ha d rfl), },
{ obtain β¨d, rflβ© := option.some_eq_at h,
rw option.some_eq_at_some at h,
exact camera.validn_of_eq_at
(eq_at_mono hmn (eq_at_symmetric n h)) (ha d rfl), },
endβ©,
core := β¨Ξ» a, some (option.elim none (Ξ» a, core a) a), begin
intros n a b hab,
cases hab,
{ simp only [option.elim, option.some_eq_at_some],
refine nonexpansive core _,
assumption, },
{ refl, },
endβ©,
extend := option.extend,
mul_is_nonexpansive := option.camera.mul_is_nonexpansive,
core_mul_self := option.camera.core_mul_self,
core_core := option.camera.core_core,
core_mono_some := option.camera.core_mono_some,
core_mono := option.camera.core_mono,
validn_mul := begin
intros a b n h,
cases a,
{ simp only [is_empty.forall_iff, implies_true_iff, nonexpansive_fun.coe_fn_mk], },
cases b,
{ simpa only [sprop.coe_fn_mk, forall_eq', mul_none, nonexpansive_fun.coe_fn_mk] using h, },
simp only [sprop.coe_fn_mk, forall_eq', nonexpansive_fun.coe_fn_mk, some_mul_some] at h β’,
exact camera.validn_mul a b n h,
end,
extend_mul_eq := option.camera.extend_mul_eq,
extend_eq_at_left := option.camera.extend_eq_at_left,
extend_eq_at_right := option.camera.extend_eq_at_right,
..option.ofe,
..option.comm_semigroup,
}
|
# Read in ITRDB data
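# adjustLW: remove the shared earlywood (EW) signal from latewood (LW) ring-width
# series (cf. Griffin et al. 2011, Tree-Ring Research, also referenced below).
# For each core present in both data frames, LW is regressed on EW (quadratic if
# AIC prefers it and both the linear and quadratic coefficients are positive,
# otherwise linear), and the residuals + 1 are returned as the adjusted LW indices.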
adjustLW <- function(lw, ew){
cores = names(lw)
lwadj = data.frame(matrix(ncol = dim(lw)[2], nrow = dim(lw)[1]))
names(lwadj) <- names(lw)
row.names(lwadj) <- row.names(lw)
for(j in 1:length(cores)){
if(names(lw)[j] %in% names(ew)){
idx = match(names(lw)[j], names(ew))
mdl1 = lm(lw[,j]~ew[,idx])
mdl2 = lm(lw[,j]~ew[,idx]+I(ew[,idx]^2))
if(AIC(mdl2)<AIC(mdl1) & mdl2$coefficients[2]>0 & mdl2$coefficients[3]>0){
lwadj[!is.na(lw[,j])&!is.na(ew[,idx]), j] = mdl2$residuals+1
} else lwadj[!is.na(lw[,j])&!is.na(ew[,idx]), j] = mdl1$residuals+1
}
}
return(lwadj)
}
##### ITRDB operations #####
setwd("C:\\Users\\dannenberg\\Documents\\Data_Analysis\\ITRDB_update092717") ## Change to ITRDB directory on local machine!!!
library(dplR)
# List text files
files <- list.files() # List all files
txtfiles <- files[grep(glob2rx("*.txt"), files)] # List txt files
n = length(txtfiles)
trdata <- data.frame(matrix(ncol = 10, nrow = n))
names(trdata) <- c("SITE", "START", "END", "PI", "NAME", "LOCATION", "SPECIES", "LAT", "LON", "ELEV")
for(i in 1:n){
file = txtfiles[i]
site = unlist(strsplit(file, "[.]"))[1]
site = unlist(strsplit(site, "[_]"))[1] # some with "_gap"
fc = file(file)
mylist <- strsplit(readLines(fc), ": ")
e = "great!"
idx = 1
idxn = length(mylist)
repeat{
testline = mylist[[idx]][1]
test = grepl("Beginning", testline)
if(idx==idxn){
e = "crap!"
break()
}
if(test==TRUE) break()
idx = idx+1
}
trdata$SITE[i] = site
if(e != "crap!"){
trdata$START[i] = as.numeric(mylist[[idx]][2])
trdata$END[i] = as.numeric(mylist[[idx+1]][2])
trdata$PI[i] = mylist[[idx+2]][2]
trdata$NAME[i] = mylist[[idx+3]][2]
trdata$LOCATION[i] = mylist[[idx+4]][2]
trdata$SPECIES[i] = substring(mylist[[idx+5]][2], 1, 4)
if(grepl("S", mylist[[idx+6]][2])==TRUE){ lat = -1*as.numeric(gsub("\\D", "", mylist[[idx+6]][2])) / 100
} else lat = as.numeric(gsub("\\D", "", mylist[[idx+6]][2])) / 100
if(!is.na(lat)){
dsign = sign(lat)
degree = floor(abs(lat))
minute = round(100*(abs(lat)-degree))
if(minute < 60) trdata$LAT[i] = dsign*degree + dsign*(minute/60)
else trdata$LAT[i] = lat
} else trdata$LAT[i] = NA
if(grepl("W", mylist[[idx+7]][2])==TRUE){ lon = -1*as.numeric(gsub("\\D", "", mylist[[idx+7]][2])) / 100
    } else lon = as.numeric(gsub("\\D", "", mylist[[idx+7]][2])) / 100
if(!is.na(lon)){
dsign = sign(lon)
degree = floor(abs(lon))
minute = round(100*(abs(lon)-degree))
if(minute < 60) trdata$LON[i] = dsign*degree + dsign*(minute/60)
else trdata$LON[i] = lon
} else trdata$LON[i] = NA
trdata$ELEV[i] = as.numeric(gsub("\\D", "", mylist[[idx+8]][2]))
}
## Insert code to detrend, chronologize, and export to csv
# Include exceptions for when it can't find the *.rwl file (write names to txt file)
rwlFile = paste(c(site, ".rwl"), collapse="")
if(!length(grep(rwlFile, files))){
txtToWrite = paste(c(site, "\n"), collapse="")
} else{
scratchRWL <- tryCatch(
{
rwl = read.rwl(rwlFile)
spl = detrend(rwl, method="Spline")
ids = read.ids(spl)
window.length = min(50, nrow(spl))
window.overlap = window.length - 10
stats <- tryCatch(
{
stats = rwi.stats.running(spl, ids=ids, window.length=window.length, window.overlap=window.overlap)
}, error=function(cond){
stats = rwi.stats.running(spl, window.length=window.length, window.overlap=window.overlap)
}, warning=function(cond){
stats = rwi.stats.running(spl, window.length=window.length, window.overlap=window.overlap)
}, finally = {
temp = NULL
}
)
write.csv(stats, paste(c(site, "w_stats.csv"), collapse=""))
res = chron(spl, prefix="", prewhiten=TRUE)
write.csv(res, paste(c(site, "w_crn.csv"), collapse=""))
}, error=function(e){})
## EW
site = unlist(strsplit(site, "[w]"))[1]
rwlFile = paste(c(site, "e.rwl"), collapse="")
scratchEW <- tryCatch(
{
ew = read.rwl(rwlFile)
ew_spl = detrend(ew, method="Spline")
ids = read.ids(ew_spl)
window.length = min(50, nrow(ew_spl))
window.overlap = window.length - 10
stats <- tryCatch(
{
stats = rwi.stats.running(ew_spl, ids=ids, window.length=window.length, window.overlap=window.overlap)
}, error=function(cond){
stats = rwi.stats.running(ew_spl, window.length=window.length, window.overlap=window.overlap)
}, warning=function(cond){
stats = rwi.stats.running(ew_spl, window.length=window.length, window.overlap=window.overlap)
}, finally = {
temp = NULL
}
)
write.csv(stats, paste(c(site, "e_stats.csv"), collapse=""))
ewres = chron(ew_spl, prefix="", prewhiten=TRUE)
write.csv(ewres, paste(c(site, "e_crn.csv"), collapse=""))
}, error=function(e){})
## LW
rwlFile = paste(c(site, "l.rwl"), collapse="")
scratchLW <- tryCatch(
{
lw = read.rwl(rwlFile)
lw_spl = detrend(lw, method="Spline")
ids = read.ids(lw_spl)
window.length = min(50, nrow(lw_spl))
window.overlap = window.length - 10
stats <- tryCatch(
{
stats = rwi.stats.running(lw_spl, ids=ids, window.length=window.length, window.overlap=window.overlap)
}, error=function(cond){
stats = rwi.stats.running(lw_spl, window.length=window.length, window.overlap=window.overlap)
}, warning=function(cond){
stats = rwi.stats.running(lw_spl, window.length=window.length, window.overlap=window.overlap)
}, finally = {
temp = NULL
}
)
write.csv(stats, paste(c(site, "l_stats.csv"), collapse=""))
lwres = chron(lw_spl, prefix="", prewhiten=TRUE)
write.csv(lwres, paste(c(site, "l_crn.csv"), collapse=""))
## Adjusted LW (following Griffin et al. 2011, Tree-Ring Res)
if(dim(lw_spl)[1]==dim(ew_spl)[1] & dim(lw_spl)[2]==dim(ew_spl)[2]){
lwadj = adjustLW(lw_spl, ew_spl)
ids = read.ids(lwadj)
window.length = min(50, nrow(lwadj))
window.overlap = window.length - 10
stats <- tryCatch(
{
stats = rwi.stats.running(lwadj, ids=ids, window.length=window.length, window.overlap=window.overlap)
}, error=function(cond){
stats = rwi.stats.running(lwadj, window.length=window.length, window.overlap=window.overlap)
}, warning=function(cond){
stats = rwi.stats.running(lwadj, window.length=window.length, window.overlap=window.overlap)
}, finally = {
temp = NULL
}
)
write.csv(stats, paste(c(site, "la_stats.csv"), collapse=""))
lwadjres = chron(lwadj, prefix="", prewhiten=TRUE)
write.csv(lwadjres, paste(c(site, "la_crn.csv"), collapse=""))
} else {
commonYrs = intersect(row.names(lwres), row.names(ew))
lwadjres = data.frame(matrix(nrow = length(commonYrs), ncol=3))
names(lwadjres) <- names(lwres)
row.names(lwadjres) <- commonYrs
lwadjres[,3] = lwres[row.names(lwres) %in% commonYrs,3]
# STD adjustment
lwtemp = lwres[row.names(lwres) %in% commonYrs,1]
ewtemp = ewres[row.names(ewres) %in% commonYrs,1]
mdl = lm(lwtemp~ewtemp)
lwadjres[,1] = mdl$residuals +1
# RES adjustment
lwtemp = lwres[row.names(lwres) %in% commonYrs,2]
ewtemp = ewres[row.names(ewres) %in% commonYrs,2]
naidx = !is.na(lwtemp)&!is.na(ewtemp)
mdl = lm(lwtemp~ewtemp, na.action = na.exclude)
lwadjres[naidx,2] = mdl$residuals +1
write.csv(lwadjres, paste(c(site, "la_crn.csv"), collapse=""))
}
}, error=function(e){})
}
close(fc)
}
|
c
c------------------------------------------------------------
c
subroutine reg_slopes3(qavg,q,qx,qy,qxx,qxy,qyy,mitot,mjtot,irr,
& lstgrd,lwidth,hx,hy,xlow,ylow,mptr,
& nvar,istage)
use amr_module
implicit double precision(a-h,o-z)
dimension q(nvar,mitot,mjtot),qx(nvar,mitot,mjtot),
& qy(nvar,mitot,mjtot),irr(mitot,mjtot)
dimension qavg(nvar,mitot,mjtot)
dimension qxx(nvar,mitot,mjtot), qyy(nvar,mitot,mjtot)
dimension qxy(nvar,mitot,mjtot)
logical quad, nolimiter
include "cuserdt.i"
common /order2/ ssw, quad, nolimiter
c
c     Set all slopes, even for irregular or solid cells.
c     They will be fixed afterwards by the slope routines for irregular/solid cells.
c     # ssw = slope switch (1. for slopes, 0. for donor cell, i.e. zero slopes)
c # now set in amrcart
c
c ssw = 0 for ctu (no slopes), 1 for muscl (second order).
c compute slopes using muscl limiter
c q contains either conserved or primitive variables
c
c now set for quadratic reconstruction. Limiting done elsewhere
c initialized to zero in method (in case no slopes at all)
c
c this version converts to pointwise and then does pointwise derivatives
c formulas should be high order quadratics
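c
c     the conversion from cell averages to pointwise values used below is
c        q_pt = q_avg - (hx**2*q_xx + hy**2*q_yy)/24
c     carried out with undivided second differences (i.e. before qxx, qyy
c     are divided by hx**2 and hy**2)
c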
q = qavg ! this will become pointwise vals
hx2 = 2.d0*hx
hy2 = 2.d0*hy
hxsq = hx*hx
hysq = hy*hy
c
c      compute second derivatives first, since they are needed for the conversion
c      note that the division by h^2 is postponed until the next loop
do j = 2, mjtot-1
do i = 2, mitot-1
qxx(:,i,j) = q(:,i+1,j)-2.d0*q(:,i,j)+q(:,i-1,j)
qyy(:,i,j) = q(:,i,j+1)-2.d0*q(:,i,j)+q(:,i,j-1)
           ! and while we're at it, one more: the mixed derivative
qxy(:,i,j) = ((q(:,i+1,j+1) - q(:,i-1,j+1))
& - (q(:,i+1,j-1) - q(:,i-1,j-1)))/(hx2*hy2)
end do
end do
do j = 2, mjtot-1
do i = 2, mitot-1
q(:,i,j) = q(:,i,j) -(qxx(:,i,j)+qyy(:,i,j))/24.d0
qxx(:,i,j) = qxx(:,i,j)/hxsq
qyy(:,i,j) = qyy(:,i,j)/hysq
end do
end do
! finally do gradients
do j = 2, mjtot-1
do i = 2, mitot-1
qx(:,i,j) = (q(:,i+1,j)-q(:,i-1,j))/hx2
qy(:,i,j) = (q(:,i,j+1)-q(:,i,j-1))/hy2
end do
end do
c
return
end
|
lemma binaryset_sums: assumes f: "f {} = 0" shows "(\<lambda>n. f (binaryset A B n)) sums (f A + f B)" |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Source: https://www.ibm.com/support/knowledgecenter/SSRU69_1.2.0/base/vision_prepare_custom_deploy.html
"""
import os
import logging as log
import keras
import numpy as np
from PIL import Image
# Import required by Visual Insights
from deploy_interface import DeployCallback
# Import SSD7 model package
import SSD7
class MyDeploy(DeployCallback):
def __init__(self):
log.info("CALL MyDeploy.__init__")
        # Define the expected image size (height, width)
        # Images will NOT be resized and must all be the same size (see load_img method)
        self.img_size = (500, 660)
def onModelLoading(self, model_path, labels, workspace_path):
"""
Callback for model loading
Params:
model_path: path of the trained model (has been decompressed before this callback)
workspace_path: recommended temporary workspace
labels: dict of index -> "category"
Return: None
"""
log.info("CALL MyDeploy.onModelLoading")
self.labels_dict = labels
model_file = os.path.join(model_path, "model.h5")
log.info("[model_file] Loading model from %s", model_file)
# load the Keras model
self.model = keras.models.load_model(model_file,
custom_objects = {
"AnchorBoxes": SSD7.keras_layer_AnchorBoxes.AnchorBoxes,
"compute_loss": SSD7.keras_ssd_loss.SSDLoss().compute_loss
})
def onTest(self):
"""
Return: custom string (used to test API interface)
"""
log.info("CALL MyDeploy.onTest")
return "This is Houston. I copy a transmission calling Houston. Over"
def onInference(self, image_url, params):
"""
Run inference on a single image
Params:
image_url: image path
params: additional inference options
"heatmap": for classification, "true" if heatmap requested, "false" if not
Return:
if classification: dict
"label" -> "category", # label name
"confidence": float, # confidence score between 0 and 1
"heatmap": "value" # heatmap return [TODO] what does it look like ? No doc provided
if object detection: list
"confidence": float # confidence score between 0 and 1
"label": "category" # label name
"ymax", "xmax", "xmin", "ymin": coordinates of bounding box
"""
log.info("CALL MyDeploy.onInference")
log.info("image_url: %s", image_url)
        # load the image to predict, and add a dummy batch dimension
        # (the model expects shape [batch, height, width, channels])
image = np.expand_dims(self.load_img(image_url), axis=0)
# run prediction (a list of one label per image is returned -> first element)
prediction_encoded = self.model.predict(image)
prediction = SSD7.decode_detections(prediction_encoded,
confidence_thresh=0.5,
img_height=self.img_size[0],
img_width=self.img_size[1])
return [
{
"confidence": box[1],
"label": self.labels_dict[box[0] - 1],
"xmin": box[-4],
"ymin": box[-3],
"xmax": box[-2],
"ymax": box[-1]
} for box in prediction[0]]
def onFailed(self, deploy_status, e, tb_message):
"""
Callback for failed deployment
Params:
deploy_status: deploy status when failure occurred
e: Exception object
tb_message: formatted traceback
Return: None
"""
log.info("CALL MyDeploy.onFailed")
log.error("Deploy status: %s", deploy_status)
log.error("Traceback message: %s", tb_message)
log.exception(e)
def load_img(self, path):
        # given an image path, load the image (no resizing is performed) and
        # check that it has the expected size
        # returns a numpy.array of shape (height, width, 3)
img = np.array(Image.open(path), dtype=np.uint8)
assert(img.shape == self.img_size + (3,))
return img
|
(** * Perm: Basic Techniques for Comparisons and Permutations *)
(** Consider these algorithms and data structures:
- sort a sequence of numbers
- finite maps from numbers to (arbitrary-type) data
- finite maps from any ordered type to (arbitrary-type) data
- priority queues: finding/deleting the highest number in a set
To prove the correctness of such programs, we need to reason about
comparisons, and about whether two collections have the same
contents. In this chapter, we introduce some techniques for
reasoning about:
- less-than comparisons on natural numbers, and
- permutations (rearrangements of lists).
In later chapters, we'll apply these proof techniques to reasoning
about algorithms and data structures. *)
Set Warnings "-notation-overridden,-parsing".
From Coq Require Import Strings.String. (* for manual grading *)
From Coq Require Export Bool.Bool.
From Coq Require Export Arith.Arith.
From Coq Require Export Arith.EqNat.
From Coq Require Export Omega.
From Coq Require Export Lists.List.
Export ListNotations.
From Coq Require Export Permutation.
(* ################################################################# *)
(** * The Less-Than Order on the Natural Numbers *)
(** In our proofs about searching and sorting algorithms, we often
    have to reason about the less-than and greater-than orders on natural
    numbers.  Recall that the Coq standard library contains both
propositional and Boolean less-than operators on natural numbers.
We write [x < y] for the proposition that [x] is less than [y]: *)
Locate "_ < _". (* "x < y" := lt x y *)
Check lt : nat -> nat -> Prop.
(** And we write [x <? y] for the computation that returns [true] or
[false] depending on whether [x] is less than [y]: *)
Locate "_ <? _". (* x <? y := Nat.ltb x y *)
Check Nat.ltb : nat -> nat -> bool.
(** Operation [<] is a reflection of [<?], as discussed in
[Logic] and [IndProp]. The [Nat] module has a
theorem showing how they relate: *)
Check Nat.ltb_lt : forall n m : nat, (n <? m) = true <-> n < m.
(** The [Nat] module contains a synonym for [lt]. *)
Print Nat.lt. (* Nat.lt = lt *)
(** For unknown reasons, [Nat] does not define notations
for [>?] or [>=?]. So we define them here: *)
Notation "a >=? b" := (Nat.leb b a)
(at level 70) : nat_scope.
Notation "a >? b" := (Nat.ltb b a)
(at level 70) : nat_scope.
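(** As a quick sanity check that the derived notations behave as expected,
    both of these compute directly: *)
Example gtb_geb_example: (5 >? 3) = true /\ (3 >=? 3) = true.
Proof. split; reflexivity. Qed.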
(* ================================================================= *)
(** ** The Omega Tactic *)
(** Reasoning about inequalities by hand can be a little painful. Luckily, Coq
provides a tactic called [omega] that is quite helpful. *)
Theorem omega_example1:
forall i j k,
i < j ->
~ (k - 3 <= j) ->
k > i.
Proof.
intros.
(** The hard way to prove this is by hand. *)
(* try to remember the name of the lemma about negation and [<=] *)
Search (~ _ <= _ -> _).
apply not_le in H0.
(* try to remember the name of the transitivity lemma about [>] *)
Search (_ > _ -> _ > _ -> _ > _).
apply gt_trans with j.
apply gt_trans with (k-3).
(* Is [k] greater than [k-3]? On the integers, sure. But we're working
with natural numbers, which truncate subtraction at zero. *)
Abort.
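(** Indeed, on [nat] subtraction truncates at zero: *)
Example truncated_subtraction_example: 2 - 3 = 0.
Proof. reflexivity. Qed.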
Theorem truncated_subtraction: ~ (forall k:nat, k > k - 3).
Proof.
intros contra.
(* [specialize] applies a hypothesis to an argument *)
specialize (contra 0).
simpl in contra.
inversion contra.
Qed.
(** Since subtraction is truncated, does [omega_example1] actually hold?
It does. Let's try again, the hard way, to find the proof. *)
Theorem omega_example1:
forall i j k,
i < j ->
~ (k - 3 <= j) ->
k > i.
Proof. (* try again! *)
intros.
apply not_le in H0.
unfold gt in H0.
unfold gt.
(* try to remember the name ... *)
Search (_ < _ -> _ <= _ -> _ < _).
apply lt_le_trans with j.
apply H.
apply le_trans with (k-3).
Search (_ < _ -> _ <= _).
apply lt_le_weak.
auto.
apply le_minus.
Qed.
(** That was tedious. Here's a much easier way: *)
Theorem omega_example2:
forall i j k,
i < j ->
~ (k - 3 <= j) ->
k > i.
Proof.
intros.
omega.
Qed.
(** Omega is a decision procedure invented in 1991 by William Pugh for
integer linear programming (ILP). The [omega] tactic was made
available by importing [Coq.omega.Omega], at the beginning of the
file. It is an implementation of Pugh's algorithm. The tactic
works with Coq types [Z] and [nat], and these operators: [<] [=] [>]
[<=] [>=] [+] [-] [~], as well as multiplication by small integer
literals (such as 0,1,2,3...), and some uses of [\/] and [/\].
Omega does not "understand" other operators. It treats
expressions such as [a * b] and [f x y] as variables. That is, it
can prove [f x y > a * b -> f x y + 3 >= a * b], in the same way it
would prove [u > v -> u + 3 >= v]. But it cannot reason about, e.g.,
multiplication. *)
Theorem omega_example_3 : forall (f : nat -> nat -> nat) a b x y,
f x y > a * b -> f x y + 3 >= a * b.
Proof.
intros. omega.
Qed.
Theorem omega_example_4 : forall a b,
a * b = b * a.
Proof.
intros. Fail omega.
Abort.
(** The Omega algorithm is NP-complete, so we might expect that
this tactic is exponential-time in the worst case. Indeed,
if you have [N] equations, it could take [2^N] time.
But in the typical cases that result from reasoning about
programs, [omega] is much faster than that. *)
(* ################################################################# *)
(** * Swapping *)
(** Consider trying to sort a list of natural numbers. As a small piece of
a sorting algorithm, we might need to swap the first two elements of a list
if they are out of order. *)
Definition maybe_swap (al: list nat) : list nat :=
match al with
| a :: b :: ar => if a >? b then b :: a :: ar else a :: b :: ar
| _ => al
end.
Example maybe_swap_123:
maybe_swap [1; 2; 3] = [1; 2; 3].
Proof. reflexivity. Qed.
Example maybe_swap_321:
maybe_swap [3; 2; 1] = [2; 3; 1].
Proof. reflexivity. Qed.
(** Applying [maybe_swap] twice should give the same result as applying it once.
That is, [maybe_swap] is _idempotent_. *)
Theorem maybe_swap_idempotent: forall al,
maybe_swap (maybe_swap al) = maybe_swap al.
Proof.
intros [ | a [ | b al]]; simpl; try reflexivity.
destruct (b <? a) eqn:Hb_lt_a; simpl.
- destruct (a <? b) eqn:Ha_lt_b; simpl.
+ (** Now what? We have a contradiction in the hypotheses: it
cannot hold that [a] is less than [b] and [b] is less than
[a]. Unfortunately, [omega] cannot immediately show that
for us, because it reasons about comparisons in [Prop] not
[bool]. *)
Fail omega.
Abort.
(** Of course we could finish the proof by reasoning directly about
inequalities in [bool]. But this situation is going to occur
repeatedly in our study of sorting. *)
(** Let's set up some machinery to enable using [omega] on boolean
tests. *)
(* ================================================================= *)
(** ** Reflection *)
(** The [reflect] type, defined in the standard library (and presented
in [IndProp]), relates a proposition to a Boolean. That is,
a value of type [reflect P b] contains a proof of [P] if [b] is
[true], or a proof of [~ P] if [b] is [false]. *)
Print reflect.
(*
Inductive reflect (P : Prop) : bool -> Set :=
| ReflectT : P -> reflect P true
| ReflectF : ~ P -> reflect P false
*)
(** The standard library proves a theorem that says if [P] is provable
whenever [b = true] is provable, then [P] reflects [b]. *)
Check iff_reflect : forall (P : Prop) (b : bool),
P <-> b = true -> reflect P b.
(** Using that theorem, we can quickly prove that the propositional
(in)equality operators are reflections of the Boolean
operators. *)
Lemma eqb_reflect : forall x y, reflect (x = y) (x =? y).
Proof.
intros x y. apply iff_reflect. symmetry.
apply Nat.eqb_eq.
Qed.
Lemma ltb_reflect : forall x y, reflect (x < y) (x <? y).
Proof.
intros x y. apply iff_reflect. symmetry.
apply Nat.ltb_lt.
Qed.
Lemma leb_reflect : forall x y, reflect (x <= y) (x <=? y).
Proof.
intros x y. apply iff_reflect. symmetry.
apply Nat.leb_le.
Qed.
(** Here's an example of how you could use these lemmas. Suppose you
have this simple program, [(if a <? 5 then a else 2)], and you
want to prove that it evaluates to a number smaller than 6. You
can use [ltb_reflect] "by hand": *)
Example reflect_example1: forall a,
(if a <? 5 then a else 2) < 6.
Proof.
intros a.
(* The next two lines aren't strictly necessary, but they
help make it clear what [destruct] does. *)
assert (R: reflect (a < 5) (a <? 5)) by apply ltb_reflect.
remember (a <? 5) as guard.
destruct R as [H|H] eqn:HR.
* (* ReflectT *) omega.
* (* ReflectF *) omega.
Qed.
(** For the [ReflectT] constructor, the guard [a <? 5] must be equal
to [true]. The [if] expression in the goal has already been
simplified to take advantage of that fact. Also, for [ReflectT] to
have been used, there must be evidence [H] that [a < 5] holds.
From there, all that remains is to show [a < 5] entails [a < 6].
The [omega] tactic, which is capable of automatically proving some
theorems about inequalities, succeeds.
For the [ReflectF] constructor, the guard [a <? 5] must be equal
to [false]. So the [if] expression simplifies to [2 < 6], which is
immediately provable by [omega]. *)
(** A less didactic version of the above proof wouldn't do the
[assert] and [remember]: we can directly skip to [destruct]. *)
Example reflect_example1': forall a,
(if a <? 5 then a else 2) < 6.
Proof.
intros a. destruct (ltb_reflect a 5); omega.
Qed.
(** But even that proof is a little unsatisfactory. The original expression,
[a <? 5], is not perfectly apparent from the expression [ltb_reflect a 5]
that we pass to [destruct]. *)
(** It would be nice to be able to just say something like [destruct
(a <? 5)] and get the reflection "for free." That's what we'll
engineer, next. *)
(* ================================================================= *)
(** ** A Tactic for Boolean Destruction *)
(** We're now going to build a tactic that you'll want to _use_, but
you won't need to understand the details of how to _build_ it
yourself.
Let's put several of these [reflect] lemmas into a Hint database.
We call it [bdestruct], because we'll use it in our
boolean-destruction tactic: *)
Hint Resolve ltb_reflect leb_reflect eqb_reflect : bdestruct.
(** Here is the tactic, the body of which you do not need to
understand. Invoking [bdestruct] on Boolean expression [b] does
the same kind of reasoning we did above: reflection and
destruction. It also attempts to simplify negations involving
inequalities in hypotheses. *)
Ltac bdestruct X :=
let H := fresh in let e := fresh "e" in
evar (e: Prop);
assert (H: reflect e X); subst e;
[eauto with bdestruct
| destruct H as [H|H];
[ | try first [apply not_lt in H | apply not_le in H]]].
(** This tactic makes quick, easy-to-read work of our running example. *)
Example reflect_example2: forall a,
(if a <? 5 then a else 2) < 6.
Proof.
intros.
bdestruct (a <? 5); (* instead of: [destruct (ltb_reflect a 5)]. *)
omega.
Qed.
(* ================================================================= *)
(** ** Finishing the [maybe_swap] Proof *)
(** Now that we have [bdestruct], we can finish the proof of [maybe_swap]'s
idempotence. *)
Theorem maybe_swap_idempotent: forall al,
maybe_swap (maybe_swap al) = maybe_swap al.
Proof.
intros [ | a [ | b al]]; simpl; try reflexivity.
bdestruct (a >? b); simpl.
(** Note how [b < a] is a hypothesis, rather than [b <? a = true]. *)
- bdestruct (b >? a); simpl.
+ (** [omega] can take care of the contradictory propositional inequalities. *)
omega.
+ reflexivity.
- bdestruct (a >? b); simpl.
+ omega.
+ reflexivity.
Qed.
(** When proving theorems about a program that uses Boolean
comparisons, use [bdestruct] followed by [omega], rather than
[destruct] followed by application of various theorems about
Boolean operators. *)
(* ################################################################# *)
(** * Permutations *)
(** Another useful fact about [maybe_swap] is that it doesn't add or
remove elements from the list: it only reorders them. That is,
the output list is a permutation of the input. List [al] is a
_permutation_ of list [bl] if the elements of [al] can be
reordered to get the list [bl]. Note that reordering does not
permit adding or removing duplicate elements. *)
(** Coq's [Permutation] library has an inductive definition of
permutations. *)
Print Permutation.
(*
Inductive Permutation {A : Type} : list A -> list A -> Prop :=
perm_nil : Permutation [] []
| perm_skip : forall (x : A) (l l' : list A),
Permutation l l' ->
Permutation (x :: l) (x :: l')
| perm_swap : forall (x y : A) (l : list A),
Permutation (y :: x :: l) (x :: y :: l)
| perm_trans : forall l l' l'' : list A,
Permutation l l' ->
Permutation l' l'' ->
Permutation l l''.
*)
(** You might wonder, "is that really the right definition?" And
indeed, it's important that we get a right definition, because
[Permutation] is going to be used in our specifications of
searching and sorting algorithms. If we have the wrong
specification, then all our proofs of "correctness" will be
useless.
It's not obvious that this is indeed the right specification of
permutations. (It happens to be, but that's not obvious.) To gain
confidence that we have the right specification, let's use it
prove some properties that permutations ought to have. *)
(** **** Exercise: 2 stars, standard (Permutation_properties)
Think of some desirable properties of the [Permutation] relation
and write them down informally in English, or a mix of Coq and
English. Here are four to get you started:
- 1. If [Permutation al bl], then [length al = length bl].
- 2. If [Permutation al bl], then [Permutation bl al].
- 3. [[1;1]] is NOT a permutation of [[1;2]].
- 4. [[1;2;3;4]] IS a permutation of [[3;4;2;1]].
YOUR TASK: Add three more properties. Write them here: *)
(** Now, let's examine all the theorems in the Coq library about
permutations: *)
Search Permutation. (* Browse through the results of this query! *)
(** Which of the properties that you wrote down above have already
been proved as theorems by the Coq library developers? Answer
here:
*)
(* Do not modify the following line: *)
Definition manual_grade_for_Permutation_properties : option (nat*string) := None.
(** [] *)
(** Let's use the permutation theorems in the library to prove the
following theorem. *)
Example butterfly: forall b u t e r f l y : nat,
Permutation ([b;u;t;t;e;r]++[f;l;y]) ([f;l;u;t;t;e;r]++[b;y]).
Proof.
intros.
(** Let's group [[u;t;t;e;r]] together on both sides. Tactic
[change t with u] replaces [t] with [u]. Terms [t] and [u] must
be _convertible_, here meaning that they evaluate to the same
term. *)
change [b;u;t;t;e;r] with ([b]++[u;t;t;e;r]).
change [f;l;u;t;t;e;r] with ([f;l]++[u;t;t;e;r]).
(** We don't actually need to know the list elements in
[[u;t;t;e;r]]. Let's forget about them and just remember them
as a variable named [utter]. *)
remember [u;t;t;e;r] as utter. clear Hequtter.
(** Likewise, let's group [[f;l]] and remember it as a variable. *)
change [f;l;y] with ([f;l]++[y]).
remember [f;l] as fl. clear Heqfl.
(** Next, let's cancel [fl] from both sides. In order to do that,
we need to bring it to the beginning of each list. For the right
list, that follows easily from the associativity of [++]. *)
replace ((fl ++ utter) ++ [b;y]) with (fl ++ utter ++ [b;y])
by apply app_assoc.
(** But for the left list, we can't just use associativity.
Instead, we need to reason about permutations and use some
library theorems. *)
apply perm_trans with (fl ++ [y] ++ ([b] ++ utter)).
- replace (fl ++ [y] ++ [b] ++ utter) with ((fl ++ [y]) ++ [b] ++ utter).
+ apply Permutation_app_comm.
+ rewrite <- app_assoc. reflexivity.
- (** A library theorem will now help us cancel [fl]. *)
apply Permutation_app_head.
(** Next let's cancel [utter]. *)
apply perm_trans with (utter ++ [y] ++ [b]).
+ replace ([y] ++ [b] ++ utter) with (([y] ++ [b]) ++ utter).
* apply Permutation_app_comm.
* rewrite app_assoc. reflexivity.
+ apply Permutation_app_head.
(** Finally we're left with just [y] and [b]. *)
apply perm_swap.
Qed.
(** That example illustrates a general method for proving permutations
involving cons [::] and append [++]:
- Identify some portion appearing in both sides.
- Bring that portion to the front on each side using lemmas such
as [Permutation_app_comm] and [perm_swap], with generous use of
[perm_trans].
- Use [Permutation_app_head] to cancel an appended head. You can
also use [perm_skip] to cancel a single element. *)
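(** As a quick illustration of that recipe (a small added sketch, not part of
    the original exercises), [change] regroups the left-hand side so that
    [Permutation_app_comm] applies directly: *)
Example perm_method_sketch: forall (x : nat) (l : list nat),
  Permutation (x :: l) (l ++ [x]).
Proof.
  intros. change (x :: l) with ([x] ++ l). apply Permutation_app_comm.
Qed.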
(** **** Exercise: 3 stars, standard (permut_example)
Use the permutation rules in the library to prove the following
theorem. The following [Check] commands are a hint about useful
lemmas. You don't need all of them, and depending on your
approach you will find lemmas to be more useful than others. Use
[Search Permutation] to find others, if you like. *)
Check perm_skip.
Check perm_trans.
Check Permutation_refl.
Check Permutation_app_comm.
Check app_assoc.
Check app_nil_r.
Check app_comm_cons.
Example permut_example: forall (a b: list nat),
Permutation (5 :: 6 :: a ++ b) ((5 :: b) ++ (6 :: a ++ [])).
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, standard (not_a_permutation)
Prove that [[1;1]] is not a permutation of [[1;2]].
Hints are given as [Check] commands. *)
Check Permutation_cons_inv.
Check Permutation_length_1_inv.
Example not_a_permutation:
~ Permutation [1;1] [1;2].
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Correctness of [maybe_swap] *)
(** Now we can prove that [maybe_swap] is a permutation: it reorders
elements but does not add or remove any. *)
Theorem maybe_swap_perm: forall al,
Permutation al (maybe_swap al).
Proof.
(* WORKED IN CLASS *)
unfold maybe_swap.
destruct al as [ | a [ | b al]].
- simpl. apply perm_nil.
- apply Permutation_refl.
- bdestruct (b <? a).
+ apply perm_swap.
+ apply Permutation_refl.
Qed.
(** And, we can prove that [maybe_swap] permutes elements such that
the first is less than or equal to the second. *)
Definition first_le_second (al: list nat) : Prop :=
match al with
| a :: b :: _ => a <= b
| _ => True
end.
Theorem maybe_swap_correct: forall al,
Permutation al (maybe_swap al)
/\ first_le_second (maybe_swap al).
Proof.
intros. split.
- apply maybe_swap_perm.
- (* WORKED IN CLASS *)
unfold maybe_swap.
destruct al as [ | a [ | b al]]; simpl; auto.
bdestruct (a >? b); simpl; omega.
Qed.
(* ################################################################# *)
(** * Summary: Comparisons and Permutations *)
(** To prove correctness of algorithms for sorting and searching,
we'll reason about comparisons and permutations using the tools
developed in this chapter. The [maybe_swap] program is a tiny
little example of a sorting program. The proof style in
[maybe_swap_correct] will be applied (at a larger scale) in
the next few chapters. *)
(** **** Exercise: 3 stars, standard (Forall_perm)
To close, we define a utility tactic and lemma. First, the
tactic. *)
(** Coq's [inversion H] tactic is so good at extracting
information from the hypothesis [H] that [H] sometimes becomes
completely redundant, and one might as well [clear] it from the
goal. Then, since the [inversion] typically creates some equality
facts, why not then [subst]? Tactic [inv] does just that. *)
Ltac inv H := inversion H; clear H; subst.
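(** As an illustrative sketch (not one of the exercises), [inv] collapses the
    usual [inversion]/[clear]/[subst] sequence into a single step: *)
Example inv_example: forall (x y : nat), [x] = [y] -> x = y.
Proof.
  intros x y H. inv H. reflexivity.
Qed.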
(** Second, the lemma. You will find [inv] useful in proving it.
[Forall] is Coq library's version of the [All] proposition defined
in [Logic], but defined as an inductive proposition rather
than a fixpoint. Prove this lemma by induction. You will need to
decide what to induct on: [al], [bl], [Permutation al bl], and
[Forall f al] are possibilities. *)
Theorem Forall_perm: forall {A} (f: A -> Prop) al bl,
Permutation al bl ->
Forall f al -> Forall f bl.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* 2020-08-07 17:08 *)
|
||| Implementation of ordering relations for `Fin`ite numbers
module Data.Fin.Order
import Data.Fin
import Data.Fun
import Data.Rel
import Data.Nat
import Data.Nat.Order
import Decidable.Decidable
import Decidable.Order
using (k : Nat)
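-- FinLTE m n is a proof that m <= n, established by comparing the underlying
-- natural numbers obtained via finToNat.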
data FinLTE : Fin k -> Fin k -> Type where
FromNatPrf : {m : Fin k} -> {n : Fin k} -> LTE (finToNat m) (finToNat n) -> FinLTE m n
implementation Preorder (Fin k) FinLTE where
transitive m n o (FromNatPrf p1) (FromNatPrf p2) =
FromNatPrf (LTEIsTransitive (finToNat m) (finToNat n) (finToNat o) p1 p2)
reflexive n = FromNatPrf (LTEIsReflexive (finToNat n))
implementation Poset (Fin k) FinLTE where
antisymmetric m n (FromNatPrf p1) (FromNatPrf p2) =
finToNatInjective m n (LTEIsAntisymmetric (finToNat m) (finToNat n) p1 p2)
implementation Decidable [Fin k, Fin k] FinLTE where
decide m n with (decideLTE (finToNat m) (finToNat n))
decide m n | Yes prf = Yes (FromNatPrf prf)
decide m n | No disprf = No (\ (FromNatPrf prf) => disprf prf)
implementation Ordered (Fin k) FinLTE where
order m n =
either (Left . FromNatPrf)
(Right . FromNatPrf)
(order (finToNat m) (finToNat n))
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory dc_20081211
imports "CParser.CTranslation"
begin
external_file "dc_20081211.c"
install_C_file "dc_20081211.c"
declare [[show_types]]
context dc_20081211_global_addresses
begin
thm \<Gamma>_def
end
context setHardwareASID_modifies
begin
thm \<Gamma>_def
thm setHardwareASID_modifies
end
context dc_20081211 begin
term "\<Gamma>"
thm setHardwareASID_modifies
thm test_body_def
thm test_modifies
lemma test_modifies:
"\<forall>s. \<Gamma> \<turnstile>\<^bsub>/UNIV\<^esub> {s} Call
test_'proc {t. t may_only_modify_globals s in [x]}"
(* fails: apply(vcg spec=modifies)
perhaps because there is already a test_modifies in
scope *)
oops
end
end
|
[STATEMENT]
theorem F'map_cong:
"\<lbrakk>\<And>z. z \<in> F'set1 x \<Longrightarrow> f1 z = g1 z; \<And>z. z \<in> F'set2 x \<Longrightarrow> f2 z = g2 z;
\<And>z. z \<in> F'set3 x \<Longrightarrow> f3 z = g3 z; \<And>z. z \<in> F'set4 x \<Longrightarrow> f4 z = g4 z\<rbrakk>
\<Longrightarrow> F'map f1 f2 f3 f4 x = F'map g1 g2 g3 g4 x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>z. z \<in> {} \<Longrightarrow> f1 z = g1 z; \<And>z. z \<in> {} \<Longrightarrow> f2 z = g2 z; \<And>z. z \<in> F'set3 x \<Longrightarrow> f3 z = g3 z; \<And>z. z \<in> F'set4 x \<Longrightarrow> f4 z = g4 z\<rbrakk> \<Longrightarrow> Fmap f3 f4 x = Fmap g3 g4 x
[PROOF STEP]
apply (tactic \<open>BNF_Util.rtac @{context} @{thm F.map_cong0} 1 THEN REPEAT_DETERM_N 2 (assume_tac @{context} 1)\<close>)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>z1. \<lbrakk>\<And>z. z \<in> {} \<Longrightarrow> f1 z = g1 z; \<And>z. z \<in> {} \<Longrightarrow> f2 z = g2 z; \<And>z. z \<in> F'set3 x \<Longrightarrow> f3 z = g3 z; \<And>z. z \<in> F'set4 x \<Longrightarrow> f4 z = g4 z; z1 \<in> F'set3 x\<rbrakk> \<Longrightarrow> f3 z1 = g3 z1
2. \<And>z2. \<lbrakk>\<And>z. z \<in> {} \<Longrightarrow> f1 z = g1 z; \<And>z. z \<in> {} \<Longrightarrow> f2 z = g2 z; \<And>z. z \<in> F'set3 x \<Longrightarrow> f3 z = g3 z; \<And>z. z \<in> F'set4 x \<Longrightarrow> f4 z = g4 z; z2 \<in> F'set4 x\<rbrakk> \<Longrightarrow> f4 z2 = g4 z2
[PROOF STEP]
apply assumption+
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
-- This module is used to illustrate how to import a parameterised module.
module examples.syntax.ModuleB
(A : Set)
((==) : A -> A -> Prop)
(refl : (x : A) -> x == x)
where
infix 5 /\
module SubModule where
postulate dummy : A
data True : Prop where
tt : True
data False : Prop where
data (/\) (P, Q : Prop) : Prop where
andI : P -> Q -> P /\ Q
data List : Set where
nil : List
cons : A -> List -> List
eqList : List -> List -> Prop
eqList nil nil = True
eqList (cons x xs) nil = False
eqList nil (cons y ys) = False
eqList (cons x xs) (cons y ys) = x == y /\ eqList xs ys
reflEqList : (xs : List) -> eqList xs xs
reflEqList nil = tt
reflEqList (cons x xs) = andI (refl x) (reflEqList xs)
|
[GOAL]
p q : β
s t : Set β
β’ NeBot (cocompact β β π p)
[PROOFSTEP]
refine' (hasBasis_cocompact.inf (nhds_basis_opens _)).neBot_iff.2 _
[GOAL]
p q : β
s t : Set β
β’ β {i : Set β Γ Set β}, IsCompact i.fst β§ p β i.snd β§ IsOpen i.snd β Set.Nonempty (i.fstαΆ β© i.snd)
[PROOFSTEP]
rintro β¨s, oβ© β¨hs, hpo, hoβ©
[GOAL]
case mk.intro.intro
p q : β
sβ t s o : Set β
hs : IsCompact (s, o).fst
hpo : p β (s, o).snd
ho : IsOpen (s, o).snd
β’ Set.Nonempty ((s, o).fstαΆ β© (s, o).snd)
[PROOFSTEP]
rw [inter_comm]
[GOAL]
case mk.intro.intro
p q : β
sβ t s o : Set β
hs : IsCompact (s, o).fst
hpo : p β (s, o).snd
ho : IsOpen (s, o).snd
β’ Set.Nonempty ((s, o).snd β© (s, o).fstαΆ)
[PROOFSTEP]
exact (dense_compl_compact hs).inter_open_nonempty _ ho β¨p, hpoβ©
[GOAL]
p q : β
s t : Set β
β’ Β¬IsCountablyGenerated (cocompact β)
[PROOFSTEP]
intro H
[GOAL]
p q : β
s t : Set β
H : IsCountablyGenerated (cocompact β)
β’ False
[PROOFSTEP]
rcases exists_seq_tendsto (cocompact β β π 0) with β¨x, hxβ©
[GOAL]
case intro
p q : β
s t : Set β
H : IsCountablyGenerated (cocompact β)
x : β β β
hx : Tendsto x atTop (cocompact β β π 0)
β’ False
[PROOFSTEP]
rw [tendsto_inf] at hx
[GOAL]
case intro
p q : β
s t : Set β
H : IsCountablyGenerated (cocompact β)
x : β β β
hx : Tendsto x atTop (cocompact β) β§ Tendsto x atTop (π 0)
β’ False
[PROOFSTEP]
rcases hx with β¨hxc, hx0β©
[GOAL]
case intro.intro
p q : β
s t : Set β
H : IsCountablyGenerated (cocompact β)
x : β β β
hxc : Tendsto x atTop (cocompact β)
hx0 : Tendsto x atTop (π 0)
β’ False
[PROOFSTEP]
obtain β¨n, hnβ© : β n : β, x n β insert (0 : β) (range x)
[GOAL]
p q : β
s t : Set β
H : IsCountablyGenerated (cocompact β)
x : β β β
hxc : Tendsto x atTop (cocompact β)
hx0 : Tendsto x atTop (π 0)
β’ β n, Β¬x n β insert 0 (range x)
case intro.intro.intro
p q : β
s t : Set β
H : IsCountablyGenerated (cocompact β)
x : β β β
hxc : Tendsto x atTop (cocompact β)
hx0 : Tendsto x atTop (π 0)
n : β
hn : Β¬x n β insert 0 (range x)
β’ False
[PROOFSTEP]
exact (hxc.eventually hx0.isCompact_insert_range.compl_mem_cocompact).exists
[GOAL]
case intro.intro.intro
p q : β
s t : Set β
H : IsCountablyGenerated (cocompact β)
x : β β β
hxc : Tendsto x atTop (cocompact β)
hx0 : Tendsto x atTop (π 0)
n : β
hn : Β¬x n β insert 0 (range x)
β’ False
[PROOFSTEP]
exact hn (Or.inr β¨n, rflβ©)
[GOAL]
p q : β
s t : Set β
β’ Β¬IsCountablyGenerated (π β)
[PROOFSTEP]
intro
[GOAL]
p q : β
s t : Set β
aβ : IsCountablyGenerated (π β)
β’ False
[PROOFSTEP]
have : IsCountablyGenerated (comap (OnePoint.some : β β ββ) (π β)) := by infer_instance
[GOAL]
p q : β
s t : Set β
aβ : IsCountablyGenerated (π β)
β’ IsCountablyGenerated (comap OnePoint.some (π β))
[PROOFSTEP]
infer_instance
[GOAL]
p q : β
s t : Set β
aβ : IsCountablyGenerated (π β)
this : IsCountablyGenerated (comap OnePoint.some (π β))
β’ False
[PROOFSTEP]
rw [OnePoint.comap_coe_nhds_infty, coclosedCompact_eq_cocompact] at this
[GOAL]
p q : β
s t : Set β
aβ : IsCountablyGenerated (π β)
this : IsCountablyGenerated (cocompact β)
β’ False
[PROOFSTEP]
exact not_countably_generated_cocompact this
[GOAL]
p q : β
s t : Set β
β’ Β¬FirstCountableTopology ββ
[PROOFSTEP]
intro
[GOAL]
p q : β
s t : Set β
aβ : FirstCountableTopology ββ
β’ False
[PROOFSTEP]
exact not_countably_generated_nhds_infty_opc inferInstance
[GOAL]
p q : β
s t : Set β
β’ Β¬SecondCountableTopology ββ
[PROOFSTEP]
intro
[GOAL]
p q : β
s t : Set β
aβ : SecondCountableTopology ββ
β’ False
[PROOFSTEP]
exact not_firstCountableTopology_opc inferInstance
[GOAL]
p q : β
s t : Set β
β’ TotallyDisconnectedSpace β
[PROOFSTEP]
refine' β¨fun s hsu hs x hx y hy => _β©
[GOAL]
p q : β
sβ t s : Set β
hsu : s β univ
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
β’ x = y
[PROOFSTEP]
clear hsu
[GOAL]
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
β’ x = y
[PROOFSTEP]
by_contra' H : x β y
[GOAL]
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
β’ False
[PROOFSTEP]
wlog hlt : x < y
[GOAL]
case inr
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
this :
β {p q : β} {s t : Set β} (s : Set β), IsPreconnected s β β (x : β), x β s β β (y : β), y β s β x β y β x < y β False
hlt : Β¬x < y
β’ False
[PROOFSTEP]
refine' this s hs y hy x hx H.symm <| H.lt_or_lt.resolve_left hlt
[GOAL]
case inr.refine'_1
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
this :
β {p q : β} {s t : Set β} (s : Set β), IsPreconnected s β β (x : β), x β s β β (y : β), y β s β x β y β x < y β False
hlt : Β¬x < y
β’ β
[PROOFSTEP]
assumption
[GOAL]
case inr.refine'_2
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
this :
β {p q : β} {s t : Set β} (s : Set β), IsPreconnected s β β (x : β), x β s β β (y : β), y β s β x β y β x < y β False
hlt : Β¬x < y
β’ β
[PROOFSTEP]
assumption
[GOAL]
case inr.refine'_3
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
this :
β {p q : β} {s t : Set β} (s : Set β), IsPreconnected s β β (x : β), x β s β β (y : β), y β s β x β y β x < y β False
hlt : Β¬x < y
β’ Set β
[PROOFSTEP]
assumption
[GOAL]
case inr.refine'_4
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
this :
β {p q : β} {s t : Set β} (s : Set β), IsPreconnected s β β (x : β), x β s β β (y : β), y β s β x β y β x < y β False
hlt : Β¬x < y
β’ Set β
[PROOFSTEP]
assumption
[GOAL]
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
hlt : x < y
β’ False
[PROOFSTEP]
rcases exists_irrational_btwn (Rat.cast_lt.2 hlt) with β¨z, hz, hxz, hzyβ©
[GOAL]
case intro.intro.intro
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
hlt : x < y
z : β
hz : Irrational z
hxz : βx < z
hzy : z < βy
β’ False
[PROOFSTEP]
have := hs.image _ continuous_coe_real.continuousOn
[GOAL]
case intro.intro.intro
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
hlt : x < y
z : β
hz : Irrational z
hxz : βx < z
hzy : z < βy
this : IsPreconnected (Rat.cast '' s)
β’ False
[PROOFSTEP]
rw [isPreconnected_iff_ordConnected] at this
[GOAL]
case intro.intro.intro
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
hlt : x < y
z : β
hz : Irrational z
hxz : βx < z
hzy : z < βy
thisβ : IsPreconnected (Rat.cast '' s)
this : OrdConnected (Rat.cast '' s)
β’ False
[PROOFSTEP]
have : z β Rat.cast '' s := this.out (mem_image_of_mem _ hx) (mem_image_of_mem _ hy) β¨hxz.le, hzy.leβ©
[GOAL]
case intro.intro.intro
p q : β
sβ t s : Set β
hs : IsPreconnected s
x : β
hx : x β s
y : β
hy : y β s
H : x β y
hlt : x < y
z : β
hz : Irrational z
hxz : βx < z
hzy : z < βy
thisβΒΉ : IsPreconnected (Rat.cast '' s)
thisβ : OrdConnected (Rat.cast '' s)
this : z β Rat.cast '' s
β’ False
[PROOFSTEP]
exact hz (image_subset_range _ _ this)
|
#include <boost/geometry/strategies/distance_result.hpp>
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE QuasiQuotes #-}
{-# OPTIONS_GHC -fplugin=Polysemy.Plugin #-}
--{-# LANGUAGE AllowAmbiguousTypes #-}
import qualified Control.Foldl as FL
import qualified Data.List as L
import qualified Data.Map as M
import qualified Data.Vector as VB
import qualified Data.Text as T
import qualified Text.Printf as PF
import qualified Data.Vinyl as V
import qualified Frames as F
import qualified Frames.CSV as F
import qualified Pipes as P
import qualified Pipes.Prelude as P
import qualified Numeric.LinearAlgebra as LA
import qualified Frames.Visualization.VegaLite.Data
as FV
import qualified Frames.Visualization.VegaLite.StackedArea
as FV
import qualified Frames.Visualization.VegaLite.LineVsTime
as FV
import qualified Frames.Visualization.VegaLite.ParameterPlots
as FV
import qualified Frames.Visualization.VegaLite.Correlation
as FV
import qualified Frames.Transform as FT
import qualified Frames.Folds as FF
import qualified Frames.MapReduce as MR
import qualified Frames.Enumerations as FE
import qualified Frames.Utils as FU
import qualified Knit.Report as K
import Polysemy.Error ( Error )
import Data.String.Here ( here )
import BlueRipple.Configuration
import BlueRippleUtilitie.KnitUtils
import BlueRipple.Data.DataFrames
import BlueRipple.Data.MRP
import qualified BlueRipple.Model.TurnoutAdjustment
as TA
templateVars = M.fromList
[ ("lang" , "English")
, ("author" , "Adam Conner-Sax & Frank David")
, ("pagetitle", "Preference Model & Predictions")
-- , ("tufte","True")
]
{- TODO
1. Why are rows still being dropped? Which col is missing? In general, write a diagnostic for showing what is missing...
Some answers:
The boring: CountyFIPS. I don't want this anyway. Dropped.
The weird: Missing voted_rep_party. These are non-voters or voters who didn't vote in the house race. A bit more than 1/3 of
survey responses. Maybe that's not weird??
-}
main :: IO ()
main = do
let template = K.FromIncludedTemplateDir "mindoc-pandoc-KH.html"
-- let template = K.FullySpecifiedTemplatePath "pandoc-templates/minWithVega-pandoc.html"
pandocWriterConfig <- K.mkPandocWriterConfig template
templateVars
K.mindocOptionsF
eitherDocs <-
K.knitHtmls (Just "MRP_Basics.Main") K.logAll pandocWriterConfig $ do
K.logLE K.Info "Loading data..."
let csvParserOptions =
F.defaultParser { F.quotingMode = F.RFC4180Quoting ' ' }
tsvParserOptions = csvParserOptions { F.columnSeparator = "\t" }
preFilterYears = FU.filterOnMaybeField @CCESYear (`L.elem` [2016])
ccesMaybeRecs <- loadToMaybeRecs @CCES_MRP_Raw @(F.RecordColumns CCES)
tsvParserOptions
preFilterYears
ccesTSV
ccesFrame <-
fmap transformCCESRow
<$> maybeRecsToFrame fixCCESRow (const True) ccesMaybeRecs
let firstFew = take 4 $ FL.fold FL.list ccesFrame
K.logLE K.Diagnostic
$ "ccesFrame (first 4 rows):\n"
<> (T.pack $ show firstFew)
K.logLE K.Info "Inferring..."
K.logLE K.Info "Knitting docs..."
case eitherDocs of
Right namedDocs ->
K.writeAllPandocResultsWithInfoAsHtml "reports/html/MRP_Basics" namedDocs
Left err -> putStrLn $ "pandoc error: " ++ show err
|
lemma zero_in_bigo [simp]: "(\<lambda>_. 0) \<in> O[F](f)" |
-- This module stores pinwheel filters on disk and streams them back to perform
-- convolution, handling the case where the filter size exceeds the available RAM.
module STC.Binary
( makePlanBinary
, computeInitialEigenVectorBinary
, writeDFTPinwheel
, convolutionBinary
) where
import Array.UnboxedArray as AU
import Control.Monad as M
import Control.Monad.IO.Class
import Control.Monad.Trans.Resource
import Data.Array.Repa as R
import Data.Binary
import Data.ByteString.Lazy as BSL
import Data.ByteString.Lazy as BS
import Data.Complex
import Data.Conduit
import Control.DeepSeq (($!!))
import Data.Conduit.List as CL
import Data.List as L
import Data.Vector.Storable as VS
import Data.Vector.Unboxed as VU
import DFT.Plan
import Filter.Pinwheel
import FokkerPlanck.Interpolation
import FokkerPlanck.Pinwheel (cutoff)
import GHC.Float
import System.IO
import System.Random
import Types
import Utils.Array
import Utils.Parallel (ParallelParams (..),
parConduitIO)
makePlanBinary ::
DFTPlan -> Bool -> FilePath -> Int -> Int -> Int -> Int -> IO DFTPlan
makePlanBinary oldPlan wisdomFlag wisdomFilePath numThetaFreqs numScaleFreqs rows cols = do
let n = numThetaFreqs * numScaleFreqs * rows * cols
xs <- M.replicateM n randomIO :: IO [Double]
ys <- M.replicateM n randomIO :: IO [Double]
let vecTemp = VS.fromList . L.zipWith (:+) xs $ ys
when wisdomFlag (importFFTWWisdom wisdomFilePath)
lock <- getFFTWLock
plan <-
fst <$>
(dft1dGPlan
lock
oldPlan
[cols, rows, numThetaFreqs, numScaleFreqs]
[0, 1]
vecTemp >>= \(plan, vec) ->
idft1dGPlan
lock
plan
[cols, rows, numThetaFreqs, numScaleFreqs]
[0, 1]
vec >>= \(plan, vec) ->
dft1dGPlan
lock
plan
[numThetaFreqs, numScaleFreqs, cols, rows]
[0, 1]
vec >>= \(plan, vec) ->
idft1dGPlan
lock
plan
[numThetaFreqs, numScaleFreqs, cols, rows]
[0, 1]
vec)
exportFFTWWisdom wisdomFilePath
return plan
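-- The initial eigenvector places a uniform weight of 1 / |xs| at each input
-- point (x, y), stored only in the central theta/scale frequency channel;
-- every other frequency channel starts out as zero.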
computeInitialEigenVectorBinary ::
Int
-> Int
-> [Double]
-> [Double]
-> [R2S1RPPoint]
-> R2Z2Array
computeInitialEigenVectorBinary xLen yLen thetaFreqs scaleFreqs xs =
let numThetaFreqs = L.length thetaFreqs
numScaleFreqs = L.length scaleFreqs
xShift = div xLen 2
(xMin, xMax) =
if odd xLen
then (-xShift, xShift)
else (-xShift, xShift - 1)
yShift = div yLen 2
(yMin, yMax) =
if odd yLen
then (-yShift, yShift)
else (-yShift, yShift - 1)
vec =
toUnboxedVector .
AU.accum (+) 0 ((xMin, yMin), (xMax, yMax)) .
L.map
(\(R2S1RPPoint (x, y, _, _)) ->
((x, y), 1 / (fromIntegral . L.length $ xs))) $
xs
in computeS .
R.traverse
(fromUnboxed (Z :. xLen :. yLen) vec)
(const (Z :. numThetaFreqs :. numScaleFreqs :. xLen :. yLen)) $ \f idx@(Z :. tf :. sf :. i :. j) ->
if tf == div numThetaFreqs 2 && sf == div numScaleFreqs 2
then f (Z :. i :. j)
else 0
{-# INLINE convertDFTPinwheel #-}
convertDFTPinwheel ::
DFTPlan
-> R.Array D DIM5 Double
-> R.Array U DIM1 Double
-> R.Array U DIM1 Double
-> Double
-> Double
-> (Int, Int)
-> (Int, Int)
-> IO BS.ByteString
convertDFTPinwheel plan radialArr thetaFreqs scaleFreqs hollowRadius rMax (rows, cols) (t, s) =
let (Z :. numThetaFreq :. numScaleFreq :. numTheta0Freq :. numScale0Freq :. _) =
extent radialArr
pinwheelArr =
traverse2
thetaFreqs
scaleFreqs
(\_ _ -> (Z :. numTheta0Freq :. numScale0Freq :. cols :. rows)) $ \ft0 fs0 (Z :. t0 :. s0 :. i :. j) ->
pinwheelFunc
(PinwheelHollow0 hollowRadius)
(thetaFreqs R.! (Z :. t) - ft0 (Z :. t0))
(scaleFreqs R.! (Z :. s) + fs0 (Z :. s0))
rMax
0
(i - center cols)
(j - center rows)
interpolatedPinwheelArr =
radialCubicInterpolation
(R.slice radialArr (Z :. t :. s :. All :. All :. All))
1
pinwheelArr
in do xx <-
fmap
(encode .
L.map (\(x :+ y) -> double2Float x :+ double2Float y) . VS.toList) .
dftExecute
plan
(DFTPlanID DFT1DG [cols, rows, numTheta0Freq, numScale0Freq] [0, 1]) .
VS.convert . toUnboxed . computeS . rotate4D2 . makeFilter2D $
interpolatedPinwheelArr
return $!! xx
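-- The sink below and 'sourceBinary' share a simple length-prefixed framing:
-- each DFT'd pinwheel chunk is written as its Data.Binary-encoded length
-- (8 bytes) followed by the raw payload, so chunks can later be streamed back
-- one at a time instead of loading the whole filter into memory.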
{-# INLINE writeDFTPinwheelSink #-}
writeDFTPinwheelSink ::
FilePath -> ConduitT BS.ByteString Void (ResourceT IO) ()
writeDFTPinwheelSink filePath = do
h <- liftIO $ openBinaryFile filePath WriteMode
go h
where
go h = do
z <- await
case z of
Nothing -> liftIO $ hClose h
Just bs -> do
liftIO . BSL.hPut h . encode . BS.length $ bs
liftIO . BS.hPut h $ bs
go h
writeDFTPinwheel ::
ParallelParams
-> DFTPlan
-> R.Array D DIM5 Double
-> Double
-> Double
-> [Double]
-> [Double]
-> Double
-> (Int, Int)
-> FilePath
-> IO ()
writeDFTPinwheel parallelParams plan radialArr hollowRadius cutoffRadius thetaFreqs scaleFreqs rMax (rows, cols) filePath = do
let (Z :. numThetaFreq :. numScaleFreq :. numTheta0Freq :. numScale0Freq :. _) =
extent radialArr
thetaFreqsArr = fromListUnboxed (Z :. L.length thetaFreqs) thetaFreqs
scaleFreqsArr = fromListUnboxed (Z :. L.length scaleFreqs) scaleFreqs
runConduitRes $
CL.sourceList
[(t, s) | t <- [0 .. numThetaFreq - 1], s <- [0 .. numScaleFreq - 1]] .|
parConduitIO
parallelParams
(convertDFTPinwheel
plan
(cutoff (round cutoffRadius) radialArr)
thetaFreqsArr
scaleFreqsArr
hollowRadius
rMax
(rows, cols)) .|
writeDFTPinwheelSink filePath
{-# INLINE sourceBinary #-}
sourceBinary :: FilePath -> ConduitT () BS.ByteString (ResourceT IO) ()
sourceBinary filePath = do
h <- liftIO $ openBinaryFile filePath ReadMode
go h
where
go handle = do
lenBS <- liftIO (BSL.hGet handle 8)
if BSL.null lenBS
then liftIO $ hClose handle
else do
let len = decode lenBS :: Int
bs <- liftIO . BS.hGet handle $ len
yield bs
go handle
{-# INLINE convolution #-}
convolution ::
Bool
-> DFTPlan
-> DIM4
-> R.Array U DIM1 Double
-> VS.Vector (Complex Double)
-> BS.ByteString
-> IO (VU.Vector (Complex Double))
convolution True plan dim@(Z :. cols :. rows :. numThetaFreq :. numScaleFreq) thetaFreqs input bs = do
arr <-
fmap (fromUnboxed dim . VS.convert) .
dftExecute
plan
(DFTPlanID IDFT1DG [cols, rows, numThetaFreq, numScaleFreq] [0, 1]) .
VS.zipWith (*) input $
VS.fromList . L.map (\(x :+ y) -> float2Double x :+ float2Double y) . decode $
bs
return $!! toUnboxed . sumS . sumS . R.traverse2 arr thetaFreqs const $
(\f ft idx@(Z :. _ :. _ :. i :. _) -> f idx * exp (0 :+ ft (Z :. i) * pi))
convolution False plan dim@(Z :. cols :. rows :. numThetaFreq :. numScaleFreq) thetaFreqs input bs = do
x <-
fmap (toUnboxed . sumS . sumS . fromUnboxed dim . VS.convert) .
dftExecute
plan
(DFTPlanID IDFT1DG [cols, rows, numThetaFreq, numScaleFreq] [0, 1]) .
VS.zipWith (*) input .
VS.fromList . L.map (\(x :+ y) -> float2Double x :+ float2Double y) . decode $
bs
return $!! x
convolutionBinary ::
Bool
-> ParallelParams
-> DFTPlan
-> FilePath
-> [Double]
-> R.Array U DIM4 (Complex Double)
-> IO (R.Array U DIM4 (Complex Double))
convolutionBinary sinkFlag parallelParams plan filterFilePath thetaFreqs inputArr = do
let thetaFreqsArr = fromListUnboxed (Z :. (L.length thetaFreqs)) thetaFreqs
(Z :. numThetaFreq :. numScaleFreq :. cols :. rows) = extent inputArr
inputArrF <-
dftExecute
plan
(DFTPlanID DFT1DG [cols, rows, numThetaFreq, numScaleFreq] [0, 1]) .
VU.convert . toUnboxed . computeS . rotate4D2 $
inputArr
xs <-
runConduitRes $
sourceBinary filterFilePath .|
parConduitIO
parallelParams
(convolution
sinkFlag
plan
(Z :. cols :. rows :. numThetaFreq :. numScaleFreq)
thetaFreqsArr
inputArrF) .|
CL.consume
let arr = fromUnboxed (extent inputArr) . VU.concat $ xs
return $
if sinkFlag
then computeS . R.traverse2 arr thetaFreqsArr const $ \f ft idx@(Z :. i :. _ :. _ :. _) ->
f idx * exp (0 :+ ft (Z :. i) * pi)
else arr
|
open import MLib.Prelude.FromStdlib
open import Relation.Binary using (Decidable; IsStrictTotalOrder)
module MLib.Prelude.DFS
{v p e} {V : Set v} (_β_ : V β V β Set e)
{_<_ : V β V β Set p} (isStrictTotalOrder : IsStrictTotalOrder _β‘_ _<_)
where
open import MLib.Prelude.Path
open Bool using (T)
open import Function.Equivalence using (Equivalence)
import Data.AVL isStrictTotalOrder as Tree
open Tree using (Tree)
-- A (directed) graph is a map from vertices to the list of edges out of that
-- vertex.
Graph = Tree (Ξ» x β List (β Ξ» y β x β y))
private
module Seen where
-- Represents a set of vertices that have been seen. At most 'n' vertices
-- have /not/ been seen.
record SeenSet (n : β) : Set (v βΛ‘ e βΛ‘ p) where
constructor mkSeen
field
getSeen : Tree (Ξ» _ β β€)
isSeen : β {n} β V β SeenSet n β Bool
isSeen x (mkSeen t) = x Tree.β? t
-- Mark a vertex as seen, thus reducing the number of /unseen/ vertices.
mark : β {n} β V β SeenSet (Nat.suc n) β Maybe (SeenSet n)
mark x u@(mkSeen t) = if Bool.not (isSeen x u) then just (mkSeen (Tree.insert x tt t)) else nothing
-- Make a seen set for a particular graph. The number of unseen vertices in
-- the result is bounded above by the number of vertices in the graph.
forGraph : Graph β β SeenSet
forGraph gr = (countVertices gr , mkSeen Tree.empty)
where
-- this overcounts but that doesn't matter because the index is only there
-- to ensure termination of the DFS
countVertices : Graph β β
countVertices = List.foldr (Ξ» { (_ , es) n β List.foldr (Ξ» _ β Nat.suc) n es }) 0 β Tree.toList
inj : β {n} β SeenSet n β SeenSet (Nat.suc n)
inj (mkSeen t) = mkSeen t
open Seen using (SeenSet)
private
infixl 1 _>>=β_
MonadDfs : β {a} β β β Set a β Set (v βΛ‘ (p βΛ‘ (e βΛ‘ a)))
MonadDfs n A = SeenSet n β A Γ SeenSet n
runMonadDfs : β {n a} {A : Set a} β MonadDfs n A β SeenSet n β A
runMonadDfs f = projβ β f
_>>=β_ : β {n a b} {A : Set a} {B : Set b} β MonadDfs n A β (A β MonadDfs n B) β MonadDfs n B
(f >>=β g) s =
let x , sβ² = f s
in g x sβ²
returnβ : β {n a} {A : Set a} β A β MonadDfs n A
returnβ x s = x , s
withMarked : β {n a} {A : Set a} V β MonadDfs n A β MonadDfs (Nat.suc n) (Maybe A)
withMarked v f s =
case Seen.mark v s of Ξ»
{ (just sβ²) β
let x , sβ²β² = f sβ²
in just x , Seen.inj sβ²β²
; nothing β nothing , s
}
module _ (graph : Graph) where
private
PathsFrom = List β β β Path _β_
-- Calculates paths from the given source to every reachable target not in
-- the seen set. Returns the new seen set together with the list of paths
-- found, or [nothing] if this source had already been seen (or the fuel ran
-- out).
pathsFromSource : β {n} (source : V) β MonadDfs n (Maybe (PathsFrom source))
-- Calculates paths from the given source to every reachable target not in
-- the seen set, whose first edge is among the list given.
pathsViaEdges : β {n} {source : V} β List (β (Ξ» dest β source β dest)) β MonadDfs n (PathsFrom source)
-- Calculates paths via the edge given to every reachable target not in the
-- seen set.
pathsViaEdge : β {n} {source dest : V} β source β dest β MonadDfs n (PathsFrom source)
-- The base case of induction on the size of the seen set. This is only here
-- to satisfy the termination checker.
pathsFromSource {Nat.zero} source = returnβ nothing
pathsFromSource {Nat.suc _} source = withMarked source (
case Tree.lookup source graph of Ξ»
-- We have a list of edges from this source to some other vertex. For
-- each of these, a recursive call will only return paths from that
-- vertex if it is yet unseen. Note the recursive call is on a seen set
-- with an index one lower, satisfying Agda's termination checker.
{ (just es) β pathsViaEdges es >>=β returnβ β just
-- there are no edges from this source so nothing is reachable from it
; nothing β returnβ (just [])
}) >>=β maybe returnβ (returnβ nothing)
pathsViaEdges [] = returnβ []
pathsViaEdges ((d , e) β· es) =
pathsViaEdge e >>=β Ξ» pathsVia-d β
pathsViaEdges es >>=β Ξ» restPaths β
returnβ (pathsVia-d List.++ restPaths)
pathsViaEdge {dest = d} e =
pathsFromSource d >>=β Ξ»
{ (just pathsFrom-d) β
let pathsVia-d = List.map (Ξ» {(dβ² , p) β dβ² , connect (edge e) p}) pathsFrom-d
in returnβ ((_ , edge e) β· pathsVia-d)
; nothing β returnβ []
}
-- Given a source vertex S, finds all vertices T such that there is a path
-- from S to T, and returns the path. No target vertex is returned more than
-- once.
allTargetsFrom : (source : V) β List (β (Path _β_ source))
allTargetsFrom source =
let _ , seen = Seen.forGraph graph
in maybe id [] (runMonadDfs (pathsFromSource source) seen)
module _ (graph : Graph) (matches : V β Bool) where
private
findMatchingFrom : β {n} (source : V) β MonadDfs n (Maybe (β Ξ» dest β Path _β_ source dest Γ T (matches dest)))
findMatchingViaEdges : β {n} {source : V} β List (β (Ξ» inter β source β inter)) β MonadDfs n (Maybe (β Ξ» dest β Path _β_ source dest Γ T (matches dest)))
findMatchingViaEdge : β {n} {source inter : V} β source β inter β MonadDfs n (Maybe (β Ξ» dest β Path _β_ source dest Γ T (matches dest)))
-- The base case of induction on the size of the seen set. This is only here
-- to satisfy the termination checker.
findMatchingFrom {Nat.zero} source = returnβ nothing
findMatchingFrom {Nat.suc _} source = withMarked source (
case Tree.lookup source graph of Ξ»
-- We have a list of edges from this source to some other vertex. For
-- each of these, a recursive call will only return paths from that
-- vertex if it is yet unseen. Note the recursive call is on a seen set
-- with an index one lower, satisfying Agda's termination checker.
{ (just es) β findMatchingViaEdges es
-- there are no edges from this source so nothing is reachable from it
; nothing β returnβ nothing
}) >>=β maybe returnβ (returnβ nothing)
findMatchingViaEdges [] = returnβ nothing
findMatchingViaEdges ((d , e) β· es) =
findMatchingViaEdge e >>=β Ξ»
{ (just r) β returnβ (just r)
; nothing β findMatchingViaEdges es
}
findMatchingViaEdge {inter = d} e with matches d | β‘.inspect matches d
findMatchingViaEdge {inter = _} e | true | β‘.[ eq ] = returnβ (just (_ , edge e , Equivalence.from Bool.T-β‘ β¨$β© eq))
findMatchingViaEdge {inter = d} e | false | _ =
findMatchingFrom d >>=β Ξ»
{ (just (_ , p , q)) β returnβ (just (_ , connect (edge e) p , q))
; nothing β returnβ nothing
}
-- Given a source vertex S, searches for a vertex T that satisfies [matches]
-- and is reachable from S, returning the path to it together with the proof
-- that it matches, or [nothing] if no such vertex exists.
findMatching : (source : V) β Maybe (β Ξ» dest β Path _β_ source dest Γ T (matches dest))
findMatching source =
let _ , seen = Seen.forGraph graph
in runMonadDfs (findMatchingFrom source) seen
module _ (graph : Graph) {dest} (isDest : β v β Dec (v β‘ dest)) where
-- Given a source vertex S, searches for a path from S to the fixed
-- destination vertex [dest], returning it if one exists.
findDest : (source : V) β Maybe (Path _β_ source dest)
findDest source with findMatching graph (β_β β isDest) source
... | just (destβ² , p , q) with isDest destβ²
... | yes β‘.refl = just p
findDest source | just (_ , _ , ()) | no _
findDest source | nothing = nothing
|
#ifndef __GSL_PERMUTE_H__
#define __GSL_PERMUTE_H__
#include <gsl/gsl_permute_complex_long_double.h>
#include <gsl/gsl_permute_complex_double.h>
#include <gsl/gsl_permute_complex_float.h>
#include <gsl/gsl_permute_long_double.h>
#include <gsl/gsl_permute_double.h>
#include <gsl/gsl_permute_float.h>
#include <gsl/gsl_permute_ulong.h>
#include <gsl/gsl_permute_long.h>
#include <gsl/gsl_permute_uint.h>
#include <gsl/gsl_permute_int.h>
#include <gsl/gsl_permute_ushort.h>
#include <gsl/gsl_permute_short.h>
#include <gsl/gsl_permute_uchar.h>
#include <gsl/gsl_permute_char.h>
#endif /* __GSL_PERMUTE_H__ */
|
//
// Copyright (C) 2004-2018 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include <RDGeneral/test.h>
#include <iostream>
#include <RDGeneral/Invariant.h>
#include <RDGeneral/RDLog.h>
#include <RDGeneral/utils.h>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <GraphMol/FileParsers/FileParsers.h>
#include <GraphMol/FileParsers/MolSupplier.h>
#include <GraphMol/ForceFieldHelpers/FFConvenience.h>
#include <GraphMol/ForceFieldHelpers/MMFF/AtomTyper.h>
#include <GraphMol/ForceFieldHelpers/MMFF/Builder.h>
#include <GraphMol/ForceFieldHelpers/MMFF/MMFF.h>
#include <ForceField/ForceField.h>
#include <ForceField/MMFF/Params.h>
#include <GraphMol/DistGeomHelpers/Embedder.h>
#include <GraphMol/Substruct/SubstructMatch.h>
#include <boost/math/special_functions/round.hpp>
using namespace RDKit;
#ifdef RDK_TEST_MULTITHREADED
namespace {
void runblock_mmff(const std::vector<ROMol *> &mols) {
for (auto mol : mols) {
ForceFields::ForceField *field = MMFF::constructForceField(*mol);
TEST_ASSERT(field);
field->initialize();
field->minimize(1);
delete field;
}
}
} // namespace
#include <thread>
#include <future>
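// Reads molecules from bulk.sdf, hands each worker thread its own copy of
// every molecule, and runs a short MMFF minimization (maxIts = 1) on each
// copy concurrently via std::async.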
void testMMFFMultiThread() {
BOOST_LOG(rdErrorLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdErrorLog) << " Test MMFF multithreading" << std::endl;
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/ForceFieldHelpers/MMFF/test_data";
SDMolSupplier suppl(pathName + "/bulk.sdf");
unsigned int count = 24;
std::vector<std::vector<ROMol*>> mols;
for (unsigned int i = 0; i < count; ++i) {
mols.push_back(std::vector<ROMol *>());
}
while (!suppl.atEnd() && mols[0].size() < 100) {
ROMol *mol = nullptr;
try {
mol = suppl.next();
for(unsigned int i=0;i<count;++i) {
if (i == 0) {
mols[i].push_back(mol);
} else {
mols[i].push_back(new ROMol(*mol));
}
}
} catch (...) {
continue;
}
}
std::vector<std::future<void>> tg;
std::cerr << "processing" << std::endl;
for (unsigned int i = 0; i < count; ++i) {
std::cerr << " launch :" << i << std::endl;
std::cerr.flush();
tg.emplace_back(std::async(std::launch::async, runblock_mmff, mols[i]));
}
for (auto &fut : tg) {
fut.get();
}
std::cerr << "done" << std::endl;
for(unsigned int i=0; i<count; ++i)
BOOST_FOREACH (ROMol *mol, mols[i]) { delete mol; }
BOOST_LOG(rdErrorLog) << " done" << std::endl;
}
#endif
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
//
//-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
int main() {
RDLog::InitLogs();
#ifdef RDK_TEST_MULTITHREADED
testMMFFMultiThread();
#endif
}
|
{-# OPTIONS --cubical #-}
open import Agda.Primitive.Cubical
open import Agda.Builtin.Cubical.Path
postulate
A : Set
B : Set
b : B
f : (\ {a : A} (x : B) → b) ≡ (\ _ → b)
f i x = b
|
------------------------------------------------------------------------
-- A parser for PBM images; illustrates "essential" use of bind
------------------------------------------------------------------------
-- Note that I am using the simple "Plain PBM" format, and I try to
-- adhere to the following statement from the pbm man page:
--
-- "Programs that read this format should be as lenient as possible,
-- accepting anything that looks remotely like a bitmap."
-- I got the idea to write this particular parser from "The Power of
-- Pi" by Oury and Swierstra.
module StructurallyRecursiveDescentParsing.PBM where
import Data.Vec as Vec
import Data.List as List
open import Codata.Musical.Notation
open import Data.Bool
open import Data.Char using (_==_)
import Data.Char.Properties as Char
import Data.String as String
open import Data.Unit
open import Function
open import Relation.Binary.PropositionalEquality
open import StructurallyRecursiveDescentParsing.Grammar
open import StructurallyRecursiveDescentParsing.Lib
open import StructurallyRecursiveDescentParsing.DepthFirst
open Token Char.decSetoid
open import TotalParserCombinators.Examples.PBM using (module PBM)
open PBM
mutual
comment : Parser EmptyNT _ _ _
comment = tt <$ tok '#'
<β sat' (not β _==_ '\n') β
<β tok '\n'
colour = white <$ tok '0'
β£ black <$ tok '1'
pbm =
wβ£c β β>
theString (String.toVec "P1") β>
wβ£c β β>
number !>>= Ξ» cols β β― -- _>>=_ works just as well.
(wβ£c + β>
number >>= Ξ» rows β -- _!>>=_ works just as well.
wβ£c β>
(toPBM <$> exactly rows (exactly cols (wβ£c β β> colour))) <β
any β)
where wβ£c = whitespace β£ comment
module Example where
open Vec
image = toPBM ((white β· black β· white β· []) β·
(black β· white β· black β· []) β·
(white β· black β· white β· []) β· [])
ex : parseComplete (β¦ pbm β§ emptyGrammar)
(String.toList (show image)) β‘
List.[_] image
ex = refl
|
(* AGCT in Coq *)
Require Import List.
Inductive dna : Type :=
| A : dna
| G : dna
| C : dna
| T : dna.
Notation "[ ]" := nil.
Notation "[ x , .. , y ]" := (cons x .. (cons y nil) ..).
Fixpoint selection_aux(n:nat)(xs:list (list dna)):list (list dna) :=
match n with
| O => xs
| S n' => let ys := selection_aux n' xs in
fold_left
(fun stat x => stat ++ (map (fun s => x :: s) ys))
[A, G, C, T]
[]
end.
Definition selection(n:nat) := selection_aux n [[]].
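(* [selection n] enumerates all 4^n DNA strings of length [n];
   for instance, [selection 1] computes to [[A], [G], [C], [T]]. *)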
Definition beq_dna (a b : dna) : bool :=
match a, b with
| A, A => true
| G, G => true
| C, C => true
| T, T => true
| _, _ => false
end.
Fixpoint match_left (xs ys : list dna) : bool :=
match xs, ys with
| [], _ => true
| x :: xs', [] => false
| x :: xs', y :: ys' =>
if beq_dna x y then match_left xs' ys'
else false
end.
Fixpoint contains_dna (xs ys : list dna) : bool :=
match ys with
| [] => false
| y :: ys' =>
if match_left xs ys then true
else contains_dna xs ys'
end.
Eval compute in filter (fun x => contains_dna [A, A, G] x) (selection 4).
(*
= [[A, A, A, G], [A, A, G, A], [A, A, G, G], [A, A, G, C], [A, A, G, T],
[G, A, A, G], [C, A, A, G], [T, A, A, G]]
: list (list dna)
*) |
variable {α : Type*}
def is_prefix (l₁ : list α) (l₂ : list α) : Prop :=
∃ t, l₁ ++ t = l₂
infix ` <+: `:50 := is_prefix
attribute [simp]
theorem list.is_prefix_refl (l : list Ξ±) : l <+: l :=
⟨[], by simp⟩
example : [1, 2, 3] <+: [1, 2, 3] := by simp
|
/-
Copyright (c) 2022 Moritz Doll. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Moritz Doll
-/
import analysis.locally_convex.balanced_core_hull
import analysis.locally_convex.with_seminorms
import analysis.convex.gauge
/-!
# Absolutely convex sets
A set is called absolutely convex or disked if it is convex and balanced.
The importance of absolutely convex sets comes from the fact that every locally convex
topological vector space has a basis consisting of absolutely convex sets.
## Main definitions
* `gauge_seminorm_family`: the seminorm family induced by all open absolutely convex neighborhoods
of zero.
## Main statements
* `with_gauge_seminorm_family`: the topology of a locally convex space is induced by the family
`gauge_seminorm_family`.
## Todo
* Define the disked hull
## Tags
disks, convex, balanced
-/
open normed_field set
open_locale big_operators nnreal pointwise topology
variables {π E F G ΞΉ : Type*}
section nontrivially_normed_field
variables (π E) {s : set E}
variables [nontrivially_normed_field π] [add_comm_group E] [module π E]
variables [module β E] [smul_comm_class β π E]
variables [topological_space E] [locally_convex_space β E] [has_continuous_smul π E]
lemma nhds_basis_abs_convex : (π (0 : E)).has_basis
(Ξ» (s : set E), s β π (0 : E) β§ balanced π s β§ convex β s) id :=
begin
refine (locally_convex_space.convex_basis_zero β E).to_has_basis (Ξ» s hs, _)
(Ξ» s hs, β¨s, β¨hs.1, hs.2.2β©, rfl.subsetβ©),
refine β¨convex_hull β (balanced_core π s), _, convex_hull_min (balanced_core_subset s) hs.2β©,
refine β¨filter.mem_of_superset (balanced_core_mem_nhds_zero hs.1) (subset_convex_hull β _), _β©,
refine β¨balanced_convex_hull_of_balanced (balanced_core_balanced s), _β©,
exact convex_convex_hull β (balanced_core π s),
end
variables [has_continuous_smul β E] [topological_add_group E]
lemma nhds_basis_abs_convex_open : (π (0 : E)).has_basis
(Ξ» (s : set E), (0 : E) β s β§ is_open s β§ balanced π s β§ convex β s) id :=
begin
refine (nhds_basis_abs_convex π E).to_has_basis _ _,
{ rintros s β¨hs_nhds, hs_balanced, hs_convexβ©,
refine β¨interior s, _, interior_subsetβ©,
exact β¨mem_interior_iff_mem_nhds.mpr hs_nhds, is_open_interior,
hs_balanced.interior (mem_interior_iff_mem_nhds.mpr hs_nhds), hs_convex.interiorβ© },
rintros s β¨hs_zero, hs_open, hs_balanced, hs_convexβ©,
exact β¨s, β¨hs_open.mem_nhds hs_zero, hs_balanced, hs_convexβ©, rfl.subsetβ©,
end
end nontrivially_normed_field
section absolutely_convex_sets
variables [topological_space E] [add_comm_monoid E] [has_zero E] [semi_normed_ring π]
variables [has_smul π E] [has_smul β E]
variables (π E)
/-- The type of absolutely convex open sets. -/
def abs_convex_open_sets :=
{ s : set E // (0 : E) β s β§ is_open s β§ balanced π s β§ convex β s }
instance abs_convex_open_sets.has_coe : has_coe (abs_convex_open_sets π E) (set E) := β¨subtype.valβ©
namespace abs_convex_open_sets
variables {π E}
lemma coe_zero_mem (s : abs_convex_open_sets π E) : (0 : E) β (s : set E) := s.2.1
lemma coe_is_open (s : abs_convex_open_sets π E) : is_open (s : set E) := s.2.2.1
lemma coe_nhds (s : abs_convex_open_sets π E) : (s : set E) β π (0 : E) :=
s.coe_is_open.mem_nhds s.coe_zero_mem
lemma coe_balanced (s : abs_convex_open_sets π E) : balanced π (s : set E) := s.2.2.2.1
lemma coe_convex (s : abs_convex_open_sets π E) : convex β (s : set E) := s.2.2.2.2
end abs_convex_open_sets
instance : nonempty (abs_convex_open_sets π E) :=
begin
rw βexists_true_iff_nonempty,
dunfold abs_convex_open_sets,
rw subtype.exists,
exact β¨set.univ, β¨mem_univ 0, is_open_univ, balanced_univ, convex_univβ©, trivialβ©,
end
end absolutely_convex_sets
variables [is_R_or_C π]
variables [add_comm_group E] [topological_space E]
variables [module π E] [module β E] [is_scalar_tower β π E]
variables [has_continuous_smul β E]
variables (π E)
/-- The family of seminorms defined by the gauges of absolute convex open sets. -/
noncomputable
def gauge_seminorm_family : seminorm_family π E (abs_convex_open_sets π E) :=
Ξ» s, gauge_seminorm s.coe_balanced s.coe_convex (absorbent_nhds_zero s.coe_nhds)
variables {π E}
lemma gauge_seminorm_family_ball (s : abs_convex_open_sets π E) :
(gauge_seminorm_family π E s).ball 0 1 = (s : set E) :=
begin
dunfold gauge_seminorm_family,
rw seminorm.ball_zero_eq,
simp_rw gauge_seminorm_to_fun,
exact gauge_lt_one_eq_self_of_open s.coe_convex s.coe_zero_mem s.coe_is_open,
end
variables [topological_add_group E] [has_continuous_smul π E]
variables [smul_comm_class β π E] [locally_convex_space β E]
/-- The topology of a locally convex space is induced by the gauge seminorm family. -/
lemma with_gauge_seminorm_family : with_seminorms (gauge_seminorm_family π E) :=
begin
refine seminorm_family.with_seminorms_of_has_basis _ _,
refine (nhds_basis_abs_convex_open π E).to_has_basis (Ξ» s hs, _) (Ξ» s hs, _),
{ refine β¨s, β¨_, rfl.subsetβ©β©,
convert (gauge_seminorm_family _ _).basis_sets_singleton_mem β¨s, hsβ© one_pos,
rw [gauge_seminorm_family_ball, subtype.coe_mk] },
refine β¨s, β¨_, rfl.subsetβ©β©,
rw seminorm_family.basis_sets_iff at hs,
rcases hs with β¨t, r, hr, rflβ©,
rw [seminorm.ball_finset_sup_eq_Inter _ _ _ hr],
-- We have to show that the intersection contains zero, is open, balanced, and convex
refine β¨mem_Interβ.mpr (Ξ» _ _, by simp [seminorm.mem_ball_zero, hr]),
is_open_bInter (to_finite _) (Ξ» S _, _),
balanced_Interβ (Ξ» _ _, seminorm.balanced_ball_zero _ _),
convex_Interβ (Ξ» _ _, seminorm.convex_ball _ _ _)β©,
-- The only nontrivial part is to show that the ball is open
have hr' : r = β(r : π)β * 1 := by simp [abs_of_pos hr],
have hr'' : (r : π) β 0 := by simp [hr.ne'],
rw [hr', β seminorm.smul_ball_zero hr'', gauge_seminorm_family_ball],
exact S.coe_is_open.smulβ hr''
end
|
This Le Creuset stock pot is ideal for boiling water for pasta, blanching vegetables, preparing stock, or simmering soups and stews. The high-profile design limits evaporation while forcing liquids to bubble through all layers of ingredients, infusing the entire dish with flavor. From Le Creuset.
Measures approximately 12-1/2" x 10"
Le Creuset 8-qt Stock Pot is rated 4.7 out of 5 by 11.
Rated 5 out of 5 by Anonymous from Exactly what I expected from Me Creuset Beautiful, heat conduction is wonderful. Love this for gumbo and big pots of soups and stews.
Rated 4 out of 5 by scorpiondiva from Nice Pot-Beautiful Color While I am ultimately happy with this pot, you should be aware that things do stick and will burn. I have a gas stove & it does burn hot, so I cook on "LOW" 90% of the time. Even so, my butter burned within seconds of my adding it. Good thing I don't step away! Either way I do love the size. I just need to find something to get the burn stain out without ruining the inside.
Rated 5 out of 5 by tfrey from Love ! This is the perfect size pot I desperately needed !! Ty QVC very sturdy and well made !!
Rated 5 out of 5 by 57Fanna57 from FABULOUSLY PERFECT Bought the cherry stock pot. Received yesterday. True, it is not cast iron, but still a lovely Le Creuset piece. I make a lot of soups, stews, and chili, and this piece is perfect for that! $100 for a Le Creuset, although not made in France, was still a good buy. Last piece I need is a skillet, which I'll order when I pay down my Q Card! Lol. Ladies, I know you understand this!
Rated 5 out of 5 by SandyBE from The Best pot for High Temps and Boiling!!! First off, I love my Le Creuset Cast iron pots, but be aware this is a STEEL pot! It's made for high temperatures, such as boiling water for pasta. And talk about a beautiful rolling boil this pot produces. My husband video taped the water to send to his sister, the first time I used it! The water just rolls over and over, no bubbling splatters! Anyhow, I have the 10 quart also. All of my Le Creuset is in the beautiful cherry. This pot does not disappoint! Worth every penny!!! |
[STATEMENT]
lemma SXcpt_subcls_Throwable_lemma:
"\<lbrakk>class G (SXcpt xn) = Some xc;
super xc = (if xn = Throwable then Object else SXcpt Throwable)\<rbrakk>
\<Longrightarrow> G\<turnstile>SXcpt xn\<preceq>\<^sub>C SXcpt Throwable"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>class G (SXcpt xn) = Some xc; super xc = (if xn = Throwable then Object else SXcpt Throwable)\<rbrakk> \<Longrightarrow> G\<turnstile>SXcpt xn\<preceq>\<^sub>C SXcpt Throwable
[PROOF STEP]
apply (case_tac "xn = Throwable")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>class G (SXcpt xn) = Some xc; super xc = (if xn = Throwable then Object else SXcpt Throwable); xn = Throwable\<rbrakk> \<Longrightarrow> G\<turnstile>SXcpt xn\<preceq>\<^sub>C SXcpt Throwable
2. \<lbrakk>class G (SXcpt xn) = Some xc; super xc = (if xn = Throwable then Object else SXcpt Throwable); xn \<noteq> Throwable\<rbrakk> \<Longrightarrow> G\<turnstile>SXcpt xn\<preceq>\<^sub>C SXcpt Throwable
[PROOF STEP]
apply simp_all
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>class G (SXcpt xn) = Some xc; super xc = SXcpt Throwable; xn \<noteq> Throwable\<rbrakk> \<Longrightarrow> G\<turnstile>SXcpt xn\<preceq>\<^sub>C SXcpt Throwable
[PROOF STEP]
apply (drule subcls_direct)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>super xc = SXcpt Throwable; xn \<noteq> Throwable\<rbrakk> \<Longrightarrow> SXcpt xn \<noteq> Object
2. \<lbrakk>super xc = SXcpt Throwable; xn \<noteq> Throwable; G\<turnstile>SXcpt xn\<preceq>\<^sub>C super xc\<rbrakk> \<Longrightarrow> G\<turnstile>SXcpt xn\<preceq>\<^sub>C SXcpt Throwable
[PROOF STEP]
apply (auto dest: sym)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
# Set up local module environment to test against.
# Note that we override the normal path here.
options(import.path = 'modules',
import.attach = FALSE)
#' Opposite of \code{is_identical_to}
expect_not_identical = function (object, expected, info = NULL, label = NULL, expected.label = NULL) {
lab_act = testthat::make_label(object, label)
lab_exp = testthat::make_label(expected, expected.label)
ident = identical(object, expected)
msg = if (ident) 'Objects identical' else ''
testthat::expect_false(ident, info = info,
label = sprintf('%s identical to %s.\n%s',
lab_act, lab_exp, msg))
}
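# Illustrative usage (an added sketch, not part of the test setup): integers
# and doubles are not identical in R, so this assertion would pass:
#   expect_not_identical(1L, 1)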
|
module Data.QuadTree.Implementation.Foldable where
open import Haskell.Prelude renaming (zero to Z; suc to S)
open import Data.Logic
open import Data.QuadTree.Implementation.Definition
open import Data.QuadTree.Implementation.ValidTypes
open import Data.QuadTree.Implementation.QuadrantLenses
open import Data.QuadTree.Implementation.SafeFunctions
open import Data.QuadTree.Implementation.PropDepthRelation
{-# FOREIGN AGDA2HS
{-# LANGUAGE Safe #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE Rank2Types #-}
import Data.Nat
import Data.Lens.Lens
import Data.Logic
import Data.QuadTree.Implementation.Definition
import Data.QuadTree.Implementation.ValidTypes
import Data.QuadTree.Implementation.QuadrantLenses
#-}
record FoldableEq (t : (y : Set) -> {{ eqT : Eq y }} -> Set) : Setβ where
field
foldMapβ : {a b : Set} -> {{ eqA : Eq a }} {{ eqB : Eq b }}
-> {{ monB : Monoid b }} β (a β b) β t a β b
lengthβ : {{eqA : Eq a}} -> t a β Nat
lengthβ = foldMapβ β¦ monB = MonoidSum β¦ (const 1)
open FoldableEq public
{-# COMPILE AGDA2HS FoldableEq class #-}
data Region : Set where
-- point 1: x, point 1: y, point 2: x, point 2: y
RegionC : (p1 : Nat Γ Nat) (p2 : Nat Γ Nat) -> Region
{-# COMPILE AGDA2HS Region #-}
data Tile (t : Set) : Set where
TileC : t -> Region -> Tile t
{-# COMPILE AGDA2HS Tile #-}
instance
tileFunctor : Functor Tile
tileFunctor .fmap f (TileC v r) = TileC (f v) r
tilesQd : {t : Set} {{eqT : Eq t}} (dep : Nat) -> VQuadrant t {dep}
-> (reg : Region)
-> List (Tile t)
tilesQd dep (CVQuadrant (Leaf v)) reg = TileC v reg β· []
tilesQd {t} (dep @ (S deps)) (CVQuadrant (Node a b c d) {p}) reg@(RegionC (x1 , y1) (x2 , y2)) =
let
mid = pow 2 deps
sA : List (Tile t)
sA = tilesQd deps (CVQuadrant a {aSub a b c d p}) (RegionC
(x1 , y1)
(min x2 (mid + x1) , min y2 (mid + y1)))
sB : List (Tile t)
sB = tilesQd deps (CVQuadrant b {bSub a b c d p}) (RegionC
(min x2 (mid + x1) , y1)
(x2 , min y2 (mid + y1)) )
sC : List (Tile t)
sC = tilesQd deps (CVQuadrant c {cSub a b c d p}) (RegionC
(x1 , min y2 (mid + y1))
(min x2 (mid + x1) , y2) )
sD : List (Tile t)
sD = tilesQd deps (CVQuadrant d {dSub a b c d p}) (RegionC
(min x2 (mid + x1) , min y2 (mid + y1))
(x2 , y2) )
in sA ++ sB ++ sC ++ sD
{-# COMPILE AGDA2HS tilesQd #-}
tilesQt : {t : Set} {{eqT : Eq t}} (dep : Nat) -> VQuadTree t {dep} -> List (Tile t)
tilesQt dep (CVQuadTree (Wrapper wh qd) {p1} {p2}) = tilesQd dep (CVQuadrant qd {p1}) (RegionC (0 , 0) wh)
{-# COMPILE AGDA2HS tilesQt #-}
replicateβ : {t : Set} -> Nat -> t -> List t
replicateβ Z v = []
replicateβ (S n) v = v β· replicateβ n v
{-# COMPILE AGDA2HS replicateβ #-}
expand : {t : Set} -> Tile t -> List t
expand (TileC v (RegionC (lx , ly) (ux , uy))) =
replicateβ (dx * dy) v where
dx = diff ux lx
dy = diff uy ly
{-# COMPILE AGDA2HS expand #-}
quadtreeFoldable : (dep : Nat) -> FoldableEq (Ξ» y -> VQuadTree y {dep})
quadtreeFoldable dep .foldMapβ f t = foldMap f $ concat $ map expand (tilesQt dep t) |
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
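# Strength-reduction rules: each rule rewrites a two-input shuffle primitive
# (pack/unpack/transpose) into an extract from the corresponding two-output
# permute intrinsic (zip/uzp/trn).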
RewriteRules(RulesStrengthReduce, rec(
################## HALF ####################
vunpacklo_half := Rule( vunpacklo_half,
e -> vextract_half(vtrnq_half(e.args[1], e.args[2]), [0])),
vunpackhi_half := Rule( vunpackhi_half,
e -> vextract_half(vtrnq_half(e.args[1], e.args[2]), [1])),
################## NEON ####################
vpacklo_neon := Rule( vpacklo_neon,
e -> vextract_neon_4x32f(vuzpq_32f(e.args[1], e.args[2]), [0])),
vpackhi_neon := Rule( vpackhi_neon,
e -> vextract_neon_4x32f(vuzpq_32f(e.args[1], e.args[2]), [1])),
vunpacklo_neon := Rule( vunpacklo_neon,
e -> vextract_neon_4x32f(vzipq_32f(e.args[1], e.args[2]), [0])),
vunpackhi_neon := Rule( vunpackhi_neon,
e -> vextract_neon_4x32f(vzipq_32f(e.args[1], e.args[2]), [1])),
vtransposelo_neon := Rule( vtransposelo_neon,
e -> vextract_neon_4x32f(vtrnq_32f(e.args[1], e.args[2]), [0])),
vtransposehi_neon := Rule( vtransposehi_neon,
e -> vextract_neon_4x32f(vtrnq_32f(e.args[1], e.args[2]), [1])),
vunpacklolo2_neon := Rule( vunpacklolo2_neon,
e -> vextract_neon_4x32f(vtrnq_32f(vpacklo_neon(e.args[1], e.args[2]), vpackhi_neon(e.args[1], e.args[2])), [0])),
vunpackhihi2_neon := Rule( vunpackhihi2_neon,
e -> vextract_neon_4x32f(vtrnq_32f(vpacklo_neon(e.args[1], e.args[2]), vpackhi_neon(e.args[1], e.args[2])), [1])),
));
|
Require Import SpecCert.x86.Architecture.
Definition lock_smramc_pre
{Label: Type} :=
fun (a:Architecture Label) =>
smramc_is_unlocked (memory_controller a).
Definition lock_smramc_post
{Label: Type} :=
fun (a a':Architecture Label) =>
exists h, let m' := lock_smramc (memory_controller a) h
in a' = update_memory_controller a m'.
|
(*
File: Arithmetic_Summatory_Asymptotics.thy
Author: Manuel Eberl, TU MΓΌnchen
*)
section \<open>Asymptotics of summatory arithmetic functions\<close>
theory Arithmetic_Summatory_Asymptotics
imports
Euler_MacLaurin.Euler_MacLaurin_Landau
Arithmetic_Summatory
Dirichlet_Series_Analysis
Landau_Symbols.Landau_More
begin
subsection \<open>Auxiliary bounds\<close>
lemma sum_inverse_squares_tail_bound:
assumes "d > 0"
shows "summable (\<lambda>n. 1 / (real (Suc n) + d) ^ 2)"
"(\<Sum>n. 1 / (real (Suc n) + d) ^ 2) \<le> 1 / d"
proof -
show *: "summable (\<lambda>n. 1 / (real (Suc n) + d) ^ 2)"
proof (rule summable_comparison_test, intro allI exI impI)
fix n :: nat
from assms show "norm (1 / (real (Suc n) + d) ^ 2) \<le> 1 / real (Suc n) ^ 2"
unfolding norm_divide norm_one norm_power
by (intro divide_left_mono power_mono) simp_all
qed (insert inverse_squares_sums, simp add: sums_iff)
show "(\<Sum>n. 1 / (real (Suc n) + d) ^ 2) \<le> 1 / d"
proof (rule sums_le[OF allI])
fix n have "1 / (real (Suc n) + d) ^ 2 \<le> 1 / ((real n + d) * (real (Suc n) + d))"
unfolding power2_eq_square using assms
by (intro divide_left_mono mult_mono mult_pos_pos add_nonneg_pos) simp_all
also have "\<dots> = 1 / (real n + d) - 1 / (real (Suc n) + d)"
using assms by (simp add: divide_simps)
finally show "1 / (real (Suc n) + d)\<^sup>2 \<le> 1 / (real n + d) - 1 / (real (Suc n) + d)" .
next
show "(\<lambda>n. 1 / (real (Suc n) + d)\<^sup>2) sums (\<Sum>n. 1 / (real (Suc n) + d)\<^sup>2)"
using * by (simp add: sums_iff)
next
have "(\<lambda>n. 1 / (real n + d) - 1 / (real (Suc n) + d)) sums (1 / (real 0 + d) - 0)"
by (intro telescope_sums' real_tendsto_divide_at_top[OF tendsto_const],
subst add.commute, rule filterlim_tendsto_add_at_top[OF tendsto_const
filterlim_real_sequentially])
thus "(\<lambda>n. 1 / (real n + d) - 1 / (real (Suc n) + d)) sums (1 / d)" by simp
qed
qed
lemma moebius_sum_tail_bound:
assumes "d > 0"
shows "abs (\<Sum>n. moebius_mu (Suc n + d) / real (Suc n + d)^2) \<le> 1 / d" (is "abs ?S \<le> _")
proof -
have *: "summable (\<lambda>n. 1 / (real (Suc n + d))\<^sup>2)"
by (insert sum_inverse_squares_tail_bound(1)[of "real d"] assms, simp_all add: add_ac)
have **: "summable (\<lambda>n. abs (moebius_mu (Suc n + d) / real (Suc n + d)^2))"
proof (rule summable_comparison_test, intro exI allI impI)
fix n :: nat
show "norm (\<bar>moebius_mu (Suc n + d) / (real (Suc n + d))^2\<bar>) \<le>
1 / (real (Suc n + d))^2"
unfolding real_norm_def abs_abs abs_divide power_abs abs_of_nat
by (intro divide_right_mono abs_moebius_mu_le) simp_all
qed (insert *)
from ** have "abs ?S \<le> (\<Sum>n. abs (moebius_mu (Suc n + d) / real (Suc n + d)^2))"
by (rule summable_rabs)
also have "\<dots> \<le> (\<Sum>n. 1 / (real (Suc n) + d) ^ 2)"
proof (intro suminf_le allI)
fix n :: nat
show "abs (moebius_mu (Suc n + d) / (real (Suc n + d))^2) \<le> 1 / (real (Suc n) + real d)^2"
unfolding abs_divide abs_of_nat power_abs of_nat_add [symmetric]
by (intro divide_right_mono abs_moebius_mu_le) simp_all
qed (insert * **, simp_all add: add_ac)
also from assms have "\<dots> \<le> 1 / d" by (intro sum_inverse_squares_tail_bound) simp_all
finally show ?thesis .
qed
lemma sum_upto_inverse_bound:
"sum_upto (\<lambda>i. 1 / real i) x \<ge> 0"
"eventually (\<lambda>x. sum_upto (\<lambda>i. 1 / real i) x \<le> ln x + 13 / 22) at_top"
proof -
show "sum_upto (\<lambda>i. 1 / real i) x \<ge> 0"
by (simp add: sum_upto_def sum_nonneg)
from order_tendstoD(2)[OF euler_mascheroni_LIMSEQ euler_mascheroni_less_13_over_22]
obtain N where N: "\<And>n. n \<ge> N \<Longrightarrow> harm n - ln (real n) < 13 / 22"
unfolding eventually_at_top_linorder by blast
show "eventually (\<lambda>x. sum_upto (\<lambda>i. 1 / real i) x \<le> ln x + 13 / 22) at_top"
using eventually_ge_at_top[of "max (real N) 1"]
proof eventually_elim
case (elim x)
have "sum_upto (\<lambda>i. 1 / real i) x = (\<Sum>i\<in>{0<..nat \<lfloor>x\<rfloor>}. 1 / real i)"
by (simp add: sum_upto_altdef)
also have "\<dots> = harm (nat \<lfloor>x\<rfloor>)"
unfolding harm_def by (intro sum.cong refl) (auto simp: field_simps)
also have "\<dots> \<le> ln (real (nat \<lfloor>x\<rfloor>)) + 13 / 22"
using N[of "nat \<lfloor>x\<rfloor>"] elim by (auto simp: le_nat_iff le_floor_iff)
also have "ln (real (nat \<lfloor>x\<rfloor>)) \<le> ln x" using elim by (subst ln_le_cancel_iff) auto
finally show ?case by - simp
qed
qed
lemma sum_upto_inverse_bigo: "sum_upto (\<lambda>i. 1 / real i) \<in> O(\<lambda>x. ln x)"
proof -
have "eventually (\<lambda>x. norm (sum_upto (\<lambda>i. 1 / real i) x) \<le> 1 * norm (ln x + 13/22)) at_top"
using eventually_ge_at_top[of "1::real"] sum_upto_inverse_bound(2)
by eventually_elim (insert sum_upto_inverse_bound(1), simp_all)
hence "sum_upto (\<lambda>i. 1 / real i) \<in> O(\<lambda>x. ln x + 13/22)"
by (rule bigoI)
also have "(\<lambda>x::real. ln x + 13/22) \<in> O(\<lambda>x. ln x)" by simp
finally show ?thesis .
qed
lemma
defines "G \<equiv> (\<lambda>x::real. (\<Sum>n. moebius_mu (n + Suc (nat \<lfloor>x\<rfloor>)) / (n + Suc (nat \<lfloor>x\<rfloor>))^2) :: real)"
shows moebius_sum_tail_bound': "\<And>t. t \<ge> 2 \<Longrightarrow> \<bar>G t\<bar> \<le> 1 / (t - 1)"
and moebius_sum_tail_bigo: "G \<in> O(\<lambda>t. 1 / t)"
proof -
show "\<bar>G t\<bar> \<le> 1 / (t - 1)" if t: "t \<ge> 2" for t
proof -
from t have "\<bar>G t\<bar> \<le> 1 / real (nat \<lfloor>t\<rfloor>)"
unfolding G_def using moebius_sum_tail_bound[of "nat \<lfloor>t\<rfloor>"] by simp
also have "t \<le> 1 + real_of_int \<lfloor>t\<rfloor>" by linarith
hence "1 / real (nat \<lfloor>t\<rfloor>) \<le> 1 / (t - 1)" using t by (simp add: field_simps)
finally show ?thesis .
qed
hence "G \<in> O(\<lambda>t. 1 / (t - 1))"
by (intro bigoI[of _ 1] eventually_mono[OF eventually_ge_at_top[of "2::real"]]) auto
also have "(\<lambda>t::real. 1 / (t - 1)) \<in> \<Theta>(\<lambda>t. 1 / t)" by simp
finally show "G \<in> O(\<lambda>t. 1 / t)" .
qed
subsection \<open>Summatory totient function\<close>
theorem summatory_totient_asymptotics':
"(\<lambda>x. sum_upto (\<lambda>n. real (totient n)) x) =o (\<lambda>x. 3 / pi\<^sup>2 * x\<^sup>2) +o O(\<lambda>x. x * ln x)"
using summatory_totient_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem summatory_totient_asymptotics'':
"sum_upto (\<lambda>n. real (totient n)) \<sim>[at_top] (\<lambda>x. 3 / pi\<^sup>2 * x\<^sup>2)"
proof -
have "(\<lambda>x. sum_upto (\<lambda>n. real (totient n)) x - 3 / pi\<^sup>2 * x\<^sup>2) \<in> O(\<lambda>x. x * ln x)"
by (rule summatory_totient_asymptotics)
also have "(\<lambda>x. x * ln x) \<in> o(\<lambda>x. 3 / pi ^ 2 * x ^ 2)" by simp
finally show ?thesis by (simp add: asymp_equiv_altdef)
qed
subsection \<open>Asymptotic distribution of squarefree numbers\<close>
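text \<open>
  The number of squarefree integers up to $x$ is $\frac{6}{\pi^2}\,x + O(\sqrt{x})$;
  in particular, the squarefree integers have asymptotic density $6/\pi^2 = 1/\zeta(2)$.
\<close>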
lemma le_sqrt_iff: "x \<ge> 0 \<Longrightarrow> x \<le> sqrt y \<longleftrightarrow> x^2 \<le> y"
using real_sqrt_le_iff[of "x^2" y] by (simp del: real_sqrt_le_iff)
theorem squarefree_asymptotics: "(\<lambda>x. card {n. real n \<le> x \<and> squarefree n} - 6 / pi\<^sup>2 * x) \<in> O(sqrt)"
proof -
define f :: "nat \<Rightarrow> real" where "f = (\<lambda>n. if n = 0 then 0 else 1)"
define g :: "nat \<Rightarrow> real" where "g = dirichlet_prod (ind squarefree) moebius_mu"
interpret g: multiplicative_function g unfolding g_def
by (intro multiplicative_dirichlet_prod squarefree.multiplicative_function_axioms
moebius_mu.multiplicative_function_axioms)
interpret g: multiplicative_function' g "\<lambda>p k. if k = 2 then -1 else 0" "\<lambda>_. 0"
proof
interpret g': multiplicative_dirichlet_prod' "ind squarefree" moebius_mu
"\<lambda>p k. if 1 < k then 0 else 1" "\<lambda>p k. if k = 1 then - 1 else 0" "\<lambda>_. 1" "\<lambda>_. - 1"
by (intro multiplicative_dirichlet_prod'.intro squarefree.multiplicative_function'_axioms
moebius_mu.multiplicative_function'_axioms)
fix p k :: nat assume "prime p" "k > 0"
hence "g (p ^ k) = (\<Sum>i\<in>{0<..<k}. (if Suc 0 < i then 0 else 1) *
(if k - i = Suc 0 then - 1 else 0))"
by (auto simp: g'.prime_power g_def)
also have "\<dots> = (\<Sum>i\<in>{0<..<k}. (if k = 2 then -1 else 0))"
by (intro sum.cong refl) auto
also from \<open>k > 0\<close> have "\<dots> = (if k = 2 then -1 else 0)" by simp
finally show "g (p ^ k) = \<dots>" .
qed simp_all
have mult_g_square: "multiplicative_function (\<lambda>n. g (n ^ 2))"
by standard (simp_all add: power_mult_distrib g.mult_coprime)
have g_square: "g (m ^ 2) = moebius_mu m" for m
using mult_g_square moebius_mu.multiplicative_function_axioms
proof (rule multiplicative_function_eqI)
fix p k :: nat assume *: "prime p" "k > 0"
have "g ((p ^ k) ^ 2) = g (p ^ (2 * k))" by (simp add: power_mult [symmetric] mult_ac)
also from * have "\<dots> = (if k = 1 then -1 else 0)" by (simp add: g.prime_power)
also from * have "\<dots> = moebius_mu (p ^ k)" by (simp add: moebius_mu.prime_power)
finally show "g ((p ^ k) ^ 2) = moebius_mu (p ^ k)" .
qed
have g_nonsquare: "g m = 0" if "\<not>is_square m" for m
proof (cases "m = 0")
case False
from that False obtain p where p: "prime p" "odd (multiplicity p m)"
using is_nth_power_conv_multiplicity_nat[of 2 m] by auto
from p have "multiplicity p m \<noteq> 2" by auto
moreover from p have "p \<in> prime_factors m"
by (auto simp: prime_factors_multiplicity intro!: Nat.gr0I)
ultimately have "(\<Prod>p\<in>prime_factors m. if multiplicity p m = 2 then - 1 else 0 :: real) = 0"
(is "?P = _") by auto
also have "?P = g m" using False by (subst g.prod_prime_factors') auto
finally show ?thesis .
qed auto
have abs_g_le: "abs (g m) \<le> 1" for m
by (cases "is_square m")
(auto simp: g_square g_nonsquare abs_moebius_mu_le elim!: is_nth_powerE)
have fds_g: "fds g = fds_ind squarefree * fds moebius_mu"
by (rule fds_eqI) (simp add: g_def fds_nth_mult)
have "fds g * fds_zeta = fds_ind squarefree * (fds_zeta * fds moebius_mu)"
by (simp add: fds_g mult_ac)
also have "fds_zeta * fds moebius_mu = (1 :: real fds)"
by (rule fds_zeta_times_moebius_mu)
finally have *: "fds_ind squarefree = fds g * fds_zeta" by simp
have ind_squarefree: "ind squarefree = dirichlet_prod g f"
proof
fix n :: nat
from * show "ind squarefree n = dirichlet_prod g f n"
by (cases "n = 0") (simp_all add: fds_eq_iff fds_nth_mult f_def)
qed
define H :: "real \<Rightarrow> real"
where "H = (\<lambda>x. sum_upto (\<lambda>m. g (m^2) * (real_of_int \<lfloor>x / real (m\<^sup>2)\<rfloor> - x / real (m^2))) (sqrt x))"
define J where "J = (\<lambda>x::real. (\<Sum>n. moebius_mu (n + Suc (nat \<lfloor>x\<rfloor>)) / (n + Suc (nat \<lfloor>x\<rfloor>))^2))"
have "eventually (\<lambda>x. norm (H x) \<le> 1 * norm (sqrt x)) at_top"
using eventually_ge_at_top[of "0::real"]
proof eventually_elim
case (elim x)
have "abs (H x) \<le> sum_upto (\<lambda>m. abs (g (m^2) * (real_of_int \<lfloor>x / real (m\<^sup>2)\<rfloor> -
x / real (m^2)))) (sqrt x)" (is "_ \<le> ?S") unfolding H_def sum_upto_def
by (rule sum_abs)
also have "x / (real m)\<^sup>2 - real_of_int \<lfloor>x / (real m)\<^sup>2\<rfloor> \<le> 1" for m by linarith
hence "?S \<le> sum_upto (\<lambda>m. 1 * 1) (sqrt x)" unfolding abs_mult sum_upto_def
by (intro sum_mono mult_mono abs_g_le) simp_all
also have "\<dots> = of_int \<lfloor>sqrt x\<rfloor>" using elim by (simp add: sum_upto_altdef)
also have "\<dots> \<le> sqrt x" by linarith
finally show ?case using elim by simp
qed
hence H_bigo: "H \<in> O(\<lambda>x. sqrt x)" by (rule bigoI)
let ?A = "\<lambda>x. card {n. real n \<le> x \<and> squarefree n}"
have "eventually (\<lambda>x. ?A x - 6 / pi\<^sup>2 * x = (-x) * J (sqrt x) + H x) at_top"
using eventually_ge_at_top[of "0::real"]
proof eventually_elim
fix x :: real assume x: "x \<ge> 0"
have "{n. real n \<le> x \<and> squarefree n} = {n. n > 0 \<and> real n \<le> x \<and> squarefree n}"
by (auto intro!: Nat.gr0I)
also have "card \<dots> = sum_upto (ind squarefree :: nat \<Rightarrow> real) x"
by (rule sum_upto_ind [symmetric])
also have "\<dots> = sum_upto (\<lambda>d. g d * sum_upto f (x / real d)) x" (is "_ = ?S")
unfolding ind_squarefree by (rule sum_upto_dirichlet_prod)
also have "sum f {0<..nat \<lfloor>x / real i\<rfloor>} = of_int \<lfloor>x / real i\<rfloor>" if "i > 0" for i
using x by (simp add: f_def)
hence "?S = sum_upto (\<lambda>d. g d * of_int \<lfloor>x / real d\<rfloor>) x"
unfolding sum_upto_altdef by (intro sum.cong refl) simp_all
also have "\<dots> = sum_upto (\<lambda>m. g (m ^ 2) * of_int \<lfloor>x / real (m ^ 2)\<rfloor>) (sqrt x)"
unfolding sum_upto_def
proof (intro sum.reindex_bij_betw_not_neutral [symmetric])
show "bij_betw power2 ({i. 0 < i \<and> real i \<le> sqrt x} - {})
({i. 0 < i \<and> real i \<le> x} - {i\<in>{0<..nat \<lfloor>x\<rfloor>}. \<not>is_square i})"
by (auto simp: bij_betw_def inj_on_def power_eq_iff_eq_base le_sqrt_iff
is_nth_power_def le_nat_iff le_floor_iff)
qed (auto simp: g_nonsquare)
also have "\<dots> = x * sum_upto (\<lambda>m. g (m ^ 2) / real m ^ 2) (sqrt x) + H x"
by (simp add: H_def sum_upto_def sum.distrib ring_distribs sum_subtractf
sum_distrib_left sum_distrib_right mult_ac)
also have "sum_upto (\<lambda>m. g (m ^ 2) / real m ^ 2) (sqrt x) =
sum_upto (\<lambda>m. moebius_mu m / real m ^ 2) (sqrt x)"
unfolding sum_upto_altdef by (intro sum.cong refl) (simp_all add: g_square)
also have "sum_upto (\<lambda>m. moebius_mu m / (real m)\<^sup>2) (sqrt x) =
(\<Sum>m<Suc (nat \<lfloor>sqrt x\<rfloor>). moebius_mu m / (real m) ^ 2)"
unfolding sum_upto_altdef by (intro sum.mono_neutral_cong_left) auto
also have "\<dots> = (6 / pi^2 - J (sqrt x))"
using sums_split_initial_segment[OF moebius_over_square_sums, of "Suc (nat \<lfloor>sqrt x\<rfloor>)"]
by (auto simp: sums_iff algebra_simps J_def sum_upto_altdef)
finally show "?A x - 6 / pi\<^sup>2 * x = (-x) * J (sqrt x) + H x"
by (simp add: algebra_simps)
qed
hence "(\<lambda>x. ?A x - 6 / pi\<^sup>2 * x) \<in> \<Theta>(\<lambda>x. (-x) * J (sqrt x) + H x)"
by (rule bigthetaI_cong)
also have "(\<lambda>x. (-x) * J (sqrt x) + H x) \<in> O(\<lambda>x. sqrt x)"
proof (intro sum_in_bigo H_bigo)
have "(\<lambda>x. J (sqrt x)) \<in> O(\<lambda>x. 1 / sqrt x)" unfolding J_def
using moebius_sum_tail_bigo sqrt_at_top by (rule landau_o.big.compose)
hence "(\<lambda>x. (-x) * J (sqrt x)) \<in> O(\<lambda>x. x * (1 / sqrt x))"
by (intro landau_o.big.mult) simp_all
also have "(\<lambda>x::real. x * (1 / sqrt x)) \<in> \<Theta>(\<lambda>x. sqrt x)"
by (intro bigthetaI_cong eventually_mono[OF eventually_gt_at_top[of "0::real"]])
(auto simp: field_simps)
finally show "(\<lambda>x. (-x) * J (sqrt x)) \<in> O(\<lambda>x. sqrt x)" .
qed
finally show ?thesis .
qed
theorem squarefree_asymptotics':
"(\<lambda>x. card {n. real n \<le> x \<and> squarefree n}) =o (\<lambda>x. 6 / pi\<^sup>2 * x) +o O(\<lambda>x. sqrt x)"
using squarefree_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem squarefree_asymptotics'':
"(\<lambda>x. card {n. real n \<le> x \<and> squarefree n}) \<sim>[at_top] (\<lambda>x. 6 / pi\<^sup>2 * x)"
proof -
have "(\<lambda>x. card {n. real n \<le> x \<and> squarefree n} - 6 / pi\<^sup>2 * x) \<in> O(\<lambda>x. sqrt x)"
by (rule squarefree_asymptotics)
also have "(sqrt :: real \<Rightarrow> real) \<in> \<Theta>(\<lambda>x. x powr (1/2))"
by (intro bigthetaI_cong eventually_mono[OF eventually_ge_at_top[of "0::real"]])
(auto simp: powr_half_sqrt)
also have "(\<lambda>x::real. x powr (1/2)) \<in> o(\<lambda>x. 6 / pi ^ 2 * x)" by simp
finally show ?thesis by (simp add: asymp_equiv_altdef)
qed
subsection \<open>The hyperbola method\<close>
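text \<open>
  Informally, Dirichlet's hyperbola method states that for the summatory functions
  $F$ and $G$ of $f$ and $g$ and any $x \geq 1$,
  \[\sum_{n \leq x} (f * g)(n) \;=\; \sum_{n \leq \sqrt{x}} f(n)\, G(x/n)
      \;+\; \sum_{n \leq \sqrt{x}} g(n)\, F(x/n) \;-\; F(\sqrt{x})\, G(\sqrt{x}).\]
  The following lemma packages the corresponding big-O estimate, where $R$, $S$, and
  $T$ approximate the three terms on the right-hand side.
\<close>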
lemma hyperbola_method_bigo:
fixes f g :: "nat \<Rightarrow> 'a :: real_normed_field"
assumes "(\<lambda>x. sum_upto (\<lambda>n. f n * sum_upto g (x / real n)) (sqrt x) - R x) \<in> O(b)"
assumes "(\<lambda>x. sum_upto (\<lambda>n. sum_upto f (x / real n) * g n) (sqrt x) - S x) \<in> O(b)"
assumes "(\<lambda>x. sum_upto f (sqrt x) * sum_upto g (sqrt x) - T x) \<in> O(b)"
shows "(\<lambda>x. sum_upto (dirichlet_prod f g) x - (R x + S x - T x)) \<in> O(b)"
proof -
let ?A = "\<lambda>x. (sum_upto (\<lambda>n. f n * sum_upto g (x / real n)) (sqrt x) - R x) +
(sum_upto (\<lambda>n. sum_upto f (x / real n) * g n) (sqrt x) - S x) +
(-(sum_upto f (sqrt x) * sum_upto g (sqrt x) - T x))"
have "(\<lambda>x. sum_upto (dirichlet_prod f g) x - (R x + S x - T x)) \<in> \<Theta>(?A)"
by (intro bigthetaI_cong eventually_mono[OF eventually_ge_at_top[of "0::real"]])
(auto simp: hyperbola_method_sqrt)
also from assms have "?A \<in> O(b)"
by (intro sum_in_bigo(1)) (simp_all only: landau_o.big.uminus_in_iff)
finally show ?thesis .
qed
lemma frac_le_1: "frac x \<le> 1"
unfolding frac_def by linarith
lemma ln_minus_ln_floor_bound:
assumes "x \<ge> 2"
shows "ln x - ln (floor x) \<in> {0..<1 / (x - 1)}"
proof -
from assms have "ln (floor x) \<ge> ln (x - 1)" by (subst ln_le_cancel_iff) simp_all
hence "ln x - ln (floor x) \<le> ln ((x - 1) + 1) - ln (x - 1)" by simp
also from assms have "\<dots> < 1 / (x - 1)" by (intro ln_diff_le_inverse) simp_all
finally have "ln x - ln (floor x) < 1 / (x - 1)" by simp
moreover from assms have "ln x \<ge> ln (of_int \<lfloor>x\<rfloor>)" by (subst ln_le_cancel_iff) simp_all
ultimately show ?thesis by simp
qed
lemma ln_minus_ln_floor_bigo:
"(\<lambda>x::real. ln x - ln (floor x)) \<in> O(\<lambda>x. 1 / x)"
proof -
have "eventually (\<lambda>x. norm (ln x - ln (floor x)) \<le> 1 * norm (1 / (x - 1))) at_top"
using eventually_ge_at_top[of "2::real"]
proof eventually_elim
case (elim x)
with ln_minus_ln_floor_bound[OF this] show ?case by auto
qed
hence "(\<lambda>x::real. ln x - ln (floor x)) \<in> O(\<lambda>x. 1 / (x - 1))" by (rule bigoI)
also have "(\<lambda>x::real. 1 / (x - 1)) \<in> O(\<lambda>x. 1 / x)" by simp
finally show ?thesis .
qed
lemma divisor_count_asymptotics_aux:
"(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)"
proof -
define R where "R = (\<lambda>x. \<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. frac (x / real i))"
define S where "S = (\<lambda>x. ln (real (nat \<lfloor>sqrt x\<rfloor>)) - ln x / 2)"
have R_bound: "R x \<in> {0..sqrt x}" if x: "x \<ge> 0" for x
proof -
have "R x \<le> (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1)" unfolding R_def by (intro sum_mono frac_le_1)
also from x have "\<dots> = of_int \<lfloor>sqrt x\<rfloor>" by simp
also have "\<dots> \<le> sqrt x" by simp
finally have "R x \<le> sqrt x" .
moreover have "R x \<ge> 0" unfolding R_def by (intro sum_nonneg) simp_all
ultimately show ?thesis by simp
qed
have R_bound': "norm (R x) \<le> 1 * norm (sqrt x)" if "x \<ge> 0" for x
using R_bound[OF that] that by simp
have R_bigo: "R \<in> O(sqrt)" using eventually_ge_at_top[of "0::real"]
by (intro bigoI[of _ 1], elim eventually_mono) (rule R_bound')
have "eventually (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) =
x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x) at_top"
using eventually_ge_at_top[of "0 :: real"]
proof eventually_elim
case (elim x)
have "sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) =
(\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. of_int \<lfloor>x / real i\<rfloor>)" using elim
by (simp add: sum_upto_altdef)
also have "\<dots> = x * (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1 / real i) - R x"
by (simp add: sum_subtractf frac_def R_def sum_distrib_left)
also have "{0<..nat \<lfloor>sqrt x\<rfloor>} = {1..nat \<lfloor>sqrt x\<rfloor>}" by auto
also have "(\<Sum>i\<in>\<dots>. 1 / real i) = harm (nat \<lfloor>sqrt x\<rfloor>)" by (simp add: harm_def divide_simps)
finally show ?case .
qed
hence "(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in>
\<Theta>(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (nat \<lfloor>sqrt x\<rfloor>) + euler_mascheroni)) - R x + x * S x)"
(is "_ \<in> \<Theta>(?A)")
by (intro bigthetaI_cong) (elim eventually_mono, simp_all add: algebra_simps S_def)
also have "?A \<in> O(sqrt)"
proof (intro sum_in_bigo)
have "(\<lambda>x. - S x) \<in> \<Theta>(\<lambda>x. ln (sqrt x) - ln (of_int \<lfloor>sqrt x\<rfloor>))"
by (intro bigthetaI_cong eventually_mono [OF eventually_ge_at_top[of "1::real"]])
(auto simp: S_def ln_sqrt)
also have "(\<lambda>x. ln (sqrt x) - ln (of_int \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / sqrt x)"
by (rule landau_o.big.compose[OF ln_minus_ln_floor_bigo sqrt_at_top])
finally have "(\<lambda>x. x * S x) \<in> O(\<lambda>x. x * (1 / sqrt x))" by (intro landau_o.big.mult) simp_all
also have "(\<lambda>x::real. x * (1 / sqrt x)) \<in> \<Theta>(\<lambda>x. sqrt x)"
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::real"]])
(auto simp: field_simps)
finally show "(\<lambda>x. x * S x) \<in> O(sqrt)" .
next
let ?f = "\<lambda>x::real. harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)"
have "?f \<in> O(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>))"
proof (rule landau_o.big.compose[of _ _ _ "\<lambda>x. nat \<lfloor>sqrt x\<rfloor>"])
show "filterlim (\<lambda>x::real. nat \<lfloor>sqrt x\<rfloor>) at_top at_top"
by (intro filterlim_compose[OF filterlim_nat_sequentially]
filterlim_compose[OF filterlim_floor_sequentially] sqrt_at_top)
next
show "(\<lambda>a. harm a - (ln (real a) + euler_mascheroni)) \<in> O(\<lambda>a. 1 / real a)"
by (rule harm_expansion_bigo_simple2)
qed
also have "(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / (sqrt x - 1))"
proof (rule bigoI[of _ 1], use eventually_ge_at_top[of 2] in eventually_elim)
case (elim x)
have "sqrt x \<le> 1 + real_of_int \<lfloor>sqrt x\<rfloor>" by linarith
with elim show ?case by (simp add: field_simps)
qed
also have "(\<lambda>x::real. 1 / (sqrt x - 1)) \<in> O(\<lambda>x. 1 / sqrt x)"
by (rule landau_o.big.compose[OF _ sqrt_at_top]) simp_all
finally have "(\<lambda>x. x * ?f x) \<in> O(\<lambda>x. x * (1 / sqrt x))"
by (intro landau_o.big.mult landau_o.big_refl)
also have "(\<lambda>x::real. x * (1 / sqrt x)) \<in> \<Theta>(\<lambda>x. sqrt x)"
by (intro bigthetaI_cong eventually_mono[OF eventually_gt_at_top[of "0::real"]])
(auto elim!: eventually_mono simp: field_simps)
finally show "(\<lambda>x. x * ?f x) \<in> O(sqrt)" .
qed fact+
finally show ?thesis .
qed
lemma sum_upto_sqrt_bound:
assumes x: "x \<ge> (0 :: real)"
shows "norm ((sum_upto (\<lambda>_. 1) (sqrt x))\<^sup>2 - x) \<le> 2 * norm (sqrt x)"
proof -
from x have "0 \<le> 2 * sqrt x * (1 - frac (sqrt x)) + frac (sqrt x) ^ 2"
by (intro add_nonneg_nonneg mult_nonneg_nonneg) (simp_all add: frac_le_1)
also from x have "\<dots> = (sqrt x - frac (sqrt x)) ^ 2 - x + 2 * sqrt x"
by (simp add: algebra_simps power2_eq_square)
also have "sqrt x - frac (sqrt x) = of_int \<lfloor>sqrt x\<rfloor>" by (simp add: frac_def)
finally have "(of_int \<lfloor>sqrt x\<rfloor>) ^ 2 - x \<ge> -2 * sqrt x" by (simp add: algebra_simps)
moreover from x have "of_int (\<lfloor>sqrt x\<rfloor>) ^ 2 \<le> sqrt x ^ 2"
by (intro power_mono) simp_all
with x have "of_int (\<lfloor>sqrt x\<rfloor>) ^ 2 - x \<le> 0" by simp
ultimately have "sum_upto (\<lambda>_. 1) (sqrt x) ^ 2 - x \<in> {-2 * sqrt x..0}"
using x by (simp add: sum_upto_altdef)
with x show ?thesis by simp
qed
lemma summatory_divisor_count_asymptotics:
"(\<lambda>x. sum_upto (\<lambda>n. real (divisor_count n)) x -
(x * ln x + (2 * euler_mascheroni - 1) * x)) \<in> O(sqrt)"
proof -
let ?f = "\<lambda>x. x * ln x / 2 + euler_mascheroni * x"
have "(\<lambda>x. sum_upto (dirichlet_prod (\<lambda>_. 1 :: real) (\<lambda>_. 1)) x - (?f x + ?f x - x)) \<in> O(sqrt)"
(is "?g \<in> _")
proof (rule hyperbola_method_bigo)
have "eventually (\<lambda>x::real. norm (sum_upto (\<lambda>_. 1) (sqrt x) ^ 2 - x) \<le>
2 * norm (sqrt x)) at_top"
using eventually_ge_at_top[of "0::real"] by eventually_elim (rule sum_upto_sqrt_bound)
thus "(\<lambda>x::real. sum_upto (\<lambda>_. 1) (sqrt x) * sum_upto (\<lambda>_. 1) (sqrt x) - x) \<in> O(sqrt)"
by (intro bigoI[of _ 2]) (simp_all add: power2_eq_square)
next
show "(\<lambda>x. sum_upto (\<lambda>n. 1 * sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)"
using divisor_count_asymptotics_aux by simp
next
show "(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n) * 1) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)"
using divisor_count_asymptotics_aux by simp
qed
also have "divisor_count n = dirichlet_prod (\<lambda>_. 1) (\<lambda>_. 1) n" for n
using fds_divisor_count
by (cases "n = 0") (simp_all add: fds_eq_iff power2_eq_square fds_nth_mult)
hence "?g = (\<lambda>x. sum_upto (\<lambda>n. real (divisor_count n)) x -
(x * ln x + (2 * euler_mascheroni - 1) * x))"
by (intro ext) (simp_all add: algebra_simps dirichlet_prod_def)
finally show ?thesis .
qed
theorem summatory_divisor_count_asymptotics':
"(\<lambda>x. sum_upto (\<lambda>n. real (divisor_count n)) x) =o
(\<lambda>x. x * ln x + (2 * euler_mascheroni - 1) * x) +o O(\<lambda>x. sqrt x)"
using summatory_divisor_count_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem summatory_divisor_count_asymptotics'':
"sum_upto (\<lambda>n. real (divisor_count n)) \<sim>[at_top] (\<lambda>x. x * ln x)"
proof -
have "(\<lambda>x. sum_upto (\<lambda>n. real (divisor_count n)) x -
(x * ln x + (2 * euler_mascheroni - 1) * x)) \<in> O(sqrt)"
by (rule summatory_divisor_count_asymptotics)
also have "sqrt \<in> \<Theta>(\<lambda>x. x powr (1/2))"
by (intro bigthetaI_cong eventually_mono [OF eventually_ge_at_top[of "0::real"]])
(auto elim!: eventually_mono simp: powr_half_sqrt)
also have "(\<lambda>x::real. x powr (1/2)) \<in> o(\<lambda>x. x * ln x + (2 * euler_mascheroni - 1) * x)" by simp
finally have "sum_upto (\<lambda>n. real (divisor_count n)) \<sim>[at_top]
(\<lambda>x. x * ln x + (2 * euler_mascheroni - 1) * x)"
by (simp add: asymp_equiv_altdef)
also have "\<dots> \<sim>[at_top] (\<lambda>x. x * ln x)" by (subst asymp_equiv_add_right) simp_all
finally show ?thesis .
qed
lemma summatory_divisor_eq:
"sum_upto (\<lambda>n. real (divisor_count n)) (real m) = card {(n,d). n \<in> {0<..m} \<and> d dvd n}"
proof -
have "sum_upto (\<lambda>n. real (divisor_count n)) m = card (SIGMA n:{0<..m}. {d. d dvd n})"
unfolding sum_upto_altdef divisor_count_def by (subst card_SigmaI) simp_all
also have "(SIGMA n:{0<..m}. {d. d dvd n}) = {(n,d). n \<in> {0<..m} \<and> d dvd n}" by auto
finally show ?thesis .
qed
context
fixes M :: "nat \<Rightarrow> real"
defines "M \<equiv> \<lambda>m. card {(n,d). n \<in> {0<..m} \<and> d dvd n} / card {0<..m}"
begin
lemma mean_divisor_count_asymptotics:
"(\<lambda>m. M m - (ln m + 2 * euler_mascheroni - 1)) \<in> O(\<lambda>m. 1 / sqrt m)"
proof -
have "(\<lambda>m. M m - (ln m + 2 * euler_mascheroni - 1))
\<in> \<Theta>(\<lambda>m. (sum_upto (\<lambda>n. real (divisor_count n)) (real m) -
(m * ln m + (2 * euler_mascheroni - 1) * m)) / m)" (is "_ \<in> \<Theta>(?f)")
unfolding M_def
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::nat"]])
(auto simp: summatory_divisor_eq field_simps)
also have "?f \<in> O(\<lambda>m. sqrt m / m)"
by (intro landau_o.big.compose[OF _ filterlim_real_sequentially] landau_o.big.divide_right
summatory_divisor_count_asymptotics eventually_at_top_not_equal)
also have "(\<lambda>m::nat. sqrt m / m) \<in> \<Theta>(\<lambda>m. 1 / sqrt m)"
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::nat"]])
(auto simp: field_simps)
finally show ?thesis .
qed
theorem mean_divisor_count_asymptotics':
"M =o (\<lambda>x. ln x + 2 * euler_mascheroni - 1) +o O(\<lambda>x. 1 / sqrt x)"
using mean_divisor_count_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem mean_divisor_count_asymptotics'': "M \<sim>[at_top] ln"
proof -
have "(\<lambda>x. M x - (ln x + 2 * euler_mascheroni - 1)) \<in> O(\<lambda>x. 1 / sqrt x)"
by (rule mean_divisor_count_asymptotics)
also have "(\<lambda>x. 1 / sqrt (real x)) \<in> \<Theta>(\<lambda>x. x powr (-1/2))"
using eventually_gt_at_top[of "0::nat"]
by (intro bigthetaI_cong)
(auto elim!: eventually_mono simp: powr_half_sqrt field_simps powr_minus)
also have "(\<lambda>x::nat. x powr (-1/2)) \<in> o(\<lambda>x. ln x + 2 * euler_mascheroni - 1)"
by (intro smallo_real_nat_transfer) simp_all
finally have "M \<sim>[at_top] (\<lambda>x. ln x + 2 * euler_mascheroni - 1)"
by (simp add: asymp_equiv_altdef)
also have "\<dots> = (\<lambda>x::nat. ln x + (2 * euler_mascheroni - 1))" by (simp add: algebra_simps)
also have "\<dots> \<sim>[at_top] (\<lambda>x::nat. ln x)" by (subst asymp_equiv_add_right) auto
finally show ?thesis .
qed
end
subsection \<open>The asymptotic distribution of coprime pairs\<close>
context
fixes A :: "nat \<Rightarrow> (nat \<times> nat) set"
defines "A \<equiv> (\<lambda>N. {(m,n) \<in> {1..N} \<times> {1..N}. coprime m n})"
begin
lemma coprime_pairs_asymptotics:
"(\<lambda>N. real (card (A N)) - 6 / pi\<^sup>2 * (real N)\<^sup>2) \<in> O(\<lambda>N. real N * ln (real N))"
proof -
define C :: "nat \<Rightarrow> (nat \<times> nat) set"
where "C = (\<lambda>N. (\<Union>m\<in>{1..N}. (\<lambda>n. (m,n)) ` totatives m))"
define D :: "nat \<Rightarrow> (nat \<times> nat) set"
where "D = (\<lambda>N. (\<Union>n\<in>{1..N}. (\<lambda>m. (m,n)) ` totatives n))"
have fin: "finite (C N)" "finite (D N)" for N unfolding C_def D_def
by (intro finite_UN_I finite_imageI; simp)+
have *: "card (A N) = 2 * (\<Sum>m\<in>{0<..N}. totient m) - 1" if N: "N > 0" for N
proof -
have "A N = C N \<union> D N"
by (auto simp add: A_def C_def D_def totatives_def image_iff ac_simps)
also have "card \<dots> = card (C N) + card (D N) - card (C N \<inter> D N)"
using card_Un_Int[OF fin[of N]] by arith
also have "C N \<inter> D N = {(1, 1)}" using N by (auto simp: image_iff totatives_def C_def D_def)
also have "D N = (\<lambda>(x,y). (y,x)) ` C N" by (simp add: image_UN image_image C_def D_def)
also have "card \<dots> = card (C N)" by (rule card_image) (simp add: inj_on_def C_def)
also have "card (C N) = (\<Sum>m\<in>{1..N}. card ((\<lambda>n. (m,n)) ` totatives m))"
unfolding C_def by (intro card_UN_disjoint) auto
also have "\<dots> = (\<Sum>m\<in>{1..N}. totient m)" unfolding totient_def
by (subst card_image) (auto simp: inj_on_def)
also have "\<dots> = (\<Sum>m\<in>{0<..N}. totient m)" by (intro sum.cong refl) auto
finally show "card (A N) = 2 * \<dots> - 1" by simp
qed
have **: "(\<Sum>m\<in>{0<..N}. totient m) \<ge> 1" if "N \<ge> 1" for N
proof -
have "1 \<le> N" by fact
also have "N = (\<Sum>m\<in>{0<..N}. 1)" by simp
also have "(\<Sum>m\<in>{0<..N}. 1) \<le> (\<Sum>m\<in>{0<..N}. totient m)"
by (intro sum_mono) (simp_all add: Suc_le_eq)
finally show ?thesis .
qed
have "(\<lambda>N. real (card (A N)) - 6 / pi\<^sup>2 * (real N)\<^sup>2) \<in>
\<Theta>(\<lambda>N. 2 * (sum_upto (\<lambda>m. real (totient m)) (real N) - (3 / pi\<^sup>2 * (real N)\<^sup>2)) - 1)"
(is "_ \<in> \<Theta>(?f)") using * **
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::nat"]])
(auto simp: of_nat_diff sum_upto_altdef)
also have "?f \<in> O(\<lambda>N. real N * ln (real N))"
proof (rule landau_o.big.compose[OF _ filterlim_real_sequentially], rule sum_in_bigo)
show " (\<lambda>x. 2 * (sum_upto (\<lambda>m. real (totient m)) x - 3 / pi\<^sup>2 * x\<^sup>2)) \<in> O(\<lambda>x. x * ln x)"
by (subst landau_o.big.cmult_in_iff, simp, rule summatory_totient_asymptotics)
qed simp_all
finally show ?thesis .
qed
theorem coprime_pairs_asymptotics':
"(\<lambda>N. real (card (A N))) =o (\<lambda>N. 6 / pi\<^sup>2 * (real N)\<^sup>2) +o O(\<lambda>N. real N * ln (real N))"
using coprime_pairs_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem coprime_pairs_asymptotics'':
"(\<lambda>N. real (card (A N))) \<sim>[at_top] (\<lambda>N. 6 / pi\<^sup>2 * (real N)\<^sup>2)"
proof -
have "(\<lambda>N. real (card (A N)) - 6 / pi\<^sup>2 * (real N) ^ 2) \<in> O(\<lambda>N. real N * ln (real N))"
by (rule coprime_pairs_asymptotics)
also have "(\<lambda>N. real N * ln (real N)) \<in> o(\<lambda>N. 6 / pi ^ 2 * real N ^ 2)"
by (rule landau_o.small.compose[OF _ filterlim_real_sequentially]) simp
finally show ?thesis by (simp add: asymp_equiv_altdef)
qed
theorem coprime_probability_tendsto:
"(\<lambda>N. card (A N) / card ({1..N} \<times> {1..N})) \<longlonglongrightarrow> 6 / pi\<^sup>2"
proof -
have "(\<lambda>N. 6 / pi ^ 2) \<sim>[at_top] (\<lambda>N. 6 / pi ^ 2 * real N ^ 2 / real N ^ 2)"
using eventually_gt_at_top[of "0::nat"]
by (intro asymp_equiv_refl_ev) (auto elim!: eventually_mono)
also have "\<dots> \<sim>[at_top] (\<lambda>N. real (card (A N)) / real N ^ 2)"
by (intro asymp_equiv_intros asymp_equiv_symI[OF coprime_pairs_asymptotics''])
also have "\<dots> \<sim>[at_top] (\<lambda>N. real (card (A N)) / real (card ({1..N} \<times> {1..N})))"
by (simp add: power2_eq_square)
finally have "\<dots> \<sim>[at_top] (\<lambda>_. 6 / pi ^ 2)" by (simp add: asymp_equiv_sym)
thus ?thesis by (rule asymp_equivD_const)
qed
end
subsection \<open>The asymptotics of the number of Farey fractions\<close>
definition farey_fractions :: "nat \<Rightarrow> rat set" where
"farey_fractions N = {q :: rat \<in> {0<..1}. snd (quotient_of q) \<le> int N} "
lemma Fract_eq_coprime:
assumes "Rat.Fract a b = Rat.Fract c d" "b > 0" "d > 0" "coprime a b" "coprime c d"
shows "a = c" "b = d"
proof -
from assms have "a * d = c * b" by (auto simp: eq_rat)
hence "abs (a * d) = abs (c * b)" by (simp only:)
hence "abs a * abs d = abs c * abs b" by (simp only: abs_mult)
also have "?this \<longleftrightarrow> abs a = abs c \<and> d = b"
using assms by (subst coprime_crossproduct_int) simp_all
finally show "b = d" by simp
with \<open>a * d = c * b\<close> and \<open>b > 0\<close> show "a = c" by simp
qed
lemma quotient_of_split:
"P (quotient_of q) = (\<forall>a b. b > 0 \<longrightarrow> coprime a b \<longrightarrow> q = Rat.Fract a b \<longrightarrow> P (a, b))"
by (cases q) (auto simp: quotient_of_Fract dest: Fract_eq_coprime)
lemma quotient_of_split_asm:
"P (Rat.quotient_of q) = (\<not>(\<exists>a b. b > 0 \<and> coprime a b \<and> q = Rat.Fract a b \<and> \<not>P (a, b)))"
using quotient_of_split[of P q] by blast
lemma farey_fractions_bij:
"bij_betw (\<lambda>(a,b). Rat.Fract (int a) (int b))
{(a,b)|a b. 0 < a \<and> a \<le> b \<and> b \<le> N \<and> coprime a b} (farey_fractions N)"
proof (rule bij_betwI[of _ _ _ "\<lambda>q. case quotient_of q of (a, b) \<Rightarrow> (nat a, nat b)"], goal_cases)
case 1
show ?case
by (auto simp: farey_fractions_def Rat.zero_less_Fract_iff Rat.Fract_le_one_iff
Rat.quotient_of_Fract Rat.normalize_def gcd_int_def Let_def)
next
case 2
show ?case
by (auto simp add: farey_fractions_def Rat.Fract_le_one_iff Rat.zero_less_Fract_iff split: prod.splits quotient_of_split_asm)
(simp add: coprime_int_iff [symmetric])
next
case (3 x)
thus ?case by (auto simp: Rat.quotient_of_Fract Rat.normalize_def Let_def gcd_int_def)
next
case (4 x)
thus ?case unfolding farey_fractions_def
by (split quotient_of_split) (auto simp: Rat.zero_less_Fract_iff)
qed
lemma card_farey_fractions: "card (farey_fractions N) = sum totient {0<..N}"
proof -
have "card (farey_fractions N) = card {(a,b)|a b. 0 < a \<and> a \<le> b \<and> b \<le> N \<and> coprime a b}"
using farey_fractions_bij by (rule bij_betw_same_card [symmetric])
also have "{(a,b)|a b. 0 < a \<and> a \<le> b \<and> b \<le> N \<and> coprime a b} =
(\<Union>b\<in>{0<..N}. (\<lambda>a. (a, b)) ` totatives b)"
by (auto simp: totatives_def image_iff)
also have "card \<dots> = (\<Sum>b\<in>{0<..N}. card ((\<lambda>a. (a, b)) ` totatives b))"
by (intro card_UN_disjoint) auto
also have "\<dots> = (\<Sum>b\<in>{0<..N}. totient b)"
unfolding totient_def by (intro sum.cong refl card_image) (auto simp: inj_on_def)
finally show ?thesis .
qed
lemma card_farey_fractions_asymptotics:
"(\<lambda>N. real (card (farey_fractions N)) - 3 / pi\<^sup>2 * (real N)\<^sup>2) \<in> O(\<lambda>N. real N * ln (real N))"
proof -
have "(\<lambda>N. sum_upto (\<lambda>n. real (totient n)) (real N) - 3 / pi\<^sup>2 * (real N)\<^sup>2)
\<in> O(\<lambda>N. real N * ln (real N))" (is "?f \<in> _")
using summatory_totient_asymptotics filterlim_real_sequentially
by (rule landau_o.big.compose)
also have "?f = (\<lambda>N. real (card (farey_fractions N)) - 3 / pi\<^sup>2 * (real N)\<^sup>2)"
by (intro ext) (simp add: sum_upto_altdef card_farey_fractions)
finally show ?thesis .
qed
theorem card_farey_fractions_asymptotics':
"(\<lambda>N. card (farey_fractions N)) =o (\<lambda>N. 3 / pi\<^sup>2 * N^2) +o O(\<lambda>N. N * ln N)"
using card_farey_fractions_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem card_farey_fractions_asymptotics'':
"(\<lambda>N. real (card (farey_fractions N))) \<sim>[at_top] (\<lambda>N. 3 / pi\<^sup>2 * (real N)\<^sup>2)"
proof -
have "(\<lambda>N. real (card (farey_fractions N)) - 3 / pi\<^sup>2 * (real N) ^ 2) \<in> O(\<lambda>N. real N * ln (real N))"
by (rule card_farey_fractions_asymptotics)
also have "(\<lambda>N. real N * ln (real N)) \<in> o(\<lambda>N. 3 / pi ^ 2 * real N ^ 2)"
by (rule landau_o.small.compose[OF _ filterlim_real_sequentially]) simp
finally show ?thesis by (simp add: asymp_equiv_altdef)
qed
end
|
The complex number $\i$ is not a numeral. |
If $A_n$ is a sequence of measurable sets, then $\liminf A_n$ is measurable. |
\lesson{5}{Dec 01 2021 Wed (19:09:04)}{Polynomial Identities and Proofs}{Unit 3}
\begin{definition}[Algebraic Proofs]
\begin{itemize}
\item \bf{Polynomial Identities} can be proven to be true by simplifying the identity through application of \bf{Algebraic Theorems} and \bf{Principles}.
\item Start with the side of the identity that can be simplified the easiest.
  \item Sometimes, following a \it{``clue''} will lead to a dead-end in your \bf{Proof}. Do not give up. Just follow a different \it{``clue''}. The more practice you have with proofs, the more you will be able to predict the dead-ends.
\end{itemize}
\end{definition}
\begin{example}
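  For instance, the identity $(a + b)^2 = a^2 + 2ab + b^2$ can be proven by starting with the left side and simplifying:
  \[(a + b)^2 = (a + b)(a + b) = a(a + b) + b(a + b) = a^2 + ab + ab + b^2 = a^2 + 2ab + b^2.\]
  Because the left side was rewritten into the right side using only the \bf{Distributive Property} and combining like terms, the identity holds for all values of $a$ and $b$.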
\end{example}
\subsubsection*{Application to Numerical Relationships}
Polynomial identities apply to more than just polynomials. Replacing the variables with numbers can help prove numerical relationships as well.
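For example, the identity $(x + y)(x - y) = x^2 - y^2$ with $x = 30$ and $y = 1$ gives $31 \cdot 29 = 30^2 - 1^2 = 900 - 1 = 899$, turning an awkward product into quick mental math.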
\newpage
|
# Transform the data as the coordinate system does
cdata <- function(plot) {
pieces <- ggplot_build(plot)
lapply(pieces$data, function(d) {
plyr::ddply(d, "PANEL", function(panel_data) {
scales <- pieces$layout$get_scales(panel_data$PANEL[1])
details <- plot$coordinates$train(scales)
plot$coordinates$transform(panel_data, details)
})
})
}
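# Extract the trained limits of a plot's scales: the x and y scale ranges plus
# the limits of all non-position scales, returned as one named list.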
pranges <- function(plot) {
layout <- ggplot_build(plot)$layout
x_ranges <- lapply(layout$panel_scales$x, function(scale) scale$get_limits())
y_ranges <- lapply(layout$panel_scales$y, function(scale) scale$get_limits())
npscales <- plot$scales$non_position_scales()
npranges <- lapply(npscales$scales$scales, function(scale) scale$get_limits())
c(list(x = x_ranges, y = y_ranges), npranges)
}
|
////////////////////////////////////////////////////////////
//
// Copyright (c) 2018 Sir Ementaler
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
////////////////////////////////////////////////////////////
#ifndef PICTOLEV_TILES_H
#define PICTOLEV_TILES_H
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <vector>
#include <gsl/gsl_assert>
#include <gsl/gsl_util>
#include <gsl/span>
#include "grid_size.h"
template<class T>
using image_fragment = std::vector<gsl::span<T>>;
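// Reinterpret a contiguous buffer as an image: returns one non-owning span per
// row, each of length size.width, without copying any pixel data.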
template<class T>
auto buffer_to_image(gsl::span<T> buffer, grid_size size) {
Expects(gsl::narrow_cast<std::size_t>(buffer.size()) == grid_area(size));
image_fragment<T> image(size.height);
std::generate(image.begin(), image.end(), [buffer, width = size.width]() mutable {
gsl::span<T> row = buffer.subspan(0, width);
buffer = buffer.subspan(width);
return row;
});
return image;
}
template<class T>
using tile_vector = std::vector<image_fragment<T>>;
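// Cut an image (a range of rows) into square tiles of side tile_size. Tiles are
// produced strip by strip, left to right within each strip of tile_size rows,
// and each tile again consists of non-owning row spans into the original image.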
template<class ForwardIt>
auto image_to_tile_list(ForwardIt first, ForwardIt last, std::size_t tile_size) {
Expects(std::distance(first, last) % tile_size == 0);
Expects(first == last || first->size() % tile_size == 0);
using container_type = std::remove_reference_t<typename ForwardIt::reference>;
using value_type = std::remove_reference_t<typename container_type::reference>;
const grid_size size {
first != last ? first->size() / tile_size : 0,
std::distance(first, last) / tile_size,
};
tile_vector<value_type> tiles(grid_area(size), image_fragment<value_type>(tile_size));
auto out = tiles.begin();
while (first != last) {
ForwardIt second = first;
std::advance(second, tile_size);
for (gsl::index x = 0; x != first->size(); x += tile_size) {
std::transform(first, second, out->begin(), [=](const auto& row) {
return gsl::span<value_type>(row).subspan(x, tile_size);
});
++out;
}
first = second;
}
return tiles;
}
#endif
|
function r8_cscd_test ( )
%*****************************************************************************80
%
%% R8_CSCD_TEST tests R8_CSCD.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 12 January 2015
%
% Author:
%
% John Burkardt
%
fprintf ( 1, '\n' );
fprintf ( 1, 'R8_CSCD_TEST\n' );
fprintf ( 1, ' R8_CSCD computes the cosecant of an angle\n' );
fprintf ( 1, ' given in degrees.\n' );
fprintf ( 1, '\n' );
fprintf ( 1, ' ANGLE R8_CSCD(ANGLE)\n' );
fprintf ( 1, '\n' );
for i = 0 : 15 : 360
angle = i;
if ( mod ( i, 180 ) == 0 )
fprintf ( 1, ' %8.2f Undefined\n', angle );
else
fprintf ( 1, ' %8.2f %14.6g\n', angle, r8_cscd ( angle ) );
end
end
return
end
|
A quick clip from our trip this weekend to XiaoLiu Qiu. This was taken with my GoPro mounted on my hot-shoe while I was photographing them.
Totally awesome experience with such graceful creatures, especially when they are chill enough to let you join them.
Thanks to Paul & the Scubar, Fulong for setting the trip up.
|
using LinearAlgebra
using SparseArrays
using Plots
"This routine solves the heat equation using Forward Euler in time and finite differences
in space. Note that this is unstable unless dt ~ O(h^2)."
m = 100 # number of points
T = .01
dt = .0005
u0(x) = 0.0 # initial condition
f(x) = 5*(Float64((x > -.5) && (x <= 0.0)) - Float64((x < .5) && (x >= 0.0)))
f(x,t) = f(x)*exp(-t)
α(t) = 1.0
β(t) = pi
# define spatial grid
x = LinRange(-1,1,m+2)
xint = x[2:end-1]
h = x[2]-x[1]
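# For this explicit discretization, stability requires roughly dt <= h^2/2.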
A = (1/h^2) * spdiagm(0=>2*ones(m),-1=>-ones(m-1),1=>-ones(m-1))
function F(t)
    b = f.(xint, t)      # evaluate the source term via the two-argument method f(x, t)
    b[1] += α(t)/h^2     # fold the left Dirichlet boundary value into the right-hand side
    b[end] += β(t)/h^2   # fold the right Dirichlet boundary value into the right-hand side
return b
end
u = u0.(xint)
Nsteps = ceil(Int,T/dt)
dt = T / Nsteps
interval = 1
@gif for k = 1:Nsteps
tk = k*dt
u .= u + dt * (F(tk) - A*u)
if k % interval==0
plot(xint,u,linewidth=2,label="Solution",ylims=(-1.0,3.0))
println("on timestep $k out of $Nsteps.")
end
end every interval
|
module Control.Algebra.ZZBezoutsIdentity
import Control.Algebra
import Classes.Verified
import Data.ZZ
import Control.Algebra.NumericInstances
import Control.Algebra.ZZVerifiedInstances
import Data.Matrix.AlgebraicVerified -- for (ringNeutralIsMultZeroL)
import Control.Algebra.ZZDivisors
import Data.Fin.FinOrdering
import Data.Fin.Structural
import Control.Isomorphism
-- Dependent pattern matching using (do) notation binds improves clarity
import Control.Monad.Identity
import Syntax.PreorderReasoning
import Control.Algebra.DiamondInstances
-- For example
import Data.ZZ.ZZModulo
{-
Table of contents:
* Lemmas for verifying the euclidean algorithm (bezoutsIdentityZZIfModulo)
* (bezoutsIdentityZZIfModulo) implementation
* (Commentary) "Goal: Separation of algorithm from verification."
* The GCD derived from (modZT), itself derived from (modNatT).
-}
{-
Lemmas for verifying the euclidean algorithm (bezoutsIdentityZZIfModulo)
-}
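-- If q<.>d = c <-> r, then b<.>c <+> (a <-> b<.>q)<.>d = a<.>d <+> b<.>r.
-- This is the re-association step used when unwinding one round of the
-- Euclidean recursion below.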
total
bezoutsIdentityLincombEqZZ :
(a, b, c, d, q, r : ZZ)
-> q<.>d = c <-> r
-> b<.>c <+> (a <-> b <.> q)<.>d
= a<.>d <+> b<.>r
bezoutsIdentityLincombEqZZ a b c d q r pr =
(
b<.>c <+> (a <-> b <.> q)<.>d
) ={
cong {f=((b<.>c) <+>)}
$ (
(a <-> b <.> q)<.>d
) ={ ringOpIsDistributiveSubR _ _ _ }= (
a<.>d <-> (b<.>q)<.>d
) ={
cong {f=((a<.>d) <->)}
{- Bracketing style switch -}
$ ( b<.>q<.>d ) ={ sym $ ringOpIsAssociative b q d }=
( b<.>(q<.>d) ) ={ cong {f=(b<.>)} pr }=
( b<.>(c <-> r) ) QED
}= (
a<.>d <-> b<.>(c <-> r)
) ={ abelianGroupOpIsCommutative (a <.> d) _ }= (
(inverse $ b<.>(c <-> r)) <+> a <.> d
) QED
}= (
b <.> c <+> ( (inverse $ b<.>(c <-> r)) <+> a <.> d )
) ={ semigroupOpIsAssociative _ _ _ }= (
b <.> c <-> b<.>(c <-> r) <+> a <.> d
) ={ abelianGroupOpIsCommutative _ (a <.> d) }= (
a <.> d <+> ( b <.> c <-> b<.>(c <-> r) )
) ={
cong {f=((a <.> d) <+>)}
$ trans (sym $ ringOpIsDistributiveSubL _ _ _)
$ cong {f=(b<.>)}
{- Bracketing style switch -}
$ ( c <-> (c <-> r) )
={
cong {f=(c<+>)}
$ trans (ringOpIsDistributiveR _ _ _)
$ cong {f=((inverse c) <+>)}
$ inverseIsInvolution r
}=
( c <+> (inverse c <+> r) )
={ semigroupOpIsAssociative c (inverse c) r }=
( (c <-> c) <+> r )
={ cong {f=(<+> r)} $ groupInverseIsInverseL c }=
( Algebra.neutral <+> r )
={ monoidNeutralIsNeutralR _ }=
( r ) QED
}= (
a<.>d <+> b<.>r
) QED
total
bezoutsIdentityExtQuotientLincombZZ :
(c, d, q, r, x : ZZ)
-> (q<.>d <+> r = c)
-> (d `quotientOverZZ` x)
-> (r `quotientOverZZ` x)
-> (c `quotientOverZZ` x)
bezoutsIdentityExtQuotientLincombZZ c d q r x eqpr (dq ** dqPr) (rq ** rqPr)
= (q<.>dq <+> rq
** trans (ringOpIsDistributiveR (q<.>dq) rq x)
$ rewrite rqPr
in trans ( cong { f= (<+>r) }
$ trans (sym $ ringOpIsAssociative _ _ _)
$ cong {f=(q<.>)} $ dqPr )
$ eqpr)
total
bezQTy : (c, d, a, b : ZZ) -> Type
bezQTy c d a b =
( c `quotientOverZZ` (a<.>c <+> b<.>d)
, d `quotientOverZZ` (a<.>c <+> b<.>d) )
{-
(bezoutsIdentityZZIfModulo) implementation
Given a modulo operator giving the remainder in a remainder-division pair,
produce a linear combination of two input integers which divides them both.
Since every number dividing both integers divides every linear combination
of them, this gives the greatest common denominator.
The latter statement isn't proved here, though.
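As a concrete illustration (assuming modZ is the usual nonnegative remainder):
for c = 12 and d = 8 the recursion passes through the pairs (8, 4) and (4, 0)
and unwinds to the coefficients (1, -1), i.e. 1<.>12 <+> (-1)<.>8 = 4, which
indeed divides both 12 and 8.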
-}
{- (bezoutsIdentityZZIfModulo) parameters -}
parameters (
{-
The nonnegative residue of the 1st modulo the subgroup generated by the 2nd.
Can factor through a modulo on naturals, but must be everywhere defined.
-}
modZ : ZZ -> ZZ -> ZZ
, quotientPartZ : (x, m : ZZ) -> (x <-> modZ x m) `quotientOverZZ` m
) {
bezoutsIdentityZZIfModulo :
( c, d : ZZ )
-> ( zpar : (ZZ, ZZ) ** uncurry (bezQTy c d) zpar )
-- bezoutsIdentityZZIfModulo c (Pos Z) = ( (1, 0) ** ((1 ** _), (0 ** _)) )
bezoutsIdentityZZIfModulo c (Pos Z) = ( (Algebra.unity, Algebra.neutral)
** rewrite ringWithUnityIsUnityR c
in rewrite monoidNeutralIsNeutralL c
in ( (Algebra.unity ** ringWithUnityIsUnityR c)
, (Algebra.neutral ** ringNeutralIsMultZeroL c) ) )
bezoutsIdentityZZIfModulo c d with (bezoutsIdentityZZIfModulo d (c `modZ` d))
| ((a,b) ** oldpr) = runIdentity $ do {
(q ** qIsDivD) <- Id $ c `quotientPartZ` d
((a' ** divsPrA), (b' ** divsPrB)) <- Id $ oldpr
-- reverifyDivZ across the bind
let reverifyDivZ' =
trans (cong {f=(<+> (c `modZ` d))} qIsDivD)
$ groupSubtractionIsRDivision c $ c `modZ` d
-- the goal linear combo of c & d gives d `gcd` (c `modZ` d)
let lincombEq = bezoutsIdentityLincombEqZZ
a b c d q (c `modZ` d)
$ groupDivisionAddLToSubR
(q <.> d) (c `modZ` d) c
$ reverifyDivZ'
-- d `gcd` (c `modZ` d) | c
let extQuotientLincombZZ = bezoutsIdentityExtQuotientLincombZZ
c d q (c `modZ` d) ( a<.>d <+> b<.>(c `modZ` d) )
reverifyDivZ'
(a' ** divsPrA)
(b' ** divsPrB)
return $ ( (b, a <-> b <.> q)
** ((getWitness extQuotientLincombZZ
** trans (cong lincombEq)
$ getProof extQuotientLincombZZ)
, (a' ** trans (cong lincombEq) divsPrA)) )
}
} {- (bezoutsIdentityZZIfModulo) parameters -}
{-
Goal: Separation of algorithm from verification.
Obstructions: (Problem 1), (Problem 2).
divZ : ZZ -> ZZ -> ZZ
divZ a b = getWitness $ quotientPartZ a b
reverifyDivZ : divZ a b <.> b <+> modZ a b = a
reverifyDivZ {a} {b}
= trans (cong {f=(<+> (a `modZ` b))}
$ getProof $ quotientPartZ a b)
$ groupSubtractionIsRDivision a $ a `modZ` b
||| Greatest Common Denominator (GCD) linear combination factors
euclideanAlgPar : ZZ -> ZZ -> (ZZ, ZZ)
euclideanAlgPar c (Pos Z) = (1, 0)
euclideanAlgPar c d = (b, a <-> b <.> q)
where
eucParReduced : (ZZ, ZZ)
eucParReduced = euclideanAlgPar d (c `modZ` d)
a : ZZ
a = fst eucParReduced
b : ZZ
b = snd eucParReduced
q : ZZ
q = c `divZ` d
||| Greatest Common Denominator (GCD)
euclideanAlg : ZZ -> ZZ -> ZZ
euclideanAlg c d = let (a, b) = euclideanAlgPar c d in a<.>c <+> b<.>d
bezoutsIdentity : (c, d : ZZ) -> uncurry (bezQTy c d) $ euclideanAlgPar c d
---
Problem 1)
rewrite did not change type uncurry (bezQTy c (Pos 0)) (euclideanAlgPar c (Pos 0))
---
bezoutsIdentity c (Pos Z)
= rewrite ringWithUnityIsUnityR c
in rewrite monoidNeutralIsNeutralL c
in ( (Algebra.unity ** ringWithUnityIsUnityR c)
, (Algebra.neutral ** ringNeutralIsMultZeroL c) )
---
Problem 2)
Proof must be repeated for positive and negative halves,
since otherwise the case can't be determined to not be the one for 2nd argument 0.
---
bezoutsIdentity c d@(Pos $ S d') = ?bezoutsIdentity_rhs_2
bezoutsIdentity c d@(NegS d') = ?bezoutsIdentity_rhs_3
-}
{-
The GCD derived from (modZT), itself derived from (modNatT).
-}
bezoutZT :
( c, d : ZZ )
-> ( zpar : (ZZ, ZZ) ** uncurry (bezQTy c d) zpar )
bezoutZT = bezoutsIdentityZZIfModulo modZT quotientPartModZT
|
A polynomial $p$ is a unit if and only if its degree is zero. |
lemma emeasure_scale_measure [simp]: "emeasure (scale_measure r M) A = r * emeasure M A" (is "_ = ?\<mu> A") |
Mr. Belden is a 2001 graduate of Saint Mary's College in Moraga, California, where he received his Paralegal Certification and his bachelor's degree in Law Studies.
His professional experience includes twenty-three years of service for Bank of America, the last twelve of which were as Assistant Banking Center Manager in various locations. Mr. Belden also worked for the Civil Justice Association of California reviewing Appellate Court cases and summarizing appropriate cases for inclusion in a monthly newsletter called the Monthly Appellate Reports (MARS). In March of 2003, Mr. Belden joined the law offices of BAKERINK & McCUSKER as a Law Clerk, where he worked extensively in the area of Estate Planning, Trust and Probate law under the direction of Mr. Bakerink.
In 2005 Mr. Belden graduated from University of the Pacific, McGeorge School of Law, and in June 2006 was admitted to the California State Bar. Dedicated to client education and counseling, Mr. Belden is a member of the Wealth Counsel. Mr. Belden's practice emphasis is in Estate Planning, Probate, Trust Administration, and Bankruptcy matters. In January of 2008, the partnership of professional law corporations of BAKERINK, McCUSKER & BELDEN was established. Mr. Belden became a Certified Specialist in Estate Planning, Trust & Probate Law by the State Bar of California Board of Legal Specialization in August 2014.
Mr. Belden is a member of the Tracy Sunrise Rotary and a Past President. He is a past member of the Board of Governors for the San Joaquin County Bar Association. Mr. Belden was an adjunct professor at Humphrey's College School of Law, teaching wills. Michael and his wife Paula have been married for thirty-five years and have resided in Tracy for the past twenty-one years. Mike and Paula have a daughter, Stephanie, who is a West High School graduate. Stephanie currently resides in Collegeville, Pennsylvania, with her husband Josh Roethlisberger. Stephanie works as a school teacher and Josh is in the Air Force. |
theory prop_24
imports Main
"../../TestTheories/Listi"
"../../TestTheories/Naturals"
"$HIPSTER_HOME/IsaHipster"
begin
theorem elemCount: "elem t ts \<Longrightarrow> lt Z (count t ts)"
by hipster_induct_simp_metis
end
|
-- A predicate transformer allowing one to express that
-- some value definitely does /not/ own some resource
infixl 9 _β_
data _β_ {p} (P : SPred p) (Ξ¦α΅’ : Carrier) : SPred (p β a) where
βͺ_,_β« : β {Ξ¦β Ξ¦} β P Ξ¦β β Ξ¦α΅’ β Ξ¦β β£ Ξ¦ β (P β Ξ¦α΅’) Ξ¦
-- | This gives another wand like thing
module _ {p q} (P : SPred p) (Q : SPred q) where
infixr 8 _ββ_
_ββ_ : SPred (p β q β a)
_ββ_ Ξ¦α΅’ = β[ P β Ξ¦α΅’ β Q ]
module _ {p q} {P : SPred p} {Q : SPred q} where
pair : Ξ΅[ P ββ (Q ββ P β΄ Q) ]
pair βͺ px , Οβ β« βͺ qx , Οβ β« rewrite β-idβ»Λ‘ Οβ = px Γβ¨ Οβ β© qx
module _ {p} {P : SPred p} where
β-Ξ΅ : β[ P β Ξ΅ β P ]
β-Ξ΅ βͺ px , Ο β« rewrite β-idβ»Λ‘ Ο = px
-- pure : β {p q} {P : SPred p} {Q : SPred q} β (P Ξ΅ β Q Ξ¦) β (P ββ΄ Q) Ξ¦
-- pure f px = {!!}
-- -- pure = {!!}
-- a pure wand is a resource-polymorphic function
-- unwand : Ξ΅[ P ββ΄ Q ] β β[ P β Q ]
-- unwand f p = f p β-idΛ‘
-- β΄-pure : β {p q} {P : SPred p} {Q : SPred q} β (β {Ξ¦} β P Ξ¦ β Ξ΅ β Ξ¦ β£ Ξ¦ β Q Ξ¦) β Ξ΅[ P ββ΄ Q ]
-- β΄-pure f px Ο rewrite β-idβ»Λ‘ Ο = f px β-idΛ‘
-- β΄-flip : β {p q r} {P : SPred p} {Q : SPred q} {R : SPred r} β Ξ΅[ (P ββ΄ (Q ββ΄ R)) ββ΄ (Q ββ΄ (P ββ΄ R)) ]
-- β΄-flip {P = P} {Q} {R} =
-- β΄-pure {P = P ββ΄ (Q ββ΄ R)} {Q = Q ββ΄ (P ββ΄ R)} Ξ» f Οβ q Οβ p Οβ β
-- let _ , Οβ , Οβ = β-assoc (β-comm Οβ) Οβ in f p Οβ q (β-comm Οβ)
-- β[id] : β {p} {P : Pred _ p} β Ξ΅[ P ββ΄ P ]
-- β[id] px Ο rewrite β-idβ»Λ‘ Ο = px
|
## methods.jl : refactored methods from run.jl
#
# Implements refactored methods from run.jl.
#
# This file is part of MultilevelEstimators.jl - A Julia toolbox for
# Multilevel Monte Carlo Methods (c) Pieterjan Robbe, 2019
#
# basics
#
ndims(::Estimator{<:AbstractIndexSet{d}}) where d = d
get_index_set(estimator::Estimator, sz) = get_index_set(estimator.index_set, sz)
get_tols(estimator::Estimator, tol::T) where T<:Real = estimator[:continuate] ? estimator[:continuation_mul_factor].^(estimator[:nb_of_tols]-1:-1:0)*tol : T[tol]
mse(estimator::Estimator) = varest(estimator) + bias(estimator)^2
rmse(estimator::Estimator) = sqrt(mse(estimator))
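# convergence check: either the squared bias fits within the (1-θ)-fraction of ϵ^2, or the full MSE is already below ϵ^2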
converged(estimator::Estimator, ϵ::Real, θ::Real) = ( bias(estimator)^2 ≤ (1-θ)*ϵ^2 || mse(estimator) ≤ ϵ^2 )
max_level_exceeded(estimator::Estimator) = sz(estimator) ≥ estimator[:max_index_set_param]
#
# inspector functions: mean, var, varest...
#
qoi_with_max_var(estimator::Estimator{<:AbstractIndexSet, <:MC}) = estimator[:qoi_with_max_var] == 0 ? argmax(map(n_qoi -> sum(var(samples_diff(estimator, n_qoi, index)) for index in keys(estimator)), 1:estimator[:nb_of_qoi])) : estimator[:qoi_with_max_var]
cost(estimator::Estimator, index::Index) = estimator[:cost_model] isa EmptyFunction ? time(estimator, index) : work(estimator, index)
for f in [:mean, :var]
@eval begin
$f(estimator::Estimator{<:AbstractIndexSet, <:MC}, index::Index) = $f(samples_diff(estimator, qoi_with_max_var(estimator), index))
$(Symbol(f, 0))(estimator::Estimator{<:AbstractIndexSet, <:MC}, index::Index) = $f(samples(estimator, qoi_with_max_var(estimator), index))
$f(estimator::Estimator{<:AbstractIndexSet, <:QMC}, index::Index) = mean($f(samples_diff(estimator, qoi_with_max_var(estimator), shift, index)) for shift in 1:estimator[:nb_of_shifts](index))
$(Symbol(f, 0))(estimator::Estimator{<:AbstractIndexSet, <:QMC}, index::Index) = mean($f(samples(estimator, qoi_with_max_var(estimator), shift, index)) for shift in 1:estimator[:nb_of_shifts](index))
$f(estimator::Estimator) = sum($f(estimator, index) for index in keys(estimator))
end
end
varest(estimator::Estimator, index::Index) = var(estimator, index)/nb_of_samples(estimator, index)
varest(estimator::Estimator) = sum(varest(estimator, index) for index in keys(estimator))
#
# rates
#
for (f, g, sgn) in zip([:α, :β, :γ], [:mean, :var, :cost], [-1, -1, 1])
@eval begin
$f(estimator::Estimator{<:SL}) = nothing
$f(estimator::Estimator{<:AbstractIndexSet}) = $sgn.*getindex.(broadcast(i->$(Symbol("rates_", f))(estimator, i), 1:ndims(estimator)), 2)
$(Symbol("rates_", f))(estimator::Estimator{<:AbstractML}) = $(Symbol("rates_", f))(estimator, 1)
$(Symbol("rates_", f))(estimator::Estimator{<:AbstractIndexSet}, dir::Integer) = $(Symbol("rates_", f))(estimator, (maximum(getindex.(keys(estimator), dir)) + 1) * Index(ntuple(i -> i == dir, ndims(estimator))), dir)
function $(Symbol("rates_", f))(estimator::Estimator{<:AbstractIndexSet}, idx::Index, dir::Integer)
m = idx[dir] - 1
if m < 2
return (NaN, NaN)
else
x = m:-1:1
y = map(xᵢ -> $g(estimator, idx - xᵢ * Index(ntuple(i -> i == dir, ndims(estimator)))), 1:m)
idcs = .!(isnan.(y) .| iszero.(y))
θ = interp1(view(x, idcs), log2.(abs.(view(y, idcs))))
return tuple(θ...)
end
end
function $(Symbol("_regress_", g))(estimator::Estimator{<:AbstractIndexSet, <:MC}, index::Index)
p = broadcast(dir->$(Symbol("rates_", f))(estimator, index, dir), 1:ndims(estimator))
estimates = broadcast(dir->2^(p[dir][1]+index[dir]*p[dir][2]), 1:ndims(estimator))
estimate = mean(filter(!isnan, estimates))
if isnan(estimate)
p = interp($g, estimator)
return 2^(p[1]+sum(p[2:end].*index.I))
else
return estimate
end
end
end
end
#
# regression
#
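# interp1 fits a least-squares straight line through (x, y) and returns [intercept, slope];
# for example interp1([1, 2, 3], [2.0, 4.0, 6.0]) ≈ [0.0, 2.0] (illustrative sanity check, not part of the original file)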
function interp1(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})
A = hcat(ones(eltype(x), length(x)), x)
A\y
end
function interp(f::Function, estimator::Estimator)
filter_fcn = estimator isa Estimator{<:AbstractIndexSet, QMC} ? i -> !isempty(samples(estimator)[1][i]) : i -> length(samples(estimator)[1][i]) > 1
idx_set = filter(filter_fcn, CartesianIndices(size(samples(estimator)[1])))
A = [i == 0 ? 1 : getindex(index - one(index), i) for index in idx_set, i in 0:ndims(estimator)]
y = map(i -> log2(f(estimator, i - one(i))), idx_set)
try
return A \ y
catch e
return fill(NaN, length(y))
end
end
regress_mean(estimator, index) = _regress_mean(estimator, index)
regress_var(estimator, index) = _regress_var(estimator, index)
regress_cost(estimator, index) = estimator[:cost_model] isa EmptyFunction ? _regress_cost(estimator, index) : estimator[:cost_model](index)
regress_nb_of_samples(estimator::Estimator{<:SL}, index_set, ϵ::Real, θ::Real, L::Integer) = Dict(Level(0) => estimator[:nb_of_warm_up_samples])
regress_nb_of_samples(estimator::Estimator{<:SL, <:QMC}, index_set, ϵ::Real, θ::Real, L::Integer) = Dict(Level(0) => estimator[:nb_of_warm_up_samples])
regress_nb_of_samples(estimator::Estimator{<:AbstractIndexSet, <:QMC}, index_set, ϵ::Real, θ::Real, L::Integer) = Dict(index => estimator[:nb_of_warm_up_samples] for index in index_set)
function regress_nb_of_samples(estimator::Estimator, index_set, ϵ::Real, θ::Real, L::Integer)
if estimator[:do_regression] && L > 2
return _regress_nb_of_samples(estimator, index_set, ϵ, θ)
else
return Dict(index => estimator[:nb_of_warm_up_samples] for index in index_set)
end
end
function _regress_nb_of_samples(estimator::Estimator{<:AbstractIndexSet, <:MC}, index_set, ϵ::Real, θ::Real)
vars = Dict(index => regress_var(estimator, index) for index in index_set)
costs = Dict(index => regress_cost(estimator, index) for index in index_set)
Σ_estimate = Σ(estimator)
for index in keys(vars)
Σ_estimate += sqrt(vars[index] * costs[index])
end
Dict(index => begin
n_opt = optimal_nb_of_samples(ϵ, θ, vars[index], costs[index], Σ_estimate)
max(2, min(n_opt, estimator[:nb_of_warm_up_samples]))
end for index in index_set)
end
function compute_splitting(estimator::Estimator, ϵ::Real)
bias_estimate = bias(estimator, max_sz(estimator))
a = estimator[:min_splitting]
b = estimator[:max_splitting]
splitting = 1 - bias_estimate^2/ϵ^2
isnan(splitting) ? a : min(b, max(a, splitting))
end
Σ(estimator::Estimator) = sum(sqrt.(map(index -> var(estimator, index) * cost(estimator, index), keys(estimator))))
optimal_nb_of_samples(estimator::Estimator, index::Index, ϵ::Real, θ::Real) = optimal_nb_of_samples(ϵ, θ, var(estimator, index), cost(estimator, index), Σ(estimator))
optimal_nb_of_samples(ϵ::Real, θ::Real, var_estimate::Real, cost_estimate::Real, Σ_estimate::Real) = ceil(Int, 1/(θ*ϵ^2) * sqrt(var_estimate/cost_estimate) * Σ_estimate)
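# worked example with illustrative values (not from the package): for ϵ = 0.01, θ = 0.5, var_estimate = 1e-4,
# cost_estimate = 1.0 and Σ_estimate = 0.01 the formula gives ceil(20_000 * 0.01 * 0.01) = 2 samples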
#
# bias computation
#
boundary(estimator::Estimator, cntr::Integer) = setdiff(get_index_set(estimator, cntr), get_index_set(estimator, cntr-1))
new_index_set(estimator::Estimator, cntr::Integer) = boundary(estimator, cntr)
bias(estimator::Estimator{<:SL}) = 0.0
bias(estimator::Estimator) = bias(estimator, sz(estimator))
function bias(estimator::Estimator, sz::Integer)
if !isempty(boundary(estimator, sz + 1) ∩ keys(estimator)) && !estimator[:robustify_bias_estimate]
return abs(sum(broadcast(i -> mean(estimator, i), boundary(estimator, sz + 1))))
else
x = max(1, sz - 2):sz
y = Float64[log2(abs(sum(broadcast(i -> mean(estimator, i), boundary(estimator, xᵢ))))) for xᵢ in x]
idcs = isfinite.(y)
p = interp1(x[idcs], y[idcs])
return 2^(p[1]+(sz+1)*p[2])
end
end
#
# adaptivity
#
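# the profit of an index weighs its expected contribution |mean| against the error-versus-work measure
# sqrt(var * cost), raised to the :penalization exponent; the adaptive loop refines the most profitable active index first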
profit(estimator::Estimator{<:AD}, index::Index) = abs(mean(estimator, index)) / sqrt(var(estimator, index) * cost(estimator, index))^estimator[:penalization]
max_level_exceeded(estimator::Estimator{<:AD}) = isempty(setdiff(get_index_set(estimator[:max_search_space], estimator[:max_index_set_param]), keys(estimator)))
function find_index_with_max_profit(estimator::Estimator{<:AD})
indices = collect(active_set(estimator))
profits = [profit(estimator, index) for index in indices]
(max_profit, idx) = findmax(profits)
if rand() < estimator[:acceptance_rate] || length(profits)==1
max_index = indices[idx]
else
max_index = indices[rand(deleteat!(collect(1:length(profits)), idx))]
end
estimator[:verbose] && print_largest_profit(estimator, max_index, max_profit, indices, profits)
return max_index
end
function new_index_set(estimator::Estimator{<:AD}, sz::Integer)
d = ndims(estimator)
if isempty(active_set(estimator))
new_indices = Set{Index{d}}()
max_index = Index(ntuple(i -> 0, d))
add_to_active_set(estimator, max_index)
push!(new_indices, max_index)
else
max_index = find_index_with_max_profit(estimator)
add_to_old_set(estimator, max_index)
remove_from_active_set(estimator, max_index)
new_indices = Set{Index{d}}()
push!(new_indices, max_index)
for k in 1:d
new_index = max_index + Index(ntuple(i -> i == k, d))
if is_admissable(estimator, new_index)
if new_index ∈ get_index_set(estimator[:max_search_space], estimator[:max_index_set_param])
add_to_active_set(estimator, new_index)
push!(new_indices, new_index)
else
warn_max_index(estimator, new_index)
add_to_max_index_set(estimator, new_index)
end
end
end
end
log_adaptive_index_set(estimator, max_index)
return new_indices
end
function bias(estimator::Estimator{<:AD}, sz::Integer)
b(indices) = abs(sum(broadcast(i -> mean(estimator, i), collect(indices))))
if sz != max_sz(estimator)
return b(active_set(estimator))
else
return min(b(active_set(estimator)), b(boundary(estimator)))
end
end
#
# QMC related functions
#
qoi_with_max_var(estimator::Estimator{<:AbstractIndexSet, <:QMC}) = estimator[:qoi_with_max_var] == 0 ? argmax(map(n_qoi -> sum(mean(var(samples_diff(estimator, n_qoi, n_shift, index)) for n_shift in 1:estimator[:nb_of_shifts](index)) for index in keys(estimator)), 1:estimator[:nb_of_qoi])) : estimator[:qoi_with_max_var]
function next_number_of_samples(estimator, index)
if estimator[:sample_mul_factor] == 2
Dict(index => nextpow(2, nb_of_samples(estimator, index) + 1))
elseif estimator[:sample_mul_factor] ≤ 1
Dict(index => nb_of_samples(estimator, index) + 1)
else
Dict(index => ceil(Int, nb_of_samples(estimator, index) * estimator[:sample_mul_factor]))
end
end
function find_index_with_max_var_over_cost(estimator::Estimator{<:AbstractIndexSet, <:QMC})
indices = collect(keys(estimator))
vars = [varest(estimator, index) / cost(estimator, index) for index in indices]
(max_var, idx) = findmax(vars)
indices[idx]
end
varest(estimator::Estimator{<:AbstractIndexSet, <:QMC}, index::Index) = var((mean(samples_diff(estimator, qoi_with_max_var(estimator), n_shift, index)) for n_shift in 1:estimator[:nb_of_shifts](index)); corrected=true) / estimator[:nb_of_shifts](index)
#
# Unbiased estimation
#
qoi_with_max_var(estimator::Estimator{<:U, <:MC}) = estimator[:qoi_with_max_var] == 0 ? argmax(map(n_qoi -> var(accumulator(estimator, n_qoi)) / length(accumulator(estimator, n_qoi)), 1:estimator[:nb_of_qoi])) : estimator[:qoi_with_max_var]
qoi_with_max_var(estimator::Estimator{<:U, <:QMC}) = estimator[:qoi_with_max_var] == 0 ? argmax(map(n_qoi -> var(mean(accumulator(estimator, n_qoi, n_shift)) for n_shift in 1:size(accumulator(estimator), 2)) / estimator[:nb_of_qoi], 1:estimator[:nb_of_qoi])) : estimator[:qoi_with_max_var]
function next_number_of_samples(estimator::Estimator{<:U})
n_total = sum(values(nb_of_samples(estimator)))
n0 = max(estimator[:nb_of_warm_up_samples], n_total)
n = estimator[:sample_mul_factor] ≤ 1 ? 1 : ceil(Int, n0 * (estimator[:sample_mul_factor] - 1))
sample(pmf(estimator), n)
end
function sample(pmf::Dict{Index{d}, Float64}, n::Integer) where d
u = rand(n)
x = Vector{keytype(pmf)}(undef, n)
psum = 0.
for (index, p) in pmf
for i in 1:length(x)
if psum ≤ u[i] ≤ psum + p
x[i] = index
end
end
psum = psum + p
end
Dict(index => sum(x .== Ref(index)) for index in keys(pmf))
end
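# illustrative use with a hypothetical two-point pmf: sample(Dict(Level(0) => 0.75, Level(1) => 0.25), 1000)
# returns a Dict counting how many of the 1000 uniform draws fell in each index's probability band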
Geometric(p, k) = (1 - p)^k*p
function update_pmf(estimator::Estimator{<:U})
f(estimator, index) = sqrt(var(estimator, index)/cost(estimator, index))
p = interp(f, estimator)
if !any(isnan.(p)) && all(p[2:end] .< 0)
for index in keys(estimator)
if isnan(f(estimator, index)) || isinf(f(estimator, index))
set_pmf_key(estimator, index, 2^(p[1]+sum(p[2:end].*index.I)))
else
set_pmf_key(estimator, index, f(estimator, index))
end
end
normalize!(pmf(estimator))
end
end
function normalize!(pmf::Dict{<:Index, Float64})
tot_sum = sum(values(pmf))
for (key, val) in pmf
pmf[key] /= tot_sum
end
end
Prob(estimator::Estimator{<:U}, index::Index) = sum(val for (key, val) in pmf(estimator) if key ≥ index)
Prob(estimator::Estimator{<:U}) = Dict(index => Prob(estimator, index) for index in keys(estimator))
varest(estimator::Estimator{<:U, <:MC}) = var(accumulator(estimator, qoi_with_max_var(estimator))) / length(accumulator(estimator, qoi_with_max_var(estimator)))
varest(estimator::Estimator{<:U, <:QMC}) = var(mean(accumulator(estimator, qoi_with_max_var(estimator), n_shift)) for n_shift in 1:size(accumulator(estimator), 2)) / estimator[:nb_of_qoi]
bias(estimator::Estimator{<:U}) = 0.0
converged(estimator::Estimator{<:U}, ϵ::Real) = mse(estimator) ≤ ϵ^2
mean(estimator::Estimator{<:U}) = mean(accumulator(estimator, qoi_with_max_var(estimator)))
mean0(estimator::Estimator{<:U}) = mean0(accumulator(estimator, qoi_with_max_var(estimator)))
var(estimator::Estimator{<:U}) = var(accumulator(estimator, qoi_with_max_var(estimator)))
var0(estimator::Estimator{<:U}) = var0(accumulator(estimator, qoi_with_max_var(estimator)))
|
{-# OPTIONS_GHC -fsimpl-tick-factor=150 #-}
{-# LANGUAGE BangPatterns, RecordWildCards #-}
module Network.HTTP.LoadTest.Analysis
(
-- * Result analysis
Analysis(..)
, Basic(..)
, analyseBasic
, analyseFull
) where
import Criterion.Analysis (SampleAnalysis, analyseSample)
import Network.HTTP.LoadTest.Types (Analysis(..), Basic(..), Summary(..))
import Prelude hiding (catch)
import Statistics.Quantile (weightedAvg)
import qualified Data.Vector as V
import qualified Data.Vector.Generic as G
import qualified Statistics.Sample as S
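-- Note (added for clarity, not in the original source): 'weightedAvg k q' from Statistics.Quantile
-- estimates the k-th q-quantile, so the "weightedAvg 99 100" and "weightedAvg 999 1000" calls below
-- are the 99th and 99.9th percentile latencies.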
analyseFull :: V.Vector Summary -> Double -> IO (Analysis SampleAnalysis)
analyseFull sumv elapsed = do
let ci = 0.95
resamples = 10 * 1000
l <- analyseSample ci (G.convert . G.map summElapsed $ sumv) resamples
return Analysis {
latency = l
, latency99 = weightedAvg 99 100 . G.map summElapsed $ sumv
, latency999 = weightedAvg 999 1000 . G.map summElapsed $ sumv
, latValues = sumv
, throughput = fromIntegral (G.length sumv) / elapsed
}
analyseBasic :: V.Vector Summary -> Double -> Analysis Basic
analyseBasic sumv elapsed = Analysis {
latency = Basic {
mean = S.mean . G.map summElapsed $ sumv
, stdDev = S.stdDev . G.map summElapsed $ sumv
}
, latency99 = weightedAvg 99 100 . G.map summElapsed $ sumv
, latency999 = weightedAvg 999 1000 . G.map summElapsed $ sumv
, latValues = sumv
, throughput = fromIntegral (G.length sumv) / elapsed
}
|
(* This file is distributed under the terms of the MIT License, also
known as the X11 Licence. A copy of this license is in the README
file that accompanied the original distribution of this file.
Based on code written by:
Brian Aydemir
Arthur Charg\'eraud *)
Require Import Coq.Arith.Arith.
Require Import Coq.Arith.Max.
Require Import Coq.Classes.EquivDec.
Require Import Coq.Lists.List.
Require Import Coq.Structures.Equalities.
Require Import Coq.FSets.FSets.
Require Import Metalib.CoqListFacts.
Require Import Metalib.FSetExtra.
Require Import Metalib.FSetWeakNotin.
Require Import Metalib.LibTactics.
Require Import Lia.
(* ********************************************************************** *)
(** * Defining atoms *)
(** Atoms are structureless objects such that we can always generate
one fresh from a finite collection. Equality on atoms is [eq] and
decidable. We use Coq's module system to make abstract the
implementation of atoms. *)
Module Type ATOM <: UsualDecidableType.
Parameter atom : Set.
Definition t := atom.
Parameter eq_dec : forall x y : atom, {x = y} + {x <> y}.
Parameter atom_fresh_for_list :
forall (xs : list t), {x : atom | ~ List.In x xs}.
Parameter fresh : list atom -> atom.
Parameter fresh_not_in : forall l, ~ In (fresh l) l.
Parameter nat_of : atom -> nat.
#[global]
Hint Resolve eq_dec : core.
Include HasUsualEq <+ UsualIsEq <+ UsualIsEqOrig.
End ATOM.
(** The implementation of the above interface is hidden for
documentation purposes. *)
Module Atom : ATOM.
(* begin hide *)
Definition atom := nat.
Definition t := atom.
Definition eq_dec := eq_nat_dec.
Lemma max_lt_r : forall x y z,
x <= z -> x <= max y z.
Proof.
induction x. auto with arith.
induction y. auto with arith.
simpl. induction z. lia. auto with arith.
Qed.
Lemma nat_list_max : forall (xs : list nat),
{ n : nat | forall x, List.In x xs -> x <= n }.
Proof.
induction xs as [ | x xs [y H] ].
(* case: nil *)
exists 0. inversion 1.
(* case: cons x xs *)
exists (max x y). intros z J. simpl in J. destruct J as [K | K].
subst. auto with arith.
auto using max_lt_r.
Qed.
Lemma atom_fresh_for_list :
forall (xs : list nat), { n : nat | ~ List.In n xs }.
Proof.
intros xs. destruct (nat_list_max xs) as [x H].
exists (S x). intros J. lapply (H (S x)). lia. trivial.
Qed.
Definition fresh (l : list atom) :=
match atom_fresh_for_list l with
(exist _ x _) => x
end.
Lemma fresh_not_in : forall l, ~ In (fresh l) l.
Proof.
intro l. unfold fresh.
destruct atom_fresh_for_list. auto.
Qed.
Definition nat_of := fun (x : atom) => x.
Include HasUsualEq <+ UsualIsEq <+ UsualIsEqOrig.
(* end hide *)
End Atom.
(** We make [atom], [fresh], [fresh_not_in] and [atom_fresh_for_list] available
without qualification. *)
Notation atom := Atom.atom.
Notation fresh := Atom.fresh.
Notation fresh_not_in := Atom.fresh_not_in.
Notation atom_fresh_for_list := Atom.atom_fresh_for_list.
(* Automatically unfold Atom.eq *)
Global Arguments Atom.eq /.
(** It is trivial to declare an instance of [EqDec] for [atom]. *)
#[export] Instance EqDec_atom : @EqDec atom eq eq_equivalence.
Proof. exact Atom.eq_dec. Defined.
(* ********************************************************************** *)
(** * Finite sets of atoms *)
(** We use our implementation of atoms to obtain an implementation of
finite sets of atoms. We give the resulting type an intuitive
name, as well as import names of set operations for use within
this library. In order to avoid polluting Coq's namespace, we do
not use [Module Export]. *)
Module Import AtomSetImpl : FSetExtra.WSfun Atom :=
FSetExtra.Make Atom.
Notation atoms :=
AtomSetImpl.t.
(** The [AtomSetDecide] module provides the [fsetdec] tactic for
solving facts about finite sets of atoms. *)
Module Export AtomSetDecide := Coq.FSets.FSetDecide.WDecide_fun Atom AtomSetImpl.
(** The [AtomSetNotin] module provides the [destruct_notin] and
[solve_notin] for reasoning about non-membership in finite sets of
atoms, as well as a variety of lemmas about non-membership. *)
Module Export AtomSetNotin := FSetWeakNotin.Notin_fun Atom AtomSetImpl.
(** Given the [fsetdec] tactic, we typically do not need to refer to
specific lemmas about finite sets. However, instantiating
functors from the FSets library makes a number of setoid rewrites
available. These rewrites are crucial to developments since they
allow us to replace a set with an extensionally equal set (see the
[Equal] relation on finite sets) in propositions about finite
sets. *)
Module AtomSetFacts := FSetFacts.WFacts_fun Atom AtomSetImpl.
Module AtomSetProperties := FSetProperties.WProperties_fun Atom AtomSetImpl.
Export AtomSetFacts.
(* ********************************************************************** *)
(** * Properties *)
(** For any given finite set of atoms, we can generate an atom fresh
for it. *)
Lemma atom_fresh : forall L : atoms, { x : atom | ~ In x L }.
Proof.
intros L. destruct (atom_fresh_for_list (elements L)) as [a H].
exists a. intros J. contradiction H.
rewrite <- CoqListFacts.InA_iff_In. auto using elements_1.
Qed.
(* ********************************************************************** *)
(** * Tactic support for picking fresh atoms *)
(* begin hide *)
(** The auxiliary tactic [simplify_list_of_atom_sets] takes a list of
finite sets of atoms and unions everything together, returning the
resulting single finite set. *)
Ltac simplify_list_of_atom_sets L :=
let L := eval simpl in L in
let L := ltac_remove_dups L in
let L := eval simpl in (List.fold_right union empty L) in
match L with
| context C [union ?E empty] => context C [ E ]
end.
(* end hide *)
(** [gather_atoms_with F] returns the union of all the finite sets
[F x] where [x] is a variable from the context such that [F x]
type checks. *)
Ltac gather_atoms_with F :=
let apply_arg x :=
match type of F with
| _ -> _ -> _ -> _ => constr:(@F _ _ x)
| _ -> _ -> _ => constr:(@F _ x)
| _ -> _ => constr:(@F x)
end in
let rec gather V :=
match goal with
| H : _ |- _ =>
let FH := apply_arg H in
match V with
| context [FH] => fail 1
| _ => gather (union FH V)
end
| _ => V
end in
let L := gather empty in eval simpl in L.
(** [beautify_fset V] assumes that [V] is built as a union of finite
sets and returns the same set cleaned up: empty sets are removed
and items are laid out in a nicely parenthesized way. *)
Ltac beautify_fset V :=
let rec go Acc E :=
match E with
| union ?E1 ?E2 => let Acc2 := go Acc E2 in go Acc2 E1
| empty => Acc
| ?E1 => match Acc with
| empty => E1
| _ => constr:(union E1 Acc)
end
end
in go empty V.
(** The tactic [pick fresh Y for L] takes a finite set of atoms [L]
and a fresh name [Y], and adds to the context an atom with name
[Y] and a proof that [~ In Y L], i.e., that [Y] is fresh for [L].
The tactic will fail if [Y] is already declared in the context.
The variant [pick fresh Y] is similar, except that [Y] is fresh
for "all atoms in the context." This version depends on the
tactic [gather_atoms], which is responsible for returning the set
of "all atoms in the context." By default, it returns the empty
set, but users are free (and expected) to redefine it. *)
Ltac gather_atoms :=
constr:(empty).
Tactic Notation "pick" "fresh" ident(Y) "for" constr(L) :=
let Fr := fresh "Fr" in
let L := beautify_fset L in
(destruct (atom_fresh L) as [Y Fr]).
Tactic Notation "pick" "fresh" ident(Y) :=
let L := gather_atoms in
pick fresh Y for L.
Ltac pick_fresh y :=
pick fresh y.
(** Example: We can redefine [gather_atoms] to return all the
"obvious" atoms in the context using the [gather_atoms_with] thus
giving us a "useful" version of the "[pick fresh]" tactic. *)
Ltac gather_atoms ::=
let A := gather_atoms_with (fun x : atoms => x) in
let B := gather_atoms_with (fun x : atom => singleton x) in
constr:(union A B).
Lemma example_pick_fresh_use : forall (x y z : atom) (L1 L2 L3: atoms), True.
(* begin show *)
Proof.
intros x y z L1 L2 L3.
pick fresh k.
(** At this point in the proof, we have a new atom [k] and a
hypothesis [Fr] that [k] is fresh for [x], [y], [z], [L1], [L2],
and [L3]. *)
trivial.
Qed.
(* end show *)
|
From Parsec Require Export
Parser.
From ExtLib Require Export
Extras.
From JSON Require Export
Jpath.
From Coq Require Export
ssr.ssrfun.
From AsyncTest Require Export
Trace
Common.
Export
FunNotation.
Open Scope parser_scope.
Inductive jexp :=
Jexp__Const : IR -> jexp
| Jexp__Array : list jexp -> jexp
| Jexp__Object : list (string * jexp) -> jexp
| Jexp__Ref : labelT -> jpath -> (IR -> IR) -> jexp.
Definition nth_weak (n : nat) (j : IR)
: option IR :=
if j is JSON__Array l then
get_nth n j <|> last (map Some l) None
else None.
Fixpoint jget_weak (p : jpath) (j : IR) : option IR :=
match p with
| Jpath__This => Some j
| Jpath__Array p' n => jget_weak p' j >>= nth_weak n
| Jpath__Object p' s => jget_weak p' j >>= get_json' s
end.
Example tget_strong (l : labelT) (p : jpath) (t : traceT) : IR :=
odflt (JSON__Object []) $ packet__payload <$> get l t >>= jget p.
Definition tget_weak' (jget : jpath -> IR -> option IR)
(l : labelT) (p : jpath) (t : traceT) : IR :=
odflt (last (pick_some $ map (jget p ∘ packet__payload ∘ snd) t) $ JSON__Object []) $
packet__payload <$> get l t >>= jget p.
Definition tget_weak : labelT -> jpath -> traceT -> IR := tget_weak' jget_weak.
Fixpoint jexp_to_IR' (tget : labelT -> jpath -> traceT -> IR)
(t : traceT) (e : jexp) : IR :=
match e with
| Jexp__Const j => j
| Jexp__Array l => JSON__Array $ map (jexp_to_IR' tget t) l
| Jexp__Object m => JSON__Object $ map_snd (jexp_to_IR' tget t) m
| Jexp__Ref l p f => f $ tget l p t
end.
Example jexp_to_IR_strong : traceT -> jexp -> IR := jexp_to_IR' tget_strong.
Definition jexp_to_IR_weak : traceT -> jexp -> IR := jexp_to_IR' tget_weak.
Definition findpath' (p : jpath) : traceT -> list labelT :=
fmap fst ∘ filter (fun lj => if jget_weak p (packet__payload $ snd lj) is Some _
then true else false).
Definition findpath (p : jpath) (f : IR -> IR) (t : traceT) : list jexp :=
l <- findpath' p t;; [Jexp__Ref l p f].
Fixpoint IR_to_jexp (j : IR) : jexp :=
match j with
| JSON__Array l => Jexp__Array (map IR_to_jexp l)
| JSON__Object l => Jexp__Object (map_snd IR_to_jexp l)
| _ => Jexp__Const j
end.
Fixpoint normalise (e : jexp) : jexp :=
match e with
| Jexp__Const j => IR_to_jexp j
| Jexp__Array l => Jexp__Array (map normalise l)
| Jexp__Object l => Jexp__Object (map_snd normalise l)
| _ => e
end.
|
[STATEMENT]
lemma Spy_see_private_Key [simp]:
"evs \<in> set_pur
==> (Key(invKey (publicKey b A)) \<in> parts(knows Spy evs)) = (A \<in> bad)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. evs \<in> set_pur \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy evs)) = (A \<in> bad)
[PROOF STEP]
apply (erule set_pur.induct)
[PROOF STATE]
proof (prove)
goal (12 subgoals):
1. (Key (invKey (publicKey b A)) \<in> parts (knows Spy [])) = (A \<in> bad)
2. \<And>evsf X B. \<lbrakk>evsf \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsf)) = (A \<in> bad); X \<in> synth (analz (knows Spy evsf))\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says Spy B X # evsf))) = (A \<in> bad)
3. \<And>evsr Aa B X. \<lbrakk>evsr \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsr)) = (A \<in> bad); Says Aa B X \<in> set evsr\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Gets B X # evsr))) = (A \<in> bad)
4. \<And>evsStart LID_M C k M i P j Transaction OrderDesc PurchAmt. \<lbrakk>evsStart \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsStart)) = (A \<in> bad); Number LID_M \<notin> used evsStart; C = Cardholder k; M = Merchant i; P = PG j; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; LID_M \<notin> range CardSecret; LID_M \<notin> range PANSecret\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Notes C \<lbrace>Number LID_M, Transaction\<rbrace> # Notes M \<lbrace>Number LID_M, Agent P, Transaction\<rbrace> # evsStart))) = (A \<in> bad)
5. \<And>evsPIReq Transaction M C OrderDesc PurchAmt Chall_C LID_M. \<lbrakk>evsPIReq \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsPIReq)) = (A \<in> bad); Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; Nonce Chall_C \<notin> used evsPIReq; Chall_C \<notin> range CardSecret; Chall_C \<notin> range PANSecret; Notes C \<lbrace>Number LID_M, Transaction\<rbrace> \<in> set evsPIReq\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says C M \<lbrace>Number LID_M, Nonce Chall_C\<rbrace> # evsPIReq))) = (A \<in> bad)
6. \<And>evsPIRes M LID_M Chall_C Transaction C OrderDesc PurchAmt P Chall_M XID. \<lbrakk>evsPIRes \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsPIRes)) = (A \<in> bad); Gets M \<lbrace>Number LID_M, Nonce Chall_C\<rbrace> \<in> set evsPIRes; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; Notes M \<lbrace>Number LID_M, Agent P, Transaction\<rbrace> \<in> set evsPIRes; Nonce Chall_M \<notin> used evsPIRes; Chall_M \<notin> range CardSecret; Chall_M \<notin> range PANSecret; Number XID \<notin> used evsPIRes; XID \<notin> range CardSecret; XID \<notin> range PANSecret\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says M C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P (pubEK P) onlyEnc (priSK RCA)\<rbrace>) # evsPIRes))) = (A \<in> bad)
7. \<And>C k KC1 Transaction M HOD OIData LID_M PIHead EKj Chall_C Chall_M OrderDesc P PurchAmt XID evsPReqU. \<lbrakk>evsPReqU \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsPReqU)) = (A \<in> bad); C = Cardholder k; CardSecret k = 0; Key KC1 \<notin> used evsPReqU; KC1 \<in> symKeys; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; HOD = Hash \<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>; OIData = \<lbrace>Number LID_M, Number XID, Nonce Chall_C, HOD, Nonce Chall_M\<rbrace>; PIHead = \<lbrace>Number LID_M, Number XID, HOD, Number PurchAmt, Agent M\<rbrace>; Gets C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P EKj onlyEnc (priSK RCA)\<rbrace>) \<in> set evsPReqU; Says C M \<lbrace>Number LID_M, Nonce Chall_C\<rbrace> \<in> set evsPReqU; Notes C \<lbrace>Number LID_M, Transaction\<rbrace> \<in> set evsPReqU\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says C M \<lbrace>EXHcrypt KC1 EKj \<lbrace>PIHead, Hash OIData\<rbrace> (Pan (pan C)), OIData, Hash \<lbrace>PIHead, Pan (pan C)\<rbrace>\<rbrace> # Notes C \<lbrace>Key KC1, Agent M\<rbrace> # evsPReqU))) = (A \<in> bad)
8. \<And>C Chall_C Chall_M EKj HOD KC2 LID_M M OIData OIDualSigned OrderDesc P PANData PIData PIDualSigned PIHead PurchAmt Transaction XID evsPReqS k. \<lbrakk>evsPReqS \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsPReqS)) = (A \<in> bad); C = Cardholder k; CardSecret k \<noteq> 0; Key KC2 \<notin> used evsPReqS; KC2 \<in> symKeys; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; HOD = Hash \<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>; OIData = \<lbrace>Number LID_M, Number XID, Nonce Chall_C, HOD, Nonce Chall_M\<rbrace>; PIHead = \<lbrace>Number LID_M, Number XID, HOD, Number PurchAmt, Agent M, Hash \<lbrace>Number XID, Nonce (CardSecret k)\<rbrace>\<rbrace>; PANData = \<lbrace>Pan (pan C), Nonce (PANSecret k)\<rbrace>; PIData = \<lbrace>PIHead, PANData\<rbrace>; PIDualSigned = \<lbrace>sign (priSK C) \<lbrace>Hash PIData, Hash OIData\<rbrace>, EXcrypt KC2 EKj \<lbrace>PIHead, Hash OIData\<rbrace> PANData\<rbrace>; OIDualSigned = \<lbrace>OIData, Hash PIData\<rbrace>; Gets C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P EKj onlyEnc (priSK RCA)\<rbrace>) \<in> set evsPReqS; Says C M \<lbrace>Number LID_M, Nonce Chall_C\<rbrace> \<in> set evsPReqS; Notes C \<lbrace>Number LID_M, Transaction\<rbrace> \<in> set evsPReqS\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says C M \<lbrace>PIDualSigned, OIDualSigned\<rbrace> # Notes C \<lbrace>Key KC2, Agent M\<rbrace> # evsPReqS))) = (A \<in> bad)
9. \<And>evsAReq KM Transaction M C OrderDesc PurchAmt HOD OIData LID_M XID Chall_C Chall_M k P_I HPIData encPANData P EKj. \<lbrakk>evsAReq \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsAReq)) = (A \<in> bad); Key KM \<notin> used evsAReq; KM \<in> symKeys; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; HOD = Hash \<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>; OIData = \<lbrace>Number LID_M, Number XID, Nonce Chall_C, HOD, Nonce Chall_M\<rbrace>; CardSecret k \<noteq> 0 \<longrightarrow> P_I = \<lbrace>sign (priSK C) \<lbrace>HPIData, Hash OIData\<rbrace>, encPANData\<rbrace>; Gets M \<lbrace>P_I, OIData, HPIData\<rbrace> \<in> set evsAReq; Says M C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P EKj onlyEnc (priSK RCA)\<rbrace>) \<in> set evsAReq; Notes M \<lbrace>Number LID_M, Agent P, Transaction\<rbrace> \<in> set evsAReq\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says M P (EncB (priSK M) KM (pubEK P) \<lbrace>Number LID_M, Number XID, Hash OIData, HOD\<rbrace> P_I) # evsAReq))) = (A \<in> bad)
10. \<And>evsAResU C k M i KP KC1 KM PIHead LID_M XID HOD PurchAmt P_I EKj HOIData P. \<lbrakk>evsAResU \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsAResU)) = (A \<in> bad); C = Cardholder k; M = Merchant i; Key KP \<notin> used evsAResU; KP \<in> symKeys; CardSecret k = 0; KC1 \<in> symKeys; KM \<in> symKeys; PIHead = \<lbrace>Number LID_M, Number XID, HOD, Number PurchAmt, Agent M\<rbrace>; P_I = EXHcrypt KC1 EKj \<lbrace>PIHead, HOIData\<rbrace> (Pan (pan C)); Gets P (EncB (priSK M) KM (pubEK P) \<lbrace>Number LID_M, Number XID, HOIData, HOD\<rbrace> P_I) \<in> set evsAResU\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says P M (EncB (priSK P) KP (pubEK M) \<lbrace>Number LID_M, Number XID, Number PurchAmt\<rbrace> authCode) # evsAResU))) = (A \<in> bad)
A total of 12 subgoals...
[PROOF STEP]
apply (frule_tac [9] AuthReq_msg_in_parts_spies) \<comment> \<open>AuthReq\<close>
[PROOF STATE]
proof (prove)
goal (13 subgoals):
1. (Key (invKey (publicKey b A)) \<in> parts (knows Spy [])) = (A \<in> bad)
2. \<And>evsf X B. \<lbrakk>evsf \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsf)) = (A \<in> bad); X \<in> synth (analz (knows Spy evsf))\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says Spy B X # evsf))) = (A \<in> bad)
3. \<And>evsr Aa B X. \<lbrakk>evsr \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsr)) = (A \<in> bad); Says Aa B X \<in> set evsr\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Gets B X # evsr))) = (A \<in> bad)
4. \<And>evsStart LID_M C k M i P j Transaction OrderDesc PurchAmt. \<lbrakk>evsStart \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsStart)) = (A \<in> bad); Number LID_M \<notin> used evsStart; C = Cardholder k; M = Merchant i; P = PG j; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; LID_M \<notin> range CardSecret; LID_M \<notin> range PANSecret\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Notes C \<lbrace>Number LID_M, Transaction\<rbrace> # Notes M \<lbrace>Number LID_M, Agent P, Transaction\<rbrace> # evsStart))) = (A \<in> bad)
5. \<And>evsPIReq Transaction M C OrderDesc PurchAmt Chall_C LID_M. \<lbrakk>evsPIReq \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsPIReq)) = (A \<in> bad); Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; Nonce Chall_C \<notin> used evsPIReq; Chall_C \<notin> range CardSecret; Chall_C \<notin> range PANSecret; Notes C \<lbrace>Number LID_M, Transaction\<rbrace> \<in> set evsPIReq\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says C M \<lbrace>Number LID_M, Nonce Chall_C\<rbrace> # evsPIReq))) = (A \<in> bad)
6. \<And>evsPIRes M LID_M Chall_C Transaction C OrderDesc PurchAmt P Chall_M XID. \<lbrakk>evsPIRes \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsPIRes)) = (A \<in> bad); Gets M \<lbrace>Number LID_M, Nonce Chall_C\<rbrace> \<in> set evsPIRes; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; Notes M \<lbrace>Number LID_M, Agent P, Transaction\<rbrace> \<in> set evsPIRes; Nonce Chall_M \<notin> used evsPIRes; Chall_M \<notin> range CardSecret; Chall_M \<notin> range PANSecret; Number XID \<notin> used evsPIRes; XID \<notin> range CardSecret; XID \<notin> range PANSecret\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says M C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P (pubEK P) onlyEnc (priSK RCA)\<rbrace>) # evsPIRes))) = (A \<in> bad)
7. \<And>C k KC1 Transaction M HOD OIData LID_M PIHead EKj Chall_C Chall_M OrderDesc P PurchAmt XID evsPReqU. \<lbrakk>evsPReqU \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsPReqU)) = (A \<in> bad); C = Cardholder k; CardSecret k = 0; Key KC1 \<notin> used evsPReqU; KC1 \<in> symKeys; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; HOD = Hash \<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>; OIData = \<lbrace>Number LID_M, Number XID, Nonce Chall_C, HOD, Nonce Chall_M\<rbrace>; PIHead = \<lbrace>Number LID_M, Number XID, HOD, Number PurchAmt, Agent M\<rbrace>; Gets C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P EKj onlyEnc (priSK RCA)\<rbrace>) \<in> set evsPReqU; Says C M \<lbrace>Number LID_M, Nonce Chall_C\<rbrace> \<in> set evsPReqU; Notes C \<lbrace>Number LID_M, Transaction\<rbrace> \<in> set evsPReqU\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says C M \<lbrace>EXHcrypt KC1 EKj \<lbrace>PIHead, Hash OIData\<rbrace> (Pan (pan C)), OIData, Hash \<lbrace>PIHead, Pan (pan C)\<rbrace>\<rbrace> # Notes C \<lbrace>Key KC1, Agent M\<rbrace> # evsPReqU))) = (A \<in> bad)
8. \<And>C Chall_C Chall_M EKj HOD KC2 LID_M M OIData OIDualSigned OrderDesc P PANData PIData PIDualSigned PIHead PurchAmt Transaction XID evsPReqS k. \<lbrakk>evsPReqS \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsPReqS)) = (A \<in> bad); C = Cardholder k; CardSecret k \<noteq> 0; Key KC2 \<notin> used evsPReqS; KC2 \<in> symKeys; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; HOD = Hash \<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>; OIData = \<lbrace>Number LID_M, Number XID, Nonce Chall_C, HOD, Nonce Chall_M\<rbrace>; PIHead = \<lbrace>Number LID_M, Number XID, HOD, Number PurchAmt, Agent M, Hash \<lbrace>Number XID, Nonce (CardSecret k)\<rbrace>\<rbrace>; PANData = \<lbrace>Pan (pan C), Nonce (PANSecret k)\<rbrace>; PIData = \<lbrace>PIHead, PANData\<rbrace>; PIDualSigned = \<lbrace>sign (priSK C) \<lbrace>Hash PIData, Hash OIData\<rbrace>, EXcrypt KC2 EKj \<lbrace>PIHead, Hash OIData\<rbrace> PANData\<rbrace>; OIDualSigned = \<lbrace>OIData, Hash PIData\<rbrace>; Gets C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P EKj onlyEnc (priSK RCA)\<rbrace>) \<in> set evsPReqS; Says C M \<lbrace>Number LID_M, Nonce Chall_C\<rbrace> \<in> set evsPReqS; Notes C \<lbrace>Number LID_M, Transaction\<rbrace> \<in> set evsPReqS\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says C M \<lbrace>PIDualSigned, OIDualSigned\<rbrace> # Notes C \<lbrace>Key KC2, Agent M\<rbrace> # evsPReqS))) = (A \<in> bad)
9. \<And>evsAReq KM Transaction M C OrderDesc PurchAmt HOD OIData LID_M XID Chall_C Chall_M k P_I HPIData encPANData P EKj. \<lbrakk>evsAReq \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsAReq)) = (A \<in> bad); Key KM \<notin> used evsAReq; KM \<in> symKeys; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; HOD = Hash \<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>; OIData = \<lbrace>Number LID_M, Number XID, Nonce Chall_C, HOD, Nonce Chall_M\<rbrace>; CardSecret k \<noteq> 0 \<longrightarrow> P_I = \<lbrace>sign (priSK C) \<lbrace>HPIData, Hash OIData\<rbrace>, encPANData\<rbrace>; Gets M \<lbrace>P_I, OIData, HPIData\<rbrace> \<in> set evsAReq; Says M C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P EKj onlyEnc (priSK RCA)\<rbrace>) \<in> set evsAReq; Notes M \<lbrace>Number LID_M, Agent P, Transaction\<rbrace> \<in> set evsAReq\<rbrakk> \<Longrightarrow> evsAReq \<in> set_pur
10. \<And>evsAReq KM Transaction M C OrderDesc PurchAmt HOD OIData LID_M XID Chall_C Chall_M k P_I HPIData encPANData P EKj. \<lbrakk>evsAReq \<in> set_pur; (Key (invKey (publicKey b A)) \<in> parts (knows Spy evsAReq)) = (A \<in> bad); Key KM \<notin> used evsAReq; KM \<in> symKeys; Transaction = \<lbrace>Agent M, Agent C, Number OrderDesc, Number PurchAmt\<rbrace>; HOD = Hash \<lbrace>Number OrderDesc, Number PurchAmt\<rbrace>; OIData = \<lbrace>Number LID_M, Number XID, Nonce Chall_C, HOD, Nonce Chall_M\<rbrace>; CardSecret k \<noteq> 0 \<longrightarrow> P_I = \<lbrace>sign (priSK C) \<lbrace>HPIData, Hash OIData\<rbrace>, encPANData\<rbrace>; Gets M \<lbrace>P_I, OIData, HPIData\<rbrace> \<in> set evsAReq; Says M C (sign (priSK M) \<lbrace>Number LID_M, Number XID, Nonce Chall_C, Nonce Chall_M, cert P EKj onlyEnc (priSK RCA)\<rbrace>) \<in> set evsAReq; Notes M \<lbrace>Number LID_M, Agent P, Transaction\<rbrace> \<in> set evsAReq; P_I \<in> parts (knows Spy evsAReq)\<rbrakk> \<Longrightarrow> (Key (invKey (publicKey b A)) \<in> parts (knows Spy (Says M P (EncB (priSK M) KM (pubEK P) \<lbrace>Number LID_M, Number XID, Hash OIData, HOD\<rbrace> P_I) # evsAReq))) = (A \<in> bad)
A total of 13 subgoals...
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
#NOTE even though varMap and valueMap files were created in 000_explore, import the files using the scripts because not all had info
rm(list=ls())
###############################################
###############################################
###############################################
# change information in this section only:
setwd("~/Box/Danica EuPathDB/Studies/LLINE-UP/DH_workspace/Data")
OUTPUT <- "./Processed data/001.LLINEUP_allVars.RData" # SET FOLDER & FILE NAME WHERE OUTPUT WILL BE SAVED
source("~/Documents/GitHub/ClinEpiWorkflow/Main/lib/R/000a_studyImport_function.R") # SET LOCATION OF STUDY IMPORT FUNCTION
FOLDER <- "./Data from provider" # SET FOLDER CONTAINING RAW DATA FILES FROM PROVIDER
STUDY <- "LLINEUP" # SET DATASET (OR STUDY)
PARTICIPANT_ID <- "not applicable" # set to "not applicable" to skip calculation of time varying
MISSING <- c(NA, "na", "NA", "n/a", "N/A", "", ".") # SET MISSING VALUES (NA, "na", "NA", "n/a", "N/A", "", ".")
DATE_TIME <- c("date", "dob") # SET REGEX FOR POSSIBLE DATE/TIME VARIABLES. NOTE: KEEP LOWERCASE
TYPE <- ".dta" # SET FILE TYPE EXTENSION:
# (".csv", ".txt", ".RData", ".sas7bdat", ".dta", ".sav", ".zsav", ".por")
# NOTE: EXTENSION MUST EXACTLY MATCH ONE OF THE SUGGESTIONS.
# FOR EXCEL FILES --> SAVE AS .csv FILES BEFORE RUNNING SCRIPTS
# IF EXTENSION IS NOT REPRESENTED IN THE LIST, TALK TO DANICA TO UPDATE CODE
###############################################
###############################################
###############################################
# use studyImport function to get allVars, valueMap, and dataFiles
temp <- studyImport(FOLDER, TYPE, STUDY, MISSING, DATE_TIME, PARTICIPANT_ID)
allVars <- temp[[1]]
valueMap <- temp[[2]]
dataFiles <- temp[[3]]
originalFiles <- temp[[4]]
#############################################################
# use labelled package to pull out variable labels
library(labelled)
allVars$label <- ""
for(i in unique(allVars$dataFile)){
temp <- dataFiles[[i]]
for(j in names(temp)){
if(allVars$type[allVars$dataFile==i & allVars$variable==j]=="dbl+lbl"){
if(!is.null(var_label(temp[,j]))){
allVars$label[allVars$dataFile==i & allVars$variable==j] <- var_label(temp[,j])
}
}
}
}
#############################################################
# use labelled package to pull out mapped values
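# note (assumption about the labelled package API used below): for a labelled vector, val_labels() returns a
# named vector such as c(Male = 1, Female = 2), so names(val_labels(x)) gives the label text and the vector
# itself gives the underlying codes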
values2 <- data.frame(dataFile=character(0), variable=character(0), value=character(0), mappedTerm=character(0))
for(i in names(dataFiles)){
temp <- dataFiles[[i]]
for(j in names(temp)){
if(!is.null(val_labels(temp[,j]))){
temp2 <- data.frame(dataFile=names(dataFiles[i]),
variable=rep(j, length(names(val_labels(temp[,j])))),
value=as.vector(unlist(val_labels(temp[j]))),
mappedTerm=names(val_labels(temp[,j])),
labeledValue=T)
temp2$dataFile <- as.character(temp2$dataFile)
temp2$variable <- as.character(temp2$variable)
temp2$value <- as.character(temp2$value)
temp2$mappedTerm <- as.character(temp2$mappedTerm)
values2 <- rbind(values2, temp2)
rm(temp2)
}
if(is.null(val_labels(temp[,j]))){
temp2 <- data.frame(dataFile=names(dataFiles[i]),
variable=j,
value="NULL",
mappedTerm="NULL",
labeledValue=F)
temp2$dataFile <- as.character(temp2$dataFile)
temp2$variable <- as.character(temp2$variable)
temp2$value <- as.character(temp2$value)
temp2$mappedTerm <- as.character(temp2$mappedTerm)
values2 <- rbind(values2, temp2)
rm(temp2)
}
}
}
#############################################################
# update uniqueVar
allVars$uniqueVar <- paste(allVars$dataFile, allVars$variable, sep="::")
valueMap$uniqueVar <- paste(valueMap$dataFile, valueMap$variable, sep="::")
values2$uniqueVar <- paste(values2$dataFile, values2$variable, sep="::")
#############################################################
# update mappedTerm for continuous or > 20 categories
unique(values2[values2$uniqueVar %in% valueMap$uniqueVar[valueMap$values=="continuous"], "mappedTerm"])
# [1] "NULL"
values2[values2$uniqueVar %in% valueMap$uniqueVar[valueMap$values=="continuous"], "mappedTerm"] <- "continuous"
values2[values2$uniqueVar %in% valueMap$uniqueVar[valueMap$values=="continuous"], "value"] <- "continuous"
unique(values2[values2$uniqueVar %in% valueMap$uniqueVar[valueMap$values==">20 categories"], "mappedTerm"])
#[1] "NULL"
values2[values2$uniqueVar %in% valueMap$uniqueVar[valueMap$values==">20 categories"], "mappedTerm"] <- ">20 categories"
values2[values2$uniqueVar %in% valueMap$uniqueVar[valueMap$values==">20 categories"], "value"] <- ">20 categories"
#############################################################
# variables without labels in the .dta files only occupy 1 row in the values2 dataframe
# determine which rows correspond to these variables in the valueMap file, and save as missing_values
# then remove variables without labels from the values2 data frame and replace these with missing_values
missing_values <- data.frame(uniqueVar=character(0), variable=character(0), dataFile=character(0), value=character(0),
mappedTerm=character(0), labeledValue=character(0))
for(i in unique(values2$uniqueVar[values2$mappedTerm=="NULL"])){
if(i %in% valueMap$uniqueVar==F){print(i)}
temp <- valueMap[valueMap$uniqueVar==i, c("uniqueVar", "dataFile", "variable", "values")]
temp$labeledValue <- F
temp$mappedTerm <- ""
missing_values <- rbind(missing_values, temp)
print(dim(missing_values))
}
head(missing_values)
head(values2)
names(values2)[names(values2)=="value"] <- "values"
values2 <- values2[values2$uniqueVar %in% missing_values$uniqueVar==F,]
values2 <- rbind(values2, missing_values)
#############################################################
# overwrite valueMap
valueMap <- values2
valueMap$dataSet <- "LLINEUP"
#############################################################
# save workspace
head(valueMap)
dim(valueMap)
#[1] 4707 7
head(allVars)
dim(allVars)
#[1] 1260 19
save(allVars, valueMap, dataFiles, originalFiles, file=OUTPUT)
write.csv(allVars, file="./Processed data/LLINEUP_001_allVars.csv", row.names=F)
write.csv(valueMap, file="./Processed data/LLINEUP_001_valueMap.csv", row.names=F)
|
(*
Authors:
Anthony Bordg, University of Cambridge, [email protected];
Yijun He, University of Cambridge, [email protected]
*)
theory Entanglement
imports
Quantum
More_Tensor
begin
section \<open>Quantum Entanglement\<close>
subsection \<open>The Product States and Entangled States of a 2-qubits System\<close>
text \<open>Below we add the condition that @{term v} and @{term w} are two-dimensional states, otherwise
@{term u} can always be represented by the tensor product of the 1-dimensional vector @{term 1} and
@{term u} itself.\<close>
definition prod_state2:: "complex Matrix.mat \<Rightarrow> bool" where
"prod_state2 u \<equiv> if state 2 u then \<exists>v w. state 1 v \<and> state 1 w \<and> u = v \<Otimes> w else undefined"
definition entangled2:: "complex Matrix.mat \<Rightarrow> bool" where
"entangled2 u \<equiv> \<not> prod_state2 u"
text \<open>The Bell states are entangled states.\<close>
lemma bell00_is_entangled2 [simp]:
"entangled2 |\<beta>\<^sub>0\<^sub>0\<rangle>"
proof -
have "\<forall>v w. state 1 v \<longrightarrow> state 1 w \<longrightarrow> |\<beta>\<^sub>0\<^sub>0\<rangle> \<noteq> v \<Otimes> w"
proof((rule allI)+,(rule impI)+, rule notI)
fix v w
assume a0:"state 1 v" and a1:"state 1 w" and a2:"|\<beta>\<^sub>0\<^sub>0\<rangle> = v \<Otimes> w"
have "(v $$ (0,0) * w $$ (0,0)) * (v $$ (1,0) * w $$ (1,0)) =
(v $$ (0,0) * w $$ (1,0)) * (v $$ (1,0) * w $$ (0,0))" by simp
then have "(v \<Otimes> w) $$ (0,0) * (v \<Otimes> w) $$ (3,0) = (v \<Otimes> w) $$ (1,0) * (v \<Otimes> w) $$ (2,0)"
using a0 a1 by simp
then have "|\<beta>\<^sub>0\<^sub>0\<rangle> $$ (0,0) * |\<beta>\<^sub>0\<^sub>0\<rangle> $$ (3,0) = |\<beta>\<^sub>0\<^sub>0\<rangle> $$ (1,0) * |\<beta>\<^sub>0\<^sub>0\<rangle> $$ (2,0)"
using a2 by simp
then have "1/ sqrt 2 * 1/sqrt 2 = 0" by simp
thus False by simp
qed
thus ?thesis by(simp add: entangled2_def prod_state2_def)
qed
lemma bell01_is_entangled2 [simp]:
"entangled2 |\<beta>\<^sub>0\<^sub>1\<rangle>"
proof -
have "\<forall>v w. state 1 v \<longrightarrow> state 1 w \<longrightarrow> |\<beta>\<^sub>0\<^sub>1\<rangle> \<noteq> v \<Otimes> w"
proof((rule allI)+,(rule impI)+, rule notI)
fix v w
assume a0:"state 1 v" and a1:"state 1 w" and a2:"|\<beta>\<^sub>0\<^sub>1\<rangle> = v \<Otimes> w"
have "(v $$ (0,0) * w $$ (1,0)) * (v $$ (1,0) * w $$ (0,0)) =
(v $$ (0,0) * w $$ (0,0)) * (v $$ (1,0) * w $$ (1,0))" by simp
then have "(v \<Otimes> w) $$ (1,0) * (v \<Otimes> w) $$ (2,0) = (v \<Otimes> w) $$ (0,0) * (v \<Otimes> w) $$ (3,0)"
using a0 a1 by simp
then have "|\<beta>\<^sub>0\<^sub>1\<rangle> $$ (1,0) * |\<beta>\<^sub>0\<^sub>1\<rangle> $$ (2,0) = |\<beta>\<^sub>0\<^sub>1\<rangle> $$ (0,0) * |\<beta>\<^sub>0\<^sub>1\<rangle> $$ (3,0)"
using a2 by simp
then have "1/sqrt 2 * 1/sqrt 2 = 0"
using bell01_index by simp
thus False by simp
qed
thus ?thesis by(simp add: entangled2_def prod_state2_def)
qed
lemma bell10_is_entangled2 [simp]:
"entangled2 |\<beta>\<^sub>1\<^sub>0\<rangle>"
proof -
have "\<forall>v w. state 1 v \<longrightarrow> state 1 w \<longrightarrow> |\<beta>\<^sub>1\<^sub>0\<rangle> \<noteq> v \<Otimes> w"
proof((rule allI)+,(rule impI)+, rule notI)
fix v w
assume a0:"state 1 v" and a1:"state 1 w" and a2:"|\<beta>\<^sub>1\<^sub>0\<rangle> = v \<Otimes> w"
have "(v $$ (0,0) * w $$ (0,0)) * (v $$ (1,0) * w $$ (1,0)) =
(v $$ (0,0) * w $$ (1,0)) * (v $$ (1,0) * w $$ (0,0))" by simp
then have "(v \<Otimes> w) $$ (0,0) * (v \<Otimes> w) $$ (3,0) = (v \<Otimes> w) $$ (1,0) * (v \<Otimes> w) $$ (2,0)"
using a0 a1 by simp
then have "|\<beta>\<^sub>1\<^sub>0\<rangle> $$ (1,0) * |\<beta>\<^sub>1\<^sub>0\<rangle> $$ (2,0) = |\<beta>\<^sub>1\<^sub>0\<rangle> $$ (0,0) * |\<beta>\<^sub>1\<^sub>0\<rangle> $$ (3,0)"
using a2 by simp
then have "1/sqrt 2 * 1/sqrt 2 = 0" by simp
thus False by simp
qed
thus ?thesis by(simp add: entangled2_def prod_state2_def)
qed
lemma bell11_is_entangled2 [simp]:
"entangled2 |\<beta>\<^sub>1\<^sub>1\<rangle>"
proof -
have "\<forall>v w. state 1 v \<longrightarrow> state 1 w \<longrightarrow> |\<beta>\<^sub>1\<^sub>1\<rangle> \<noteq> v \<Otimes> w"
proof((rule allI)+,(rule impI)+, rule notI)
fix v w
assume a0:"state 1 v" and a1:"state 1 w" and a2:"|\<beta>\<^sub>1\<^sub>1\<rangle> = v \<Otimes> w"
have "(v $$ (0,0) * w $$ (1,0)) * (v $$ (1,0) * w $$ (0,0)) =
(v $$ (0,0) * w $$ (0,0)) * (v $$ (1,0) * w $$ (1,0))" by simp
then have "(v \<Otimes> w) $$ (1,0) * (v \<Otimes> w) $$ (2,0) = (v \<Otimes> w) $$ (0,0) * (v \<Otimes> w) $$ (3,0)"
using a0 a1 by simp
then have "|\<beta>\<^sub>1\<^sub>1\<rangle> $$ (1,0) * |\<beta>\<^sub>1\<^sub>1\<rangle> $$ (2,0) = |\<beta>\<^sub>1\<^sub>1\<rangle> $$ (0,0) * |\<beta>\<^sub>1\<^sub>1\<rangle> $$ (3,0)"
using a2 by simp
then have "1/sqrt 2 * 1/sqrt 2 = 0"
using bell_11_index by simp
thus False by simp
qed
thus ?thesis by(simp add: entangled2_def prod_state2_def)
qed
text \<open>
An entangled state is a state that cannot be broken down as the tensor product of smaller states.
\<close>
definition prod_state:: "nat \<Rightarrow> complex Matrix.mat \<Rightarrow> bool" where
"prod_state m u \<equiv> if state m u then \<exists>n p::nat.\<exists>v w. state n v \<and> state p w \<and>
n < m \<and> p < m \<and> u = v \<Otimes> w else undefined"
definition entangled:: "nat \<Rightarrow> complex Matrix.mat \<Rightarrow> bool" where
"entangled n v \<equiv> \<not> (prod_state n v)"
(* To do: as an exercise prove the equivalence between entangled2 and (entangled 2). *)
lemma sanity_check:
"\<not>(entangled 2 (mat_of_cols_list 2 [[1/sqrt(2), 1/sqrt(2)]] \<Otimes> mat_of_cols_list 2 [[1/sqrt(2), 1/sqrt(2)]]))"
proof -
define u where "u = mat_of_cols_list 2 [[1/sqrt(2), 1/sqrt(2)]]"
then have "state 1 u"
proof -
have "dim_col u = 1"
using u_def mat_of_cols_list_def by simp
moreover have f:"dim_row u = 2"
using u_def mat_of_cols_list_def by simp
moreover have "\<parallel>Matrix.col u 0\<parallel> = 1"
proof -
have "(\<Sum>i<2. (cmod (u $$ (i, 0)))\<^sup>2) = (1/sqrt 2)\<^sup>2 + (1/sqrt 2)\<^sup>2"
by(simp add: u_def cmod_def numeral_2_eq_2)
then have "\<parallel>Matrix.col u 0\<parallel> = sqrt ((1/sqrt 2)\<^sup>2 + (1/sqrt 2)\<^sup>2)"
using f by(auto simp: Matrix.col_def u_def cpx_vec_length_def)
thus ?thesis by(simp add: power_divide)
qed
ultimately show ?thesis by(simp add: state_def)
qed
then have "state 2 (u \<Otimes> u)"
using tensor_state by(metis one_add_one)
thus ?thesis
using entangled_def prod_state_def by(metis \<open>state 1 u\<close> one_less_numeral_iff semiring_norm(76) u_def)
qed
end |
SS August 20/08, Offers August 25/08, Evening. Long time owner of this great character home on a fantastic street, Features 1 3/4 storeys, 3 spacious bedrooms + a den. 2 baths. Original oak woodwork throughout, Hardwood floors under some carpets, Large Dining Rm, Open Living Rm complete with brick facing gas fireplace & original built-in cabinets. Fantastic front porch for those warm summer nights. Full Basement with den and 2nd Bath. Oversized double garage with opener. Extra parking, fenced yard. Don't miss it! |
(* Title: FOL/ex/Foundation.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1991 University of Cambridge
*)
section "Intuitionistic FOL: Examples from The Foundation of a Generic Theorem Prover"
theory Foundation
imports IFOL
begin
lemma \<open>A \<and> B \<longrightarrow> (C \<longrightarrow> A \<and> C)\<close>
apply (rule impI)
apply (rule impI)
apply (rule conjI)
prefer 2 apply assumption
apply (rule conjunct1)
apply assumption
done
text \<open>A form of conj-elimination\<close>
lemma
assumes \<open>A \<and> B\<close>
and \<open>A \<Longrightarrow> B \<Longrightarrow> C\<close>
shows \<open>C\<close>
apply (rule assms)
apply (rule conjunct1)
apply (rule assms)
apply (rule conjunct2)
apply (rule assms)
done
lemma
assumes \<open>\<And>A. \<not> \<not> A \<Longrightarrow> A\<close>
shows \<open>B \<or> \<not> B\<close>
apply (rule assms)
apply (rule notI)
apply (rule_tac P = \<open>\<not> B\<close> in notE)
apply (rule_tac [2] notI)
apply (rule_tac [2] P = \<open>B \<or> \<not> B\<close> in notE)
prefer 2 apply assumption
apply (rule_tac [2] disjI1)
prefer 2 apply assumption
apply (rule notI)
apply (rule_tac P = \<open>B \<or> \<not> B\<close> in notE)
apply assumption
apply (rule disjI2)
apply assumption
done
lemma
assumes \<open>\<And>A. \<not> \<not> A \<Longrightarrow> A\<close>
shows \<open>B \<or> \<not> B\<close>
apply (rule assms)
apply (rule notI)
apply (rule notE)
apply (rule_tac [2] notI)
apply (erule_tac [2] notE)
apply (erule_tac [2] disjI1)
apply (rule notI)
apply (erule notE)
apply (erule disjI2)
done
lemma
assumes \<open>A \<or> \<not> A\<close>
and \<open>\<not> \<not> A\<close>
shows \<open>A\<close>
apply (rule disjE)
apply (rule assms)
apply assumption
apply (rule FalseE)
apply (rule_tac P = \<open>\<not> A\<close> in notE)
apply (rule assms)
apply assumption
done
subsection "Examples with quantifiers"
lemma
assumes \<open>\<forall>z. G(z)\<close>
shows \<open>\<forall>z. G(z) \<or> H(z)\<close>
apply (rule allI)
apply (rule disjI1)
apply (rule assms [THEN spec])
done
lemma \<open>\<forall>x. \<exists>y. x = y\<close>
apply (rule allI)
apply (rule exI)
apply (rule refl)
done
lemma \<open>\<exists>y. \<forall>x. x = y\<close>
apply (rule exI)
apply (rule allI)
apply (rule refl)?
oops
text \<open>Parallel lifting example.\<close>
lemma \<open>\<exists>u. \<forall>x. \<exists>v. \<forall>y. \<exists>w. P(u,x,v,y,w)\<close>
apply (rule exI allI)
apply (rule exI allI)
apply (rule exI allI)
apply (rule exI allI)
apply (rule exI allI)
oops
lemma
assumes \<open>(\<exists>z. F(z)) \<and> B\<close>
shows \<open>\<exists>z. F(z) \<and> B\<close>
apply (rule conjE)
apply (rule assms)
apply (rule exE)
apply assumption
apply (rule exI)
apply (rule conjI)
apply assumption
apply assumption
done
text \<open>A bigger demonstration of quantifiers -- not in the paper.\<close>
lemma \<open>(\<exists>y. \<forall>x. Q(x,y)) \<longrightarrow> (\<forall>x. \<exists>y. Q(x,y))\<close>
apply (rule impI)
apply (rule allI)
apply (rule exE, assumption)
apply (rule exI)
apply (rule allE, assumption)
apply assumption
done
end
function [Vx,Vy,reliab]=opticalFlow( I1, I2, varargin )
% Coarse-to-fine optical flow using Lucas&Kanade or Horn&Schunck.
%
% Implemented 'type' of optical flow estimation:
% LK: http://en.wikipedia.org/wiki/Lucas-Kanade_method
% HS: http://en.wikipedia.org/wiki/Horn-Schunck_method
% LK is a local, fast method (the implementation is fully vectorized).
% HS is a global, slower method (an SSE implementation is provided).
%
% Common parameters for LK and HS: 'smooth' determines smoothing prior to
% flow computation and can make flow estimation more robust. 'resample' can
% be used to downsample an image for faster but lower quality results, e.g.
% resample=.5 makes flow computation about 4x faster. LK: 'radius' controls
% integration window size (and smoothness of flow). HS: 'alpha' controls
% tradeoff between data and smoothness term (and smoothness of flow) and
% 'nIter' determines the number of gradient descent steps.
%
% USAGE
% [Vx,Vy,reliab] = opticalFlow( I1, I2, pFlow )
%
% INPUTS
% I1, I2 - input images to calculate flow between
% pFlow - parameters (struct or name/value pairs)
% .type - ['LK'] may be either 'LK' or 'HS'
% .smooth - [1] smoothing radius for triangle filter (may be 0)
% .resample - [1] resampling amount (must be a power of 2)
% .radius - [5] integration radius for weighted window [LK only]
% .alpha - [1] smoothness constraint [HS only]
% .nIter - [250] number of iterations [HS only]
%
% OUTPUTS
% Vx, Vy - x,y components of flow [Vx>0->right, Vy>0->down]
% reliab - reliability of flow in given window [LK only]
%
% EXAMPLE - compute LK flow on test images
% load opticalFlowTest;
% [Vx,Vy]=opticalFlow(I1,I2,'smooth',1,'radius',10,'type','LK');
% figure(1); im(I1); figure(2); im(I2);
% figure(3); im([Vx Vy]); colormap jet;
%
% EXAMPLE - rectify I1 to I2 using computed flow
% load opticalFlowTest;
% [Vx,Vy]=opticalFlow(I1,I2,'smooth',1,'radius',10,'type','LK');
% I1=imtransform2(I1,[],'vs',-Vx,'us',-Vy,'pad','replicate');
% figure(1); im(I1); figure(2); im(I2);
%
% EXAMPLE - compare LK and HS flow
% load opticalFlowTest;
% prm={'smooth',1,'radius',10,'alpha',20,'nIter',200,'type'};
% tic, [Vx1,Vy1]=opticalFlow(I1,I2,prm{:},'LK'); toc
% tic, [Vx2,Vy2]=opticalFlow(I1,I2,prm{:},'HS'); toc
% figure(1); im([Vx1 Vy1; Vx2 Vy2]); colormap jet;
%
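% EXAMPLE - (illustrative sketch) trade accuracy for roughly 4x speed via resampling
%  load opticalFlowTest;
%  [Vx,Vy]=opticalFlow(I1,I2,'smooth',1,'radius',10,'type','LK','resample',.5);
%  figure(1); im([Vx Vy]); colormap jet;
%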
% See also convTri, imtransform2
%
% Piotr's Image&Video Toolbox Version 3.02
% Copyright 2012 Piotr Dollar. [pdollar-at-caltech.edu]
% Please email me if you find bugs, or have suggestions or questions!
% Licensed under the Simplified BSD License [see external/bsd.txt]
% get default parameters and do error checking
dfs={'type','LK','smooth',1,'resample',1,'radius',5,'alpha',1,'nIter',250};
[type,smooth,resample,radius,alpha,nIter]=getPrmDflt(varargin,dfs,1);
assert(any(strcmp(type,{'LK','HS'}))); useLk=strcmp(type,'LK');
if( ~ismatrix(I1) || ~ismatrix(I2) || any(size(I1)~=size(I2)) )
error('Input images must be 2D and have same dimensions.'); end
% run optical flow in coarse to fine fashion
if(~isa(I1,'single')), I1=single(I1); I2=single(I2); end
[h,w]=size(I1); nScales=floor(log2(min(h,w)))-2;
for s=1:nScales + round(log2(resample))
% get current scale and I1s and I2s at given scale
scale=2^(nScales-s); h1=round(h/scale); w1=round(w/scale);
if( scale==1 ), I1s=I1; I2s=I2; else
I1s=imResample(I1,[h1 w1]); I2s=imResample(I2,[h1 w1]); end
% initialize Vx,Vy or upsample from previous scale
if(s==1), Vx=zeros(h1,w1,'single'); Vy=Vx; else r=sqrt(h1*w1/numel(Vx));
Vx=imResample(Vx,[h1 w1])*r; Vy=imResample(Vy,[h1 w1])*r; end
% transform I1s according to current estimate of Vx and Vy
if(s), I1s=imtransform2(I1s,[],'pad','replicate','vs',-Vx,'us',-Vy); end
% smooth images
I1s=convTri(I1s,smooth); I2s=convTri(I2s,smooth);
% run optical flow on current scale
if( useLk ), [Vx1,Vy1,reliab]=opticalFlowLk(I1s,I2s,radius);
else [Vx1,Vy1]=opticalFlowHs(I1s,I2s,alpha,nIter); reliab=[]; end
Vx=Vx+Vx1; Vy=Vy+Vy1;
end
if(s~=nScales), r=sqrt(h*w/numel(Vx));
Vx=imResample(Vx,[h w])*r; Vy=imResample(Vy,[h w])*r; end
end
function [Vx,Vy,reliab] = opticalFlowLk( I1, I2, radius )
% Compute elements of A'A and also of A'b
radius=min(radius,floor(min(size(I1,1),size(I1,2))/2)-1);
[Ix,Iy]=gradient2(I1); It=I2-I1; AAxy=convTri(Ix.*Iy,radius);
AAxx=convTri(Ix.^2,radius)+1e-5; ABxt=convTri(-Ix.*It,radius);
AAyy=convTri(Iy.^2,radius)+1e-5; AByt=convTri(-Iy.*It,radius);
% Find determinant and trace of A'A
AAdet=AAxx.*AAyy-AAxy.^2; AAdeti=1./AAdet; AAtr=AAxx+AAyy;
% Compute components of velocity vectors (A'A)^-1 * A'b
Vx = AAdeti .* ( AAyy.*ABxt - AAxy.*AByt);
Vy = AAdeti .* (-AAxy.*ABxt + AAxx.*AByt);
% Check for ill conditioned second moment matrices
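% (reliab is the smaller eigenvalue of A'A, i.e. (tr-sqrt(tr^2-4*det))/2; small
%  values indicate an ill-conditioned window, e.g. due to the aperture problem)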
reliab = 0.5*AAtr - 0.5*sqrt(AAtr.^2-4*AAdet);
end
function [Vx,Vy] = opticalFlowHs( I1, I2, alpha, nIter )
% compute derivatives (averaging over 2x2 neighborhoods)
A00=shift(I1,0,0); A10=shift(I1,1,0);
A01=shift(I1,0,1); A11=shift(I1,1,1);
B00=shift(I2,0,0); B10=shift(I2,1,0);
B01=shift(I2,0,1); B11=shift(I2,1,1);
Ex=0.25*((A01+B01+A11+B11)-(A00+B00+A10+B10));
Ey=0.25*((A10+B10+A11+B11)-(A00+B00+A01+B01));
Et=0.25*((B00+B10+B01+B11)-(A00+A10+A01+A11));
Ex([1 end],:)=0; Ex(:,[1 end])=0;
Ey([1 end],:)=0; Ey(:,[1 end])=0;
Et([1 end],:)=0; Et(:,[1 end])=0;
Z=1./(alpha*alpha + Ex.*Ex + Ey.*Ey);
% iteratively update Vx and Vy in each iteration
if( 1 )
[Vx,Vy]=opticalFlowHsMex(Ex,Ey,Et,Z,nIter);
Vx=Vx(2:end-1,2:end-1); Vy=Vy(2:end-1,2:end-1);
else
Vx=zeros(size(I1),'single'); Vy=Vx;
for i = 1:nIter
Mx=.25*(shift(Vx,-1,0)+shift(Vx,1,0)+shift(Vx,0,-1)+shift(Vx,0,1));
My=.25*(shift(Vy,-1,0)+shift(Vy,1,0)+shift(Vy,0,-1)+shift(Vy,0,1));
m=(Ex.*Mx+Ey.*My+Et).*Z; Vx=Mx-Ex.*m; Vy=My-Ey.*m;
Vx=Vx(2:end-1,2:end-1); Vy=Vy(2:end-1,2:end-1);
end
end
end
function J = shift( I, y, x )
% shift I by -1<=x,y<=1 pixels
[h,w]=size(I); J=zeros(h+2,w+2,'single');
J(2-y:end-1-y,2-x:end-1-x)=I;
end
[STATEMENT]
lemma vifintersection_vsubset_greatest:
assumes "I \<noteq> 0" and "\<And>i. i \<in>\<^sub>\<circ> I \<Longrightarrow> A \<subseteq>\<^sub>\<circ> f i"
shows "A \<subseteq>\<^sub>\<circ> (\<Inter>\<^sub>\<circ>i\<in>\<^sub>\<circ>I. f i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A \<subseteq>\<^sub>\<circ> \<Inter>\<^sub>\<circ> (VLambda I f `\<^sub>\<circ> I)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
I \<noteq> 0
?i \<in>\<^sub>\<circ> I \<Longrightarrow> A \<subseteq>\<^sub>\<circ> f ?i
goal (1 subgoal):
1. A \<subseteq>\<^sub>\<circ> \<Inter>\<^sub>\<circ> (VLambda I f `\<^sub>\<circ> I)
[PROOF STEP]
by (intro vsubsetI vifintersectionI) auto
make_it_tight = true;
subplot = @(m,n,p) subtightplot (m, n, p, [0.01 0.05], [0.1 0.01], [0.1 0.01]);
if ~make_it_tight, clear subplot; end
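% Note: subtightplot's trailing arguments are (presumably) gap [gap_h gap_w],
% marg_h [lower upper] and marg_w [left right], given as fractions of the figure,
% so the values above pack the axes much closer together than the built-in subplot.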
%% Upper and Lower Subplots with Titles
income = [3.2,4.1,5.0,5.6];
outgo = [2.5,4.0,3.35,4.9];
subplot(2,1,1); plot(income)
title('Income')
subplot(2,1,2); plot(outgo)
title('Outgo')
%% Subplots in Quadrants
figure
subplot(2,2,1)
text(.5,.5,{'subplot(2,2,1)';'or subplot 221'},...
'FontSize',14,'HorizontalAlignment','center')
subplot(2,2,2)
text(.5,.5,{'subplot(2,2,2)';'or subplot 222'},...
'FontSize',14,'HorizontalAlignment','center')
subplot(2,2,3)
text(.5,.5,{'subplot(2,2,3)';'or subplot 223'},...
'FontSize',14,'HorizontalAlignment','center')
subplot(2,2,4)
text(.5,.5,{'subplot(2,2,4)';'or subplot 224'},...
'FontSize',14,'HorizontalAlignment','center')
%% Asymmetrical Subplots
figure
subplot(2,2,[1 3])
text(.5,.5,'subplot(2,2,[1 3])',...
'FontSize',14,'HorizontalAlignment','center')
subplot(2,2,2)
text(.5,.5,'subplot(2,2,2)',...
'FontSize',14,'HorizontalAlignment','center')
subplot(2,2,4)
text(.5,.5,'subplot(2,2,4)',...
'FontSize',14,'HorizontalAlignment','center')
%%
figure
subplot(2,2,1:2)
text(.5,.5,'subplot(2,2,1:2)',...
'FontSize',14,'HorizontalAlignment','center')
subplot(2,2,3)
text(.5,.5,'subplot(2,2,3)',...
'FontSize',14,'HorizontalAlignment','center')
subplot(2,2,4)
text(.5,.5,'subplot(2,2,4)',...
'FontSize',14,'HorizontalAlignment','center')
%% Plotting Axes Over Subplots
figure
y = zeros(4,15);
for k = 1:4
y(k,:) = rand(1,15);
subplot(2, 2, k)
plot(y(k,:));
end
hax = axes('Position', [.35, .35, .3, .3]);
bar(hax,y,'EdgeColor','none')
set(hax,'XTick',[])
------------------------------------------------------------------------------
-- Testing the erasing of proof terms
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module ProofTerm2 where
postulate
D : Set
N : D β Set
_β‘_ : D β D β Set
postulate foo : β {m n} β (Nm : N m) β (Nn : N n) β m β‘ m
{-# ATP prove foo #-}
-- Andreas, 2016-07-08, issue reported by Nisse
module Issue2081 where
module M where
private
Private : Setβ
Private = Set
-- The local module should be private as well!
module Public where
-- The definitions inside this module should not be private
-- unless declared so explicitly!
Public : Setβ
Public = Set
private
Bla = Public -- should work!
-- This `where` should not give a 'useless private' error:
where open Public
module Pu = M.Public -- should fail!
(* Title: ZF/UNITY/SubstAx.thy
Author: Sidi O Ehmety, Computer Laboratory
Copyright 2001 University of Cambridge
Theory ported from HOL.
*)
section\<open>Weak LeadsTo relation (restricted to the set of reachable states)\<close>
theory SubstAx
imports WFair Constrains
begin
definition
(* The definitions below are not `conventional', but yield simpler rules *)
Ensures :: "[i,i] => i" (infixl \<open>Ensures\<close> 60) where
"A Ensures B == {F \<in> program. F \<in> (reachable(F) \<inter> A) ensures (reachable(F) \<inter> B) }"
definition
LeadsTo :: "[i, i] => i" (infixl \<open>\<longmapsto>w\<close> 60) where
"A \<longmapsto>w B == {F \<in> program. F:(reachable(F) \<inter> A) \<longmapsto> (reachable(F) \<inter> B)}"
(*Resembles the previous definition of LeadsTo*)
(* Equivalence with the HOL-like definition *)
lemma LeadsTo_eq:
"st_set(B)==> A \<longmapsto>w B = {F \<in> program. F:(reachable(F) \<inter> A) \<longmapsto> B}"
apply (unfold LeadsTo_def)
apply (blast dest: psp_stable2 leadsToD2 constrainsD2 intro: leadsTo_weaken)
done
lemma LeadsTo_type: "A \<longmapsto>w B <=program"
by (unfold LeadsTo_def, auto)
(*** Specialized laws for handling invariants ***)
(** Conjoining an Always property **)
lemma Always_LeadsTo_pre: "F \<in> Always(I) ==> (F:(I \<inter> A) \<longmapsto>w A') \<longleftrightarrow> (F \<in> A \<longmapsto>w A')"
by (simp add: LeadsTo_def Always_eq_includes_reachable Int_absorb2 Int_assoc [symmetric] leadsToD2)
lemma Always_LeadsTo_post: "F \<in> Always(I) ==> (F \<in> A \<longmapsto>w (I \<inter> A')) \<longleftrightarrow> (F \<in> A \<longmapsto>w A')"
apply (unfold LeadsTo_def)
apply (simp add: Always_eq_includes_reachable Int_absorb2 Int_assoc [symmetric] leadsToD2)
done
(* Like 'Always_LeadsTo_pre RS iffD1', but with premises in the good order *)
lemma Always_LeadsToI: "[| F \<in> Always(C); F \<in> (C \<inter> A) \<longmapsto>w A' |] ==> F \<in> A \<longmapsto>w A'"
by (blast intro: Always_LeadsTo_pre [THEN iffD1])
(* Like 'Always_LeadsTo_post RS iffD2', but with premises in the good order *)
lemma Always_LeadsToD: "[| F \<in> Always(C); F \<in> A \<longmapsto>w A' |] ==> F \<in> A \<longmapsto>w (C \<inter> A')"
by (blast intro: Always_LeadsTo_post [THEN iffD2])
(*** Introduction rules \<in> Basis, Trans, Union ***)
lemma LeadsTo_Basis: "F \<in> A Ensures B ==> F \<in> A \<longmapsto>w B"
by (auto simp add: Ensures_def LeadsTo_def)
lemma LeadsTo_Trans:
"[| F \<in> A \<longmapsto>w B; F \<in> B \<longmapsto>w C |] ==> F \<in> A \<longmapsto>w C"
apply (simp (no_asm_use) add: LeadsTo_def)
apply (blast intro: leadsTo_Trans)
done
lemma LeadsTo_Union:
"[|(!!A. A \<in> S ==> F \<in> A \<longmapsto>w B); F \<in> program|]==>F \<in> \<Union>(S) \<longmapsto>w B"
apply (simp add: LeadsTo_def)
apply (subst Int_Union_Union2)
apply (rule leadsTo_UN, auto)
done
(*** Derived rules ***)
lemma leadsTo_imp_LeadsTo: "F \<in> A \<longmapsto> B ==> F \<in> A \<longmapsto>w B"
apply (frule leadsToD2, clarify)
apply (simp (no_asm_simp) add: LeadsTo_eq)
apply (blast intro: leadsTo_weaken_L)
done
(*Useful with cancellation, disjunction*)
lemma LeadsTo_Un_duplicate: "F \<in> A \<longmapsto>w (A' \<union> A') ==> F \<in> A \<longmapsto>w A'"
by (simp add: Un_ac)
lemma LeadsTo_Un_duplicate2:
"F \<in> A \<longmapsto>w (A' \<union> C \<union> C) ==> F \<in> A \<longmapsto>w (A' \<union> C)"
by (simp add: Un_ac)
lemma LeadsTo_UN:
"[|(!!i. i \<in> I ==> F \<in> A(i) \<longmapsto>w B); F \<in> program|]
==>F:(\<Union>i \<in> I. A(i)) \<longmapsto>w B"
apply (simp add: LeadsTo_def)
apply (simp (no_asm_simp) del: UN_simps add: Int_UN_distrib)
apply (rule leadsTo_UN, auto)
done
(*Binary union introduction rule*)
lemma LeadsTo_Un:
"[| F \<in> A \<longmapsto>w C; F \<in> B \<longmapsto>w C |] ==> F \<in> (A \<union> B) \<longmapsto>w C"
apply (subst Un_eq_Union)
apply (rule LeadsTo_Union)
apply (auto dest: LeadsTo_type [THEN subsetD])
done
(*Lets us look at the starting state*)
lemma single_LeadsTo_I:
"[|(!!s. s \<in> A ==> F:{s} \<longmapsto>w B); F \<in> program|]==>F \<in> A \<longmapsto>w B"
apply (subst UN_singleton [symmetric], rule LeadsTo_UN, auto)
done
lemma subset_imp_LeadsTo: "[| A \<subseteq> B; F \<in> program |] ==> F \<in> A \<longmapsto>w B"
apply (simp (no_asm_simp) add: LeadsTo_def)
apply (blast intro: subset_imp_leadsTo)
done
lemma empty_LeadsTo: "F \<in> 0 \<longmapsto>w A \<longleftrightarrow> F \<in> program"
by (auto dest: LeadsTo_type [THEN subsetD]
intro: empty_subsetI [THEN subset_imp_LeadsTo])
declare empty_LeadsTo [iff]
lemma LeadsTo_state: "F \<in> A \<longmapsto>w state \<longleftrightarrow> F \<in> program"
by (auto dest: LeadsTo_type [THEN subsetD] simp add: LeadsTo_eq)
declare LeadsTo_state [iff]
lemma LeadsTo_weaken_R: "[| F \<in> A \<longmapsto>w A'; A'<=B'|] ==> F \<in> A \<longmapsto>w B'"
apply (unfold LeadsTo_def)
apply (auto intro: leadsTo_weaken_R)
done
lemma LeadsTo_weaken_L: "[| F \<in> A \<longmapsto>w A'; B \<subseteq> A |] ==> F \<in> B \<longmapsto>w A'"
apply (unfold LeadsTo_def)
apply (auto intro: leadsTo_weaken_L)
done
lemma LeadsTo_weaken: "[| F \<in> A \<longmapsto>w A'; B<=A; A'<=B' |] ==> F \<in> B \<longmapsto>w B'"
by (blast intro: LeadsTo_weaken_R LeadsTo_weaken_L LeadsTo_Trans)
lemma Always_LeadsTo_weaken:
"[| F \<in> Always(C); F \<in> A \<longmapsto>w A'; C \<inter> B \<subseteq> A; C \<inter> A' \<subseteq> B' |]
==> F \<in> B \<longmapsto>w B'"
apply (blast dest: Always_LeadsToI intro: LeadsTo_weaken Always_LeadsToD)
done
(** Two theorems for "proof lattices" **)
lemma LeadsTo_Un_post: "F \<in> A \<longmapsto>w B ==> F:(A \<union> B) \<longmapsto>w B"
by (blast dest: LeadsTo_type [THEN subsetD]
intro: LeadsTo_Un subset_imp_LeadsTo)
lemma LeadsTo_Trans_Un: "[| F \<in> A \<longmapsto>w B; F \<in> B \<longmapsto>w C |]
==> F \<in> (A \<union> B) \<longmapsto>w C"
apply (blast intro: LeadsTo_Un subset_imp_LeadsTo LeadsTo_weaken_L LeadsTo_Trans dest: LeadsTo_type [THEN subsetD])
done
(** Distributive laws **)
lemma LeadsTo_Un_distrib: "(F \<in> (A \<union> B) \<longmapsto>w C) \<longleftrightarrow> (F \<in> A \<longmapsto>w C & F \<in> B \<longmapsto>w C)"
by (blast intro: LeadsTo_Un LeadsTo_weaken_L)
lemma LeadsTo_UN_distrib: "(F \<in> (\<Union>i \<in> I. A(i)) \<longmapsto>w B) \<longleftrightarrow> (\<forall>i \<in> I. F \<in> A(i) \<longmapsto>w B) & F \<in> program"
by (blast dest: LeadsTo_type [THEN subsetD]
intro: LeadsTo_UN LeadsTo_weaken_L)
lemma LeadsTo_Union_distrib: "(F \<in> \<Union>(S) \<longmapsto>w B) \<longleftrightarrow> (\<forall>A \<in> S. F \<in> A \<longmapsto>w B) & F \<in> program"
by (blast dest: LeadsTo_type [THEN subsetD]
intro: LeadsTo_Union LeadsTo_weaken_L)
(** More rules using the premise "Always(I)" **)
lemma EnsuresI: "[| F:(A-B) Co (A \<union> B); F \<in> transient (A-B) |] ==> F \<in> A Ensures B"
apply (simp add: Ensures_def Constrains_eq_constrains)
apply (blast intro: ensuresI constrains_weaken transient_strengthen dest: constrainsD2)
done
lemma Always_LeadsTo_Basis: "[| F \<in> Always(I); F \<in> (I \<inter> (A-A')) Co (A \<union> A');
F \<in> transient (I \<inter> (A-A')) |]
==> F \<in> A \<longmapsto>w A'"
apply (rule Always_LeadsToI, assumption)
apply (blast intro: EnsuresI LeadsTo_Basis Always_ConstrainsD [THEN Constrains_weaken] transient_strengthen)
done
(*Set difference: maybe combine with leadsTo_weaken_L??
This is the most useful form of the "disjunction" rule*)
lemma LeadsTo_Diff:
"[| F \<in> (A-B) \<longmapsto>w C; F \<in> (A \<inter> B) \<longmapsto>w C |] ==> F \<in> A \<longmapsto>w C"
by (blast intro: LeadsTo_Un LeadsTo_weaken)
lemma LeadsTo_UN_UN:
"[|(!!i. i \<in> I ==> F \<in> A(i) \<longmapsto>w A'(i)); F \<in> program |]
==> F \<in> (\<Union>i \<in> I. A(i)) \<longmapsto>w (\<Union>i \<in> I. A'(i))"
apply (rule LeadsTo_Union, auto)
apply (blast intro: LeadsTo_weaken_R)
done
(*Binary union version*)
lemma LeadsTo_Un_Un:
"[| F \<in> A \<longmapsto>w A'; F \<in> B \<longmapsto>w B' |] ==> F:(A \<union> B) \<longmapsto>w (A' \<union> B')"
by (blast intro: LeadsTo_Un LeadsTo_weaken_R)
(** The cancellation law **)
lemma LeadsTo_cancel2: "[| F \<in> A \<longmapsto>w(A' \<union> B); F \<in> B \<longmapsto>w B' |] ==> F \<in> A \<longmapsto>w (A' \<union> B')"
by (blast intro: LeadsTo_Un_Un subset_imp_LeadsTo LeadsTo_Trans dest: LeadsTo_type [THEN subsetD])
lemma Un_Diff: "A \<union> (B - A) = A \<union> B"
by auto
lemma LeadsTo_cancel_Diff2: "[| F \<in> A \<longmapsto>w (A' \<union> B); F \<in> (B-A') \<longmapsto>w B' |] ==> F \<in> A \<longmapsto>w (A' \<union> B')"
apply (rule LeadsTo_cancel2)
prefer 2 apply assumption
apply (simp (no_asm_simp) add: Un_Diff)
done
lemma LeadsTo_cancel1: "[| F \<in> A \<longmapsto>w (B \<union> A'); F \<in> B \<longmapsto>w B' |] ==> F \<in> A \<longmapsto>w (B' \<union> A')"
apply (simp add: Un_commute)
apply (blast intro!: LeadsTo_cancel2)
done
lemma Diff_Un2: "(B - A) \<union> A = B \<union> A"
by auto
lemma LeadsTo_cancel_Diff1: "[| F \<in> A \<longmapsto>w (B \<union> A'); F \<in> (B-A') \<longmapsto>w B' |] ==> F \<in> A \<longmapsto>w (B' \<union> A')"
apply (rule LeadsTo_cancel1)
prefer 2 apply assumption
apply (simp (no_asm_simp) add: Diff_Un2)
done
(** The impossibility law **)
(*The set "A" may be non-empty, but it contains no reachable states*)
lemma LeadsTo_empty: "F \<in> A \<longmapsto>w 0 ==> F \<in> Always (state -A)"
apply (simp (no_asm_use) add: LeadsTo_def Always_eq_includes_reachable)
apply (cut_tac reachable_type)
apply (auto dest!: leadsTo_empty)
done
(** PSP \<in> Progress-Safety-Progress **)
(*Special case of PSP \<in> Misra's "stable conjunction"*)
lemma PSP_Stable: "[| F \<in> A \<longmapsto>w A'; F \<in> Stable(B) |]==> F:(A \<inter> B) \<longmapsto>w (A' \<inter> B)"
apply (simp add: LeadsTo_def Stable_eq_stable, clarify)
apply (drule psp_stable, assumption)
apply (simp add: Int_ac)
done
lemma PSP_Stable2: "[| F \<in> A \<longmapsto>w A'; F \<in> Stable(B) |] ==> F \<in> (B \<inter> A) \<longmapsto>w (B \<inter> A')"
apply (simp (no_asm_simp) add: PSP_Stable Int_ac)
done
lemma PSP: "[| F \<in> A \<longmapsto>w A'; F \<in> B Co B'|]==> F \<in> (A \<inter> B') \<longmapsto>w ((A' \<inter> B) \<union> (B' - B))"
apply (simp (no_asm_use) add: LeadsTo_def Constrains_eq_constrains)
apply (blast dest: psp intro: leadsTo_weaken)
done
lemma PSP2: "[| F \<in> A \<longmapsto>w A'; F \<in> B Co B' |]==> F:(B' \<inter> A) \<longmapsto>w ((B \<inter> A') \<union> (B' - B))"
by (simp (no_asm_simp) add: PSP Int_ac)
lemma PSP_Unless:
"[| F \<in> A \<longmapsto>w A'; F \<in> B Unless B'|]==> F:(A \<inter> B) \<longmapsto>w ((A' \<inter> B) \<union> B')"
apply (unfold op_Unless_def)
apply (drule PSP, assumption)
apply (blast intro: LeadsTo_Diff LeadsTo_weaken subset_imp_LeadsTo)
done
(*** Induction rules ***)
(** Meta or object quantifier ????? **)
lemma LeadsTo_wf_induct: "[| wf(r);
\<forall>m \<in> I. F \<in> (A \<inter> f-``{m}) \<longmapsto>w
((A \<inter> f-``(converse(r) `` {m})) \<union> B);
field(r)<=I; A<=f-``I; F \<in> program |]
==> F \<in> A \<longmapsto>w B"
apply (simp (no_asm_use) add: LeadsTo_def)
apply auto
apply (erule_tac I = I and f = f in leadsTo_wf_induct, safe)
apply (drule_tac [2] x = m in bspec, safe)
apply (rule_tac [2] A' = "reachable (F) \<inter> (A \<inter> f -`` (converse (r) ``{m}) \<union> B) " in leadsTo_weaken_R)
apply (auto simp add: Int_assoc)
done
lemma LessThan_induct: "[| \<forall>m \<in> nat. F:(A \<inter> f-``{m}) \<longmapsto>w ((A \<inter> f-``m) \<union> B);
A<=f-``nat; F \<in> program |] ==> F \<in> A \<longmapsto>w B"
apply (rule_tac A1 = nat and f1 = "%x. x" in wf_measure [THEN LeadsTo_wf_induct])
apply (simp_all add: nat_measure_field)
apply (simp add: ltI Image_inverse_lessThan vimage_def [symmetric])
done
(******
To be ported ??? I am not sure.
integ_0_le_induct
LessThan_bounded_induct
GreaterThan_bounded_induct
*****)
(*** Completion \<in> Binary and General Finite versions ***)
lemma Completion: "[| F \<in> A \<longmapsto>w (A' \<union> C); F \<in> A' Co (A' \<union> C);
F \<in> B \<longmapsto>w (B' \<union> C); F \<in> B' Co (B' \<union> C) |]
==> F \<in> (A \<inter> B) \<longmapsto>w ((A' \<inter> B') \<union> C)"
apply (simp (no_asm_use) add: LeadsTo_def Constrains_eq_constrains Int_Un_distrib)
apply (blast intro: completion leadsTo_weaken)
done
lemma Finite_completion_aux:
"[| I \<in> Fin(X);F \<in> program |]
==> (\<forall>i \<in> I. F \<in> (A(i)) \<longmapsto>w (A'(i) \<union> C)) \<longrightarrow>
(\<forall>i \<in> I. F \<in> (A'(i)) Co (A'(i) \<union> C)) \<longrightarrow>
F \<in> (\<Inter>i \<in> I. A(i)) \<longmapsto>w ((\<Inter>i \<in> I. A'(i)) \<union> C)"
apply (erule Fin_induct)
apply (auto simp del: INT_simps simp add: Inter_0)
apply (rule Completion, auto)
apply (simp del: INT_simps add: INT_extend_simps)
apply (blast intro: Constrains_INT)
done
lemma Finite_completion:
"[| I \<in> Fin(X); !!i. i \<in> I ==> F \<in> A(i) \<longmapsto>w (A'(i) \<union> C);
!!i. i \<in> I ==> F \<in> A'(i) Co (A'(i) \<union> C);
F \<in> program |]
==> F \<in> (\<Inter>i \<in> I. A(i)) \<longmapsto>w ((\<Inter>i \<in> I. A'(i)) \<union> C)"
by (blast intro: Finite_completion_aux [THEN mp, THEN mp])
lemma Stable_completion:
"[| F \<in> A \<longmapsto>w A'; F \<in> Stable(A');
F \<in> B \<longmapsto>w B'; F \<in> Stable(B') |]
==> F \<in> (A \<inter> B) \<longmapsto>w (A' \<inter> B')"
apply (unfold Stable_def)
apply (rule_tac C1 = 0 in Completion [THEN LeadsTo_weaken_R])
prefer 5
apply blast
apply auto
done
lemma Finite_stable_completion:
"[| I \<in> Fin(X);
(!!i. i \<in> I ==> F \<in> A(i) \<longmapsto>w A'(i));
(!!i. i \<in> I ==>F \<in> Stable(A'(i))); F \<in> program |]
==> F \<in> (\<Inter>i \<in> I. A(i)) \<longmapsto>w (\<Inter>i \<in> I. A'(i))"
apply (unfold Stable_def)
apply (rule_tac C1 = 0 in Finite_completion [THEN LeadsTo_weaken_R], simp_all)
apply (rule_tac [3] subset_refl, auto)
done
ML \<open>
(*proves "ensures/leadsTo" properties when the program is specified*)
fun ensures_tac ctxt sact =
SELECT_GOAL
(EVERY [REPEAT (Always_Int_tac ctxt 1),
eresolve_tac ctxt @{thms Always_LeadsTo_Basis} 1
ORELSE (*subgoal may involve LeadsTo, leadsTo or ensures*)
REPEAT (ares_tac ctxt [@{thm LeadsTo_Basis}, @{thm leadsTo_Basis},
@{thm EnsuresI}, @{thm ensuresI}] 1),
(*now there are two subgoals: co & transient*)
simp_tac (ctxt addsimps (Named_Theorems.get ctxt \<^named_theorems>\<open>program\<close>)) 2,
Rule_Insts.res_inst_tac ctxt
[((("act", 0), Position.none), sact)] [] @{thm transientI} 2,
(*simplify the command's domain*)
simp_tac (ctxt addsimps [@{thm domain_def}]) 3,
(* proving the domain part *)
clarify_tac ctxt 3,
dresolve_tac ctxt @{thms swap} 3, force_tac ctxt 4,
resolve_tac ctxt @{thms ReplaceI} 3, force_tac ctxt 3, force_tac ctxt 4,
asm_full_simp_tac ctxt 3, resolve_tac ctxt @{thms conjI} 3, simp_tac ctxt 4,
REPEAT (resolve_tac ctxt @{thms state_update_type} 3),
constrains_tac ctxt 1,
ALLGOALS (clarify_tac ctxt),
ALLGOALS (asm_full_simp_tac (ctxt addsimps [@{thm st_set_def}])),
ALLGOALS (clarify_tac ctxt),
ALLGOALS (asm_lr_simp_tac ctxt)]);
\<close>
method_setup ensures = \<open>
Args.goal_spec -- Scan.lift Args.embedded_inner_syntax >>
(fn (quant, s) => fn ctxt => SIMPLE_METHOD'' quant (ensures_tac ctxt s))
\<close> "for proving progress properties"
end
(*
Author: Florian Messner <[email protected]>
Author: Julian Parsert <[email protected]>
Author: Jonas SchΓΆpf <[email protected]>
Author: Christian Sternagel <[email protected]>
License: LGPL
*)
section \<open>Homogeneous Linear Diophantine Equations\<close>
theory Linear_Diophantine_Equations
imports List_Vector
begin
(*TODO: move*)
lemma lcm_div_le:
fixes a :: nat
shows "lcm a b div b \<le> a"
by (metis div_by_0 div_le_dividend div_le_mono div_mult_self_is_m lcm_nat_def neq0_conv)
(*TODO: move*)
lemma lcm_div_le':
fixes a :: nat
shows "lcm a b div a \<le> b"
by (metis lcm.commute lcm_div_le)
(*TODO: move*)
lemma lcm_div_gt_0:
fixes a :: nat
assumes "a > 0" and "b > 0"
shows "lcm a b div a > 0"
proof -
have "lcm a b = (a * b) div (gcd a b)"
using lcm_nat_def by blast
moreover have "\<dots> > 0"
using assms
by (metis assms calculation lcm_pos_nat)
ultimately show ?thesis
using assms
by simp (metis div_greater_zero_iff div_le_mono2 div_mult_self_is_m gcd_le2_nat not_gr0)
qed
(*TODO: move*)
lemma sum_list_list_update_Suc:
assumes "i < length u"
shows "sum_list (u[i := Suc (u ! i)]) = Suc (sum_list u)"
using assms
proof (induct u arbitrary: i)
case (Cons x xs)
then show ?case by (simp_all split: nat.splits)
qed (simp)
(*TODO: move*)
lemma lessThan_conv:
assumes "card A = n" and "\<forall>x\<in>A. x < n"
shows "A = {..<n}"
using assms by (simp add: card_subset_eq subsetI)
text \<open>
Given a non-empty list \<open>xs\<close> of \<open>n\<close> natural numbers,
either there is a value in \<open>xs\<close> that is \<open>0\<close> modulo \<open>n\<close>,
  or there are two distinct positions whose values have the same remainder modulo \<open>n\<close>.
\<close>
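(* Illustration: for xs = [1, 2, 3] and n = 3 the first disjunct holds (3 mod 3 = 0);
   for xs = [2, 5, 8] and n = 3 no element is divisible by 3, but the distinct
   positions 0 and 1 satisfy 2 mod 3 = 5 mod 3, so the second disjunct holds. *)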
lemma list_mod_cases:
assumes "length xs = n" and "n > 0"
shows "(\<exists>x\<in>set xs. x mod n = 0) \<or>
(\<exists>i<length xs. \<exists>j<length xs. i \<noteq> j \<and> (xs ! i) mod n = (xs ! j) mod n)"
proof -
let ?f = "\<lambda>x. x mod n" and ?X = "set xs"
have *: "\<forall>x \<in> ?f ` ?X. x < n" using \<open>n > 0\<close> by auto
consider (eq) "card (?f ` ?X) = card ?X" | (less) "card (?f ` ?X) < card ?X"
using antisym_conv2 and card_image_le by blast
then show ?thesis
proof (cases)
case eq
show ?thesis
proof (cases "distinct xs")
assume "distinct xs"
with eq have "card (?f ` ?X) = n"
using \<open>distinct xs\<close> by (simp add: assms card_distinct distinct_card)
from lessThan_conv [OF this *] and \<open>n > 0\<close>
have "\<exists>x\<in>set xs. x mod n = 0" by (metis imageE lessThan_iff)
then show ?thesis ..
next
assume "\<not> distinct xs"
then show ?thesis by (auto) (metis distinct_conv_nth)
qed
next
case less
from pigeonhole [OF this]
show ?thesis by (auto simp: inj_on_def iff: in_set_conv_nth)
qed
qed
text \<open>
Homogeneous linear Diophantine equations:
\<open>a\<^sub>1x\<^sub>1 + \<cdots> + a\<^sub>mx\<^sub>m = b\<^sub>1y\<^sub>1 + \<cdots> + b\<^sub>ny\<^sub>n\<close>
\<close>
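(* Illustrative instance: for a = [3, 5] and b = [2] the pair (x, y) = ([1, 1], [4])
   is a solution, since 3*1 + 5*1 = 8 = 2*4; no nonzero solution lies strictly below
   it pointwise, so it is also a minimal solution in the sense defined below. *)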
locale hlde_ops =
fixes a b :: "nat list"
begin
abbreviation "m \<equiv> length a"
abbreviation "n \<equiv> length b"
\<comment> \<open>The set of all solutions.\<close>
definition Solutions :: "(nat list \<times> nat list) set"
where
"Solutions = {(x, y). a \<bullet> x = b \<bullet> y \<and> length x = m \<and> length y = n}"
lemma in_Solutions_iff:
"(x, y) \<in> Solutions \<longleftrightarrow> length x = m \<and> length y = n \<and> a \<bullet> x = b \<bullet> y"
by (auto simp: Solutions_def)
\<comment> \<open>The set of pointwise minimal solutions.\<close>
definition Minimal_Solutions :: "(nat list \<times> nat list) set"
where
"Minimal_Solutions = {(x, y) \<in> Solutions. nonzero x \<and>
\<not> (\<exists>(u, v) \<in> Solutions. nonzero u \<and> u @ v <\<^sub>v x @ y)}"
definition dij :: "nat \<Rightarrow> nat \<Rightarrow> nat"
where
"dij i j = lcm (a ! i) (b ! j) div (a ! i)"
definition eij :: "nat \<Rightarrow> nat \<Rightarrow> nat"
where
"eij i j = lcm (a ! i) (b ! j) div (b ! j)"
definition sij :: "nat \<Rightarrow> nat \<Rightarrow> (nat list \<times> nat list)"
where
"sij i j = ((zeroes m)[i := dij i j], (zeroes n)[j := eij i j])"
subsection \<open>Further Constraints on Minimal Solutions\<close>
definition Ej :: "nat \<Rightarrow> nat list \<Rightarrow> nat set"
where
"Ej j x = { eij i j - 1 | i. i < length x \<and> x ! i \<ge> dij i j }"
definition Di :: "nat \<Rightarrow> nat list \<Rightarrow> nat set"
where
"Di i y = { dij i j - 1 | j. j < length y \<and> y ! j \<ge> eij i j }"
definition Di' :: "nat \<Rightarrow> nat list \<Rightarrow> nat set"
where
"Di' i y = { dij i (j + length b - length y) - 1 | j. j < length y \<and> y ! j \<ge> eij i (j + length b - length y) }"
lemma Ej_take_subset:
"Ej j (take k x) \<subseteq> Ej j x"
by (auto simp: Ej_def)
lemma Di_take_subset:
"Di i (take l y) \<subseteq> Di i y"
by (auto simp: Di_def)
lemma Di'_drop_subset:
"Di' i (drop l y) \<subseteq> Di' i y"
by (auto simp: Di'_def) (metis add.assoc add.commute less_diff_conv)
lemma finite_Ej:
"finite (Ej j x)"
by (rule finite_subset [of _ "(\<lambda>i. eij i j - 1) ` {0 ..< length x}"]) (auto simp: Ej_def)
lemma finite_Di:
"finite (Di i y)"
by (rule finite_subset [of _ "(\<lambda>j. dij i j - 1) ` {0 ..< length y}"]) (auto simp: Di_def)
lemma finite_Di':
"finite (Di' i y)"
by (rule finite_subset [of _ "(\<lambda>j. dij i (j + length b - length y) - 1) ` {0 ..< length y}"])
(auto simp: Di'_def)
definition max_y :: "nat list \<Rightarrow> nat \<Rightarrow> nat"
where
"max_y x j = (if j < n \<and> Ej j x \<noteq> {} then Min (Ej j x) else Max (set a))"
definition max_x :: "nat list \<Rightarrow> nat \<Rightarrow> nat"
where
"max_x y i = (if i < m \<and> Di i y \<noteq> {} then Min (Di i y) else Max (set b))"
definition max_x' :: "nat list \<Rightarrow> nat \<Rightarrow> nat"
where
"max_x' y i = (if i < m \<and> Di' i y \<noteq> {} then Min (Di' i y) else Max (set b))"
lemma Min_Ej_le:
assumes "j < n"
and "e \<in> Ej j x"
and "length x \<le> m"
shows "Min (Ej j x) \<le> Max (set a)" (is "?m \<le> _")
proof -
have "?m \<in> Ej j x"
using assms and finite_Ej and Min_in by blast
then obtain i where
i: "?m = eij i j - 1" "i < length x" "x ! i \<ge> dij i j"
by (auto simp: Ej_def)
have "lcm (a ! i) (b ! j) div b ! j \<le> a ! i" by (rule lcm_div_le)
then show ?thesis
using i and assms
by (auto simp: eij_def)
(meson List.finite_set Max_ge diff_le_self le_trans less_le_trans nth_mem)
qed
lemma Min_Di_le:
assumes "i < m"
and "e \<in> Di i y"
and "length y \<le> n"
shows "Min (Di i y) \<le> Max (set b)" (is "?m \<le> _")
proof -
have "?m \<in> Di i y"
using assms and finite_Di and Min_in by blast
then obtain j where
j: "?m = dij i j - 1" "j < length y" "y ! j \<ge> eij i j"
by (auto simp: Di_def)
have "lcm (a ! i) (b ! j) div a ! i \<le> b ! j" by (rule lcm_div_le')
then show ?thesis
using j and assms
by (auto simp: dij_def)
(meson List.finite_set Max_ge diff_le_self le_trans less_le_trans nth_mem)
qed
lemma Min_Di'_le:
assumes "i < m"
and "e \<in> Di' i y"
and "length y \<le> n"
shows "Min (Di' i y) \<le> Max (set b)" (is "?m \<le> _")
proof -
have "?m \<in> Di' i y"
using assms and finite_Di' and Min_in by blast
then obtain j where
j: "?m = dij i (j + length b - length y) - 1" "j < length y" "y ! j \<ge> eij i (j + length b - length y)"
by (auto simp: Di'_def)
then have "j + length b - length y < length b" using assms by auto
moreover
have "lcm (a ! i) (b ! (j + length b - length y)) div a ! i \<le> b ! (j + length b - length y)" by (rule lcm_div_le')
ultimately show ?thesis
using j and assms
by (auto simp: dij_def)
(meson List.finite_set Max_ge diff_le_self le_trans less_le_trans nth_mem)
qed
lemma max_y_le_take:
assumes "length x \<le> m"
shows "max_y x j \<le> max_y (take k x) j"
using assms and Min_Ej_le and Ej_take_subset and Min.subset_imp [OF _ _ finite_Ej]
by (auto simp: max_y_def) blast
lemma max_x_le_take:
assumes "length y \<le> n"
shows "max_x y i \<le> max_x (take l y) i"
using assms and Min_Di_le and Di_take_subset and Min.subset_imp [OF _ _ finite_Di]
by (auto simp: max_x_def) blast
lemma max_x'_le_drop:
assumes "length y \<le> n"
shows "max_x' y i \<le> max_x' (drop l y) i"
using assms and Min_Di'_le and Di'_drop_subset and Min.subset_imp [OF _ _ finite_Di']
by (auto simp: max_x'_def) blast
end
abbreviation "Solutions \<equiv> hlde_ops.Solutions"
abbreviation "Minimal_Solutions \<equiv> hlde_ops.Minimal_Solutions"
abbreviation "dij \<equiv> hlde_ops.dij"
abbreviation "eij \<equiv> hlde_ops.eij"
abbreviation "sij \<equiv> hlde_ops.sij"
declare hlde_ops.dij_def [code]
declare hlde_ops.eij_def [code]
declare hlde_ops.sij_def [code]
lemma Solutions_sym: "(x, y) \<in> Solutions a b \<longleftrightarrow> (y, x) \<in> Solutions b a"
by (auto simp: hlde_ops.in_Solutions_iff)
lemma Minimal_Solutions_imp_Solutions: "(x, y) \<in> Minimal_Solutions a b \<Longrightarrow> (x, y) \<in> Solutions a b"
by (auto simp: hlde_ops.Minimal_Solutions_def)
lemma Minimal_SolutionsI:
assumes "(x, y) \<in> Solutions a b"
and "nonzero x"
and "\<not> (\<exists>(u, v) \<in> Solutions a b. nonzero u \<and> u @ v <\<^sub>v x @ y)"
shows "(x, y) \<in> Minimal_Solutions a b"
using assms by (auto simp: hlde_ops.Minimal_Solutions_def)
lemma minimize_nonzero_solution:
assumes "(x, y) \<in> Solutions a b" and "nonzero x"
obtains u and v where "u @ v \<le>\<^sub>v x @ y" and "(u, v) \<in> Minimal_Solutions a b"
using assms
proof (induct "x @ y" arbitrary: x y thesis rule: wf_induct [OF wf_less])
case 1
then show ?case
proof (cases "(x, y) \<in> Minimal_Solutions a b")
case False
then obtain u and v where "nonzero u" and "(u, v) \<in> Solutions a b" and uv: "u @ v <\<^sub>v x @ y"
using 1(3,4) by (auto simp: hlde_ops.Minimal_Solutions_def)
with 1(1) [rule_format, of "u @ v" u v] obtain u' and v' where uv': "u' @ v' \<le>\<^sub>v u @ v"
and "(u', v') \<in> Minimal_Solutions a b" by blast
moreover have "u' @ v' \<le>\<^sub>v x @ y" using uv and uv' by auto
ultimately show ?thesis by (intro 1(2))
qed blast
qed
lemma Minimal_SolutionsI':
assumes "(x, y) \<in> Solutions a b"
and "nonzero x"
and "\<not> (\<exists>(u, v) \<in> Minimal_Solutions a b. u @ v <\<^sub>v x @ y)"
shows "(x, y) \<in> Minimal_Solutions a b"
proof (rule Minimal_SolutionsI [OF assms(1,2)])
show "\<not> (\<exists>(u, v) \<in> Solutions a b. nonzero u \<and> u @ v <\<^sub>v x @ y)"
proof
assume "\<exists>(u, v) \<in> Solutions a b. nonzero u \<and> u @ v <\<^sub>v x @ y"
then obtain u and v where "(u, v) \<in> Solutions a b" and "nonzero u"
and uv: "u @ v <\<^sub>v x @ y" by blast
then obtain u' and v' where "(u', v') \<in> Minimal_Solutions a b"
and uv': "u' @ v' \<le>\<^sub>v u @ v" by (blast elim: minimize_nonzero_solution)
moreover have "u' @ v' <\<^sub>v x @ y" using uv and uv' by auto
ultimately show False using assms by blast
qed
qed
lemma Minimal_Solutions_length:
"(x, y) \<in> Minimal_Solutions a b \<Longrightarrow> length x = length a \<and> length y = length b"
by (auto simp: hlde_ops.Minimal_Solutions_def hlde_ops.in_Solutions_iff)
lemma Minimal_Solutions_gt0:
"(x, y) \<in> Minimal_Solutions a b \<Longrightarrow> zeroes (length x) <\<^sub>v x"
using zero_less by (auto simp: hlde_ops.Minimal_Solutions_def)
lemma Minimal_Solutions_sym:
assumes "0 \<notin> set a" and "0 \<notin> set b"
shows "(xs, ys) \<in> Minimal_Solutions a b \<longrightarrow> (ys, xs) \<in> Minimal_Solutions b a"
using assms
by (auto simp: hlde_ops.Minimal_Solutions_def hlde_ops.Solutions_def
dest: dotprod_eq_nonzero_iff dest!: less_append_swap [of _ _ ys xs])
locale hlde = hlde_ops +
assumes no0: "0 \<notin> set a" "0 \<notin> set b"
begin
lemma nonzero_Solutions_iff:
assumes "(x, y) \<in> Solutions"
shows "nonzero x \<longleftrightarrow> nonzero y"
using assms and no0 by (auto simp: in_Solutions_iff dest: dotprod_eq_nonzero_iff)
lemma Minimal_Solutions_min:
assumes "(x, y) \<in> Minimal_Solutions"
and "u @ v <\<^sub>v x @ y"
and "a \<bullet> u = b \<bullet> v"
and [simp]: "length u = m"
and non0: "nonzero (u @ v)"
shows False
proof -
have [simp]: "length v = n" using assms by (force dest: less_appendD Minimal_Solutions_length)
have "(u, v) \<in> Solutions" using \<open>a \<bullet> u = b \<bullet> v\<close> by (simp add: in_Solutions_iff)
moreover from nonzero_Solutions_iff [OF this] have "nonzero u" using non0 by auto
ultimately show False using assms by (auto simp: hlde_ops.Minimal_Solutions_def)
qed
lemma Solutions_snd_not_0:
assumes "(x, y) \<in> Solutions"
and "nonzero x"
shows "nonzero y"
using assms by (metis nonzero_Solutions_iff)
end
subsection \<open>Pointwise Restricting Solutions\<close>
text \<open>
Constructing the list of \<open>u\<close> vectors from Huet's proof \<^cite>\<open>"Huet1978"\<close>, satisfying
\<^item> \<open>\<forall>i<length u. u ! i \<le> y ! i\<close> and
\<^item> \<open>0 < sum_list u \<le> a\<^sub>k\<close>.
\<close>
text \<open>
  Given \<open>y\<close>, increment a "previous" vector \<open>u\<close> at the first position
  (searching from index \<open>i\<close> onwards) where \<open>u\<close> is strictly smaller than \<open>y\<close>.
  If no such position exists, return \<open>u\<close> unchanged.
\<close>
function inc :: "nat list \<Rightarrow> nat \<Rightarrow> nat list \<Rightarrow> nat list"
where
"inc y i u =
(if i < length y then
if u ! i < y ! i then u[i := u ! i + 1]
else inc y (Suc i) u
else u)"
by (pat_completeness) auto
termination inc
by (relation "measure (\<lambda>(y, i, u). max (length y) (length u) - i)") auto
(*inc.simps may cause simplification to loop*)
declare inc.simps [simp del]
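(* Behaviour of inc on a small example: inc [2, 1] 0 [2, 0] = [2, 1], since position 0
   is already saturated and position 1 can still be incremented; inc [2, 1] 0 [2, 1]
   returns [2, 1] unchanged, because no position of u is strictly below y. *)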
text \<open>
  Starting from the 0-vector, produce the \<open>u\<close> vectors by iteratively
  incrementing with respect to \<open>y\<close>.
\<close>
definition huets_us :: "nat list \<Rightarrow> nat \<Rightarrow> nat list" ("\<^bold>u" 1000)
where
"\<^bold>u y i = ((inc y 0) ^^ Suc i) (zeroes (length y))"
lemma huets_us_simps [simp]:
"\<^bold>u y 0 = inc y 0 (zeroes (length y))"
"\<^bold>u y (Suc i) = inc y 0 (\<^bold>u y i)"
by (auto simp: huets_us_def)
lemma length_inc [simp]: "length (inc y i u) = length u"
by (induct y i u rule: inc.induct) (simp add: inc.simps)
lemma length_us [simp]:
"length (\<^bold>u y i) = length y"
by (induct i) (simp_all)
text \<open>
\<open>inc\<close> produces vectors that are pointwise smaller than \<open>y\<close>
\<close>
lemma us_le:
assumes "length y > 0"
shows "\<^bold>u y i \<le>\<^sub>v y"
using assms by (induct i) (auto simp: inc_le le_length)
lemma sum_list_inc_le:
"u \<le>\<^sub>v y \<Longrightarrow> sum_list (inc y i u) \<le> sum_list y"
by (induct y i u rule: inc.induct)
(auto simp: inc.simps intro: le_sum_list_mono)
lemma sum_list_inc_gt0:
assumes "sum_list u > 0" and "length y = length u"
shows "sum_list (inc y i u) > 0"
using assms
proof (induct y i u rule: inc.induct)
case (1 y i u)
then show ?case
by (auto simp add: inc.simps)
(meson Suc_neq_Zero gr_zeroI set_update_memI sum_list_eq_0_iff)
qed
lemma sum_list_inc_gt0':
assumes "length u = length y" and "i < length y" and "y ! i > 0" and "j \<le> i"
shows "sum_list (inc y j u) > 0"
using assms
proof (induct y j u rule: inc.induct)
case (1 y i u)
then show ?case
by (auto simp: inc.simps [of y i] sum_list_update)
(metis elem_le_sum_list le_antisym le_zero_eq neq0_conv not_less_eq_eq sum_list_inc_gt0)
qed
lemma sum_list_us_gt0:
assumes "sum_list y \<noteq> 0"
shows "0 < sum_list (\<^bold>u y i)"
using assms by (induct i) (auto simp: in_set_conv_nth sum_list_inc_gt0' sum_list_inc_gt0)
lemma sum_list_inc_le':
assumes "length u = length y"
shows "sum_list (inc y i u) \<le> sum_list u + 1"
using assms
by (induct y i u rule: inc.induct) (auto simp: inc.simps sum_list_update)
lemma sum_list_us_bounded:
assumes "i < k"
shows "sum_list (\<^bold>u y i) \<le> k"
using assms and sum_list_us_le [of y i] by force
lemma sum_list_inc_eq_sum_list_Suc:
assumes "length u = length y" and "i < length y"
and "\<exists>j\<ge>i. j < length y \<and> u ! j < y ! j"
shows "sum_list (inc y i u) = Suc (sum_list u)"
using assms
by (induct y i u rule: inc.induct)
(metis inc.simps Suc_eq_plus1 Suc_leI antisym_conv2 leD sum_list_list_update_Suc)
lemma sum_list_us_eq:
assumes "i < sum_list y"
shows "sum_list (\<^bold>u y i) = i + 1"
using assms
proof (induct i)
case (Suc i)
then show ?case
by (auto)
(metis (no_types, lifting) Suc_eq_plus1 gr_implies_not0 length_pos_if_in_set
length_us less_Suc_eq_le less_imp_le_nat antisym_conv2 not_less_eq_eq
sum_list_eq_0_iff sum_list_inc_eq_sum_list_Suc sum_list_less_diff_Ex us_le)
qed (metis Suc_eq_plus1 Suc_leI antisym_conv gr_implies_not0 sum_list_us_gt0 sum_list_us_le)
lemma inc_ge: "length u = length y \<Longrightarrow> u \<le>\<^sub>v inc y i u"
by (induct y i u rule: inc.induct) (auto simp: inc.simps nth_list_update less_eq_def)
lemma us_le_mono:
assumes "i < j"
shows "\<^bold>u y i \<le>\<^sub>v \<^bold>u y j"
using assms
proof (induct "j - i" arbitrary: j i)
case (Suc n)
then show ?case
by (simp add: Suc.prems inc_ge order.strict_implies_order order_vec.lift_Suc_mono_le)
qed simp
lemma us_mono:
assumes "i < j" and "j < sum_list y"
shows "\<^bold>u y i <\<^sub>v \<^bold>u y j"
proof -
let ?u = "\<^bold>u y i" and ?v = "\<^bold>u y j"
have "?u \<le>\<^sub>v ?v"
using us_le_mono [OF \<open>i < j\<close>] by simp
moreover have "sum_list ?u < sum_list ?v"
using assms by (auto simp: sum_list_us_eq)
ultimately show ?thesis by (intro le_sum_list_less) (auto simp: less_eq_def)
qed
context hlde
begin
lemma max_coeff_bound_right:
assumes "(xs, ys) \<in> Minimal_Solutions"
shows "\<forall>x \<in> set xs. x \<le> maxne0 ys b" (is "\<forall>x\<in>set xs. x \<le> ?m")
proof (rule ccontr)
assume "\<not> ?thesis"
then obtain k
where k_def: "k < length xs \<and> \<not> (xs ! k \<le> ?m)"
by (metis in_set_conv_nth)
have sol: "(xs, ys) \<in> Solutions"
using assms Minimal_Solutions_def by auto
then have len: "m = length xs" by (simp add: in_Solutions_iff)
have max_suml: "?m * sum_list ys \<ge> b \<bullet> ys"
using maxne0_times_sum_list_gt_dotprod sol by (auto simp: in_Solutions_iff)
then have is_sol: "b \<bullet> ys = a \<bullet> xs"
using sol by (auto simp: in_Solutions_iff)
then have a_ge_ak: "a \<bullet> xs \<ge> a ! k * xs ! k"
using dotprod_pointwise_le k_def len by auto
then have ak_gt_max: "a ! k * xs ! k > a ! k * ?m"
using no0 in_set_conv_nth k_def len by fastforce
then have sl_ys_g_ak: "sum_list ys > a ! k"
by (metis a_ge_ak is_sol less_le_trans max_suml
mult.commute mult_le_mono1 not_le)
define Seq where
Seq_def: "Seq = map (\<^bold>u ys) [0 ..< a ! k]"
have ak_n0: "a ! k \<noteq> 0"
using \<open>a ! k * ?m < a ! k * xs ! k\<close> by auto
have "zeroes (length ys) <\<^sub>v ys"
by (intro zero_less) (metis gr_implies_not0 nonzero_iff sl_ys_g_ak sum_list_eq_0_iff)
then have "length Seq > 0"
using ak_n0 Seq_def by auto
have u_in_nton: "\<forall>u \<in> set Seq. length u = length ys"
by (simp add: Seq_def)
have prop_3: "\<forall>u \<in> set Seq. u \<le>\<^sub>v ys"
proof -
have "length ys > 0"
using sl_ys_g_ak by auto
then show ?thesis
using us_le [of ys ] less_eq_def Seq_def by (simp)
qed
have prop_4_1: "\<forall>u \<in> set Seq. sum_list u > 0"
by (metis Seq_def sl_ys_g_ak gr_implies_not_zero imageE
set_map sum_list_us_gt0)
have prop_4_2: "\<forall>u \<in> set Seq. sum_list u \<le> a ! k"
by (simp add: Seq_def sum_list_us_bounded)
have prop_5: "\<exists>u. length u = length ys \<and> u \<le>\<^sub>v ys \<and> sum_list u > 0 \<and> sum_list u \<le> a ! k"
using \<open>0 < length Seq\<close> nth_mem prop_3 prop_4_1 prop_4_2 u_in_nton by blast
define Us where
"Us = {u. length u = length ys \<and> u \<le>\<^sub>v ys \<and> sum_list u > 0 \<and> sum_list u \<le> a ! k}"
have "\<exists>u \<in> Us. b \<bullet> u mod a ! k = 0"
proof (rule ccontr)
assume neg_th: "\<not> ?thesis"
define Seq_p where
"Seq_p = map (dotprod b) Seq"
have "length Seq = a ! k"
by (simp add: Seq_def)
then consider (eq_0) "(\<exists>x\<in>set Seq_p. x mod (a ! k) = 0)" |
(not_0) "(\<exists>i<length Seq_p. \<exists>j<length Seq_p. i \<noteq> j \<and>
(Seq_p ! i) mod (a!k) = (Seq_p ! j) mod (a!k))"
using list_mod_cases[of Seq_p] Seq_p_def ak_n0 by auto force
then show False
proof (cases)
case eq_0
have "\<exists>u \<in> set Seq. b \<bullet> u mod a ! k = 0"
using Seq_p_def eq_0 by auto
then show False
by (metis (mono_tags, lifting) Us_def mem_Collect_eq
neg_th prop_3 prop_4_1 prop_4_2 u_in_nton)
next
case not_0
obtain i and j where
i_j: "i<length Seq_p" "j<length Seq_p" " i \<noteq> j"
" Seq_p ! i mod a ! k = Seq_p ! j mod a ! k"
using not_0 by blast
define v where
v_def: "v = Seq!i"
define w where
w_def: "w = Seq!j"
have mod_eq: "b \<bullet> v mod a!k = b \<bullet> w mod a!k"
using Seq_p_def i_j w_def v_def i_j by auto
have "v <\<^sub>v w \<or> w <\<^sub>v v"
using \<open>i \<noteq> j\<close> and i_j
proof (cases "i < j")
case True
then show ?thesis
using Seq_p_def sl_ys_g_ak i_j(2) local.Seq_def us_mono v_def w_def by auto
next
case False
then show ?thesis
using Seq_p_def sl_ys_g_ak \<open>i \<noteq> j\<close> i_j(1) local.Seq_def us_mono v_def w_def by auto
qed
then show False
proof
assume ass: "v <\<^sub>v w"
define u where
u_def: "u = w -\<^sub>v v"
have "w \<le>\<^sub>v ys"
using Seq_p_def w_def i_j(2) prop_3 by force
then have prop_3: "less_eq u ys"
using vdiff_le ass less_eq_def order_vec.less_imp_le u_def by auto
have prop_4_1: "sum_list u > 0"
using le_sum_list_mono [of v w] ass u_def sum_list_vdiff_distr [of v w]
by (simp add: less_vec_sum_list_less)
have prop_4_2: "sum_list u \<le> a ! k"
proof -
have "u \<le>\<^sub>v w" using u_def
using ass less_eq_def order_vec.less_imp_le vdiff_le by auto
then show ?thesis
by (metis Seq_p_def i_j(2) length_map le_sum_list_mono
less_le_trans not_le nth_mem prop_4_2 w_def)
qed
have "b \<bullet> u mod a ! k = 0"
by (metis (mono_tags, lifting) in_Solutions_iff \<open>w \<le>\<^sub>v ys\<close> u_def ass no0(2)
less_eq_def mem_Collect_eq mod_eq mods_with_vec_2 prod.simps(2) sol)
then show False using neg_th
by (metis (mono_tags, lifting) Us_def less_eq_def mem_Collect_eq
prop_3 prop_4_1 prop_4_2)
next
assume ass: "w <\<^sub>v v"
define u where
u_def: "u = v -\<^sub>v w"
have "v \<le>\<^sub>v ys"
using Seq_p_def v_def i_j(1) prop_3 by force
then have prop_3: "u \<le>\<^sub>v ys"
using vdiff_le ass less_eq_def order_vec.less_imp_le u_def by auto
have prop_4_1: "sum_list u > 0"
using le_sum_list_mono [of w v] sum_list_vdiff_distr [of w v]
\<open>u \<equiv> v -\<^sub>v w\<close> ass less_vec_sum_list_less by auto
have prop_4_2: "sum_list u \<le> a!k"
proof -
have "u \<le>\<^sub>v v" using u_def
using ass less_eq_def order_vec.less_imp_le vdiff_le by auto
then show ?thesis
by (metis Seq_p_def i_j(1) le_neq_implies_less length_map less_imp_le_nat
less_le_trans nth_mem prop_4_2 le_sum_list_mono v_def)
qed
have "b \<bullet> u mod a ! k = 0"
by (metis (mono_tags, lifting) in_Solutions_iff \<open>v \<le>\<^sub>v ys\<close> u_def ass no0(2)
less_eq_def mem_Collect_eq mod_eq mods_with_vec_2 prod.simps(2) sol)
then show False
by (metis (mono_tags, lifting) neg_th Us_def less_eq_def mem_Collect_eq prop_3 prop_4_1 prop_4_2)
qed
qed
qed
then obtain u where
u3_4: "u \<le>\<^sub>v ys" "sum_list u > 0" "sum_list u \<le> a ! k" " b \<bullet> u mod (a ! k) = 0"
"length u = length ys"
unfolding Us_def by auto
have u_b_len: "length u = n"
using less_eq_def u3_4 in_Solutions_iff sol by simp
have "b \<bullet> u \<le> maxne0 u b * sum_list u"
by (simp add: maxne0_times_sum_list_gt_dotprod u_b_len)
also have "... \<le> ?m * a ! k"
by (intro mult_le_mono) (simp_all add: u3_4 maxne0_mono)
also have "... < a ! k * xs ! k"
using ak_gt_max by auto
then obtain zk where
zk: "b \<bullet> u = zk * a ! k"
using u3_4(4) by auto
have "length xs > k"
by (simp add: k_def)
have "zk \<noteq> 0"
proof -
have "\<exists>e \<in> set u. e \<noteq> 0"
using u3_4
by (metis neq0_conv sum_list_eq_0_iff)
then have "b \<bullet> u > 0"
using assms no0 u3_4
unfolding dotprod_gt0_iff[OF u_b_len [symmetric]]
by (fastforce simp add: in_set_conv_nth u_b_len)
then have "a ! k > 0"
using \<open>a ! k \<noteq> 0\<close> by blast
then show ?thesis
using \<open>0 < b \<bullet> u\<close> zk by auto
qed
define z where
z_def: "z = (zeroes (length xs))[k := zk]"
then have zk_zk: "z ! k = zk"
by (auto simp add: \<open>k < length xs\<close>)
have "length z = length xs"
using assms z_def \<open>k < length xs\<close> by auto
then have bu_eq_akzk: "b \<bullet> u = a ! k * z ! k"
by (simp add: \<open>b \<bullet> u = zk * a ! k\<close> zk_zk)
then have "z!k < xs!k"
using ak_gt_max calculation by auto
then have z_less_xs: "z <\<^sub>v xs"
by (auto simp add: z_def) (metis \<open>k < length xs\<close> le0 le_list_update less_def
less_imp_le order_vec.dual_order.antisym nat_neq_iff z_def zk_zk)
then have "z @ u <\<^sub>v xs @ ys"
by (intro less_append) (auto simp add: u3_4(1) z_less_xs)
moreover have "(z, u) \<in> Solutions"
by (auto simp add: bu_eq_akzk in_Solutions_iff z_def u_b_len \<open>k < length xs\<close> len)
moreover have "nonzero z"
using \<open>length z = length xs\<close> and \<open>zk \<noteq> 0\<close> and k_def and zk_zk by (auto simp: nonzero_iff)
ultimately show False using assms by (auto simp: Minimal_Solutions_def)
qed
text \<open>Proof of Lemma 1 of Huet's paper.\<close>
lemma max_coeff_bound:
assumes "(xs, ys) \<in> Minimal_Solutions"
shows "(\<forall>x \<in> set xs. x \<le> maxne0 ys b) \<and> (\<forall>y \<in> set ys. y \<le> maxne0 xs a)"
proof -
interpret ba: hlde b a by (standard) (auto simp: no0)
show ?thesis
using assms and Minimal_Solutions_sym [OF no0, of xs ys]
by (auto simp: max_coeff_bound_right ba.max_coeff_bound_right)
qed
lemma max_coeff_bound':
assumes "(x, y) \<in> Minimal_Solutions"
shows "\<forall>i<length x. x ! i \<le> Max (set b)" and "\<forall>j<length y. y ! j \<le> Max (set a)"
using max_coeff_bound [OF assms] and maxne0_le_Max
by auto (metis le_eq_less_or_eq less_le_trans nth_mem)+
lemma Minimal_Solutions_alt_def:
"Minimal_Solutions = {(x, y)\<in>Solutions.
(x, y) \<noteq> (zeroes m, zeroes n) \<and>
x \<le>\<^sub>v replicate m (Max (set b)) \<and>
y \<le>\<^sub>v replicate n (Max (set a)) \<and>
\<not> (\<exists>(u, v)\<in>Solutions. nonzero u \<and> u @ v <\<^sub>v x @ y)}"
by (auto simp: not_nonzero_iff Minimal_Solutions_imp_Solutions less_eq_def Minimal_Solutions_length max_coeff_bound'
intro!: Minimal_SolutionsI' dest: Minimal_Solutions_gt0)
(auto simp: Minimal_Solutions_def nonzero_Solutions_iff not_nonzero_iff)
subsection \<open>Special Solutions\<close>
definition Special_Solutions :: "(nat list \<times> nat list) set"
where
"Special_Solutions = {sij i j | i j. i < m \<and> j < n}"
lemma dij_neq_0:
assumes "i < m"
and "j < n"
shows "dij i j \<noteq> 0"
proof -
have "a ! i > 0" and "b ! j > 0"
using assms and no0 by (simp_all add: in_set_conv_nth)
then have "dij i j > 0"
using lcm_div_gt_0 [of "a ! i" "b ! j"] by (simp add: dij_def)
then show ?thesis by simp
qed
lemma eij_neq_0:
assumes "i < m"
and "j < n"
shows "eij i j \<noteq> 0"
proof -
have "a ! i > 0" and "b ! j > 0"
using assms and no0 by (simp_all add: in_set_conv_nth)
then have "eij i j > 0"
using lcm_div_gt_0[of "b ! j" "a ! i"] by (simp add: eij_def lcm.commute)
then show ?thesis
by simp
qed
lemma Special_Solutions_in_Solutions:
"x \<in> Special_Solutions \<Longrightarrow> x \<in> Solutions"
by (auto simp: in_Solutions_iff Special_Solutions_def sij_def dij_def eij_def)
lemma Special_Solutions_in_Minimal_Solutions:
assumes "(x, y) \<in> Special_Solutions"
shows "(x, y) \<in> Minimal_Solutions"
proof (intro Minimal_SolutionsI')
show "(x, y) \<in> Solutions" by (fact Special_Solutions_in_Solutions [OF assms])
then have [simp]: "length x = m" "length y = n" by (auto simp: in_Solutions_iff)
show "nonzero x" using assms and dij_neq_0
by (auto simp: Special_Solutions_def sij_def nonzero_iff)
(metis length_replicate set_update_memI)
show "\<not> (\<exists>(u, v)\<in>Minimal_Solutions. u @ v <\<^sub>v x @ y)"
proof
assume "\<exists>(u, v)\<in>Minimal_Solutions. u @ v <\<^sub>v x @ y"
then obtain u and v where uv: "(u, v) \<in> Minimal_Solutions" and "u @ v <\<^sub>v x @ y"
and [simp]: "length u = m" "length v = n"
and "nonzero u" by (auto simp: Minimal_Solutions_def in_Solutions_iff)
then consider "u <\<^sub>v x" and "v \<le>\<^sub>v y" | "v <\<^sub>v y" and "u \<le>\<^sub>v x" by (auto elim: less_append_cases)
then show False
proof (cases)
case 1
then obtain i and j where ij: "i < m" "j < n"
and less_dij: "u ! i < dij i j"
and "u \<le>\<^sub>v (zeroes m)[i := dij i j]"
and "v \<le>\<^sub>v (zeroes n)[j := eij i j]"
using assms by (auto simp: Special_Solutions_def sij_def unit_less)
then have u: "u = (zeroes m)[i := u ! i]" and v: "v = (zeroes n)[j := v ! j]"
by (auto simp: less_eq_def list_eq_iff_nth_eq)
(metis le_zero_eq length_list_update length_replicate rep_upd_unit)+
then have "u ! i > 0" using \<open>nonzero u\<close> and ij
by (metis gr_implies_not0 neq0_conv unit_less zero_less)
define c where "c = a ! i * u ! i"
then have ac: "a ! i dvd c" by simp
have "a \<bullet> u = b \<bullet> v" using uv by (auto simp: Minimal_Solutions_def in_Solutions_iff)
then have "c = b ! j * v ! j"
using ij unfolding c_def by (subst (asm) u, subst (asm)v, subst u, subst v) auto
then have bc: "b ! j dvd c" by simp
have "a ! i * u ! i < a ! i * dij i j"
using less_dij and no0 and ij by (auto simp: in_set_conv_nth)
then have "c < lcm (a ! i) (b ! j)" by (auto simp: dij_def c_def)
moreover have "lcm (a ! i) (b ! j) dvd c" by (simp add: ac bc)
moreover have "c > 0" using \<open>u ! i > 0\<close> and no0 and ij by (auto simp: c_def in_set_conv_nth)
ultimately show False using ac and bc by (auto dest: nat_dvd_not_less)
next
case 2
then obtain i and j where ij: "i < m" "j < n"
and less_dij: "v ! j < eij i j"
and "u \<le>\<^sub>v (zeroes m)[i := dij i j]"
and "v \<le>\<^sub>v (zeroes n)[j := eij i j]"
using assms by (auto simp: Special_Solutions_def sij_def unit_less)
then have u: "u = (zeroes m)[i := u ! i]" and v: "v = (zeroes n)[j := v ! j]"
by (auto simp: less_eq_def list_eq_iff_nth_eq)
(metis le_zero_eq length_list_update length_replicate rep_upd_unit)+
moreover have "nonzero v"
using \<open>nonzero u\<close> and \<open>(u, v) \<in> Minimal_Solutions\<close>
and Minimal_Solutions_imp_Solutions Solutions_snd_not_0 by blast
ultimately have "v ! j > 0" using ij
by (metis gr_implies_not0 neq0_conv unit_less zero_less)
define c where "c = b ! j * v ! j"
then have bc: "b ! j dvd c" by simp
have "a \<bullet> u = b \<bullet> v" using uv by (auto simp: Minimal_Solutions_def in_Solutions_iff)
then have "c = a ! i * u ! i"
using ij unfolding c_def by (subst (asm) u, subst (asm)v, subst u, subst v) auto
then have ac: "a ! i dvd c" by simp
have "b ! j * v ! j < b ! j * eij i j"
using less_dij and no0 and ij by (auto simp: in_set_conv_nth)
then have "c < lcm (a ! i) (b ! j)" by (auto simp: eij_def c_def)
moreover have "lcm (a ! i) (b ! j) dvd c" by (simp add: ac bc)
moreover have "c > 0" using \<open>v ! j > 0\<close> and no0 and ij by (auto simp: c_def in_set_conv_nth)
ultimately show False using ac and bc by (auto dest: nat_dvd_not_less)
qed
qed
qed
(*Lemma 2 of Huet*)
lemma non_special_solution_non_minimal:
assumes "(x, y) \<in> Solutions - Special_Solutions"
and ij: "i < m" "j < n"
and "x ! i \<ge> dij i j" and "y ! j \<ge> eij i j"
shows "(x, y) \<notin> Minimal_Solutions"
proof
assume min: "(x, y) \<in> Minimal_Solutions"
moreover have "sij i j \<in> Solutions"
using ij by (intro Special_Solutions_in_Solutions) (auto simp: Special_Solutions_def)
moreover have "(case sij i j of (u, v) \<Rightarrow> u @ v) <\<^sub>v x @ y"
using assms and min
apply (cases "sij i j")
apply (auto simp: sij_def Special_Solutions_def)
by (metis List_Vector.le0 Minimal_Solutions_length le_append le_list_update less_append order_vec.dual_order.strict_iff_order same_append_eq)
moreover have "(case sij i j of (u, v) \<Rightarrow> nonzero u)"
apply (auto simp: sij_def)
by (metis dij_neq_0 ij length_replicate nonzero_iff set_update_memI)
ultimately show False
by (auto simp: Minimal_Solutions_def)
qed
subsection \<open>Huet's conditions\<close>
(*A*)
definition "cond_A xs ys \<longleftrightarrow> (\<forall>x\<in>set xs. x \<le> maxne0 ys b)"
(*B*)
definition "cond_B x \<longleftrightarrow>
(\<forall>k\<le>m. take k a \<bullet> take k x \<le> b \<bullet> map (max_y (take k x)) [0 ..< n])"
(*C*)
definition "boundr x y \<longleftrightarrow> (\<forall>j<n. y ! j \<le> max_y x j)"
(*D*)
definition "cond_D x y \<longleftrightarrow> (\<forall>l\<le>n. take l b \<bullet> take l y \<le> a \<bullet> x)"
subsection \<open>New conditions: facilitating generation of candidates from right to left\<close>
(*condition on right sub-dotproduct*)
definition "subdprodr y \<longleftrightarrow>
(\<forall>l\<le>n. take l b \<bullet> take l y \<le> a \<bullet> map (max_x (take l y)) [0 ..< m])"
(*condition on left sub-dotproduct*)
definition "subdprodl x y \<longleftrightarrow> (\<forall>k\<le>m. take k a \<bullet> take k x \<le> b \<bullet> y)"
(*bound on elements of left vector*)
definition "boundl x y \<longleftrightarrow> (\<forall>i<m. x ! i \<le> max_x y i)"
lemma boundr:
assumes min: "(x, y) \<in> Minimal_Solutions"
and "(x, y) \<notin> Special_Solutions"
shows "boundr x y"
proof (unfold boundr_def, intro allI impI)
fix j
assume ass: "j < n"
have ln: "m = length x \<and> n = length y"
using assms Minimal_Solutions_def in_Solutions_iff min by auto
have is_sol: "(x, y) \<in> Solutions"
using assms Minimal_Solutions_def min by auto
have j_less_l: "j < n"
using assms ass le_less_trans by linarith
consider (notemp) "Ej j x \<noteq> {}" | (empty) " Ej j x = {}"
by blast
then show "y ! j \<le> max_y x j"
proof (cases)
case notemp
have max_y_def: "max_y x j = Min (Ej j x)"
using j_less_l max_y_def notemp by auto
have fin_e: "finite (Ej j x)"
using finite_Ej [of j x] by auto
have e_def': "\<forall>e \<in> Ej j x. (\<exists>i<length x. x ! i \<ge> dij i j \<and> eij i j - 1 = e)"
using Ej_def [of j x] by auto
then have "\<exists>i<length x. x ! i \<ge> dij i j \<and> eij i j - 1 = Min (Ej j x)"
using notemp Min_in e_def' fin_e by blast
then obtain i where
i: "i < length x" "x ! i \<ge> dij i j" "eij i j - 1 = Min (Ej j x)"
by blast
show ?thesis
proof (rule ccontr)
assume "\<not> ?thesis"
with non_special_solution_non_minimal [of x y i j]
and i and ln and assms and is_sol and j_less_l
have "case sij i j of (u, v) \<Rightarrow> u @ v \<le>\<^sub>v x @ y"
by (force simp: max_y_def)
then have cs:"case sij i j of (u, v) \<Rightarrow> u @ v <\<^sub>v x @ y"
using assms by(auto simp: Special_Solutions_def) (metis append_eq_append_conv
i(1) j_less_l length_list_update length_replicate sij_def
order_vec.le_neq_trans ln prod.sel(1))
then obtain u v where
u_v: "sij i j = (u, v)" "u @ v <\<^sub>v x @ y"
by blast
have dij_gt0: "dij i j > 0"
using assms(1) assms(2) dij_neq_0 i(1) j_less_l ln by auto
then have not_0_u: "nonzero u"
proof (unfold nonzero_iff)
have "i < length (zeroes m)" by (simp add: i(1) ln)
then show "\<exists>i\<in>set u. i \<noteq> 0"
by (metis (no_types) Pair_inject dij_gt0 set_update_memI sij_def u_v(1) neq0_conv)
qed
then have "sij i j \<in> Solutions"
by (metis (mono_tags, lifting) Special_Solutions_def i(1)
Special_Solutions_in_Solutions j_less_l ln mem_Collect_eq u_v(1))
then show False
using assms cs u_v not_0_u Minimal_Solutions_def min by auto
qed
next
case empty
have "\<forall>y\<in>set y. y \<le> Max (set a)"
using assms and max_coeff_bound and maxne0_le_Max
using le_trans by blast
then show ?thesis
using empty j_less_l ln max_y_def by auto
qed
qed
lemma boundl:
assumes min: "(x, y) \<in> Minimal_Solutions"
and "(x, y) \<notin> Special_Solutions"
shows "boundl x y"
proof (unfold boundl_def, intro allI impI)
fix i
assume ass: "i < m"
have ln: "n = length y \<and> m = length x"
using assms Minimal_Solutions_def in_Solutions_iff min by auto
have is_sol: "(x, y) \<in> Solutions"
using assms Minimal_Solutions_def min by auto
have i_less_l: "i < m"
using assms ass le_less_trans by linarith
consider (notemp) "Di i y \<noteq> {}" | (empty) " Di i y = {}"
by blast
then show "x ! i \<le> max_x y i"
proof (cases)
case notemp
have max_x_def: "max_x y i = Min (Di i y)"
using i_less_l max_x_def notemp by auto
have fin_e: "finite (Di i y)"
using finite_Di [of i y] by auto
have e_def': "\<forall>e \<in> Di i y. (\<exists>j<length y. y ! j \<ge> eij i j \<and> dij i j - 1 = e)"
using Di_def [of i y] by auto
then have "\<exists>j<length y. y ! j \<ge> eij i j \<and> dij i j - 1 = Min (Di i y)"
using notemp Min_in e_def' fin_e by blast
then obtain j where
j: "j < length y" "y ! j \<ge> eij i j" "dij i j - 1 = Min (Di i y)"
by blast
show ?thesis
proof (rule ccontr)
assume "\<not> ?thesis"
with non_special_solution_non_minimal [of x y i j]
and j and ln and assms and is_sol and i_less_l
have "case sij i j of (u, v) \<Rightarrow> u @ v \<le>\<^sub>v x @ y"
by (force simp: max_x_def)
then have cs: "case sij i j of (u, v) \<Rightarrow> u @ v <\<^sub>v x @ y"
using assms by(auto simp: Special_Solutions_def) (metis append_eq_append_conv
j(1) i_less_l length_list_update length_replicate sij_def
order_vec.le_neq_trans ln prod.sel(1))
then obtain u v where
u_v: "sij i j = (u, v)" "u @ v <\<^sub>v x @ y"
by blast
have dij_gt0: "dij i j > 0"
using assms(1) assms(2) dij_neq_0 j(1) i_less_l ln by auto
then have not_0_u: "nonzero u"
proof (unfold nonzero_iff)
have "i < length (zeroes m)"
using ass by simp
then show "\<exists>i\<in>set u. i \<noteq> 0"
by (metis (no_types) Pair_inject dij_gt0 set_update_memI sij_def u_v(1) neq0_conv)
qed
then have "sij i j \<in> Solutions"
by (metis (mono_tags, lifting) Special_Solutions_def j(1)
Special_Solutions_in_Solutions i_less_l ln mem_Collect_eq u_v(1))
then show False
using assms cs u_v not_0_u Minimal_Solutions_def min by auto
qed
next
case empty
have "\<forall>x\<in>set x. x \<le> Max (set b)"
using assms and max_coeff_bound and maxne0_le_Max
using le_trans by blast
then show ?thesis
using empty i_less_l ln max_x_def by auto
qed
qed
lemma Solution_imp_cond_D:
assumes "(x, y) \<in> Solutions"
shows "cond_D x y"
using assms and dotprod_le_take by (auto simp: cond_D_def in_Solutions_iff)
lemma Solution_imp_subdprodl:
assumes "(x, y) \<in> Solutions"
shows "subdprodl x y"
using assms and dotprod_le_take
by (auto simp: subdprodl_def in_Solutions_iff) metis
theorem conds:
assumes min: "(x, y) \<in> Minimal_Solutions"
shows cond_A: "cond_A x y"
and cond_B: "(x, y) \<notin> Special_Solutions \<Longrightarrow> cond_B x"
and "(x, y) \<notin> Special_Solutions \<Longrightarrow> boundr x y"
and cond_D: "cond_D x y"
and subdprodr: "(x, y) \<notin> Special_Solutions \<Longrightarrow> subdprodr y"
and subdprodl: "subdprodl x y"
proof -
have sol: "a \<bullet> x = b \<bullet> y" and ln: "m = length x \<and> n = length y"
using min by (auto simp: Minimal_Solutions_def in_Solutions_iff)
then have "\<forall>i<m. x ! i \<le> maxne0 y b"
by (metis min max_coeff_bound_right nth_mem)
then show "cond_A x y"
using min and le_less_trans by (auto simp: cond_A_def max_coeff_bound)
show "(x, y) \<notin> Special_Solutions \<Longrightarrow> cond_B x"
proof (unfold cond_B_def, intro allI impI)
fix k assume non_spec: "(x, y) \<notin> Special_Solutions" and k: "k \<le> m"
from k have "take k a \<bullet> take k x \<le> a \<bullet> x"
using dotprod_le_take ln by blast
also have "... = b \<bullet> y" by fact
also have map_b_dot_p: "... \<le> b \<bullet> map (max_y x) [0..<n]" (is "_ \<le> _ b \<bullet> ?nt")
using non_spec and less_eq_def and ln and boundr and min
by (fastforce intro!: dotprod_le_right simp: boundr_def)
also have "... \<le> b \<bullet> map (max_y (take k x)) [0..<n]" (is "_ \<le> _ \<bullet> ?t")
proof -
have "\<forall>j<n. ?nt!j \<le> ?t!j"
using min and ln and max_y_le_take and k by auto
then have "?nt \<le>\<^sub>v ?t"
using less_eq_def by auto
then show ?thesis
by (simp add: dotprod_le_right)
qed
finally show "take k a \<bullet> take k x \<le> b \<bullet> map (max_y (take k x)) [0..<n]"
by (auto simp: cond_B_def)
qed
show "(x, y) \<notin> Special_Solutions \<Longrightarrow> subdprodr y"
proof (unfold subdprodr_def, intro allI impI)
fix l assume non_spec: "(x, y) \<notin> Special_Solutions" and l: "l \<le> n"
from l have "take l b \<bullet> take l y \<le> b \<bullet> y"
using dotprod_le_take ln by blast
also have "... = a \<bullet> x" by (simp add: sol)
also have map_b_dot_p: "... \<le> a \<bullet> map (max_x y) [0..<m]" (is "_ \<le> _ a \<bullet> ?nt")
using non_spec and less_eq_def and ln and boundl and min
by (fastforce intro!: dotprod_le_right simp: boundl_def)
also have "... \<le> a \<bullet> map (max_x (take l y)) [0..<m]" (is "_ \<le> _ \<bullet> ?t")
proof -
have "\<forall>i<m. ?nt ! i \<le> ?t ! i"
using min and ln and max_x_le_take and l by auto
then have "?nt \<le>\<^sub>v ?t"
using less_eq_def by auto
then show ?thesis
by (simp add: dotprod_le_right)
qed
finally show "take l b \<bullet> take l y \<le> a \<bullet> map (max_x (take l y)) [0..<m]"
by (auto simp: cond_B_def)
qed
show "(x, y) \<notin> Special_Solutions \<Longrightarrow> boundr x y"
using boundr [of x y] and min by blast
show "cond_D x y"
using ln and dotprod_le_take and sol by (auto simp: cond_D_def)
show "subdprodl x y"
using ln and dotprod_le_take and sol by (force simp: subdprodl_def)
qed
lemma le_imp_Ej_subset:
assumes "u \<le>\<^sub>v x"
shows "Ej j u \<subseteq> Ej j x"
using assms and le_trans by (force simp: Ej_def less_eq_def dij_def eij_def)
lemma le_imp_max_y_ge:
assumes "u \<le>\<^sub>v x"
and "length x \<le> m"
shows "max_y u j \<ge> max_y x j"
using assms and le_imp_Ej_subset and Min_Ej_le [of j, OF _ _ assms(2)]
by (metis Min.subset_imp Min_in emptyE finite_Ej max_y_def order_refl subsetCE)
lemma le_imp_Di_subset:
assumes "v \<le>\<^sub>v y"
shows "Di i v \<subseteq> Di i y"
using assms and le_trans by (force simp: Di_def less_eq_def dij_def eij_def)
lemma le_imp_max_x_ge:
assumes "v \<le>\<^sub>v y"
and "length y \<le> n"
shows "max_x v i \<ge> max_x y i"
using assms and le_imp_Di_subset and Min_Di_le [of i, OF _ _ assms(2)]
by (metis Min.subset_imp Min_in emptyE finite_Di max_x_def order_refl subsetCE)
end
end
|
partial def f (x : Nat) : Nat → Nat
| 0 => x + 1
| i+1 => h i + 2
where
g y := f x y
h y := g y + 1
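-- Quick sanity check (added for illustration; not part of the original snippet).
-- Unfolding the `where` helpers: f x 0 = x + 1 and
-- f x (i+1) = h i + 2 = (g i + 1) + 2 = f x i + 3, so f 1 1 = 5.
#eval f 1 1  -- 5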
def reverse (as : List α) : List α :=
loop as []
where
  loop : List α → List α → List α
| [], acc => acc
| a::as, acc => loop as (a::acc)
theorem ex : reverse [1, 2, 3] = [3, 2, 1] :=
rfl
theorem lengthReverse (as : List α) : (reverse as).length = as.length :=
revLoop as []
where
  revLoop (as bs : List α) : (reverse.loop as bs).length = as.length + bs.length := by
induction as generalizing bs with
| nil => simp [reverse.loop]
| cons a as ih =>
show (reverse.loop as (a::bs)).length = (a :: as).length + bs.length
simp [ih, Nat.add_succ, Nat.succ_add]
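-- Illustrative use of the theorem on a concrete list (added; not in the
-- original snippet): `[1, 2, 3].length` reduces definitionally to `3`.
example : (reverse [1, 2, 3]).length = 3 :=
  lengthReverse [1, 2, 3]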
def h : Nat -> Nat
| 0 => g 0
| x+1 => g (h x)
where
g x := x + 1
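-- Small check (added for illustration): h 0 = g 0 = 1 and
-- h (n+1) = g (h n) = h n + 1, so h n = n + 1.
example : h 3 = 4 := rfl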
|
(* Title: HOL/MicroJava/J/TypeRel.thy
Author: David von Oheimb, Technische Universitaet Muenchen
*)
section \<open>Relations between Java Types\<close>
theory TypeRel
imports Decl
begin
\<comment> "direct subclass, cf. 8.1.3"
inductive_set
subcls1 :: "'c prog => (cname \<times> cname) set"
and subcls1' :: "'c prog => cname \<Rightarrow> cname => bool" ("_ \<turnstile> _ \<prec>C1 _" [71,71,71] 70)
for G :: "'c prog"
where
"G \<turnstile> C \<prec>C1 D \<equiv> (C, D) \<in> subcls1 G"
| subcls1I: "\<lbrakk>class G C = Some (D,rest); C \<noteq> Object\<rbrakk> \<Longrightarrow> G \<turnstile> C \<prec>C1 D"
abbreviation
subcls :: "'c prog => cname \<Rightarrow> cname => bool" ("_ \<turnstile> _ \<preceq>C _" [71,71,71] 70)
where "G \<turnstile> C \<preceq>C D \<equiv> (C, D) \<in> (subcls1 G)^*"
lemma subcls1D:
"G\<turnstile>C\<prec>C1D \<Longrightarrow> C \<noteq> Object \<and> (\<exists>fs ms. class G C = Some (D,fs,ms))"
apply (erule subcls1.cases)
apply auto
done
lemma subcls1_def2:
"subcls1 P =
(SIGMA C:{C. is_class P C}. {D. C\<noteq>Object \<and> fst (the (class P C))=D})"
by (auto simp add: is_class_def dest: subcls1D intro: subcls1I)
lemma finite_subcls1: "finite (subcls1 G)"
apply(simp add: subcls1_def2 del: mem_Sigma_iff)
apply(rule finite_SigmaI [OF finite_is_class])
apply(rule_tac B = "{fst (the (class G C))}" in finite_subset)
apply auto
done
lemma subcls_is_class: "(C, D) \<in> (subcls1 G)^+ ==> is_class G C"
apply (unfold is_class_def)
apply(erule trancl_trans_induct)
apply (auto dest!: subcls1D)
done
lemma subcls_is_class2 [rule_format (no_asm)]:
"G\<turnstile>C\<preceq>C D \<Longrightarrow> is_class G D \<longrightarrow> is_class G C"
apply (unfold is_class_def)
apply (erule rtrancl_induct)
apply (drule_tac [2] subcls1D)
apply auto
done
definition class_rec :: "'c prog \<Rightarrow> cname \<Rightarrow> 'a \<Rightarrow>
(cname \<Rightarrow> fdecl list \<Rightarrow> 'c mdecl list \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> 'a" where
"class_rec G == wfrec ((subcls1 G)^-1)
(\<lambda>r C t f. case class G C of
None \<Rightarrow> undefined
| Some (D,fs,ms) \<Rightarrow>
f C fs ms (if C = Object then t else r D t f))"
lemma class_rec_lemma:
assumes wf: "wf ((subcls1 G)^-1)"
and cls: "class G C = Some (D, fs, ms)"
shows "class_rec G C t f = f C fs ms (if C=Object then t else class_rec G D t f)"
by (subst wfrec_def_adm[OF class_rec_def])
(auto simp: assms adm_wf_def fun_eq_iff subcls1I split: option.split)
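text \<open>
  Illustration (added; the class names are hypothetical): for a well-founded
  class table G that contains declarations for C, D and Object, with
  C \<prec>C1 D \<prec>C1 Object, repeated application of the lemma above unfolds
  the recursion along the superclass chain:
    class_rec G C t f = f C fsC msC (f D fsD msD (f Object fsObj msObj t)),
  where fsX and msX stand for the field and method declarations of class X.
\<close>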
definition
"wf_class G = wf ((subcls1 G)^-1)"
text \<open>Code generator setup\<close>
code_pred
(modes: i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool, i \<Rightarrow> i \<Rightarrow> i \<Rightarrow> bool)
subcls1p
.
declare subcls1_def [code_pred_def]
code_pred
(modes: i \<Rightarrow> i \<times> o \<Rightarrow> bool, i \<Rightarrow> i \<times> i \<Rightarrow> bool)
[inductify]
subcls1
.
definition subcls' where "subcls' G = (subcls1p G)^**"
code_pred
(modes: i \<Rightarrow> i \<Rightarrow> i \<Rightarrow> bool, i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool)
[inductify]
subcls'
.
lemma subcls_conv_subcls' [code_unfold]:
"(subcls1 G)^* = {(C, D). subcls' G C D}"
by(simp add: subcls'_def subcls1_def rtrancl_def)
lemma class_rec_code [code]:
"class_rec G C t f =
(if wf_class G then
(case class G C of
None \<Rightarrow> class_rec G C t f
| Some (D, fs, ms) \<Rightarrow>
if C = Object then f Object fs ms t else f C fs ms (class_rec G D t f))
else class_rec G C t f)"
apply(cases "wf_class G")
apply(unfold class_rec_def wf_class_def)
apply(subst wfrec, assumption)
apply(cases "class G C")
apply(simp add: wfrec)
apply clarsimp
apply(rename_tac D fs ms)
apply(rule_tac f="f C fs ms" in arg_cong)
apply(clarsimp simp add: cut_def)
apply(blast intro: subcls1I)
apply simp
done
lemma wf_class_code [code]:
"wf_class G \<longleftrightarrow> (\<forall>(C, rest) \<in> set G. C \<noteq> Object \<longrightarrow> \<not> G \<turnstile> fst (the (class G C)) \<preceq>C C)"
proof
assume "wf_class G"
hence wf: "wf (((subcls1 G)^+)^-1)" unfolding wf_class_def by(rule wf_converse_trancl)
hence acyc: "acyclic ((subcls1 G)^+)" by(auto dest: wf_acyclic)
show "\<forall>(C, rest) \<in> set G. C \<noteq> Object \<longrightarrow> \<not> G \<turnstile> fst (the (class G C)) \<preceq>C C"
proof(safe)
fix C D fs ms
assume "(C, D, fs, ms) \<in> set G"
and "C \<noteq> Object"
and subcls: "G \<turnstile> fst (the (class G C)) \<preceq>C C"
from \<open>(C, D, fs, ms) \<in> set G\<close> obtain D' fs' ms'
where "class": "class G C = Some (D', fs', ms')"
unfolding class_def by(auto dest!: weak_map_of_SomeI)
hence "G \<turnstile> C \<prec>C1 D'" using \<open>C \<noteq> Object\<close> ..
hence *: "(C, D') \<in> (subcls1 G)^+" ..
also from * acyc have "C \<noteq> D'" by(auto simp add: acyclic_def)
with subcls "class" have "(D', C) \<in> (subcls1 G)^+" by(auto dest: rtranclD)
finally show False using acyc by(auto simp add: acyclic_def)
qed
next
assume rhs[rule_format]: "\<forall>(C, rest) \<in> set G. C \<noteq> Object \<longrightarrow> \<not> G \<turnstile> fst (the (class G C)) \<preceq>C C"
have "acyclic (subcls1 G)"
proof(intro acyclicI strip notI)
fix C
assume "(C, C) \<in> (subcls1 G)\<^sup>+"
thus False
proof(cases)
case base
then obtain rest where "class G C = Some (C, rest)"
and "C \<noteq> Object" by cases
from \<open>class G C = Some (C, rest)\<close> have "(C, C, rest) \<in> set G"
unfolding class_def by(rule map_of_SomeD)
with \<open>C \<noteq> Object\<close> \<open>class G C = Some (C, rest)\<close>
have "\<not> G \<turnstile> C \<preceq>C C" by(auto dest: rhs)
thus False by simp
next
case (step D)
from \<open>G \<turnstile> D \<prec>C1 C\<close> obtain rest where "class G D = Some (C, rest)"
and "D \<noteq> Object" by cases
from \<open>class G D = Some (C, rest)\<close> have "(D, C, rest) \<in> set G"
unfolding class_def by(rule map_of_SomeD)
with \<open>D \<noteq> Object\<close> \<open>class G D = Some (C, rest)\<close>
have "\<not> G \<turnstile> C \<preceq>C D" by(auto dest: rhs)
moreover from \<open>(C, D) \<in> (subcls1 G)\<^sup>+\<close>
have "G \<turnstile> C \<preceq>C D" by(rule trancl_into_rtrancl)
ultimately show False by contradiction
qed
qed
thus "wf_class G" unfolding wf_class_def
by(rule finite_acyclic_wf_converse[OF finite_subcls1])
qed
definition "method" :: "'c prog \<times> cname => (sig \<rightharpoonup> cname \<times> ty \<times> 'c)"
\<comment> "methods of a class, with inheritance, overriding and hiding, cf. 8.4.6"
where [code]: "method \<equiv> \<lambda>(G,C). class_rec G C empty (\<lambda>C fs ms ts.
ts ++ map_of (map (\<lambda>(s,m). (s,(C,m))) ms))"
definition fields :: "'c prog \<times> cname => ((vname \<times> cname) \<times> ty) list"
\<comment> "list of fields of a class, including inherited and hidden ones"
where [code]: "fields \<equiv> \<lambda>(G,C). class_rec G C [] (\<lambda>C fs ms ts.
map (\<lambda>(fn,ft). ((fn,C),ft)) fs @ ts)"
definition field :: "'c prog \<times> cname => (vname \<rightharpoonup> cname \<times> ty)"
where [code]: "field == map_of o (map (\<lambda>((fn,fd),ft). (fn,(fd,ft)))) o fields"
lemma method_rec_lemma: "[|class G C = Some (D,fs,ms); wf ((subcls1 G)^-1)|] ==>
method (G,C) = (if C = Object then empty else method (G,D)) ++
map_of (map (\<lambda>(s,m). (s,(C,m))) ms)"
apply (unfold method_def)
apply (simp split del: if_split)
apply (erule (1) class_rec_lemma [THEN trans])
apply auto
done
lemma fields_rec_lemma: "[|class G C = Some (D,fs,ms); wf ((subcls1 G)^-1)|] ==>
fields (G,C) =
map (\<lambda>(fn,ft). ((fn,C),ft)) fs @ (if C = Object then [] else fields (G,D))"
apply (unfold fields_def)
apply (simp split del: if_split)
apply (erule (1) class_rec_lemma [THEN trans])
apply auto
done
lemma field_fields:
"field (G,C) fn = Some (fd, fT) \<Longrightarrow> map_of (fields (G,C)) (fn, fd) = Some fT"
apply (unfold field_def)
apply (rule table_of_remap_SomeD)
apply simp
done
\<comment> "widening, viz. method invocation conversion,cf. 5.3 i.e. sort of syntactic subtyping"
inductive
widen :: "'c prog => [ty , ty ] => bool" ("_ \<turnstile> _ \<preceq> _" [71,71,71] 70)
for G :: "'c prog"
where
refl [intro!, simp]: "G\<turnstile> T \<preceq> T" \<comment> "identity conv., cf. 5.1.1"
| subcls : "G\<turnstile>C\<preceq>C D ==> G\<turnstile>Class C \<preceq> Class D"
| null [intro!]: "G\<turnstile> NT \<preceq> RefT R"
code_pred widen .
lemmas refl = HOL.refl
\<comment> "casting conversion, cf. 5.5 / 5.1.5"
\<comment> "left out casts on primitve types"
inductive
cast :: "'c prog => [ty , ty ] => bool" ("_ \<turnstile> _ \<preceq>? _" [71,71,71] 70)
for G :: "'c prog"
where
widen: "G\<turnstile> C\<preceq> D ==> G\<turnstile>C \<preceq>? D"
| subcls: "G\<turnstile> D\<preceq>C C ==> G\<turnstile>Class C \<preceq>? Class D"
lemma widen_PrimT_RefT [iff]: "(G\<turnstile>PrimT pT\<preceq>RefT rT) = False"
apply (rule iffI)
apply (erule widen.cases)
apply auto
done
lemma widen_RefT: "G\<turnstile>RefT R\<preceq>T ==> \<exists>t. T=RefT t"
apply (ind_cases "G\<turnstile>RefT R\<preceq>T")
apply auto
done
lemma widen_RefT2: "G\<turnstile>S\<preceq>RefT R ==> \<exists>t. S=RefT t"
apply (ind_cases "G\<turnstile>S\<preceq>RefT R")
apply auto
done
lemma widen_Class: "G\<turnstile>Class C\<preceq>T ==> \<exists>D. T=Class D"
apply (ind_cases "G\<turnstile>Class C\<preceq>T")
apply auto
done
lemma widen_Class_NullT [iff]: "(G\<turnstile>Class C\<preceq>NT) = False"
apply (rule iffI)
apply (ind_cases "G\<turnstile>Class C\<preceq>NT")
apply auto
done
lemma widen_Class_Class [iff]: "(G\<turnstile>Class C\<preceq> Class D) = (G\<turnstile>C\<preceq>C D)"
apply (rule iffI)
apply (ind_cases "G\<turnstile>Class C \<preceq> Class D")
apply (auto elim: widen.subcls)
done
lemma widen_NT_Class [simp]: "G \<turnstile> T \<preceq> NT \<Longrightarrow> G \<turnstile> T \<preceq> Class D"
by (ind_cases "G \<turnstile> T \<preceq> NT", auto)
lemma cast_PrimT_RefT [iff]: "(G\<turnstile>PrimT pT\<preceq>? RefT rT) = False"
apply (rule iffI)
apply (erule cast.cases)
apply auto
done
lemma cast_RefT: "G \<turnstile> C \<preceq>? Class D \<Longrightarrow> \<exists> rT. C = RefT rT"
apply (erule cast.cases)
apply simp apply (erule widen.cases)
apply auto
done
theorem widen_trans[trans]: "\<lbrakk>G\<turnstile>S\<preceq>U; G\<turnstile>U\<preceq>T\<rbrakk> \<Longrightarrow> G\<turnstile>S\<preceq>T"
proof -
assume "G\<turnstile>S\<preceq>U" thus "\<And>T. G\<turnstile>U\<preceq>T \<Longrightarrow> G\<turnstile>S\<preceq>T"
proof induct
case (refl T T') thus "G\<turnstile>T\<preceq>T'" .
next
case (subcls C D T)
then obtain E where "T = Class E" by (blast dest: widen_Class)
with subcls show "G\<turnstile>Class C\<preceq>T" by auto
next
case (null R RT)
then obtain rt where "RT = RefT rt" by (blast dest: widen_RefT)
thus "G\<turnstile>NT\<preceq>RT" by auto
qed
qed
end
|