/- Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import topology.metric_space.emetric_paracompact
import analysis.convex.partition_of_unity

/-!
# Lemmas about (e)metric spaces that need partition of unity

The main lemma in this file (see `metric.exists_continuous_real_forall_closed_ball_subset`) says
the following. Let `X` be a metric space. Let `K : ι → set X` be a locally finite family of closed
sets, let `U : ι → set X` be a family of open sets such that `K i ⊆ U i` for all `i`. Then there
exists a positive continuous function `δ : C(X, ℝ)` such that for any `i` and `x ∈ K i`, we have
`metric.closed_ball x (δ x) ⊆ U i`. We also formulate versions of this lemma for extended metric
spaces and for different codomains (`ℝ`, `ℝ≥0`, and `ℝ≥0∞`).

We also prove a few auxiliary lemmas to be used later in a proof of the smooth version of this
lemma.

## Tags

metric space, partition of unity, locally finite
-/

open_locale topology ennreal big_operators nnreal filter
open set function filter topological_space

variables {ι X : Type*}

namespace emetric

variables [emetric_space X] {K : ι → set X} {U : ι → set X}

/-- Let `K : ι → set X` be a locally finite family of closed sets in an emetric space. Let
`U : ι → set X` be a family of open sets such that `K i ⊆ U i` for all `i`. Then for any point
`x : X`, for sufficiently small `r : ℝ≥0∞` and for `y` sufficiently close to `x`, for all `i`, if
`y ∈ K i`, then `emetric.closed_ball y r ⊆ U i`. -/
lemma exists_forall_closed_ball_subset_aux₁ (hK : ∀ i, is_closed (K i)) (hU : ∀ i, is_open (U i))
  (hKU : ∀ i, K i ⊆ U i) (hfin : locally_finite K) (x : X) :
  ∃ r : ℝ, ∀ᶠ y in 𝓝 x, r ∈ Ioi (0 : ℝ) ∩
    ennreal.of_real ⁻¹' ⋂ i (hi : y ∈ K i), {r | closed_ball y r ⊆ U i} :=
begin
  have := (ennreal.continuous_of_real.tendsto' 0 0 ennreal.of_real_zero).eventually
    (eventually_nhds_zero_forall_closed_ball_subset hK hU hKU hfin x).curry,
  rcases this.exists_gt with ⟨r, hr0, hr⟩,
  refine ⟨r, hr.mono (λ y hy, ⟨hr0, _⟩)⟩,
  rwa [mem_preimage, mem_Inter₂]
end

lemma exists_forall_closed_ball_subset_aux₂ (y : X) :
  convex ℝ (Ioi (0 : ℝ) ∩
    ennreal.of_real ⁻¹' ⋂ i (hi : y ∈ K i), {r | closed_ball y r ⊆ U i}) :=
(convex_Ioi _).inter $ ord_connected.convex $ ord_connected.preimage_ennreal_of_real $
  ord_connected_Inter $ λ i, ord_connected_Inter $
    λ hi, ord_connected_set_of_closed_ball_subset y (U i)

/-- Let `X` be an extended metric space. Let `K : ι → set X` be a locally finite family of closed
sets, let `U : ι → set X` be a family of open sets such that `K i ⊆ U i` for all `i`. Then there
exists a positive continuous function `δ : C(X, ℝ)` such that for any `i` and `x ∈ K i`, we have
`emetric.closed_ball x (ennreal.of_real (δ x)) ⊆ U i`. -/
lemma exists_continuous_real_forall_closed_ball_subset (hK : ∀ i, is_closed (K i))
  (hU : ∀ i, is_open (U i)) (hKU : ∀ i, K i ⊆ U i) (hfin : locally_finite K) :
  ∃ δ : C(X, ℝ), (∀ x, 0 < δ x) ∧
    ∀ i (x ∈ K i), closed_ball x (ennreal.of_real $ δ x) ⊆ U i :=
by simpa only [mem_inter_iff, forall_and_distrib, mem_preimage, mem_Inter, @forall_swap ι X]
  using exists_continuous_forall_mem_convex_of_local_const exists_forall_closed_ball_subset_aux₂
    (exists_forall_closed_ball_subset_aux₁ hK hU hKU hfin)

/-- Let `X` be an extended metric space. Let `K : ι → set X` be a locally finite family of closed
sets, let `U : ι → set X` be a family of open sets such that `K i ⊆ U i` for all `i`. Then there
exists a positive continuous function `δ : C(X, ℝ≥0)` such that for any `i` and `x ∈ K i`, we have
`emetric.closed_ball x (δ x) ⊆ U i`. -/
lemma exists_continuous_nnreal_forall_closed_ball_subset (hK : ∀ i, is_closed (K i))
  (hU : ∀ i, is_open (U i)) (hKU : ∀ i, K i ⊆ U i) (hfin : locally_finite K) :
  ∃ δ : C(X, ℝ≥0), (∀ x, 0 < δ x) ∧ ∀ i (x ∈ K i), closed_ball x (δ x) ⊆ U i :=
begin
  rcases exists_continuous_real_forall_closed_ball_subset hK hU hKU hfin with ⟨δ, hδ₀, hδ⟩,
  lift δ to C(X, ℝ≥0) using λ x, (hδ₀ x).le,
  refine ⟨δ, hδ₀, λ i x hi, _⟩,
  simpa only [← ennreal.of_real_coe_nnreal] using hδ i x hi
end

/-- Let `X` be an extended metric space. Let `K : ι → set X` be a locally finite family of closed
sets, let `U : ι → set X` be a family of open sets such that `K i ⊆ U i` for all `i`. Then there
exists a positive continuous function `δ : C(X, ℝ≥0∞)` such that for any `i` and `x ∈ K i`, we
have `emetric.closed_ball x (δ x) ⊆ U i`. -/
lemma exists_continuous_ennreal_forall_closed_ball_subset (hK : ∀ i, is_closed (K i))
  (hU : ∀ i, is_open (U i)) (hKU : ∀ i, K i ⊆ U i) (hfin : locally_finite K) :
  ∃ δ : C(X, ℝ≥0∞), (∀ x, 0 < δ x) ∧ ∀ i (x ∈ K i), closed_ball x (δ x) ⊆ U i :=
let ⟨δ, hδ₀, hδ⟩ := exists_continuous_nnreal_forall_closed_ball_subset hK hU hKU hfin
in ⟨continuous_map.comp ⟨coe, ennreal.continuous_coe⟩ δ, λ x, ennreal.coe_pos.2 (hδ₀ x), hδ⟩

end emetric

namespace metric

variables [metric_space X] {K : ι → set X} {U : ι → set X}

/-- Let `X` be a metric space. Let `K : ι → set X` be a locally finite family of closed sets, let
`U : ι → set X` be a family of open sets such that `K i ⊆ U i` for all `i`. Then there exists a
positive continuous function `δ : C(X, ℝ≥0)` such that for any `i` and `x ∈ K i`, we have
`metric.closed_ball x (δ x) ⊆ U i`. -/
lemma exists_continuous_nnreal_forall_closed_ball_subset (hK : ∀ i, is_closed (K i))
  (hU : ∀ i, is_open (U i)) (hKU : ∀ i, K i ⊆ U i) (hfin : locally_finite K) :
  ∃ δ : C(X, ℝ≥0), (∀ x, 0 < δ x) ∧ ∀ i (x ∈ K i), closed_ball x (δ x) ⊆ U i :=
begin
  rcases emetric.exists_continuous_nnreal_forall_closed_ball_subset hK hU hKU hfin
    with ⟨δ, hδ0, hδ⟩,
  refine ⟨δ, hδ0, λ i x hx, _⟩,
  rw [← emetric_closed_ball_nnreal],
  exact hδ i x hx
end

/-- Let `X` be a metric space. Let `K : ι → set X` be a locally finite family of closed sets, let
`U : ι → set X` be a family of open sets such that `K i ⊆ U i` for all `i`. Then there exists a
positive continuous function `δ : C(X, ℝ)` such that for any `i` and `x ∈ K i`, we have
`metric.closed_ball x (δ x) ⊆ U i`. -/
lemma exists_continuous_real_forall_closed_ball_subset (hK : ∀ i, is_closed (K i))
  (hU : ∀ i, is_open (U i)) (hKU : ∀ i, K i ⊆ U i) (hfin : locally_finite K) :
  ∃ δ : C(X, ℝ), (∀ x, 0 < δ x) ∧ ∀ i (x ∈ K i), closed_ball x (δ x) ⊆ U i :=
let ⟨δ, hδ₀, hδ⟩ := exists_continuous_nnreal_forall_closed_ball_subset hK hU hKU hfin
in ⟨continuous_map.comp ⟨coe, nnreal.continuous_coe⟩ δ, hδ₀, hδ⟩

end metric
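A usage sketch (mathlib 3 style, hypothesis names chosen for illustration): the metric-space version at the end of the file can be applied directly, simply restating the final lemma.

example {ι X : Type*} [metric_space X] {K U : ι → set X}
  (hK : ∀ i, is_closed (K i)) (hU : ∀ i, is_open (U i))
  (hKU : ∀ i, K i ⊆ U i) (hfin : locally_finite K) :
  ∃ δ : C(X, ℝ), (∀ x, 0 < δ x) ∧ ∀ i (x ∈ K i), metric.closed_ball x (δ x) ⊆ U i :=
metric.exists_continuous_real_forall_closed_ball_subset hK hU hKU hfin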
{-# OPTIONS --safe #-}

module Tools.Function where

-- Identity function
idᶠ : {A : Set} → A → A
idᶠ x = x

-- Function composition (simply typed variant)
_∘ᶠ_ : {A B C : Set} → (B → C) → (A → B) → A → C
_∘ᶠ_ f g a = f (g a)
State Before:
α : Type u_2
β : Type u_1
γ : Type ?u.23283
δ : Type ?u.23286
r : α → α → Prop
a✝ b✝ c d : α
p : β → β → Prop
a b : α
f : α → β
h : ∀ (a b : α), r a b → TransGen p (f a) (f b)
hab : TransGen r a b
⊢ TransGen p (f a) (f b)
State After: no goals
Tactic: simpa [transGen_idem] using hab.lift f h
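The step the tactic relies on, spelled out: `h` lands in `TransGen p` rather than `p`, so `hab.lift f h` produces `TransGen (TransGen p) (f a) (f b)`, and `transGen_idem` collapses the doubled transitive closure. A hedged standalone restatement (Lean 4 / Mathlib, fully qualified names assumed):

example {α β : Type*} {r : α → α → Prop} {p : β → β → Prop} {f : α → β}
    (h : ∀ a b, r a b → Relation.TransGen p (f a) (f b)) {a b : α}
    (hab : Relation.TransGen r a b) : Relation.TransGen p (f a) (f b) := by
  simpa [Relation.transGen_idem] using hab.lift f h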
%% Demo: Threshold-Free Cluster Enhancement (TFCE) on surface dataset
%
% The data used here is available from http://cosmomvpa.org/datadb.zip
%
% This example uses the following dataset:
% + 'digit'
%    A participant made finger presses with the index and middle finger of
%    the right hand during 4 runs in an fMRI study. Each run was divided
%    into 4 blocks with presses of each finger and analyzed with the GLM,
%    resulting in 2*4*4=32 t-values
%
% This example illustrates the use of Threshold-Free Cluster Enhancement
% with a permutation test to correct for multiple comparisons.
%
% TFCE reference: Stephen M. Smith, Thomas E. Nichols, Threshold-free
% cluster enhancement: Addressing problems of smoothing, threshold
% dependence and localisation in cluster inference, NeuroImage, Volume 44,
% Issue 1, 1 January 2009, Pages 83-98.
%
% #   For CoSMoMVPA's copyright information and license terms,   #
% #   see the COPYING file distributed with CoSMoMVPA.           #

%% Check externals
cosmo_check_external({'surfing','afni'});

%% Set data paths
% The function cosmo_config() returns a struct containing paths to tutorial
% data. (Alternatively the paths can be set manually without using
% cosmo_config.)
config=cosmo_config();

digit_study_path=fullfile(config.tutorial_data_path,'digit');
readme_fn=fullfile(digit_study_path,'README');
cosmo_type(readme_fn);

output_path=config.output_data_path;

% resolution parameter for input surfaces
% 64 is for high-quality results; use 16 for fast execution
ld=16;

% reset citation list
cosmo_check_external('-tic');

% load single surface
intermediate_fn=fullfile(digit_study_path,...
                        sprintf('ico%d_mh.intermediate_al.asc', ld));
[vertices,faces]=surfing_read(intermediate_fn);

%% Load functional data
data_path=digit_study_path;
data_fn=fullfile(data_path,'glm_T_stats_perblock+orig');

targets=repmat(1:2,1,16)';    % class labels: 1 2 1 2 1 2 1 2 1 2 ... 1 2
chunks=floor(((1:32)-1)/4)+1; % half-run:     1 1 1 1 2 2 2 2 3 3 ... 8 8

vol_ds = cosmo_fmri_dataset(data_fn,'targets',targets,'chunks',chunks);

%% Map univariate response data to surface
% this measure averages the data near each node to get a surface dataset
radius=0;
surf_band_range=[-2 2]; % get voxel data within 2mm from surface
surf_def={vertices,faces,[-2 2]};
nbrhood=cosmo_surficial_neighborhood(vol_ds,surf_def,'radius',radius);

measure=@(x,opt) cosmo_structjoin('samples',mean(x.samples,2),'sa',x.sa);

surf_ds=cosmo_searchlight(vol_ds,nbrhood,measure);

fprintf('Univariate surface data:\n');
cosmo_disp(surf_ds);

%% Average data in each chunk
% for this example only consider the samples in the first condition
% (targets==1), and average the samples in each chunk
%
% for group analysis: set chunks to (1:nsubj)', assuming each sample is
% data from a single participant
surf_ds=cosmo_slice(surf_ds,surf_ds.sa.targets==1);
surf_ds=cosmo_average_samples(surf_ds);

fn_surf_ds=fullfile(output_path, 'digit_target1.niml.dset');

% save to disc
cosmo_map2surface(surf_ds, fn_surf_ds);
fprintf('Input data saved to %s\n', fn_surf_ds);

%% Run Threshold-Free Cluster Enhancement (TFCE)
% All data is prepared; surf_ds has 8 samples and 5124 nodes. We want to
% see if there are clusters that show a significant difference from zero
% in their response. Thus, .sa.targets is set to all ones (the same
% condition), whereas .sa.chunks is set to (1:8)', indicating that all
% samples are assumed to be independent.
%
% (While this is a within-subject analysis, exactly the same logic can be
% applied to a group-level analysis)

% define neighborhood for each feature
% (cosmo_cluster_neighborhood can be used also for meeg or volumetric
% fmri datasets)
cluster_nbrhood=cosmo_cluster_neighborhood(surf_ds,...
                                'vertices',vertices,'faces',faces);

fprintf('Cluster neighborhood:\n');
cosmo_disp(cluster_nbrhood);

opt=struct();

% number of null iterations. for publication-quality, use >=1000;
% 10000 is even better
opt.niter=250;

% in this case we run a one-sample test against a mean of 0, and it is
% necessary to specify the mean under the null hypothesis
% (when testing classification accuracies, h0_mean should be set to chance
% level, assuming a balanced crossvalidation scheme was used)
opt.h0_mean=0;

% this example uses the data itself (with resampling) to obtain cluster
% statistics under the null hypothesis. This is (in this case) somewhat
% conservative due to how the resampling is performed.
% Alternatively, and for better estimates (at the cost of computational
% cost), one can generate a set of (say, 50) datasets using permuted data
% (e.g. using cosmo_randomize_targets), put them in a cell and provide
% them as the null argument.
opt.null=[];

fprintf('Running multiple-comparison correction with these options:\n');
cosmo_disp(opt);

% Run TFCE-based cluster correction for multiple comparisons.
% The output has z-scores for each node indicating the probability to find
% the same, or higher, TFCE value under the null hypothesis
tfce_ds=cosmo_montecarlo_cluster_stat(surf_ds,cluster_nbrhood,opt);

%% Show results
fprintf('TFCE z-score dataset\n');
cosmo_disp(tfce_ds);

nfeatures=size(tfce_ds.samples,2);
percentiles=(1:nfeatures)/nfeatures*100;
plot(percentiles,sort(tfce_ds.samples))
title('sorted TFCE z-scores');
xlabel('feature percentile');
ylabel('z-score');

nvertices=size(vertices,1);
disp_opt=struct();
disp_opt.DataRange=[-2 2];

DispIVSurf(vertices,faces,1:nvertices,tfce_ds.samples',0,disp_opt);

% store results
fn_tfce_ds=fullfile(output_path, 'digit_target1_tfce.niml.dset');
cosmo_map2surface(tfce_ds, fn_tfce_ds);

surf_fn=fullfile(output_path, 'digit_intermediate.asc');
surfing_write(surf_fn,vertices,faces);

% show citation information
cosmo_check_external('-cite');
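For reference, the TFCE statistic from the Smith & Nichols (2009) paper cited above scores each feature by integrating the extent of its supporting cluster over all thresholds. A sketch of the defining formula (the exponents E = 0.5 and H = 2 are the paper's recommended defaults; CoSMoMVPA's internal defaults are assumed to match):

\[ \mathrm{TFCE}(x) = \int_{0}^{h_x} e(h)^{E}\, h^{H}\, \mathrm{d}h \]

where \(h_x\) is the statistic value at feature \(x\) and \(e(h)\) is the extent of the cluster containing \(x\) at threshold \(h\).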
% Created 2021-07-08 Thu 10:15
% Intended LaTeX compiler: pdflatex
\documentclass[presentation,aspectratio=1610]{beamer}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{grffile}
\usepackage{longtable}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{textcomp}
\usepackage{amssymb}
\usepackage{capt-of}
\usepackage{hyperref}
\usepackage{khpreamble}
\DeclareMathOperator{\shift}{q}
\DeclareMathOperator{\diff}{p}
\usetheme{default}
\author{Kjartan Halvorsen}
\date{2021-07-08}
\title{Discretizing continuous-time controllers}
\hypersetup{
 pdfauthor={Kjartan Halvorsen},
 pdftitle={Discretizing continuous-time controllers},
 pdfkeywords={},
 pdfsubject={},
 pdfcreator={Emacs 26.3 (Org mode 9.4.6)},
 pdflang={English}}
\begin{document}

\maketitle

\section{Intro}
\label{sec:org5096880}

\section{Discretization}
\label{sec:org497335f}
\begin{frame}[label={sec:orga5cf6e7}]{Context}
\begin{itemize}
\item Controller \(F(s)\) obtained from a design in continuous time.
\item Need a discrete approximation in order to implement on a computer.
\end{itemize}
\begin{center}
\includegraphics[width=0.7\linewidth]{../../figures/fig8-1.png}\\
\footnotesize Source: Åström \& Wittenmark
\end{center}
\end{frame}

\section{Warm-up: Differentiation}
\label{sec:org1fa17cb}
\begin{frame}[label={sec:org07d6f70}]{Warm-up exercise}
\begin{columns}
\begin{column}{0.4\columnwidth}
\begin{center}
\includegraphics[width=\linewidth]{../../figures/block-simple-derivative}
\end{center}
\alert{Draw the Bode diagram for the transfer function}
\end{column}
\begin{column}{0.6\columnwidth}
\begin{center}
\includegraphics[width=\linewidth]{../../figures/bode-derivative-empty}
\end{center}
\end{column}
\end{columns}
\end{frame}

\begin{frame}[label={sec:org2b5bba8}]{Discrete-time differentiation}
\begin{center}
\begin{tabular}{lll}
\includegraphics[width=0.3\linewidth]{../../figures/block-simple-shift-z} & \(\Leftrightarrow\) & \includegraphics[width=0.3\linewidth]{../../figures/block-simple-shift}\\
\end{tabular}
\end{center}
\begin{columns}
\begin{column}{0.4\columnwidth}
\vspace*{5mm}
\includegraphics[width=\linewidth]{../../figures/block-simple-discrete-derivative-fwd}
\textcolor{white}{Space}
\begin{center}
\includegraphics[width=\linewidth]{../../figures/block-simple-discrete-derivative}
\end{center}
\end{column}
\begin{column}{0.6\columnwidth}
\end{column}
\end{columns}
\end{frame}

\begin{frame}[label={sec:orgc048ecb}]{Discretization methods}
\begin{enumerate}
\item Forward difference. Substitute
\[ s = \frac{z-1}{h} \]
in \(F(s)\) to get
\[ F_d(z) = F(s')|_{s'=\frac{z-1}{h}}. \]
\item Backward difference. Substitute
\[ s = \frac{z-1}{zh} \]
in \(F(s)\) to get
\[ F_d(z) = F(s')|_{s'=\frac{z-1}{zh}}. \]
\end{enumerate}
\end{frame}

\begin{frame}[label={sec:org6c3e541}]{Discretization methods, contd.}
\begin{enumerate}
\setcounter{enumi}{2}
\item Tustin's method (also known as the bilinear transform). Substitute
\[ s = \frac{2}{h}\frac{z-1}{z+1} \]
in \(F(s)\) to get
\[ F_d(z) = F(s')|_{s'=\frac{2}{h}\cdot \frac{z-1}{z+1}}. \]
\item Ramp invariance. This is similar to ZoH, which is the step-invariant
approximation. Since a unit ramp has z-transform \(\frac{zh}{(z-1)^2}\) and
Laplace transform \(1/s^2\), the discretization becomes
\[ F_d(z) = \frac{(z-1)^2}{zh} \ztrf{\laplaceinv{\frac{F(s)}{s^2}}}. \]
\end{enumerate}
\end{frame}

\begin{frame}[label={sec:orgcde3040}]{Frequency warping using Tustin's}
\begin{center}
\includegraphics[width=0.6\linewidth]{../../figures/fig8_3.png}
\end{center}
The infinite positive imaginary axis in the s-plane is mapped to the
finite-length upper half of the unit circle in the z-plane.
\end{frame}

\begin{frame}[label={sec:org92aad28}]{Exercise}
Find the discrete approximation of the lead compensator \(F(s) = \frac{s+b}{s+a}\),
and determine the pole for
\begin{enumerate}
\item Forward difference.
\[ F_d(z) = F(s')|_{s'=\frac{z-1}{h}}. \]
\item Backward difference.
\[ F_d(z) = F(s')|_{s'=\frac{z-1}{zh}}. \]
\item Tustin's approximation
\[ F_d(z) = F(s')|_{s'=\frac{2}{h}\cdot \frac{z-1}{z+1}}. \]
\end{enumerate}
\end{frame}

\begin{frame}[label={sec:org4007077}]{Forward difference exercise}
\begin{center}
\includegraphics[width=\linewidth]{../../figures/forward-diff-exercise}
\end{center}
\end{frame}

\begin{frame}[label={sec:org912e773}]{Backward difference exercise}
\begin{center}
\includegraphics[width=\linewidth]{../../figures/backward-diff-exercise}
\end{center}
\end{frame}

\end{document}
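A worked sketch of the lead-compensator exercise above, under the stated substitutions (derived here, not taken from the slides):

Forward difference: \(F_d(z) = \frac{z - 1 + bh}{z - 1 + ah}\), pole at \(z = 1 - ah\); the pole leaves the unit circle when \(h > 2/a\), so the discretization can be unstable even though \(F(s)\) is stable.

Backward difference: \(F_d(z) = \frac{(1 + bh)z - 1}{(1 + ah)z - 1}\), pole at \(z = \frac{1}{1 + ah}\), which lies inside the unit circle for all \(a, h > 0\).

Tustin: \(F_d(z) = \frac{(2 + bh)z - (2 - bh)}{(2 + ah)z - (2 - ah)}\), pole at \(z = \frac{2 - ah}{2 + ah}\), also inside the unit circle for all \(a, h > 0\).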
module Deque

%default total
%access private

public export
interface Deque (q : Type -> Type) where
  empty   : q a
  isEmpty : q a -> Bool
  cons    : a -> q a -> q a
  head    : q a -> a
  tail    : q a -> q a
  snoc    : q a -> a -> q a
  last    : q a -> a
  init    : q a -> q a
{-# OPTIONS --cubical --safe #-}
module Cubical.Data.Everything where

import Cubical.Data.BinNat
import Cubical.Data.Bool
import Cubical.Data.Empty
import Cubical.Data.Equality
import Cubical.Data.Fin
import Cubical.Data.Nat
import Cubical.Data.Nat.Algebra
import Cubical.Data.Nat.Order
import Cubical.Data.NatMinusOne
import Cubical.Data.NatMinusTwo
import Cubical.Data.NatPlusOne
import Cubical.Data.Int
import Cubical.Data.Sum
import Cubical.Data.Prod
import Cubical.Data.Unit
import Cubical.Data.Sigma
import Cubical.Data.DiffInt
import Cubical.Data.Group
import Cubical.Data.HomotopyGroup
import Cubical.Data.List
import Cubical.Data.Graph
import Cubical.Data.InfNat
import Cubical.Data.Queue
Antimony is in the nitrogen group (group 15) and has an electronegativity of 2.05. As expected from periodic trends, it is more electronegative than tin or bismuth, and less electronegative than tellurium or arsenic. Antimony is stable in air at room temperature, but reacts with oxygen if heated to form antimony trioxide, Sb2O3.
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e171m19_4limbs.Synthesis.

(* TODO : change this to field once field isomorphism happens *)
Definition freeze :
  { freeze : feBW_tight -> feBW_limbwidths
  | forall a, phiBW_limbwidths (freeze a) = phiBW_tight a }.
Proof.
  Set Ltac Profiling.
  Time synthesize_freeze ().
  Show Ltac Profile.
Time Defined.

Print Assumptions freeze.
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <algorithm> #include <boost/regex.hpp> #include <iostream> #include <thread> #include "ClassHierarchy.h" #include "ConcurrentContainers.h" #include "DexUtil.h" #include "IRCode.h" #include "ProguardMatcher.h" #include "ProguardPrintConfiguration.h" #include "ProguardRegex.h" #include "ProguardReporting.h" #include "ReachableClasses.h" #include "StringBuilder.h" #include "Timer.h" #include "WorkQueue.h" using namespace redex; namespace { using RegexMap = std::unordered_map<std::string, boost::regex>; std::unique_ptr<boost::regex> make_rx(const std::string& s, bool convert = true) { if (s.empty()) return nullptr; auto wc = convert ? proguard_parser::convert_wildcard_type(s) : s; auto rx = proguard_parser::form_type_regex(wc); return std::make_unique<boost::regex>(rx); } bool match_annotation_rx(const DexClass* cls, const boost::regex& annorx) { const auto* annos = cls->get_anno_set(); if (!annos) return false; for (const auto& anno : annos->get_annotations()) { if (boost::regex_match(anno->type()->c_str(), annorx)) { return true; } } return false; } /** * Helper class that holds the conditions for a class-level match on a keep * rule. */ struct ClassMatcher { explicit ClassMatcher(const KeepSpec& ks) : setFlags_(ks.class_spec.setAccessFlags), unsetFlags_(ks.class_spec.unsetAccessFlags), m_class_name(ks.class_spec.className), m_cls(make_rx(ks.class_spec.className)), m_anno(make_rx(ks.class_spec.annotationType, false)), m_extends(make_rx(ks.class_spec.extendsClassName)), m_extends_anno(make_rx(ks.class_spec.extendsAnnotationType, false)) {} bool match(const DexClass* cls) { // Check for class name match // `match_name` is really slow; let's short-circuit it for wildcard-only // matches if (m_class_name != "*" && m_class_name != "**" && !match_name(cls)) { return false; } // Check for access match if (!match_access(cls)) { return false; } // Check to see if an annotation guard needs to be matched. if (!match_annotation(cls)) { return false; } // Check to see if an extends clause needs to be matched. return match_extends(cls); } private: bool match_name(const DexClass* cls) const { const auto& deob_name = cls->get_deobfuscated_name(); return boost::regex_match(deob_name, *m_cls); } bool match_access(const DexClass* cls) const { return access_matches(setFlags_, unsetFlags_, cls->get_access()); } bool match_annotation(const DexClass* cls) const { if (!m_anno) return true; return match_annotation_rx(cls, *m_anno); } bool match_extends(const DexClass* cls) { if (!m_extends) return true; return search_extends_and_interfaces(cls); } bool type_and_annotation_match(const DexClass* cls) const { if (cls == nullptr) return false; if (cls->get_type() == get_object_type()) return false; // First check to see if an annotation type needs to be matched. 
if (m_extends_anno) { if (!match_annotation_rx(cls, *m_extends_anno)) { return false; } } const auto& deob_name = cls->get_deobfuscated_name(); return boost::regex_match(deob_name, *m_extends); } bool search_interfaces(const DexClass* cls) { const auto* interfaces = cls->get_interfaces(); if (!interfaces) return false; for (const auto& impl : interfaces->get_type_list()) { auto impl_class = type_class(impl); if (impl_class) { if (search_extends_and_interfaces(impl_class)) { return true; } } } return false; } bool search_extends_and_interfaces(const DexClass* cls) { auto cached_it = m_extends_result_cache.find(cls); if (cached_it != m_extends_result_cache.end()) { return cached_it->second; } auto result = search_extends_and_interfaces_nocache(cls); m_extends_result_cache.emplace(cls, result); return result; } bool search_extends_and_interfaces_nocache(const DexClass* cls) { always_assert(cls != nullptr); // Does this class match the annotation and type wildcard? if (type_and_annotation_match(cls)) { return true; } // Do any of the classes and interfaces above match? auto super_type = cls->get_super_class(); if (super_type && super_type != get_object_type()) { auto super_class = type_class(super_type); if (super_class) { if (search_extends_and_interfaces(super_class)) { return true; } } } // Do any of the interfaces from here and up match? return search_interfaces(cls); } DexAccessFlags setFlags_; DexAccessFlags unsetFlags_; std::string m_class_name; std::unique_ptr<boost::regex> m_cls; std::unique_ptr<boost::regex> m_anno; std::unique_ptr<boost::regex> m_extends; std::unique_ptr<boost::regex> m_extends_anno; std::unordered_map<const DexClass*, bool> m_extends_result_cache; }; enum class RuleType { WHY_ARE_YOU_KEEPING, KEEP, ASSUME_NO_SIDE_EFFECTS, }; std::string to_string(RuleType rule_type) { switch (rule_type) { case RuleType::WHY_ARE_YOU_KEEPING: return "whyareyoukeeping"; case RuleType::KEEP: return "classes and members"; case RuleType::ASSUME_NO_SIDE_EFFECTS: return "assumenosideeffects"; } } /* * Build a DAG of class -> subclass and implementors. This is fairly similar to * build_type_hierarchy and friends, but Proguard doesn't distinguish between * subclasses and interface implementors, so this function combines them * together. */ void build_extends_or_implements_hierarchy(const Scope& scope, ClassHierarchy* hierarchy) { for (const auto& cls : scope) { const auto* type = cls->get_type(); // ensure an entry for the DexClass is created (*hierarchy)[type]; const auto* super = cls->get_super_class(); if (super != nullptr) { (*hierarchy)[super].insert(type); } for (const auto& impl : cls->get_interfaces()->get_type_list()) { (*hierarchy)[impl].insert(type); } } } /* * This class contains the logic for matching against a single keep rule. */ class KeepRuleMatcher { public: KeepRuleMatcher(RuleType rule_type, const KeepSpec& keep_rule, RegexMap& regex_map) : m_rule_type(rule_type), m_keep_rule(keep_rule), m_regex_map(regex_map) {} void keep_processor(DexClass*); void mark_class_and_members_for_keep(DexClass* cls); bool any_method_matches(const DexClass* cls, const MemberSpecification& method_keep, const boost::regex& method_regex); // Check that each method keep matches at least one method in :cls. bool all_method_keeps_match( const std::vector<MemberSpecification>& method_keeps, const DexClass* cls); bool any_field_matches(const DexClass* cls, const MemberSpecification& field_keep); // Check that each field keep matches at least one field in :cls. 
bool all_field_keeps_match( const std::vector<MemberSpecification>& field_keeps, const DexClass* cls); void process_whyareyoukeeping(DexClass* cls); bool process_mark_conditionally(const DexClass* cls); void process_assumenosideeffects(DexClass* cls); template <class DexMember> void apply_rule(DexMember*); void apply_field_keeps(const DexClass* cls, bool apply_modifiers); void apply_method_keeps(const DexClass* cls, bool apply_modifiers); template <class Container> void keep_fields(bool apply_modifiers, const Container& fields, const redex::MemberSpecification& fieldSpecification, const boost::regex& fieldname_regex); template <class Container> void keep_methods(bool apply_modifiers, const redex::MemberSpecification& methodSpecification, const Container& methods, const boost::regex& method_regex); bool field_level_match(const redex::MemberSpecification& fieldSpecification, const DexField* field, const boost::regex& fieldname_regex); bool method_level_match(const redex::MemberSpecification& methodSpecification, const DexMethod* method, const boost::regex& method_regex); template <class DexMember> bool has_annotation(const DexMember* member, const std::string& annotation) const; boost::regex register_matcher(const std::string& regex) const { if (!m_regex_map.count(regex)) { m_regex_map.emplace(regex, boost::regex{regex}); } return m_regex_map.at(regex); } private: RuleType m_rule_type; const KeepSpec& m_keep_rule; RegexMap& m_regex_map; }; class ProguardMatcher { public: ProguardMatcher(const ProguardMap& pg_map, const Scope& classes, const Scope& external_classes) : m_pg_map(pg_map), m_classes(classes), m_external_classes(external_classes) { build_extends_or_implements_hierarchy(m_classes, &m_hierarchy); // We need to include external classes in the hierarchy because keep rules // may, for instance, forbid renaming of all classes that inherit from a // given external class. build_extends_or_implements_hierarchy(m_external_classes, &m_hierarchy); } void process_proguard_rules(const ProguardConfiguration& pg_config); void mark_all_annotation_classes_as_keep(); void process_keep(const KeepSpecSet& keep_rules, RuleType rule_type, bool process_external = false); DexClass* find_single_class(const std::string& descriptor) const; private: const ProguardMap& m_pg_map; const Scope& m_classes; const Scope& m_external_classes; ClassHierarchy m_hierarchy; }; // Updates a class, field or method to add keep modifiers. // Note: includedescriptorclasses and allowoptimization are not implemented. template <class DexMember> void apply_keep_modifiers(const KeepSpec& k, DexMember* member) { // We only set allowshrinking when no other keep rule has been applied to this // class or member. // // Note that multiple keep rules could set or unset the modifier // *conflictingly*. It would be best if all the keep rules are never // contradictory each other. But verifying the integrity takes some time, and // programmers must fix the rules. Instead, we pick a conservative choice: // don't shrink or don't obfuscate. if (k.allowshrinking) { if (!has_keep(member)) { member->rstate.set_allowshrinking(); } else { // We already observed a keep rule for this member. So, even if another // "-keep,allowshrinking" tries to set allowshrinking, we must ignore it. } } else { // Otherwise reset it: don't allow shrinking. member->rstate.unset_allowshrinking(); } // The same case: unsetting allowobfuscation has a priority. 
if (k.allowobfuscation) { if (!has_keep(member) && strcmp(member->get_name()->c_str(), "<init>") != 0) { member->rstate.set_allowobfuscation(); } } else { member->rstate.unset_allowobfuscation(); } } // Is this keep_rule an application of a blanket top-level keep // "-keep,allowshrinking class *" or "-keepnames class *" rule? // See keepclassnames.pro, or T1890454. inline bool is_blanket_keepnames_rule(const KeepSpec& keep_rule) { if (keep_rule.allowshrinking) { const auto& spec = keep_rule.class_spec; if (spec.className == "*" && spec.annotationType == "" && spec.fieldSpecifications.empty() && spec.methodSpecifications.empty() && spec.extendsAnnotationType == "" && spec.extendsClassName == "" && spec.setAccessFlags == 0 && spec.unsetAccessFlags == 0) { return true; } } return false; } template <class DexMember> bool KeepRuleMatcher::has_annotation(const DexMember* member, const std::string& annotation) const { auto annos = member->get_anno_set(); if (annos != nullptr) { auto annotation_regex = proguard_parser::form_type_regex(annotation); const boost::regex& annotation_matcher = register_matcher(annotation_regex); for (const auto& anno : annos->get_annotations()) { if (boost::regex_match(anno->type()->c_str(), annotation_matcher)) { return true; } } } return false; } // From a fully qualified descriptor for a field, extract just the // name of the field which occurs between the ;. and : characters. std::string extract_field_name(std::string qualified_fieldname) { auto p = qualified_fieldname.find(";."); if (p == std::string::npos) { return qualified_fieldname; } return qualified_fieldname.substr(p + 2); } std::string extract_method_name_and_type(std::string qualified_fieldname) { auto p = qualified_fieldname.find(";."); return qualified_fieldname.substr(p + 2); } bool KeepRuleMatcher::field_level_match( const redex::MemberSpecification& fieldSpecification, const DexField* field, const boost::regex& fieldname_regex) { // Check for annotation guards. if (!(fieldSpecification.annotationType.empty())) { if (!has_annotation(field, fieldSpecification.annotationType)) { return false; } } // Check for access match. if (!access_matches(fieldSpecification.requiredSetAccessFlags, fieldSpecification.requiredUnsetAccessFlags, field->get_access())) { return false; } // Match field name against regex.
auto dequalified_name = extract_field_name(field->get_deobfuscated_name()); return boost::regex_match(dequalified_name, fieldname_regex); } template <class Container> void KeepRuleMatcher::keep_fields( bool apply_modifiers, const Container& fields, const redex::MemberSpecification& fieldSpecification, const boost::regex& fieldname_regex) { for (DexField* field : fields) { if (!field_level_match(fieldSpecification, field, fieldname_regex)) { continue; } if (apply_modifiers) { apply_keep_modifiers(m_keep_rule, field); } apply_rule(field); fieldSpecification.count++; } } std::string field_regex(const MemberSpecification& field_spec) { string_builders::StaticStringBuilder<3> ss; ss << proguard_parser::form_member_regex(field_spec.name); ss << "\\:"; ss << proguard_parser::form_type_regex(field_spec.descriptor); return ss.str(); } void KeepRuleMatcher::apply_field_keeps(const DexClass* cls, bool apply_modifiers) { for (const auto& field_spec : m_keep_rule.class_spec.fieldSpecifications) { auto fieldname_regex = field_regex(field_spec); const boost::regex& matcher = register_matcher(fieldname_regex); keep_fields(apply_modifiers, cls->get_ifields(), field_spec, matcher); keep_fields(apply_modifiers, cls->get_sfields(), field_spec, matcher); } } bool KeepRuleMatcher::method_level_match( const redex::MemberSpecification& methodSpecification, const DexMethod* method, const boost::regex& method_regex) { // Check to see if the method match is guarded by an annotation match. if (!(methodSpecification.annotationType.empty())) { if (!has_annotation(method, methodSpecification.annotationType)) { return false; } } if (!access_matches(methodSpecification.requiredSetAccessFlags, methodSpecification.requiredUnsetAccessFlags, method->get_access())) { return false; } auto dequalified_name = extract_method_name_and_type(method->get_deobfuscated_name()); return boost::regex_match(dequalified_name.c_str(), method_regex); } void keep_clinits(DexClass* cls) { for (auto method : cls->get_dmethods()) { if (is_clinit(method) && method->get_code()) { auto ii = InstructionIterable(method->get_code()); auto it = ii.begin(); while (opcode::is_load_param(it->insn->opcode())) { ++it; } if (!(it->insn->opcode() == OPCODE_RETURN_VOID && (++it) == ii.end())) { method->rstate.set_has_keep(keep_reason::CLINIT); } break; } } } template <class Container> void KeepRuleMatcher::keep_methods( bool apply_modifiers, const redex::MemberSpecification& methodSpecification, const Container& methods, const boost::regex& method_regex) { for (DexMethod* method : methods) { if (method_level_match(methodSpecification, method, method_regex)) { if (apply_modifiers) { apply_keep_modifiers(m_keep_rule, method); } apply_rule(method); methodSpecification.count++; } } } std::string method_regex(const MemberSpecification& method_spec) { auto qualified_method_regex = proguard_parser::form_member_regex(method_spec.name); qualified_method_regex += "\\:"; qualified_method_regex += proguard_parser::form_type_regex(method_spec.descriptor); return qualified_method_regex; } void KeepRuleMatcher::apply_method_keeps(const DexClass* cls, bool apply_modifiers) { auto methodSpecifications = m_keep_rule.class_spec.methodSpecifications; for (auto& method_spec : methodSpecifications) { auto qualified_method_regex = method_regex(method_spec); const boost::regex& method_regex = register_matcher(qualified_method_regex); keep_methods(apply_modifiers, method_spec, cls->get_vmethods(), method_regex); keep_methods(apply_modifiers, method_spec, cls->get_dmethods(), 
method_regex); } } bool classname_contains_wildcard(const std::string& classname) { for (char ch : classname) { if (ch == '*' || ch == '?' || ch == '!' || ch == '%' || ch == ',') { return true; } } return false; } bool KeepRuleMatcher::any_method_matches(const DexClass* cls, const MemberSpecification& method_keep, const boost::regex& method_regex) { auto match = [&](const DexMethod* method) { return method_level_match(method_keep, method, method_regex); }; return std::any_of(cls->get_vmethods().begin(), cls->get_vmethods().end(), match) || std::any_of(cls->get_dmethods().begin(), cls->get_dmethods().end(), match); } // Check that each method keep matches at least one method in :cls. bool KeepRuleMatcher::all_method_keeps_match( const std::vector<MemberSpecification>& method_keeps, const DexClass* cls) { return std::all_of(method_keeps.begin(), method_keeps.end(), [&](const MemberSpecification& method_keep) { auto qualified_method_regex = method_regex(method_keep); const boost::regex& matcher = register_matcher(qualified_method_regex); return any_method_matches(cls, method_keep, matcher); }); } bool KeepRuleMatcher::any_field_matches(const DexClass* cls, const MemberSpecification& field_keep) { auto fieldtype_regex = field_regex(field_keep); const boost::regex& matcher = register_matcher(fieldtype_regex); auto match = [&](const DexField* field) { return field_level_match(field_keep, field, matcher); }; return std::any_of(cls->get_ifields().begin(), cls->get_ifields().end(), match) || std::any_of(cls->get_sfields().begin(), cls->get_sfields().end(), match); } // Check that each field keep matches at least one field in :cls. bool KeepRuleMatcher::all_field_keeps_match( const std::vector<MemberSpecification>& field_keeps, const DexClass* cls) { return std::all_of(field_keeps.begin(), field_keeps.end(), [&](const MemberSpecification& field_keep) { return any_field_matches(cls, field_keep); }); } bool KeepRuleMatcher::process_mark_conditionally(const DexClass* cls) { const auto& class_spec = m_keep_rule.class_spec; if (class_spec.fieldSpecifications.empty() && class_spec.methodSpecifications.empty()) { std::cerr << "WARNING: A keepclasseswithmembers rule for class " << class_spec.className << " has no field or member specifications.\n"; } return all_field_keeps_match(class_spec.fieldSpecifications, cls) && all_method_keeps_match(class_spec.methodSpecifications, cls); } // Once a match has been made against a class i.e. the class name // matches, the annotations match, the extends clause matches and the // access modifier filters match, then start to apply the keep control // bits to the class, members and appropriate classes and members // in the class hierarchy. // // Parallelization note: We parallelize process_keep, and this function will be // eventually executed concurrently. There are potential races in rstate: // (1) m_keep, (2) m_(un)set_allow(shrinking|obfuscation), (3) // m_blanket_keepnames, and (4) m_keep_count. We use an atomic value for // m_keep_count, but the other boolean values are always overwritten. These WAW // (write-after-write) races are benign and do not affect the results. void KeepRuleMatcher::mark_class_and_members_for_keep(DexClass* cls) { // First check to see if we need to mark conditionally to see if all // field and method rules match i.e. we have a -keepclasseswithmembers // rule to process. if (m_keep_rule.mark_conditionally) { // If this class does not incur at least one match for each field // and method rule, then don't mark this class or its members. 
if (!process_mark_conditionally(cls)) { return; } } // Mark descriptor classes if (m_keep_rule.includedescriptorclasses) { std::cerr << "WARNING: 'includedescriptorclasses' keep modifier is NOT " "implemented: " << redex::show_keep(m_keep_rule) << std::endl; } if (m_keep_rule.allowoptimization) { std::cerr << "WARNING: 'allowoptimization' keep modifier is NOT implemented: " << redex::show_keep(m_keep_rule) << std::endl; } m_keep_rule.count++; if (m_keep_rule.mark_classes || m_keep_rule.mark_conditionally) { apply_keep_modifiers(m_keep_rule, cls); cls->rstate.set_has_keep(&m_keep_rule); if (cls->rstate.report_whyareyoukeeping()) { TRACE( PGR, 2, "whyareyoukeeping Class %s kept by %s", redex::dexdump_name_to_dot_name(cls->get_deobfuscated_name()).c_str(), show_keep(m_keep_rule).c_str()); } if (!m_keep_rule.allowobfuscation) { cls->rstate.increment_keep_count(); } if (is_blanket_keepnames_rule(m_keep_rule)) { cls->rstate.set_blanket_keepnames(); } // Mark non-empty <clinit> methods as seeds. keep_clinits(cls); } // Walk up the hierarchy performing seed marking. DexClass* class_to_mark = cls; bool apply_modifiers = true; while (class_to_mark != nullptr && !class_to_mark->is_external()) { // Mark unconditionally. apply_field_keeps(class_to_mark, apply_modifiers); apply_method_keeps(class_to_mark, apply_modifiers); apply_modifiers = false; auto typ = class_to_mark->get_super_class(); if (typ == nullptr) { break; } class_to_mark = type_class(typ); } } // This function is also executed concurrently. void KeepRuleMatcher::process_whyareyoukeeping(DexClass* cls) { cls->rstate.set_whyareyoukeeping(); apply_field_keeps(cls, false); // Set any method-level keep whyareyoukeeping bits. apply_method_keeps(cls, false); } // This function is also executed concurrently. void KeepRuleMatcher::process_assumenosideeffects(DexClass* cls) { cls->rstate.set_assumenosideeffects(); // Apply any method-level keep specifications. apply_method_keeps(cls, false); } template <class DexMember> void KeepRuleMatcher::apply_rule(DexMember* member) { switch (m_rule_type) { case RuleType::WHY_ARE_YOU_KEEPING: member->rstate.set_whyareyoukeeping(); break; case RuleType::KEEP: { member->rstate.set_has_keep(&m_keep_rule); if (member->rstate.report_whyareyoukeeping()) { TRACE(PGR, 2, "whyareyoukeeping %s kept by %s", SHOW(member), show_keep(m_keep_rule).c_str()); } break; } case RuleType::ASSUME_NO_SIDE_EFFECTS: member->rstate.set_assumenosideeffects(); break; } } void KeepRuleMatcher::keep_processor(DexClass* cls) { switch (m_rule_type) { case RuleType::WHY_ARE_YOU_KEEPING: process_whyareyoukeeping(cls); break; case RuleType::KEEP: mark_class_and_members_for_keep(cls); break; case RuleType::ASSUME_NO_SIDE_EFFECTS: process_assumenosideeffects(cls); break; } } DexClass* ProguardMatcher::find_single_class( const std::string& descriptor) const { auto const& dsc = JavaNameUtil::external_to_internal(descriptor); DexType* typ = DexType::get_type(m_pg_map.translate_class(dsc).c_str()); if (typ == nullptr) { typ = DexType::get_type(dsc.c_str()); if (typ == nullptr) { return nullptr; } } return type_class(typ); } void ProguardMatcher::process_keep(const KeepSpecSet& keep_rules, RuleType rule_type, bool process_external) { Timer t("Process keep for " + to_string(rule_type)); auto process_single_keep = [rule_type, process_external]( ClassMatcher& class_match, const KeepSpec& keep_rule, DexClass* cls, RegexMap& regex_map) { // Skip external classes. 
if (cls == nullptr || (!process_external && cls->is_external())) { return; } if (class_match.match(cls)) { KeepRuleMatcher rule_matcher(rule_type, keep_rule, regex_map); rule_matcher.keep_processor(cls); } }; // We only parallelize if keep_rule needs to be applied to all classes. auto wq = workqueue_foreach<const KeepSpec*>([&](const KeepSpec* keep_rule) { RegexMap regex_map; ClassMatcher class_match(*keep_rule); for (const auto& cls : m_classes) { process_single_keep(class_match, *keep_rule, cls, regex_map); } if (process_external) { for (const auto& cls : m_external_classes) { process_single_keep(class_match, *keep_rule, cls, regex_map); } } }); RegexMap regex_map; for (const auto& keep_rule_ptr : keep_rules) { const auto& keep_rule = *keep_rule_ptr; ClassMatcher class_match(keep_rule); // This case is very fast. Just process it immediately in the main thread. const auto& className = keep_rule.class_spec.className; if (!classname_contains_wildcard(className)) { DexClass* cls = find_single_class(className); process_single_keep(class_match, keep_rule, cls, regex_map); continue; } // This is also very fast. Process it in the main thread, too. const auto& extendsClassName = keep_rule.class_spec.extendsClassName; if (extendsClassName != "" && !classname_contains_wildcard(extendsClassName)) { DexClass* super = find_single_class(extendsClassName); if (super != nullptr) { TypeSet children; get_all_children(m_hierarchy, super->get_type(), children); process_single_keep(class_match, keep_rule, super, regex_map); for (auto const* type : children) { process_single_keep(class_match, keep_rule, type_class(type), regex_map); } } continue; } TRACE(PGR, 2, "Slow rule: %s", show_keep(keep_rule).c_str()); // Otherwise, it might take a longer time. Add to the work queue. wq.add_item(&keep_rule); } wq.run_all(); } void ProguardMatcher::process_proguard_rules( const ProguardConfiguration& pg_config) { // Now process each of the different kinds of rules as well // as -assumenosideeffects and -whyareyoukeeping. process_keep(pg_config.whyareyoukeeping_rules, RuleType::WHY_ARE_YOU_KEEPING); process_keep(pg_config.keep_rules, RuleType::KEEP); process_keep(pg_config.assumenosideeffects_rules, RuleType::ASSUME_NO_SIDE_EFFECTS, /* process_external = */ true); } void ProguardMatcher::mark_all_annotation_classes_as_keep() { for (auto cls : m_classes) { if (is_annotation(cls)) { cls->rstate.set_has_keep(keep_reason::ANNO); if (cls->rstate.report_whyareyoukeeping()) { TRACE(PGR, 2, "whyareyoukeeping Class %s kept because it is an annotation " "class\n", redex::dexdump_name_to_dot_name(cls->get_deobfuscated_name()) .c_str()); } cls->rstate.increment_keep_count(); } } } } // namespace namespace redex { void process_proguard_rules(const ProguardMap& pg_map, const Scope& classes, const Scope& external_classes, const ProguardConfiguration& pg_config, bool keep_all_annotation_classes) { ProguardMatcher pg_matcher(pg_map, classes, external_classes); pg_matcher.process_proguard_rules(pg_config); if (keep_all_annotation_classes) { pg_matcher.mark_all_annotation_classes_as_keep(); } } } // namespace redex
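A hypothetical call-site sketch for the public entry point declared at the end of this file; pg_map, classes, external_classes, and pg_config are assumed to have been built by earlier stages of the Redex pipeline:

// Hypothetical call site (setup elided); mirrors the signature in namespace redex.
redex::process_proguard_rules(pg_map, classes, external_classes, pg_config,
                              /* keep_all_annotation_classes = */ true);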
// Copyright Oliver Kowalke 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#include "boost/fiber/scheduler.hpp"

#include <chrono>
#include <mutex>

#include <boost/assert.hpp>

#include "boost/fiber/algo/round_robin.hpp"
#include "boost/fiber/context.hpp"
#include "boost/fiber/exceptions.hpp"

#ifdef BOOST_HAS_ABI_HEADERS
#  include BOOST_ABI_PREFIX
#endif

namespace boost {
namespace fibers {

void
scheduler::release_terminated_() noexcept {
    while ( ! terminated_queue_.empty() ) {
        context * ctx = & terminated_queue_.front();
        terminated_queue_.pop_front();
        BOOST_ASSERT( ctx->is_context( type::worker_context) );
        BOOST_ASSERT( ! ctx->is_context( type::pinned_context) );
        BOOST_ASSERT( this == ctx->get_scheduler() );
        BOOST_ASSERT( ctx->is_resumable() );
        BOOST_ASSERT( ! ctx->worker_is_linked() );
        BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
        BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
        BOOST_ASSERT( ! ctx->sleep_is_linked() );
        BOOST_ASSERT( ctx->wait_queue_.empty() );
        BOOST_ASSERT( ctx->terminated_);
        // if the last reference, e.g. fiber::join() or fiber::detach()
        // has already been called, this will call ~context();
        // the context is automatically removed from the worker-queue
        intrusive_ptr_release( ctx);
    }
}

#if ! defined(BOOST_FIBERS_NO_ATOMICS)
void
scheduler::remote_ready2ready_() noexcept {
    remote_ready_queue_type tmp;
    detail::spinlock_lock lk{ remote_ready_splk_ };
    remote_ready_queue_.swap( tmp);
    lk.unlock();
    // get context from remote ready-queue
    while ( ! tmp.empty() ) {
        context * ctx = & tmp.front();
        tmp.pop_front();
        // store context in local queues
        schedule( ctx);
    }
}
#endif

void
scheduler::sleep2ready_() noexcept {
    // move contexts whose deadline has been reached
    // to the ready-queue
    // sleep-queue is sorted (ascending)
    std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now();
    sleep_queue_type::iterator e = sleep_queue_.end();
    for ( sleep_queue_type::iterator i = sleep_queue_.begin(); i != e;) {
        context * ctx = & ( * i);
        // dispatcher context must never be pushed to sleep-queue
        BOOST_ASSERT( ! ctx->is_context( type::dispatcher_context) );
        BOOST_ASSERT( main_ctx_ == ctx || ctx->worker_is_linked() );
        BOOST_ASSERT( ! ctx->ready_is_linked() );
        // remote_ready_hook_ can be linked at that point in case the ctx
        // has been signaled concurrently when sleep2ready_ is called. In that
        // case sleep_waker_.wake() is just a no-op, because sleep_waker_ is
        // outdated
        BOOST_ASSERT( ! ctx->terminated_is_linked() );
        // set fiber to state_ready if deadline was reached
        if ( ctx->tp_ <= now) {
            // remove context from sleep-queue
            i = sleep_queue_.erase( i);
            // reset sleep-tp
            ctx->tp_ = (std::chrono::steady_clock::time_point::max)();
            ctx->sleep_waker_.wake();
        } else {
            break; // first context with now < deadline
        }
    }
}

scheduler::scheduler() noexcept :
    algo_{ new algo::round_robin() } {
}

scheduler::~scheduler() {
    BOOST_ASSERT( nullptr != main_ctx_);
    BOOST_ASSERT( nullptr != dispatcher_ctx_.get() );
    BOOST_ASSERT( context::active() == main_ctx_);
    // signal dispatcher-context termination
    shutdown_ = true;
    // resume pending fibers
    // by resuming dispatcher-context
    context::active()->suspend();
    // no context' in worker-queue
    BOOST_ASSERT( worker_queue_.empty() );
    BOOST_ASSERT( terminated_queue_.empty() );
    BOOST_ASSERT( sleep_queue_.empty() );
    // set active context to nullptr
    context::reset_active();
    // deallocate dispatcher-context
    BOOST_ASSERT( ! dispatcher_ctx_->ready_is_linked() );
    dispatcher_ctx_.reset();
    // set main-context to nullptr
    main_ctx_ = nullptr;
}

boost::context::fiber
scheduler::dispatch() noexcept {
    BOOST_ASSERT( context::active() == dispatcher_ctx_);
    for (;;) {
        if ( shutdown_) {
            // notify sched-algorithm about termination
            algo_->notify();
            if ( worker_queue_.empty() ) {
                break;
            }
        }
        // release terminated context'
        release_terminated_();
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
        // get context' from remote ready-queue
        remote_ready2ready_();
#endif
        // get sleeping context'
        // must be called after remote_ready2ready_()
        sleep2ready_();
        // get next ready context
        context * ctx = algo_->pick_next();
        if ( nullptr != ctx) {
            BOOST_ASSERT( ctx->is_resumable() );
            BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
            BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
            BOOST_ASSERT( ! ctx->sleep_is_linked() );
            BOOST_ASSERT( ! ctx->terminated_is_linked() );
            // push dispatcher-context to ready-queue
            // so that ready-queue never becomes empty
            ctx->resume( dispatcher_ctx_.get() );
            BOOST_ASSERT( context::active() == dispatcher_ctx_.get() );
        } else {
            // no ready context, wait till signaled
            // set deadline to highest value
            std::chrono::steady_clock::time_point suspend_time =
                    (std::chrono::steady_clock::time_point::max)();
            // get lowest deadline from sleep-queue
            sleep_queue_type::iterator i = sleep_queue_.begin();
            if ( sleep_queue_.end() != i) {
                suspend_time = i->tp_;
            }
            // no ready context, wait till signaled
            algo_->suspend_until( suspend_time);
        }
    }
    // release terminated context'
    release_terminated_();
    // return to main-context
    return main_ctx_->suspend_with_cc();
}

void
scheduler::schedule( context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
    BOOST_ASSERT( ! ctx->terminated_is_linked() );
    // remove context ctx from sleep-queue
    // (might happen if blocked in timed_mutex::try_lock_until())
    if ( ctx->sleep_is_linked() ) {
        // unlink it from sleep-queue
        ctx->sleep_unlink();
    }
    // push new context to ready-queue
    algo_->awakened( ctx);
}

#if ! defined(BOOST_FIBERS_NO_ATOMICS)
void
scheduler::schedule_from_remote( context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    // another thread might signal the main-context of this thread
    BOOST_ASSERT( ! ctx->is_context( type::dispatcher_context) );
    BOOST_ASSERT( this == ctx->get_scheduler() );
    BOOST_ASSERT( ! ctx->ready_is_linked() );
    BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
    BOOST_ASSERT( ! ctx->terminated_is_linked() );
    // protect for concurrent access
    detail::spinlock_lock lk{ remote_ready_splk_ };
    BOOST_ASSERT( ! shutdown_);
    BOOST_ASSERT( nullptr != main_ctx_);
    BOOST_ASSERT( nullptr != dispatcher_ctx_.get() );
    // push new context to remote ready-queue
    ctx->remote_ready_link( remote_ready_queue_);
    lk.unlock();
    // notify scheduler
    algo_->notify();
}
#endif

boost::context::fiber
scheduler::terminate( detail::spinlock_lock & lk, context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( context::active() == ctx);
    BOOST_ASSERT( this == ctx->get_scheduler() );
    BOOST_ASSERT( ctx->is_context( type::worker_context) );
    BOOST_ASSERT( ! ctx->is_context( type::pinned_context) );
    BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
    BOOST_ASSERT( ! ctx->sleep_is_linked() );
    BOOST_ASSERT( ! ctx->terminated_is_linked() );
    BOOST_ASSERT( ctx->wait_queue_.empty() );
    // store the terminated fiber in the terminated-queue
    // the dispatcher-context will call
    ctx->terminated_link( terminated_queue_);
    // remove from the worker-queue
    ctx->worker_unlink();
    // release lock
    lk.unlock();
    // resume another fiber
    return algo_->pick_next()->suspend_with_cc();
}

void
scheduler::yield( context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( context::active() == ctx);
    BOOST_ASSERT( ctx->is_context( type::worker_context) || ctx->is_context( type::main_context) );
    BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
    BOOST_ASSERT( ! ctx->sleep_is_linked() );
    BOOST_ASSERT( ! ctx->terminated_is_linked() );
    // resume another fiber
    algo_->pick_next()->resume( ctx);
}

bool
scheduler::wait_until( context * ctx,
                       std::chrono::steady_clock::time_point const& sleep_tp) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( context::active() == ctx);
    BOOST_ASSERT( ctx->is_context( type::worker_context) || ctx->is_context( type::main_context) );
    BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
    BOOST_ASSERT( ! ctx->sleep_is_linked() );
    BOOST_ASSERT( ! ctx->terminated_is_linked() );
    ctx->sleep_waker_ = ctx->create_waker();
    ctx->tp_ = sleep_tp;
    ctx->sleep_link( sleep_queue_);
    // resume another context
    algo_->pick_next()->resume();
    // context has been resumed
    // check if deadline has been reached
    return std::chrono::steady_clock::now() < sleep_tp;
}

bool
scheduler::wait_until( context * ctx,
                       std::chrono::steady_clock::time_point const& sleep_tp,
                       detail::spinlock_lock & lk,
                       waker && w) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( context::active() == ctx);
    BOOST_ASSERT( ctx->is_context( type::worker_context) || ctx->is_context( type::main_context) );
    BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
    BOOST_ASSERT( ! ctx->sleep_is_linked() );
    BOOST_ASSERT( ! ctx->terminated_is_linked() );
    // push active context to sleep-queue
    ctx->sleep_waker_ = std::move( w);
    ctx->tp_ = sleep_tp;
    ctx->sleep_link( sleep_queue_);
    // resume another context
    algo_->pick_next()->resume( lk);
    // context has been resumed
    // check if deadline has been reached
    return std::chrono::steady_clock::now() < sleep_tp;
}

void
scheduler::suspend() noexcept {
    // resume another context
    algo_->pick_next()->resume();
}

void
scheduler::suspend( detail::spinlock_lock & lk) noexcept {
    // resume another context
    algo_->pick_next()->resume( lk);
}

bool
scheduler::has_ready_fibers() const noexcept {
    return algo_->has_ready_fibers();
}

void
scheduler::set_algo( algo::algorithm::ptr_t algo) noexcept {
    // move remaining context in current scheduler to new one
    while ( algo_->has_ready_fibers() ) {
        algo->awakened( algo_->pick_next() );
    }
    algo_ = std::move( algo);
}

void
scheduler::attach_main_context( context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    // main-context represents the execution context created
    // by the system, e.g. main()- or thread-context
    // should not be in worker-queue
    main_ctx_ = ctx;
    main_ctx_->scheduler_ = this;
}

void
scheduler::attach_dispatcher_context( intrusive_ptr< context > ctx) noexcept {
    BOOST_ASSERT( ctx);
    // dispatcher context has to handle
    //    - remote ready context'
    //    - sleeping context'
    //    - extern event-loops
    //    - suspending the thread if ready-queue is empty (waiting on external event)
    // should not be in worker-queue
    dispatcher_ctx_.swap( ctx);
    // add dispatcher-context to ready-queue
    // so it is the first element in the ready-queue
    // if the main context tries to suspend the first time
    // the dispatcher-context is resumed and
    // scheduler::dispatch() is executed
    dispatcher_ctx_->scheduler_ = this;
    algo_->awakened( dispatcher_ctx_.get() );
}

void
scheduler::attach_worker_context( context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( nullptr == ctx->get_scheduler() );
    BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
    BOOST_ASSERT( ! ctx->sleep_is_linked() );
    BOOST_ASSERT( ! ctx->terminated_is_linked() );
    BOOST_ASSERT( ! ctx->worker_is_linked() );
    ctx->worker_link( worker_queue_);
    ctx->scheduler_ = this;
    // an attached context must belong at least to the worker-queue
}

void
scheduler::detach_worker_context( context * ctx) noexcept {
    BOOST_ASSERT( nullptr != ctx);
    BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
    BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
    BOOST_ASSERT( ! ctx->sleep_is_linked() );
    BOOST_ASSERT( ! ctx->terminated_is_linked() );
    BOOST_ASSERT( ctx->worker_is_linked() );
    BOOST_ASSERT( ! ctx->is_context( type::pinned_context) );
    ctx->worker_unlink();
    BOOST_ASSERT( ! ctx->worker_is_linked() );
    ctx->scheduler_ = nullptr;
    // a detached context must not belong to any queue
}

}}

#ifdef BOOST_HAS_ABI_HEADERS
#  include BOOST_ABI_SUFFIX
#endif
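The scheduler above is internal machinery; user code reaches it through the public fiber API. A minimal, self-contained sketch of standard Boost.Fiber usage (not taken from this file) that exercises scheduler::yield() and the dispatcher:

#include <boost/fiber/all.hpp>
#include <iostream>

int main() {
    boost::fibers::fiber f([] {
        // runs on the default round_robin algorithm installed by scheduler()
        std::cout << "hello from a fiber\n";
        boost::this_fiber::yield(); // goes through scheduler::yield()
    });
    f.join(); // suspends the main context; the dispatcher resumes pending fibers
    return 0;
}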
State Before: ι : Type ?u.76204 α : Type u_1 β : Type ?u.76210 inst✝ : LinearOrderedSemifield α a b c d e : α m n : ℤ hb : 0 < b ⊢ 1 < a / b ↔ b < a State After: no goals Tactic: rw [lt_div_iff hb, one_mul]
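The same record as a self-contained Lean/Mathlib example (the import location is an assumption; the tactic is verbatim from the record): `lt_div_iff hb` rewrites `1 < a / b` to `1 * b < a`, and `one_mul` finishes.

import Mathlib.Order.Field.Basic -- assumed home of `lt_div_iff`

example {α : Type*} [LinearOrderedSemifield α] {a b : α} (hb : 0 < b) :
    1 < a / b ↔ b < a := by
  rw [lt_div_iff hb, one_mul]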
// // Copyright (c) 2016-2019 Damian Jarek (damian dot jarek93 at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // Official repository: https://github.com/boostorg/beast // #ifndef BOOST_BEAST_DETAIL_TUPLE_HPP #define BOOST_BEAST_DETAIL_TUPLE_HPP #include <boost/mp11/algorithm.hpp> #include <boost/mp11/integer_sequence.hpp> #include <boost/type_traits/copy_cv.hpp> #include <boost/type_traits/remove_cv.hpp> #include <cstdlib> #include <utility> namespace boost { namespace beast { namespace detail { template <std::size_t I, class T> struct tuple_element_impl { T t; tuple_element_impl(T const &t_) : t(t_) {} tuple_element_impl(T &&t_) : t(std::move(t_)) {} }; template <std::size_t I, class T> struct tuple_element_impl<I, T &> { T &t; tuple_element_impl(T &t_) : t(t_) {} }; template <class... Ts> struct tuple_impl; template <class... Ts, std::size_t... Is> struct tuple_impl<boost::mp11::index_sequence<Is...>, Ts...> : tuple_element_impl<Is, Ts>... { template <class... Us> explicit tuple_impl(Us &&... us) : tuple_element_impl<Is, Ts>(std::forward<Us>(us))... {} }; template <class... Ts> struct tuple : tuple_impl<boost::mp11::index_sequence_for<Ts...>, Ts...> { template <class... Us> explicit tuple(Us &&... us) : tuple_impl<boost::mp11::index_sequence_for<Ts...>, Ts...>{ std::forward<Us>(us)...} {} }; template <std::size_t I, class T> T &get(tuple_element_impl<I, T> &te) { return te.t; } template <std::size_t I, class T> T const &get(tuple_element_impl<I, T> const &te) { return te.t; } template <std::size_t I, class T> T &&get(tuple_element_impl<I, T> &&te) { return std::move(te.t); } template <std::size_t I, class T> T &get(tuple_element_impl<I, T &> &&te) { return te.t; } template <std::size_t I, class T> using tuple_element = typename boost::copy_cv<mp11::mp_at_c<typename remove_cv<T>::type, I>, T>::type; } // namespace detail } // namespace beast } // namespace boost #endif
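A minimal usage sketch of the detail tuple above (hypothetical main; the include path is assumed): get<I> works because the compiler deduces T from the tuple_element_impl<I, T> base class of the tuple.

#include <boost/beast/detail/tuple.hpp> // assumed install path
#include <cassert>
#include <string>

int main() {
    boost::beast::detail::tuple<int, std::string> t{42, "hi"};
    assert(boost::beast::detail::get<0>(t) == 42);   // deduces tuple_element_impl<0, int>
    assert(boost::beast::detail::get<1>(t) == "hi"); // deduces tuple_element_impl<1, std::string>
    return 0;
}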
In 2010, the city's population included about 16 percent under the age of 18 and about 12 percent who were 65 years of age or older. Females accounted for 54 percent of the total. Students at the university comprised about a third of the city's population.
# Make Confusion matrix table #-----------------------------------------------------------------------------------------------------# # Library #-----------------------------------------------------------------------------------------------------# #-----------------------------------------------------------------------------------------------------# # Settings #-----------------------------------------------------------------------------------------------------# # Details about ROC curves s_ROC_direction = "<" s_ROC_Level = "Class1" #-----------------------------------------------------------------------------------------------------# # 1 load + 2 Rename #-----------------------------------------------------------------------------------------------------# CM_Rootdir = "C:/DATA STORAGE/Projects/PPMI/Results" # Table has 32 rows; 2*4*4; make counter CM_table = data.frame(matrix(nrow=2*4*4,ncol=20+7+2)) Colnamess = c("Outcome","Feature subset","Algorithm","NumberOfFeatures","TP","TN","FP","FN","Accuracy","Kappa ","AccuracyLower","AccuracyUpper","AccuracyNull","AccuracyPValue","McnemarPValue","Sensitivity","Specificity","Pos Pred Value","Neg Pred Value","Precision","Recall","F1","Prevalence ","Detection Rate","Detection Prevalence","Balanced Accuracy","MCC","AUC","AUC_CI") colnames(CM_table)=Colnamess counter = 1 # Class for( i_class in c(0,1)){ # 0 = Impairment outcome, 1 = PDD outcome, 2 = MCIvsPDD if(i_class==0){temp_Outcome = "Impairment"} if(i_class==1){temp_Outcome = "PDD"} if(i_class==2){temp_Outcome = "MCIvsPDD"} if(i_class==3){temp_Outcome = "Subjective_complaint"} # Subset for( o_subset in c("All","Clinical","Serum","Genetic_Epigenetic")){ #c("All","Clinical","Serum","Genetic_Epigenetic") # Machine learning algorithm for( p_algo in c("cforest","svmLinear","glmnet", "rf")){ # svmLinear/glmnet problems ; c("cforest","svmLinear","glmnet", "rf") load(paste0(CM_Rootdir,"/",temp_Outcome,"/",o_subset,"/",p_algo,"_model.RData")) resdata = list(Pred_P = superresult$Combine_data_main$model[[1]]$predictions_test, Pred_P_p = superresult$Combine_data_main$model[[1]]$predictions_test_p,Obs_C = force(superresult$Combine_data_main$model[[1]]$Test_samples_class[,2]),Metrics=superresult$Combine_data_main$model[[1]]$resultMCC_TEST,NumberOfFeatures = length(superresult$Combine_data_main$model[[1]]$fit$coefnames)) minitable = table(data.frame(resdata$Pred_P,resdata$Obs_C)) MLmetrics = caret::confusionMatrix(minitable,positive="Class1") # reset TP=NA;TN=NA;FP=NA;FN=NA # assign; but can fail TN=minitable["Class0","Class0"] TP=minitable["Class1","Class1"] FN=minitable["Class0","Class1"] FP=minitable["Class1","Class0"] MCC = as.numeric(resdata$Metrics["MCC"]) rocc = pROC::roc(resdata$Obs_C, resdata$Pred_P_p[,s_ROC_Level], plot=FALSE, legacy.axes=FALSE, percent=TRUE, lwd=2, print.auc=TRUE,direction=s_ROC_direction) AUC = rocc$auc[1] auc_ci = paste0(round(as.numeric(pROC::ci.auc(rocc)),1)[c(1,3)],collapse = " - ",sep="%") # put in format CM_table[counter,] = c(i_class,o_subset,p_algo,resdata$NumberOfFeatures,TP,TN,FP,FN,MLmetrics$overall,MLmetrics$byClass,MCC,round(AUC/100,3),auc_ci) counter = counter+1 #browser(auc_ci) } } } #-----------------------------------------------------------------------------------------------------# # Cleanup! 
#-----------------------------------------------------------------------------------------------------# # remove all objects except those starting with CM_ rm(list = ls()[grep(ls(),pattern = "^CM_",invert = T)]) #-----------------------------------------------------------------------------------------------------# # Start cleaning table metrics #-----------------------------------------------------------------------------------------------------# CM_table[,-c(1:3,29)] = apply(CM_table[,-c(1:3,29)],2,function(x){try(round(as.numeric(x),3))}) CM_table_MAIN = CM_table[,colnames(CM_table)%in%c("Outcome","Feature subset","Algorithm","NumberOfFeatures","TP","TN","FP","FN","Accuracy","Sensitivity","Specificity","MCC","AUC","AUC_CI")] #-----------------------------------------------------------------------------------------------------# # save #-----------------------------------------------------------------------------------------------------# write.table(CM_table,file = paste0(CM_Rootdir,"/Table_metrics_full_",format(Sys.time(), "%M%S_%d%m%y"),".txt"),sep = "\t",quote = FALSE,row.names=FALSE) write.table(CM_table_MAIN,file = paste0(CM_Rootdir,"/Table_metrics_main_",format(Sys.time(), "%M%S_%d%m%y"),".txt"),sep = "\t",quote = FALSE,row.names=FALSE)
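A minimal sketch of the metric-extraction pattern used above, on hypothetical toy data: a 2x2 table fed to caret::confusionMatrix(), then pROC for the AUC and its confidence interval.

# Toy predictions/observations (hypothetical values)
pred <- factor(c("Class1","Class0","Class1","Class0"), levels = c("Class0","Class1"))
obs  <- factor(c("Class1","Class0","Class0","Class0"), levels = c("Class0","Class1"))
minitable <- table(data.frame(pred, obs))
metrics   <- caret::confusionMatrix(minitable, positive = "Class1")
p_class1  <- c(0.9, 0.2, 0.7, 0.4)  # hypothetical P(Class1) per sample
rocc      <- pROC::roc(obs, p_class1, percent = TRUE, direction = "<", quiet = TRUE)
# ci.auc() returns (lower, AUC, upper); keep the bounds, as in the script above
auc_ci    <- paste0(round(as.numeric(pROC::ci.auc(rocc)), 1)[c(1, 3)], "%", collapse = " - ")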
{-# LANGUAGE CPP #-} {-# LANGUAGE DefaultSignatures, TypeOperators, FlexibleContexts, TypeSynonymInstances, FlexibleInstances #-} module Pure.Data.Default where import Control.Applicative import Data.Complex import Data.Int import Data.Monoid import Data.Ratio import Data.Word import GHC.Generics class Default a where def :: a default def :: (Generic a, GDefault (Rep a)) => a def = to gdef instance Default Bool where def = False -- like other languages with construction defaults instance Default () where def = () instance Default Ordering where def = EQ instance Default Any where def = Any False instance Default All where def = All True instance Default (Last a) where def = Last Nothing instance (Num a) => Default (Sum a) where def = Sum 0 instance (Num a) => Default (Product a) where def = Product 1 instance Default (Endo a) where def = Endo id instance Default a => Default (Const a b) where def = Const def instance Default (Maybe a) where def = Nothing instance Default [a] where def = [] -- Note that (def :: Try a) /= mempty -- instance Default (Try a) where def = Trying -- instance Default Txt where def = mempty -- instance (Eq a, Hashable a) => Default (HashMap.HashMap a b) where -- def = mempty -- instance Default Value where -- def = -- #ifdef __GHCJS__ -- nullValue -- #else -- Null -- #endif -- instance Default Obj where def = mempty -- instance Default Micros where def = 0 -- instance Default Millis where def = 0 instance Default Int where def = 0 instance Default Int8 where def = 0 instance Default Int16 where def = 0 instance Default Int32 where def = 0 instance Default Int64 where def = 0 instance Default Word where def = 0 instance Default Word8 where def = 0 instance Default Word16 where def = 0 instance Default Word32 where def = 0 instance Default Word64 where def = 0 instance Default Integer where def = 0 instance Default Float where def = 0 instance Default Double where def = 0 instance (Integral a) => Default (Ratio a) where def = 0 instance (Default a,RealFloat a) => Default (Complex a) where def = def :+ def instance {-# OVERLAPPABLE #-} Default r => Default (x -> r) where def = const def instance {-# OVERLAPPING #-} Default (a -> a) where def = id instance Default a => Default (IO a) where def = return def instance Default a => Default (Dual a) where def = Dual def instance (Default a, Default b) => Default (a, b) where def = (def, def) instance (Default a, Default b, Default c) => Default (a, b, c) where def = (def, def, def) instance (Default a, Default b, Default c, Default d) => Default (a, b, c, d) where def = (def, def, def, def) instance (Default a, Default b, Default c, Default d, Default e) => Default (a, b, c, d, e) where def = (def, def, def, def, def) instance (Default a, Default b, Default c, Default d, Default e, Default f) => Default (a, b, c, d, e, f) where def = (def, def, def, def, def, def) instance (Default a, Default b, Default c, Default d, Default e, Default f, Default g) => Default (a, b, c, d, e, f, g) where def = (def, def, def, def, def, def, def) -- Inspired by Lukas Mai's data-default-class with a default instance for -- sum types based on lexicographical order - similar to the Ord and Enum -- instances class GDefault f where gdef :: f a instance GDefault V1 where gdef = undefined instance GDefault U1 where gdef = U1 instance (Default a) => GDefault (K1 i a) where gdef = K1 def instance (GDefault a, GDefault b) => GDefault (a :*: b) where gdef = gdef :*: gdef instance (GDefault a, GDefault b) => GDefault (a :+: b) where gdef = L1 gdef 
instance (GDefault a) => GDefault (M1 i c a) where gdef = M1 gdef
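A short usage sketch of the generic machinery above (Config is a hypothetical type): deriving Generic is enough for an empty `instance Default` to pick field-wise defaults via gdef.

{-# LANGUAGE DeriveGeneric #-}
module Main where

import GHC.Generics (Generic)
import Pure.Data.Default -- the module above

data Config = Config { retries :: Int, banner :: String }
  deriving (Show, Generic)

instance Default Config -- gdef: K1 def :*: K1 def, i.e. Config 0 ""

main :: IO ()
main = print (def :: Config) -- Config {retries = 0, banner = ""}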
/- Copyright (c) 2021 Markus Himmel. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Markus Himmel -/ import category_theory.monoidal.free.basic import category_theory.groupoid import category_theory.discrete_category /-! # The monoidal coherence theorem In this file, we prove the monoidal coherence theorem, stated in the following form: the free monoidal category over any type `C` is thin. We follow a proof described by Ilya Beylin and Peter Dybjer, which has been previously formalized in the proof assistant ALF. The idea is to declare a normal form (with regard to association and adding units) on objects of the free monoidal category and consider the discrete subcategory of objects that are in normal form. A normalization procedure is then just a functor `full_normalize : free_monoidal_category C ⥤ discrete (normal_monoidal_object C)`, where functoriality says that two objects which are related by associators and unitors have the same normal form. Another desirable property of a normalization procedure is that an object is isomorphic (i.e., related via associators and unitors) to its normal form. In the case of the specific normalization procedure we use, we not only get these isomorphisms, but also that they assemble into a natural isomorphism `𝟭 (free_monoidal_category C) ≅ full_normalize ⋙ inclusion`. But this means that any two parallel morphisms in the free monoidal category factor through a discrete category in the same way, so they must be equal, and hence the free monoidal category is thin. ## References * [Ilya Beylin and Peter Dybjer, Extracting a proof of coherence for monoidal categories from a proof of normalization for monoids][beylin1996] -/ universe u namespace category_theory open monoidal_category namespace free_monoidal_category variables {C : Type u} section variables (C) /-- We say an object in the free monoidal category is in normal form if it is of the form `(((𝟙_ C) ⊗ X₁) ⊗ X₂) ⊗ ⋯`. -/ @[nolint has_inhabited_instance] inductive normal_monoidal_object : Type u | unit : normal_monoidal_object | tensor : normal_monoidal_object → C → normal_monoidal_object end local notation `F` := free_monoidal_category local notation `N` := discrete ∘ normal_monoidal_object local infixr ` ⟶ᵐ `:10 := hom /-- Auxiliary definition for `inclusion`. -/ @[simp] def inclusion_obj : normal_monoidal_object C → F C | normal_monoidal_object.unit := unit | (normal_monoidal_object.tensor n a) := tensor (inclusion_obj n) (of a) /-- The discrete subcategory of objects in normal form includes into the free monoidal category. -/ @[simp] def inclusion : N C ⥤ F C := discrete.functor inclusion_obj /-- Auxiliary definition for `normalize`. -/ @[simp] def normalize_obj : F C → normal_monoidal_object C → normal_monoidal_object C | unit n := n | (of X) n := normal_monoidal_object.tensor n X | (tensor X Y) n := normalize_obj Y (normalize_obj X n) @[simp] lemma normalize_obj_unitor (n : N C) : normalize_obj (𝟙_ (F C)) n = n := rfl @[simp] lemma normalize_obj_tensor (X Y : F C) (n : N C) : normalize_obj (X ⊗ Y) n = normalize_obj Y (normalize_obj X n) := rfl section open hom /-- Auxiliary definition for `normalize`. Here we prove that objects that are related by associators and unitors map to the same normal form.
-/ @[simp] def normalize_map_aux : Π {X Y : F C}, (X ⟶ᵐ Y) → ((discrete.functor (normalize_obj X) : _ ⥤ N C) ⟶ discrete.functor (normalize_obj Y)) | _ _ (id _) := 𝟙 _ | _ _ (α_hom _ _ _) := ⟨λ X, 𝟙 _⟩ | _ _ (α_inv _ _ _) := ⟨λ X, 𝟙 _⟩ | _ _ (l_hom _) := ⟨λ X, 𝟙 _⟩ | _ _ (l_inv _) := ⟨λ X, 𝟙 _⟩ | _ _ (ρ_hom _) := ⟨λ X, 𝟙 _⟩ | _ _ (ρ_inv _) := ⟨λ X, 𝟙 _⟩ | X Y (@comp _ U V W f g) := normalize_map_aux f ≫ normalize_map_aux g | X Y (@hom.tensor _ T U V W f g) := ⟨λ X, (normalize_map_aux g).app (normalize_obj T X) ≫ (discrete.functor (normalize_obj W) : _ ⥤ N C).map ((normalize_map_aux f).app X), by tidy⟩ end section variables (C) /-- Our normalization procedure works by first defining a functor `F C ⥤ (N C ⥤ N C)` (which turns out to be very easy), and then obtain a functor `F C ⥤ N C` by plugging in the normal object `𝟙_ C`. -/ @[simp] def normalize : F C ⥤ N C ⥤ N C := { obj := λ X, discrete.functor (normalize_obj X), map := λ X Y, quotient.lift normalize_map_aux (by tidy) } /-- A variant of the normalization functor where we consider the result as an object in the free monoidal category (rather than an object of the discrete subcategory of objects in normal form). -/ @[simp] def normalize' : F C ⥤ N C ⥤ F C := normalize C ⋙ (whiskering_right _ _ _).obj inclusion /-- The normalization functor for the free monoidal category over `C`. -/ def full_normalize : F C ⥤ N C := { obj := λ X, ((normalize C).obj X).obj normal_monoidal_object.unit, map := λ X Y f, ((normalize C).map f).app normal_monoidal_object.unit } /-- Given an object `X` of the free monoidal category and an object `n` in normal form, taking the tensor product `n ⊗ X` in the free monoidal category is functorial in both `X` and `n`. -/ @[simp] def tensor_func : F C ⥤ N C ⥤ F C := { obj := λ X, discrete.functor (λ n, (inclusion.obj n) ⊗ X), map := λ X Y f, ⟨λ n, 𝟙 _ ⊗ f, by tidy⟩ } lemma tensor_func_map_app {X Y : F C} (f : X ⟶ Y) (n) : ((tensor_func C).map f).app n = 𝟙 _ ⊗ f := rfl lemma tensor_func_obj_map (Z : F C) {n n' : N C} (f : n ⟶ n') : ((tensor_func C).obj Z).map f = inclusion.map f ⊗ 𝟙 Z := by tidy /-- Auxiliary definition for `normalize_iso`. Here we construct the isomorphism between `n ⊗ X` and `normalize X n`. -/ @[simp] def normalize_iso_app : Π (X : F C) (n : N C), ((tensor_func C).obj X).obj n ≅ ((normalize' C).obj X).obj n | (of X) n := iso.refl _ | unit n := ρ_ _ | (tensor X Y) n := (α_ _ _ _).symm ≪≫ tensor_iso (normalize_iso_app X n) (iso.refl _) ≪≫ normalize_iso_app _ _ @[simp] lemma normalize_iso_app_tensor (X Y : F C) (n : N C) : normalize_iso_app C (X ⊗ Y) n = (α_ _ _ _).symm ≪≫ tensor_iso (normalize_iso_app C X n) (iso.refl _) ≪≫ normalize_iso_app _ _ _ := rfl @[simp] lemma normalize_iso_app_unitor (n : N C) : normalize_iso_app C (𝟙_ (F C)) n = ρ_ _ := rfl /-- Auxiliary definition for `normalize_iso`. -/ @[simp] def normalize_iso_aux (X : F C) : (tensor_func C).obj X ≅ (normalize' C).obj X := nat_iso.of_components (normalize_iso_app C X) (by tidy) /-- The isomorphism between `n ⊗ X` and `normalize X n` is natural (in both `X` and `n`, but naturality in `n` is trivial and was "proved" in `normalize_iso_aux`). This is the real heart of our proof of the coherence theorem. 
-/ def normalize_iso : tensor_func C ≅ normalize' C := nat_iso.of_components (normalize_iso_aux C) begin rintros X Y f, apply quotient.induction_on f, intro f, ext n, induction f generalizing n, { simp only [mk_id, functor.map_id, category.id_comp, category.comp_id] }, { dsimp, simp only [id_tensor_associator_inv_naturality_assoc, ←pentagon_inv_assoc, tensor_hom_inv_id_assoc, tensor_id, category.id_comp, discrete.functor_map_id, comp_tensor_id, iso.cancel_iso_inv_left, category.assoc], dsimp, simp only [category.comp_id] }, { dsimp, simp only [discrete.functor_map_id, comp_tensor_id, category.assoc, pentagon_inv_assoc, ←associator_inv_naturality_assoc, tensor_id, iso.cancel_iso_inv_left], dsimp, simp only [category.comp_id],}, { dsimp, rw triangle_assoc_comp_right_assoc, simp only [discrete.functor_map_id, category.assoc], dsimp, simp only [category.comp_id] }, { dsimp, simp only [triangle_assoc_comp_left_inv_assoc, inv_hom_id_tensor_assoc, tensor_id, category.id_comp, discrete.functor_map_id], dsimp, simp only [category.comp_id] }, { dsimp, rw [←(iso.inv_comp_eq _).2 (right_unitor_tensor _ _), category.assoc, ←right_unitor_naturality], simp only [discrete.functor_map_id, iso.cancel_iso_inv_left, category.assoc], dsimp, simp only [category.comp_id] }, { dsimp, simp only [←(iso.eq_comp_inv _).1 (right_unitor_tensor_inv _ _), iso.hom_inv_id_assoc, right_unitor_conjugation, discrete.functor_map_id, category.assoc], dsimp, simp only [category.comp_id], }, { dsimp at *, rw [id_tensor_comp, category.assoc, f_ih_g ⟦f_g⟧, ←category.assoc, f_ih_f ⟦f_f⟧, category.assoc, ←functor.map_comp], congr' 2 }, { dsimp at *, rw associator_inv_naturality_assoc, slice_lhs 2 3 { rw [←tensor_comp, f_ih_f ⟦f_f⟧] }, conv_lhs { rw [←@category.id_comp (F C) _ _ _ ⟦f_g⟧] }, simp only [category.comp_id, tensor_comp, category.assoc], congr' 2, rw [←mk_tensor, quotient.lift_mk], dsimp, rw [functor.map_comp, ←category.assoc, ←f_ih_g ⟦f_g⟧, ←@category.comp_id (F C) _ _ _ ⟦f_g⟧, ←category.id_comp ((discrete.functor inclusion_obj).map _), tensor_comp], dsimp, simp only [category.assoc, category.comp_id], congr' 1, convert (normalize_iso_aux C f_Z).hom.naturality ((normalize_map_aux f_f).app n), exact (tensor_func_obj_map _ _ _).symm } end /-- The isomorphism between an object and its normal form is natural. -/ def full_normalize_iso : 𝟭 (F C) ≅ full_normalize C ⋙ inclusion := nat_iso.of_components (λ X, (λ_ X).symm ≪≫ ((normalize_iso C).app X).app normal_monoidal_object.unit) begin intros X Y f, dsimp, rw [left_unitor_inv_naturality_assoc, category.assoc, iso.cancel_iso_inv_left], exact congr_arg (λ f, nat_trans.app f normal_monoidal_object.unit) ((normalize_iso.{u} C).hom.naturality f), end end /-- The monoidal coherence theorem. -/ instance subsingleton_hom {X Y : F C} : subsingleton (X ⟶ Y) := ⟨λ f g, have (full_normalize C).map f = (full_normalize C).map g, from subsingleton.elim _ _, begin rw [←functor.id_map f, ←functor.id_map g], simp [←nat_iso.naturality_2 (full_normalize_iso.{u} C), this] end⟩ section groupoid section open hom /-- Auxiliary construction for showing that the free monoidal category is a groupoid. Do not use this, use `is_iso.inv` instead. 
-/ def inverse_aux : Π {X Y : F C}, (X ⟶ᵐ Y) → (Y ⟶ᵐ X) | _ _ (id X) := id X | _ _ (α_hom _ _ _) := α_inv _ _ _ | _ _ (α_inv _ _ _) := α_hom _ _ _ | _ _ (ρ_hom _) := ρ_inv _ | _ _ (ρ_inv _) := ρ_hom _ | _ _ (l_hom _) := l_inv _ | _ _ (l_inv _) := l_hom _ | _ _ (comp f g) := (inverse_aux g).comp (inverse_aux f) | _ _ (hom.tensor f g) := (inverse_aux f).tensor (inverse_aux g) end instance : groupoid.{u} (F C) := { inv := λ X Y, quotient.lift (λ f, ⟦inverse_aux f⟧) (by tidy), ..(infer_instance : category (F C)) } end groupoid end free_monoidal_category end category_theory
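A small sketch of the coherence theorem above in use (stated with fully-qualified names, assuming this file is imported): thinness means any two parallel morphisms are equal, via the `subsingleton_hom` instance.

example {C : Type u} {X Y : category_theory.free_monoidal_category C}
  (f g : X ⟶ Y) : f = g :=
subsingleton.elim f g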
(* ===== TODOs ===== - Use [Lemma]s (or [Hint Extern]s) to remove duplication in proofs. - Clean up ordering of definitions/lemmas/parameters/notations/etc. - Improve names of lemmas/theorems/etc. - Remove dead code. - Break up into separate files? - Improve compilation speed. - Improve evaluation speed. - Set up extraction. *) From Coq Require Import Classes.RelationClasses Lists.List Program.Equality Program.Tactics Setoid ssreflect. From Equations Require Import Equations. From mathcomp Require Import bigop choice eqtype seq ssrbool ssrfun ssrnat. From deriving Require Import deriving. From extructures Require Import fmap fset ord. From AlphaPearl Require Import Util. Set Asymmetric Patterns. Set Implicit Arguments. Set Bullet Behavior "Strict Subproofs". Unset Printing Implicit Defensive. Obligation Tactic := program_simpl. #[local] Open Scope fset_scope. Definition Fresh_correct (𝒱 : ordType) (Fresh : {fset 𝒱} -> 𝒱) : Prop := forall s : {fset 𝒱}, Fresh s ∉ s. Module Type Alpha. Parameter 𝒱 : ordType. Parameter Fresh : {fset 𝒱} -> 𝒱. Parameter HFresh : Fresh_correct Fresh. Inductive term : Type := | abstraction : 𝒱 -> term -> term | application : term -> term -> term | variable : 𝒱 -> term. #[export] Hint Constructors term : core. Declare Scope term_scope. Bind Scope term_scope with term. Delimit Scope term_scope with term. Fixpoint free_variables (t : term) : {fset 𝒱} := match t with | abstraction x t => free_variables t :\ x | application t1 t2 => free_variables t1 ∪ free_variables t2 | variable x => fset1 x end. End Alpha. Module AlphaFacts (Import M : Alpha). #[local] Implicit Type Fresh : {fset 𝒱} -> 𝒱. #[local] Notation "x '∪' '{' y '}'" := (x :|: fset1 y) (at level 52, format "x '∪' '{' y '}'") : fset_scope. Canonical term_indType := IndType term [indDef for term_rect]. Canonical term_eqType := EqType term [derive eqMixin for term]. Canonical term_choiceType := ChoiceType term [derive choiceMixin for term]. Canonical term_ordType := OrdType term [derive ordMixin for term]. Implicit Types (W X Y Z : {fset 𝒱}) (t u : term) (v w x y z : 𝒱) (R S : {fmap 𝒱 → 𝒱}). #[local] Notation FV := free_variables. Definition Tm X t : bool := FV t ⊆ X. (** Page 2: "Instead of defining a set of terms we define a family of sets Tm(X) of terms with free variables in X ⊆fin 𝒱 inductively...." *) Section in_Tm. #[local] Reserved Notation "t '∈' 'Tm' X" (at level 40). Inductive in_Tm : {fset 𝒱} -> term -> Prop := | Tm_variable : forall X x, x ∈ X -> variable x ∈ Tm X | Tm_application : forall X t u, t ∈ Tm X -> u ∈ Tm X -> application t u ∈ Tm X | Tm_abstraction : forall X t x, t ∈ Tm (X ∪ {x}) -> abstraction x t ∈ Tm X where "t '∈' 'Tm' X" := (in_Tm X t). End in_Tm. #[local] Hint Constructors in_Tm : core. Lemma TmP : forall X t, reflect (in_Tm X t) (t ∈ Tm X). Proof. rewrite /Tm /in_mem /=. introv. gen X. induction t; intros; simpl; ((rewrite fsubD1set fsetUC; destruct (IHt (X ∪ {s})) as [HX_s|HX_s]) || (rewrite fsubUset; destruct (IHt1 X) as [H1|H1], (IHt2 X) as [H2|H2]) || (rewrite fsub1set; destruct (s ∈ X) eqn:Hs)); repeat constructor; auto; intros HX; inverts HX; auto. rewrite H1 // in Hs. Qed. Definition is_subset_of R X Y : bool := (domm R ⊆ X) && (codomm R ⊆ Y). #[local] Notation "R '⊆' X '×' Y" := (is_subset_of R X Y) (at level 40, X at next level). Lemma is_subset_ofP : forall {R} {X} {Y}, reflect (forall x y, R x y -> x ∈ X /\ y ∈ Y) (is_subset_of R X Y). Proof. unfold is_subset_of. introv. apply Bool.iff_reflect. split; intros. - rewrite <- (rwP (@andP (domm R ⊆ X) (codomm R ⊆ Y))). 
split; apply (rwP fsubsetP); intros x HRx. + apply (rwP dommP) in HRx as [v HRx]. eapply H. eauto. + apply (rwP codommP) in HRx as [v HRx]. eapply H. eauto. - apply (rwP andP) in H as [HRX HRY]. apply (rwP fsubsetP) in HRX, HRY. split. + apply HRX. apply (rwP dommP). eauto. + apply HRY. apply (rwP codommP). eauto. Qed. #[local] Notation partial_bijection := is_injective (only parsing). (** Page 3: "Given R a partial bijection as above and x, y ∈ 𝒱 we define the symmetric update of R as...." *) Definition update R x y : {fmap 𝒱 → 𝒱} := unionm (remm (rem_valm R y) x) [fmap (x, y)]. #[local] Notation "R '⦅' x ',' y '⦆'" := (update R x y) (at level 0, format "R '⦅' x ',' y '⦆'"). Lemma updateE : forall R x y k, getm (R⦅x,y⦆) k = if k == x then Some y else match getm R k with | Some v' => if y == v' then None else Some v' | None => None end. Proof. introv. rewrite unionmE setmE remmE rem_valmE /=. destruct (k =P x); subst; auto. destruct (getm R k) eqn:HRk; auto. destruct (y =P s); subst; auto. Qed. (** Page 3: "It is easy to see that R(x,y) is a partial bijection." *) Lemma partial_bijection_update : forall R x y, partial_bijection R -> partial_bijection R⦅x,y⦆. Proof. introv HRinj. apply (rwP injectivemP) in HRinj. rewrite <- (rwP (injectivemP (m := R⦅x,y⦆))). intros k1 Hk1 k2 Hks. apply (rwP dommP) in Hk1 as [v1 Hkv1]. rewrite !updateE in Hkv1, Hks. destruct (k1 =P x); subst. - inverts Hkv1. destruct (k2 =P x); subst; auto. destruct (getm R k2) eqn:HRk2; rewrite ?HRk2 // in Hks. destruct (v1 =P s); subst; inverts Hks. exfalso. auto. - destruct (getm R k1) eqn:HRk1; rewrite ?HRk1 // in Hkv1, Hks. destruct (y =P s); subst; inverts Hkv1. destruct (k2 =P x); subst. * inverts Hks. exfalso. auto. * destruct (getm R k2) eqn:HRk2; rewrite ?HRk2 // in Hks. destruct (y =P s); subst; inverts Hks. apply HRinj. -- apply (rwP dommP). eauto. -- rewrite HRk1 //. Qed. Lemma domm_update : forall R x y, domm R⦅x,y⦆ ⊆ (domm R ∪ {x}). Proof. introv. apply (rwP fsubsetP). intros k HR'k. rewrite domm_union domm_rem in_fsetU in_fsetD in_fset1 in HR'k. rewrite in_fsetU in_fset1 orbC. destruct (k =P x); subst; auto. apply (rwP dommP). apply (rwP orP) in HR'k as [HR'k|Hk]. - apply (rwP andP) in HR'k as [Hknx HR'k]. apply (rwP dommP) in HR'k as [v HR'k]. rewrite rem_valmE in HR'k. destruct (getm R k) eqn:HRk; eauto. - rewrite domm_set in_fsetU in_fset1 orbC domm0 /= in Hk. apply (rwP eqP) in Hk. subst. contradiction. Qed. Lemma codomm_update : forall R x y, codomm R⦅x,y⦆ ⊆ (codomm R ∪ {y}). Proof. introv. apply (rwP fsubsetP). intros v HvℛR'. apply (rwP codommP) in HvℛR' as [k HR'k]. rewrite updateE in HR'k. rewrite in_fsetU in_fset1 orbC. destruct (k =P x); subst. { inverts HR'k. rewrite eq_refl //. } destruct (getm R k) eqn:HRk; cycle 1. { inverts HR'k. } destruct (y =P s); subst; inverts HR'k. apply not_eq_sym, (introF eqP) in n0. rewrite n0. apply (rwP codommP). eauto. Qed. (** Page 3: "R(x,y) ... ∈ (X ∪ {x}) × ...." *) Lemma update_type : forall X Y R x y, R ⊆ X × Y -> R⦅x,y⦆ ⊆ (X ∪ {x}) × (Y ∪ {y}). Proof. introv HRtype. apply (rwP is_subset_ofP). intros x' y' HR'x'. rewrite !in_fsetU !in_fset1 ![_ || (_ == _)]orbC. rewrite /fmap_to_Prop updateE in HR'x'. destruct (x' =P x); subst. { inverts HR'x'. rewrite eq_refl //. } destruct (getm R x') eqn:HRx'; cycle 1. { inverts HR'x'. } destruct (y =P s); subst; inverts HR'x'. apply not_eq_sym, (introF eqP) in n0. rewrite <- (rwP is_subset_ofP) in HRtype. apply HRtype in HRx' as [Hx'X Hy'Y]. rewrite n0 Hx'X Hy'Y //. Qed. 
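In plain set notation (a LaTeX sketch, not part of the development), `updateE` above says that the symmetric update drops every pair that clashes with x in the first component or with y in the second, then adjoins (x, y):

R⦅x,y⦆ \;=\; \{\, (a,b) \in R \mid a \neq x \wedge b \neq y \,\} \;\cup\; \{(x,y)\}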
#[local] Reserved Notation "t '≡_α^' R u" (at level 40, R at level 0, format "t '≡_α^' R u"). Fixpoint α_equivalent' R t u : bool := match t, u with | variable x, variable y => (x, y) ∈ R | application t u, application t' u' => t ≡_α^R t' && (u ≡_α^R u') | abstraction x t, abstraction y u => t ≡_α^R⦅x,y⦆ u | _, _ => false end where "t '≡_α^' R u" := (α_equivalent' R t u). (** Page 3: "We now define ≡α^R ⊆ Tm(X) × Tm(Y) parametrized by a partial bijection R ⊆ X × Y, inductively...." *) Section α_equivalent''. #[local] Reserved Notation "t '≡_α^' R u" (at level 40, R at level 0). Inductive α_equivalent'' : {fmap 𝒱 -> 𝒱} -> term -> term -> Prop := | α_equivalent''_variable : forall R x y, (x, y) ∈ R -> variable x ≡_α^R variable y | α_equivalent''_application : forall R t t' u u', t ≡_α^R t' -> u ≡_α^R u' -> application t u ≡_α^R application t' u' | α_equivalent''_abstraction : forall R t u x y, t ≡_α^R⦅x,y⦆ u -> abstraction x t ≡_α^R abstraction y u where "x '≡_α^' R y" := (α_equivalent'' R x y). End α_equivalent''. #[local] Hint Constructors α_equivalent'' : core. Lemma α_equivalent'P : forall R t u, reflect (α_equivalent'' R t u) (t ≡_α^R u). Proof. introv. destruct (t ≡_α^R u) eqn:Hα; constructor. - gen R u. induction t; intros; destruct u; inverts Hα as Hα; auto. apply (rwP andP) in Hα as []; auto. - introv Hα'. dependent induction Hα'; inverts Hα; auto. + rewrite H // in H1. + apply negbT, (rwP nandP) in H0 as [H|H]; apply negbTE in H; auto. Qed. (** Page 3: "We now define ≡α^R ⊆ Tm(X) × Tm(Y)." *) Lemma α_equivalent'_type : forall R t u, t ≡_α^R u -> t ∈ Tm (domm R) /\ u ∈ Tm (codomm R). Proof. rewrite /in_mem /= /Tm. introv Hα. gen R u. induction t; simpl; introv Hα; destruct u; inverts Hα. - apply IHt in H0 as [Httype Hutype]. apply (rwP fsubsetP) in Httype, Hutype. split; apply (rwP fsubsetP); intros x H; rewrite in_fsetD in_fset1 in H; apply (rwP andP) in H as [Hxneqs Hxtype]; apply negbTE in Hxneqs. + apply Httype, (rwP dommP) in Hxtype as [v HR'x]. rewrite updateE Hxneqs in HR'x. destruct (getm R x) eqn:HRx; cycle 1. { inverts HR'x. } destruct (s0 =P s1); subst; inverts HR'x. apply (rwP dommP). eauto. + apply Hutype, (rwP codommP) in Hxtype as [k HR'k]. rewrite updateE /= in HR'k. destruct (k =P s); subst; auto. { inverts HR'k. rewrite eq_refl // in Hxneqs. } destruct (getm R k) eqn:HRk; cycle 1. { inverts HR'k. } destruct (s0 =P s1); subst; inverts HR'k. apply (rwP codommP). eauto. - apply (rwP andP) in H0 as [Ht1 Ht2]. apply IHt1 in Ht1 as [Ht1 Hu1]. apply IHt2 in Ht2 as [Ht2 Hu2]. apply (rwP fsubsetP) in Ht1, Hu1, Ht2, Hu2. split; apply (rwP fsubsetP); introv H; rewrite in_fsetU in H; apply (rwP orP) in H as [Hxt1|Hxt2]; auto. - rewrite /in_mem /= /in_mem /= in H0. apply (rwP getmP) in H0. rewrite !fsub1set. split. + apply (rwP dommP). eauto. + apply (rwP codommP). eauto. Qed. (** TODO Formalize "Note that we cannot replace partial bijections by bijections..."? *) (** Page 3: "Given X, Y, Z ⊂fin 𝒱 we write 1X = ...." *) Definition identity : {fset 𝒱} -> {fmap 𝒱 → 𝒱} := mkfmapf id. Lemma identityE : forall X x, getm (identity X) x = if x ∈ X then Some x else None. Proof. introv. rewrite mkfmapfE. destruct (x ∈ X) eqn:HxX; rewrite HxX //. Qed. Class Identity (A : Type) := { identity' : forall X, A }. #[global] Hint Mode Identity ! : typeclass_instances. Arguments identity' _ : simpl never. #[local] Notation "'1__' X" := (identity' X) (at level 40, format "'1__' X"). #[global] Instance fmap_𝒱_Identity : Identity {fmap 𝒱 → 𝒱} := { identity' := identity }. 
#[global] Instance fmap_term_Identity : Identity {fmap 𝒱 → term} := { identity' X := mapm variable (1__X : {fmap 𝒱 → 𝒱}) }. #[global] Instance fmap_to_Prop_Identity : Identity (𝒱 -> 𝒱 -> Prop) := { identity' := identity }. (** Page 3: "1X ... ⊆ X × X." *) Lemma identity_type : forall X, (1__X : {fmap 𝒱 → 𝒱}) ⊆ X × X. Proof. introv. apply (rwP is_subset_ofP). introv Hxy. rewrite /identity' /= /fmap_to_Prop identityE in Hxy. destruct (x ∈ X) eqn:HxX; inverts Hxy. auto. Qed. (** Page 3: "1X... obviously is a partial bijection." *) Lemma partial_bijection_identity : forall X, partial_bijection (1__X : {fmap 𝒱 → 𝒱}). Proof. intros. rewrite /partial_bijection /fmap_IsInjective /injective /identity' /fmap_𝒱_Identity /identity. rewrite <- (rwP injectivemP). intros x Hx x' Hxx'. apply (rwP dommP) in Hx as [v Hx]. rewrite !identityE in Hx, Hxx'. destruct (x ∈ X) eqn:HxX; inverts Hx. destruct (x' ∈ X) eqn:Hx'X; inverts Hxx'. auto. Qed. (** Page 3: "Given R ⊆ X × Y and S ⊆ Y × Z we write Rᵒ...." *) Definition converse R : {fmap 𝒱 → 𝒱} := invm R. #[local] Notation "R 'ᵒ'" := (converse R) (at level 40). (** Page 3: "Both operations are closed under partial bijections." *) Lemma converse_closed_under_partial_bijection : forall R, partial_bijection R -> partial_bijection (R ᵒ). Proof. introv HRinj. apply (rwP injectivemP) in HRinj. simpl. rewrite <- (rwP injectivemP). intros x HR'x x' HR'x'. apply (rwP dommP) in HR'x as [v HR'x]. rewrite HR'x in HR'x'. symmetry in HR'x'. apply getm_inv in HR'x, HR'x'. rewrite HR'x in HR'x'. inverts HR'x'. auto. Qed. Lemma domm_converse : forall R, partial_bijection R -> domm (R ᵒ) = codomm R. Proof. introv HRinj. apply eq_fset. intros x. apply Bool.eq_iff_eq_true. split; introv H. - rewrite codomm_domm_invm //. - rewrite codomm_domm_invm // in H. Qed. Lemma codomm_converse : forall R, partial_bijection R -> codomm (R ᵒ) = domm R. Proof. introv HRinj. assert (partial_bijection (R ᵒ)) as HR'inj. { apply converse_closed_under_partial_bijection. auto. } apply eq_fset. intros x. apply Bool.eq_iff_eq_true. split; introv H. - rewrite codomm_domm_invm // in H. apply (rwP dommP) in H as [v HR'x]. rewrite invmK in HR'x. + apply (rwP dommP). eauto. + apply (rwP injectivemP). auto. - rewrite codomm_domm_invm //. apply (rwP dommP). rewrite invmK. + apply (rwP dommP). eauto. + apply (rwP injectivemP). auto. Qed. (** Page 3: "Rᵒ ... ⊆ Y × X." *) Lemma converse_type : forall R X Y, R ⊆ X × Y -> R ᵒ ⊆ Y × X. Proof. introv HRtype. apply (rwP is_subset_ofP). intros y x HR'y. rewrite <- (rwP is_subset_ofP) in HRtype. apply and_comm, HRtype. apply getm_inv. auto. Qed. (** Page 3: "Given R ⊆ X × Y and S ⊆ Y × Z we write... R; S...." *) Definition compose R S : {fmap 𝒱 → 𝒱} := mkfmapfp (fun k => match getm R k with | Some v => getm S v | None => None end) (domm R). #[local] Notation "R ';;' S" := (compose R S) (at level 40, format "R ';;' S"). Lemma composeE : forall R S x, getm (R;;S) x = match getm R x with | Some x => getm S x | None => None end. Proof. introv. rewrite mkfmapfpE. destruct (x ∈ domm R) eqn:HRx; rewrite HRx //. apply negbT, (rwP dommPn) in HRx. rewrite HRx //. Qed. Lemma domm_compose : forall R S, domm (R;;S) ⊆ domm R. Proof. introv. apply (rwP fsubsetP). introv HRSx. apply (rwP dommP) in HRSx as [v HRSx]. rewrite composeE in HRSx. destruct (x ∈ domm R) eqn:HRx. - apply (rwP dommP) in HRx as [v' HRx]. rewrite HRx // in HRSx. - apply negbT, (rwP dommPn) in HRx. rewrite HRx // in HRSx. Qed. Lemma codomm_compose : forall R S, codomm (R;;S) ⊆ codomm S. Proof. introv. 
apply (rwP fsubsetP). introv HxℛRS. apply (rwP codommP) in HxℛRS as [k HRSx]. rewrite composeE in HRSx. destruct (getm R k) eqn:HRk. - apply (rwP codommP). eauto. - inverts HRSx. Qed. (** Page 3: "R;S ... ⊆ X × Z." *) Lemma compose_type : forall R S X Y Z, R ⊆ X × Y -> S ⊆ Y × Z -> R;;S ⊆ X × Z. Proof. introv HRtype HStype. rewrite <- (rwP is_subset_ofP) in HRtype. rewrite <- (rwP is_subset_ofP) in HStype. apply (rwP is_subset_ofP). intros x z HRSx. rewrite /fmap_to_Prop composeE in HRSx. destruct (getm R x) eqn:HRx; cycle 1. { inverts HRSx. } split. - eapply HRtype. eauto. - eapply HStype. eauto. Qed. (** Page 3: "The set of partial bijections is closed under both operations." *) Lemma compose_closed_under_partial_bijection : forall R S, partial_bijection R -> partial_bijection S -> partial_bijection (R;;S). Proof. unfold partial_bijection. introv HRinj HSinj. apply (rwP injectivemP) in HRinj, HSinj. simpl. rewrite <- (rwP injectivemP). intros x HRSx x' Hxx'. apply (rwP dommP) in HRSx as [v HRSx]. rewrite composeE in HRSx, Hxx'. destruct (getm R x) eqn:HRx; cycle 1. { inverts HRSx. } rewrite HRSx composeE in Hxx'. destruct (getm R x') eqn:HRx'; cycle 1. { inverts Hxx'. } rewrite -HRSx in Hxx'. apply HSinj in Hxx'; cycle 1. { apply (rwP dommP). eauto. } subst. rewrite -HRx in HRx'. apply HRinj in HRx'; auto. rewrite HRx in HRx'. apply (rwP dommP). eauto. Qed. (** Page 3: Lemma 1.1. *) Lemma update_identity : forall X x, (1__X)⦅x,x⦆ = 1__(X ∪ {x}). Proof. introv. apply eq_fmap. intros k. rewrite updateE !identityE in_fsetU in_fset1 orbC. destruct (k =P x); subst; auto. destruct (k ∈ X) eqn:HkX; auto. apply not_eq_sym, (introF eqP) in n. rewrite n //. Qed. (** Page 3: Lemma 1.2. *) Lemma update_converse : forall R x y, partial_bijection R -> R⦅x,y⦆ᵒ = R ᵒ⦅y,x⦆. Proof. introv HRinj. apply eq_fmap. intros k. rewrite updateE /converse. destruct (k =P y); subst. - apply getm_inv. rewrite invmK. + rewrite updateE eq_refl //. + intros k HR'k k' Hkk'. epose proof @partial_bijection_update _ _ _ HRinj as H. apply (rwP injectivemP) in H. apply H; eauto. - destruct (invm R k) eqn:HR'k; rewrite ?HR'k. + apply getm_inv in HR'k. destruct (x =P s); subst. * apply invm_None. { apply partial_bijection_update. auto. } rewrite <- (rwP (@codommPn _ 𝒱 _ _)). intros. rewrite updateE. destruct (k' =P s); subst. -- apply Bool.negb_true_iff, Bool.not_true_iff_false. introv Hyk. apply (rwP eqP) in Hyk. inverts Hyk. auto. -- destruct (getm R k') eqn:HRk'; rewrite ?HRk'; auto. destruct (y =P s0); subst; auto. apply Bool.negb_true_iff, Bool.not_true_iff_false. introv Hs0k. apply (rwP eqP) in Hs0k. inverts Hs0k. apply n0. apply (rwP injectivemP) in HRinj. apply HRinj. ++ apply (rwP dommP). eauto. ++ rewrite HRk' //. * apply getm_inv. rewrite invmK; cycle 1. { intros k' HR'k' k'' Hk'k''. epose proof @partial_bijection_update _ _ _ HRinj as H. apply (rwP injectivemP) in H. apply H; eauto. } rewrite updateE. replace (s == x) with false; cycle 1. { symmetry. apply Bool.not_true_iff_false. introv Hsx. apply (rwP eqP) in Hsx. subst. auto. } destruct (getm R s) eqn:HRs; rewrite HR'k. -- destruct (y =P s0); subst; inverts HR'k; auto. contradiction. -- inverts HR'k. + apply invm_None in HR'k; auto. apply invm_None. { apply partial_bijection_update. auto. } rewrite <- (rwP (@codommPn _ 𝒱 _ _)). intros k'. rewrite updateE. destruct (k' =P x); subst. * apply Bool.negb_true_iff, Bool.not_true_iff_false. introv Hyk. apply (rwP eqP) in Hyk. inverts Hyk. auto. * destruct (getm R k') eqn:HRk'; rewrite ?HRk' //. 
destruct (y =P s); subst; auto. rewrite <- (rwP (@codommPn _ _ R k)) in HR'k. apply Bool.negb_true_iff, Bool.not_true_iff_false. introv Hsk. apply (rwP eqP) in Hsk. inverts Hsk. pose proof (HR'k k') as HnRk'. rewrite HRk' eq_refl // in HnRk'. Qed. (** Page 3: Lemma 1.3. *) Lemma update_compose : forall R S x y z a b, (R⦅x,y⦆;;S⦅y,z⦆) a b -> ((R;;S)⦅x,z⦆) a b. Proof. introv HR'S'. cut ((a = x /\ z = b) \/ (x <> a /\ z <> b /\ (R;;S) a b)). { rewrite /fmap_to_Prop updateE. introv [[Haz Hzb]|(Hxa & Hzb & Hab)]; subst. - rewrite eq_refl //. - apply not_eq_sym in Hxa. apply (introF eqP) in Hxa, Hzb. rewrite Hxa Hab Hzb //. } cut (exists c, (a = x /\ y = c /\ z = b) \/ (x <> a /\ y <> c /\ z <> b /\ R a c /\ S c b)). { rewrite /fmap_to_Prop composeE. introv [c [(Haz & Hyc & Hzb)|(Hxz & Hyc & Hzb & Hac & Hcb)]]; subst; auto. rewrite Hac Hcb. auto. } { rewrite /fmap_to_Prop composeE updateE in HR'S'. destruct (a =P x); subst. { rewrite updateE eq_refl in HR'S'. inverts HR'S'. eauto. } destruct (getm R a) eqn:HRa; cycle 1. { inverts HR'S'. } destruct (y =P s); subst. { inverts HR'S'. } apply not_eq_sym in n0. apply (introF eqP) in n, n0. rewrite updateE n0 in HR'S'. destruct (getm S s) eqn:HSs; cycle 1. { inverts HR'S'. } destruct (z =P s0); subst; inverts HR'S'; eauto. apply (elimF eqP) in n, n0. exists s. right. repeat split; auto. } Qed. Lemma α_equivalent'_with_behaviorally_identical_maps : forall R S t u, (forall x y, R x y -> x ∈ FV t -> S x y) -> t ≡_α^R u -> t ≡_α^S u. Proof. introv HReqvS Htαu. gen R S u. induction t; introv HReqvS Htαu; destruct u; inverts Htαu. - apply IHt with (R := R⦅s,s0⦆); auto. introv HR'xy Hxt. rewrite /fmap_to_Prop updateE in HR'xy. rewrite /fmap_to_Prop updateE. destruct (x =P s); subst; auto. destruct (getm R x) eqn:HRx; cycle 1. { inverts HR'xy. } destruct (s0 =P s1); subst; inverts HR'xy. apply HReqvS in HRx. + rewrite HRx. apply (introF eqP) in n0. rewrite n0 //. + rewrite /= in_fsetD in_fset1 Hxt andbT. apply (introF eqP) in n. rewrite n //. - apply (rwP andP) in H0 as [Hα1 Hα2]. simpl. rewrite <- (rwP andP). split; (apply IHt1 with R + apply IHt2 with R); auto; introv HRxy Hx; apply HReqvS; auto; rewrite /= in_fsetU Hx ?orbT //. - apply (rwP getmP), HReqvS in H0. + apply (rwP getmP). rewrite H0 //. + rewrite /= in_fset1 eq_refl //. Qed. Lemma α_equivalent'_supermap : forall (R__sub R__super : {fmap 𝒱 → 𝒱}) t u, (forall (k : 𝒱) v, R__sub k v -> R__super k v) -> t ≡_α^R__sub u -> t ≡_α^R__super u. Proof. introv Hsub Hαsub. apply α_equivalent'_with_behaviorally_identical_maps with R__sub; auto. Qed. (** Page 4: "We now define ≡α = ≡α^1X." *) Definition α_equivalent t u := t ≡_α^(1__(FV t)) u. Infix "≡_α" := α_equivalent (at level 40). Notation "t '≢_α' u" := (~~ (t ≡_α u)) (at level 40). (** We will use these notations when the assumptions make it impossible for a substitution to fail, but Coq can't figure that out (without a lot of dependent-type boilerplate, which we want to avoid for clarity). *) (* We will use [#[program]] to discover the wildcard variables, since their values don't actually matter. *) #[local] Notation "a '`≡_α' b" := (odflt (variable _) a ≡_α odflt (variable _) b) (at level 40). #[local] Notation "a '`≡_α^' R b" := (odflt (variable _) a ≡_α^R odflt (variable _) b) (at level 40, R at level 0, format "a '`≡_α^' R b"). (** Page 4: Proposition 2.1. *) Proposition α_equivalent'_identity : forall X t, t ∈ Tm X -> t ≡_α^(1__X) t. Proof. introv HtX. apply (rwP fsubsetP) in HtX. gen X. induction t; intros; simpl. - rewrite update_identity. 
apply IHt. introv Hxt. rewrite in_fsetU in_fset1 orbC. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply HtX. rewrite /= in_fsetD in_fset1 n Hxt //. - rewrite <- (rwP andP). split; (apply IHt1 || apply IHt2); introv Hx; apply HtX; rewrite /= in_fsetU Hx ?orbT //. - assert (s ∈ fset1 s) as Hss. { rewrite in_fset1 eq_refl //. } apply HtX in Hss. apply (rwP getmP). rewrite identityE Hss //. Qed. (** Page 4: Proposition 2.2. *) Proposition α_equivalent'_converse : forall R t u, partial_bijection R -> t ≡_α^R u -> u ≡_α^(R ᵒ) t. Proof. introv HRinj Hα. gen R u. induction t; introv HRinj Hα; destruct u; inverts Hα as Hα. - apply IHt in Hα. + rewrite /= -update_converse //. + apply partial_bijection_update. auto. - apply (rwP andP) in Hα as [Hα1 Hα2]. apply IHt1 in Hα1; auto. apply IHt2 in Hα2; auto. rewrite /= Hα1 Hα2 //. - apply (rwP getmP) in Hα. apply (rwP getmP), getm_inv. rewrite invmK //. apply (rwP injectivemP). auto. Qed. Lemma converseK : forall R, partial_bijection R -> R ᵒ ᵒ = R. Proof. introv HRinj. apply eq_fmap. intros k. apply (rwP injectivemP) in HRinj. unfold "ᵒ". rewrite invmK //. Qed. Proposition α_equivalent'_converse' : forall R t u, partial_bijection R -> t ≡_α^R u = u ≡_α^(R ᵒ) t. Proof. introv HRinj. apply Bool.eq_iff_eq_true; split; introv Hα. - apply α_equivalent'_converse; auto. - apply α_equivalent'_converse in Hα. + rewrite converseK // in Hα. + apply converse_closed_under_partial_bijection. auto. Qed. (** Page 4: Proposition 2.3. *) Proposition α_equivalent'_compose : forall R S t u (v : term), t ≡_α^R u -> u ≡_α^S v -> t ≡_α^(R;;S) v. Proof. introv Htαu Huαv. gen u v R S. induction t; introv Htαu Huαv; destruct u, v; inverts Htαu as Htαu; inverts Huαv as Huαv. - apply IHt with (S := S⦅s0,s1⦆) (v := v) in Htαu; auto. apply α_equivalent'_supermap with (R__super := (R;;S)⦅s,s1⦆) in Htαu; cycle 1. { intros. eapply update_compose; eauto. } rewrite /= Htαu //. - apply (rwP andP) in Htαu as [Htαu1 Htαu2], Huαv as [Huαv1 Huαv2]. apply IHt1 with (R := R) (S := S) (v := v1) in Htαu1; auto. apply IHt2 with (R := R) (S := S) (v := v2) in Htαu2; auto. rewrite /= Htαu1 Htαu2 //. - apply (rwP getmP) in Htαu, Huαv. apply (rwP getmP). rewrite /= composeE Htαu //. Qed. Lemma α_equivalent'_maps_all_FV : forall R t u x, t ≡_α^R u -> x ∈ FV t -> exists y, getm R x = Some y /\ y ∈ FV u. Proof. introv Hα Hx. gen R u. induction t; introv Hα; destruct u; inverts Hα as Hα. - rewrite /= in_fsetD in_fset1 in Hx. apply (rwP andP) in Hx as [Hxns Hxt]. pose proof Hα. apply IHt in Hα as (y & HR'x & Hyu); auto. rewrite updateE in HR'x. destruct (x =P s); subst; auto. destruct (getm R x) eqn:HRx; cycle 1. { inverts HR'x. } destruct (s0 =P s1); subst; inverts HR'x. exists y. split; auto. rewrite /= in_fsetD in_fset1 Hyu //. apply not_eq_sym, (introF eqP) in n0. rewrite n0 //. - apply (rwP andP) in Hα as [Hα1 Hα2]. rewrite /= in_fsetU in Hx. apply (rwP orP) in Hx as [Hx|Hx]. + apply IHt1 with (u := u1) in Hα1 as (y & HRx & Hyu1); auto. exists y. rewrite in_fsetU Hyu1 //. + apply IHt2 with (u := u2) in Hα2 as (y & HRx & Hyu2); auto. exists y. rewrite in_fsetU Hyu2 orbT //. - apply (rwP getmP) in Hα. rewrite /= in_fset1 in Hx. apply (rwP eqP) in Hx. subst. exists s0. rewrite /= in_fset1 eq_refl //. Qed. Lemma α_equivalent'_maps_all_FV' : forall R t u y, partial_bijection R -> t ≡_α^R u -> y ∈ FV u -> exists x, getm R x = Some y /\ x ∈ FV t. Proof. introv HRinj Hα Hyu. apply α_equivalent'_converse in Hα; auto. pose proof α_equivalent'_maps_all_FV _ _ _ _ Hα Hyu as (x & HR'y & Hxt). 
apply getm_inv in HR'y. eauto. Qed. Lemma α_equivalent'_implies_related_FV : forall R t u, partial_bijection R -> t ≡_α^R u -> FV u = pimfset (getm R) (FV t). Proof. introv HRinj Hα. apply eq_fset. intros y. rewrite in_pimfset. symmetry. destruct (y ∈ FV u) eqn:Hxu. - eapply α_equivalent'_maps_all_FV' in Hxu as (x & HRx & Hxt); eauto. apply (rwP imfsetP). eauto. - apply Bool.not_true_iff_false. introv Hyt'. apply (rwP imfsetP) in Hyt' as [x Hxt HRx]. eapply α_equivalent'_maps_all_FV in Hxt as (y' & H'Rx & Hy'u); eauto. rewrite H'Rx in HRx. inverts HRx. rewrite Hy'u // in Hxu. Qed. Lemma α_equivalent'_bijection_includes_all_FV : forall R t u, t ≡_α^R u -> t ∈ Tm (domm R). Proof. introv Hα. rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hxt. gen R u. induction t; introv Hα; destruct u; inverts Hα as Hα. - rewrite /= in_fsetD in_fset1 in Hxt. apply (rwP andP) in Hxt as [Hxns Hxt]. cut (x ∈ domm R⦅s,s0⦆ = true). { introv HR'x. apply (rwP dommP) in HR'x as [v HR'x]. rewrite updateE in HR'x. destruct (x =P s); subst; auto. destruct (getm R x) eqn:HRx. - eapply (rwP dommP). eauto. - inverts HR'x. } eapply IHt; eauto. - apply (rwP andP) in Hα as [Hα1 Hα2]. rewrite /= /in_mem /= in_fsetU in Hxt. apply (rwP orP) in Hxt as [Hx|Hx]; eauto. - apply (rwP getmP) in Hα. rewrite in_fset1 in Hxt. apply (rwP eqP) in Hxt. subst. apply (rwP dommP). eauto. Qed. Lemma α_equivalent'_bijection_includes_all_FV' : forall R t u, partial_bijection R -> t ≡_α^R u -> u ∈ Tm (codomm R). Proof. introv HRinj Hα. eapply α_equivalent'_converse in Hα; eauto. rewrite codomm_domm_invm //. eapply α_equivalent'_bijection_includes_all_FV; eauto. Qed. Lemma FV_respects_α_equivalence : forall t u, t ≡_α u -> FV u = FV t. Proof. introv Hα. replace (FV t) with (pimfset (getm (1__(FV t) : {fmap 𝒱 → 𝒱})) (FV t)); cycle 1. { apply eq_fset. intros x. rewrite in_pimfset. destruct (x ∈ FV t) eqn:Hxt. - apply (rwP imfsetP). exists x; auto. rewrite identityE Hxt //. - apply negbTE, (introN imfsetP). intros [y Hyt]. rewrite identityE Hyt in H. inverts H. rewrite Hyt // in Hxt. } eapply α_equivalent'_implies_related_FV; eauto. apply partial_bijection_identity. Qed. Lemma domm_identity : forall X, domm (1__X : {fmap 𝒱 → 𝒱}) = X. Proof. introv. apply eq_fset. intros x. destruct (x ∈ X) eqn:HxX. - apply (rwP dommP). exists x. rewrite identityE HxX //. - apply negbTE. apply (rwP dommPn). rewrite identityE HxX //. Qed. Lemma α_equivalent'_implies_α_equivalent : forall t u, t ≡_α u <-> exists X, t ≡_α^(1__X) u. Proof. introv. split; introv Hα; eauto. destruct Hα as [X Hα]. apply α_equivalent'_with_behaviorally_identical_maps with (R := 1__X); auto. intros x y Hxy Hxt. rewrite /fmap_to_Prop identityE in Hxy. rewrite /fmap_to_Prop identityE Hxt. eapply α_equivalent'_bijection_includes_all_FV in Hα; eauto. rewrite domm_identity /= in Hα. apply (rwP fsubsetP) in Hα. apply Hα in Hxt. rewrite Hxt // in Hxy. Qed. Lemma compose_identity_right : forall R, R;;(1__(codomm R)) = R. Proof. introv. apply eq_fmap. intros x. rewrite composeE. destruct (getm R x) eqn:HRx; auto. rewrite identityE. replace (s ∈ codomm R) with true; auto. symmetry. apply (rwP codommP). eauto. Qed. Lemma compose_identity_left : forall R, (1__(domm R));;R = R. Proof. introv. apply eq_fmap. intros x. rewrite composeE identityE. destruct (x ∈ domm R) eqn:HRx; auto. apply negbT, (rwP dommPn) in HRx. auto. Qed. Lemma codomm_identity : forall X, codomm (1__X : {fmap 𝒱 → 𝒱}) = X. Proof. introv. apply eq_fset. intros x. destruct (x ∈ X) eqn:HxX. - apply (rwP codommP). exists x. 
rewrite identityE HxX //. - apply negbTE. rewrite <- (rwP (@codommPn _ 𝒱 _ _)). intros y. apply (introN eqP). introv HXy. rewrite identityE in HXy. destruct (y ∈ X) eqn:HyX; inverts HXy. rewrite HyX // in HxX. Qed. Lemma compose_identity : forall X Y, (1__X);;(1__Y) = 1__(X ∩ Y). Proof. introv. apply eq_fmap. intros x. rewrite composeE !identityE in_fsetI. destruct (x ∈ X) eqn:HxX; auto. rewrite identityE //. Qed. Lemma compose_identity' : forall X, (1__X);;(1__X) = 1__X. Proof. introv. pose proof codomm_identity X as Hℛ1. pose proof compose_identity_right (1__X) as Hℛ1r. rewrite Hℛ1 // in Hℛ1r. Qed. Lemma converse_identity : forall X, (1__X)ᵒ = 1__X. Proof. introv. apply eq_fmap. intros x. rewrite identityE. destruct (x ∈ X) eqn:HxX. - apply getm_inv. rewrite invmK. + rewrite identityE HxX //. + apply (rwP injectivemP). apply partial_bijection_identity. - apply invm_None. + apply partial_bijection_identity. + rewrite <- (rwP (@codommPn _ 𝒱 _ _)). intros x'. apply (introN eqP). introv Hxx'. rewrite identityE in Hxx'. destruct (x' ∈ X) eqn:Hx'X; inverts Hxx'. rewrite Hx'X // in HxX. Qed. (** Page 4: "≡α is... reflexive." *) Corollary α_equivalent_reflexive : forall t, t ≡_α t. Proof. introv. apply α_equivalent'_identity. rewrite /Tm /in_mem /= fsubsetxx //. Qed. Corollary α_equivalent_transitive' : forall X t u (v : term), t ≡_α^(1__X) u -> u ≡_α^(1__X) v -> t ≡_α^(1__X) v. Proof. introv Htαu Huαv. pose proof α_equivalent'_compose _ _ _ _ _ Htαu Huαv as Htαv. rewrite compose_identity fsetIid // in Htαv. Qed. (** Page 4: "≡α is... transitive." *) Corollary α_equivalent_transitive : forall t u (v : term), t ≡_α u -> u ≡_α v -> t ≡_α v. Proof. introv Htαu Huαv. apply α_equivalent_transitive' with u; auto. apply FV_respects_α_equivalence in Htαu. rewrite -Htαu //. Qed. (** Page 4: "≡α is... symmetric." *) Corollary α_equivalent_symmetric : forall t u, t ≡_α u -> u ≡_α t. Proof. introv Hα. apply α_equivalent'_converse in Hα. - rewrite converse_identity in Hα. eapply α_equivalent'_implies_α_equivalent; eauto. - apply partial_bijection_identity. Qed. (** Page 4: Corollary 3. *) #[global] Instance α_equivalent_Equivalence : Equivalence α_equivalent. Proof. split; intros t. - apply α_equivalent_reflexive. - apply α_equivalent_symmetric. - apply α_equivalent_transitive. Qed. Add Parametric Relation : term α_equivalent reflexivity proved by α_equivalent_reflexive symmetry proved by α_equivalent_symmetric transitivity proved by α_equivalent_transitive as α_equivalent_rel. Add Parametric Morphism : FV with signature α_equivalent ==> eq as FV_morph. Proof. introv Hα. apply FV_respects_α_equivalence. symmetry. auto. Qed. (** Since Coq doesn't directly support quotient types, we're representing "Tm^α(X)" as "Tm(X)" and manually proving that functions respect "≡α". *) Implicit Types f g : {fmap 𝒱 → term}. (** Page 4: "Given a substitution f and x ∈ 𝒱, t ∈ Tm(Y) we define the update...." *) Definition update_substitution (A : Type) : {fmap 𝒱 → A} -> 𝒱 -> A -> {fmap 𝒱 → A} := @setm _ _. #[local] Notation "f '[' x ',' t ']'" := (update_substitution f x t) (at level 10, x at next level, t at next level, format "f [ x ',' t ]"). (** Page 4: "f[[x,t]] ∈ X ∪ {x} ⟶ ...." *) Lemma domm_update_substitution : forall f x t, domm (f[x,t]) = domm f ∪ {x}. Proof. introv. apply eq_fset. intros k. rewrite in_fsetU in_fset1. apply Bool.eq_iff_eq_true. split; introv Hk. - apply (rwP dommP) in Hk as [v Hf'k]. rewrite setmE in Hf'k. destruct (k =P x); subst. { apply orbT. } rewrite orbF. apply (rwP dommP). eauto. 
- apply (rwP dommP). rewrite setmE. apply (rwP orP) in Hk as [Hfk|Hkx]. + apply (rwP dommP) in Hfk as [v Hfk]. destruct (k =P x); subst; eauto. + rewrite Hkx. eauto. Qed. Definition codomm_Tm_set f : {fset 𝒱} := ⋃_(i ∈ codomm f) (FV i). Lemma codomm_Tm_setP : forall {f} {x}, reflect (exists t, x ∈ FV t /\ t ∈ codomm f) (x ∈ codomm_Tm_set f). Proof. introv. destruct (x ∈ codomm_Tm_set f) eqn:Hxℛf; constructor; rewrite /= /codomm_Tm_set in_bigcup in Hxℛf. - apply (rwP hasP) in Hxℛf as [t Hxℱf]. exists t. auto. - apply negbT, (rwP hasPn) in Hxℛf. intros (t & Hxt & Htℛf). apply Hxℛf in Htℛf. rewrite Hxt // in Htℛf. Qed. (** Page 4: "f[[x,t]] ∈ ... ⟶ Tm(Y)." *) Lemma codomm_update_substitution' : forall Y f x t, codomm_Tm_set f ⊆ Y -> t ∈ Tm Y -> codomm_Tm_set (f[x,t]) ⊆ Y. Proof. introv HℛfY HtY. apply (rwP fsubsetP) in HℛfY. apply (rwP fsubsetP). intros k Hℛf'k. apply (rwP codomm_Tm_setP) in Hℛf'k as (t' & Hkt' & Hℛf't'). apply (rwP codommP) in Hℛf't' as [k' Hf'k']. rewrite setmE in Hf'k'. destruct (k' =P x); subst. { inverts Hf'k'. apply (rwP fsubsetP) in HtY. auto. } apply HℛfY, (rwP codomm_Tm_setP). exists t'. split; auto. apply (rwP codommP). eauto. Qed. #[local] Reserved Notation "'`⦇' f '⦈'" (format "'`⦇' f '⦈'"). (** Page 4: "A substitution can be extended to a function on terms ⦇f⦈ ∈ Tm(X) ⟶ Tm(Y)...." *) Fixpoint lift_substitution' f Fresh t : term := match t with | variable x => odflt t (getm f x) | application t u => application (`⦇f⦈ Fresh t) (`⦇f⦈ Fresh u) | abstraction x t => let Y := codomm_Tm_set f in let z := Fresh Y in abstraction z (`⦇f[x,variable z]⦈ Fresh t) end where "'`⦇' f '⦈'" := (lift_substitution' f). Definition lift_substitution f : term -> term := `⦇f⦈ Fresh. #[local] Notation "'⦇' f '⦈'" := (lift_substitution f) (format "'⦇' f '⦈'"). Lemma α_equivalent_update : forall R t u x y, x ∉ domm R -> y ∉ codomm R -> t ≡_α^R u -> t ≡_α^(R⦅x,y⦆) u. Proof. introv HnRx HnR'y Hα. apply α_equivalent'_supermap with (R__sub := R); auto. introv HRk. apply (rwP dommPn) in HnRx. destruct (k =P x); subst. { rewrite HRk // in HnRx. } rewrite <- (rwP (@codommPn _ _ R y)) in HnR'y. destruct (y =P v); subst. { pose proof HnR'y k as HnRk. rewrite HRk eq_refl // in HnRk. } apply (introF eqP) in n, n0. rewrite /fmap_to_Prop updateE n HRk n0 //. Qed. Lemma α_equivalent_update_reorder : forall R t u x y z z', z ∉ domm R -> z' ∉ codomm R -> t ≡_α^(R⦅x,y⦆) u -> t ≡_α^(R⦅z,z'⦆⦅x,y⦆) u. Proof. introv HnRz HnR'z' Hα. apply α_equivalent'_supermap with (R__sub := R⦅x,y⦆); auto. introv HR'k. rewrite /fmap_to_Prop updateE in HR'k. repeat rewrite /fmap_to_Prop updateE. destruct (k =P x); subst; auto. destruct (k =P z); subst. - destruct (getm R z) eqn:HRz; cycle 1. { inverts HR'k. } destruct (y =P s); subst; inverts HR'k. assert (z ∈ domm R) as H'Rz by (apply (rwP dommP); eauto). rewrite H'Rz // in HnRz. - destruct (getm R k) eqn:HRk; cycle 1. { inverts HR'k. } destruct (y =P s); subst; inverts HR'k. destruct (z' =P v); subst. { assert (v ∈ codomm R) as HR'v by (apply (rwP codommP); eauto). rewrite HR'v // in HnR'z'. } apply (introF eqP) in n1. rewrite /= n1 //. Qed. Lemma in_update : forall R x y z z', z ∉ domm R -> z' ∉ codomm R -> (x, y) ∈ R -> (x, y) ∈ R⦅z,z'⦆. Proof. introv HnRz HnR'z' HRx. apply (rwP getmP) in HRx. apply (rwP getmP). rewrite updateE HRx. destruct (x =P z); subst. { assert (z ∈ domm R) as HRz by (apply (rwP dommP); eauto). rewrite HRz // in HnRz. } destruct (z' =P y); subst; auto. assert (y ∈ codomm R) as HR'y by (apply (rwP codommP); eauto). rewrite HR'y // in HnR'z'. Qed. 
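Read as ordinary equations (a LaTeX sketch, not part of the development), the clauses of `lift_substitution'` above are the standard capture-avoiding substitution rules: variables look up f (defaulting to the variable itself when f is undefined there), applications are mapped structurally, and the bound variable is renamed to a fresh z before descending:

\begin{aligned}
⦇f⦈\,x &= f(x)\\
⦇f⦈\,(t\;u) &= (⦇f⦈\,t)\;(⦇f⦈\,u)\\
⦇f⦈\,(\lambda x.\,t) &= \lambda z.\,⦇f[x,z]⦈\,t, \qquad z = \mathrm{Fresh}(\mathrm{codomm\_Tm\_set}\,f)
\end{aligned}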
Lemma update_repeat_noop : forall R x y, R⦅x,y⦆⦅x,y⦆ = R⦅x,y⦆. Proof. introv. apply eq_fmap. intros k. rewrite !updateE. destruct (k =P x); subst; auto. destruct (getm R k) eqn:HRk; auto. destruct (y =P s); subst; auto. apply (introF eqP) in n0. rewrite /= n0 //. Qed. Lemma codomm_Tm_setPn : forall {f} {x}, reflect (forall t, ~ (x ∈ FV t /\ t ∈ codomm f)) (x ∉ codomm_Tm_set f). Proof. introv. destruct (x ∉ codomm_Tm_set f) eqn:Hnℛf; rewrite /= /codomm_Tm_set in_bigcup in Hnℛf; constructor; introv H. - destruct H as [Hxt Htℛf]. apply negbTE, Bool.not_true_iff_false in Hnℛf. apply Hnℛf. apply (rwP hasP). exists t; auto. - apply Bool.negb_false_iff, (rwP hasP) in Hnℛf as [t Htℛf]. apply H with t. auto. Qed. Lemma α_equivalent'_with_behaviorally_identical_maps' : forall R S t u, (forall x y, R x y -> x ∈ FV t -> y ∈ FV u -> S x y) -> t ≡_α^R u -> t ≡_α^S u. Proof. introv HReqvS Hα. gen R S u. induction t; introv HReqvS Hα; destruct u; inverts Hα. - apply IHt with (R := R⦅s,s0⦆); auto. introv HR'x Hxt Hyu. rewrite /fmap_to_Prop updateE in HR'x. rewrite /fmap_to_Prop updateE. destruct (x =P s); subst; auto. destruct (getm R x) eqn:HRx; cycle 1. { inverts HR'x. } destruct (s0 =P s1); subst; inverts HR'x. apply HReqvS in HRx. + rewrite HRx. apply (introF eqP) in n0. rewrite n0 //. + rewrite /= in_fsetD in_fset1 Hxt andbT. apply (introF eqP) in n. rewrite n //. + rewrite in_fsetD in_fset1 Hyu andbT. apply not_eq_sym, (introF eqP) in n0. rewrite n0 //. - apply (rwP andP) in H0 as [Hα1 Hα2]. simpl. rewrite <- (rwP andP). split; (apply IHt1 with R + apply IHt2 with R); auto; introv HRx Hxt Hyu; apply HReqvS; auto; rewrite /= in_fsetU ?Hxt ?Hyu ?orbT //. - apply (rwP getmP), HReqvS in H0. + apply (rwP getmP). rewrite H0 //. + rewrite /= in_fset1 eq_refl //. + rewrite /= in_fset1 eq_refl //. Qed. (** Page 5: Lemma 5. *) #[program] Lemma lemma5 : forall R S f g, R ⊆ domm f × domm g -> partial_bijection R -> partial_bijection S -> (forall x x', R x x' -> getm f x `≡_α^S getm g x') -> forall x y z z', z ∉ codomm_Tm_set f -> z' ∉ codomm_Tm_set g -> forall w w' : 𝒱, R⦅x,y⦆ w w' -> getm (f[x,variable z]) w `≡_α^(S⦅z,z'⦆) getm (g[y,variable z']) w'. Proof. introv HRtype HRinj HSinj HRα Hnzℛf Hnz'ℛg HR'w. rewrite /fmap_to_Prop updateE in HR'w. rewrite !setmE. destruct (w =P x); subst. - inverts HR'w. rewrite !eq_refl. apply (rwP getmP). rewrite updateE eq_refl //. - destruct (getm R w) eqn:HRw; cycle 1. { inverts HR'w. } destruct (y =P s); subst; inverts HR'w. apply not_eq_sym, (introF eqP) in n0. rewrite n0. pose proof HRw as H'Rw. apply HRα in H'Rw. inverts H'Rw. rewrite <- (rwP is_subset_ofP) in HRtype. apply HRtype in HRw as [Hfw Hα]. apply (rwP dommP) in Hfw as [t Hfw], Hα as [t' Hgw']. rewrite -> Hfw, Hgw' in *. apply α_equivalent'_with_behaviorally_identical_maps' with (R := S); auto. intros x' y' HSx' Hx't Hy't'. rewrite /fmap_to_Prop updateE. destruct (x' =P z); subst. { rewrite <- (rwP codomm_Tm_setPn) in Hnzℛf. exfalso. apply Hnzℛf with t. split; auto. apply (rwP codommP). eauto. } rewrite HSx'. destruct (z' =P y'); subst; auto. rewrite <- (rwP codomm_Tm_setPn) in Hnz'ℛg. exfalso. apply Hnz'ℛg with t'. split; auto. apply (rwP codommP). eauto. Qed. Lemma subset_domm_substitution : forall f x t, domm f ⊆ domm (f[x,t]). Proof. introv. apply (rwP fsubsetP). intros x' Hfx'. apply (rwP dommP) in Hfx' as [t' Hfx']. apply (rwP dommP). rewrite setmE. destruct (x' =P x); subst; eauto. Qed. (** Page 4: Proposition 4. 
*) #[program] Proposition substitution'_preserves_α_congruence' : forall Fresh R S f g, Fresh_correct Fresh -> R ⊆ domm f × domm g -> partial_bijection R -> partial_bijection S -> (forall x x', R x x' -> getm f x `≡_α^S getm g x') -> forall t u, t ≡_α^R u -> `⦇f⦈ Fresh t ≡_α^S `⦇g⦈ Fresh u. Proof. introv HFresh HRtype HRinj HSinj HRα Hα. gen R S f g u. induction t; introv HRinj HSinj HRtype HRα Hα; destruct u; inverts Hα. - eapply IHt with (R := R⦅s,s0⦆); eauto. + apply partial_bijection_update. auto. + apply partial_bijection_update. auto. + rewrite !domm_set /=. apply (rwP is_subset_ofP). intros x y HR'x. rewrite /= !in_fsetU !in_fset1. rewrite /fmap_to_Prop updateE in HR'x. destruct (x =P s); subst. { inverts HR'x. rewrite eq_refl //. } destruct (getm R x) eqn:HRx; cycle 1. { inverts HR'x. } destruct (s0 =P s1); subst; inverts HR'x. rewrite <- (rwP is_subset_ofP) in HRtype. apply HRtype in HRx as [Hnxs Hns0y]. simpl in *. rewrite Hnxs Hns0y orbT //. + introv HR'x. eapply lemma5; eauto; apply Fresh_correct. - apply (rwP andP) in H0 as [Hα1 Hα2]. eapply IHt1 with (S := S) in Hα1; eauto. eapply IHt2 with (S := S) in Hα2; eauto. rewrite /= Hα1 Hα2 //. - apply (rwP getmP) in H0. pose proof H0 as HRs. apply HRα in HRs. rewrite <- (rwP is_subset_ofP) in HRtype. apply HRtype in H0 as [Hfs Hgs0]. simpl in *. apply (rwP dommP) in Hfs as [v Hfs], Hgs0 as [v' Hgs0]. rewrite -> Hfs, Hgs0 in *. auto. Qed. #[program] Proposition substitution_preserves_α_congruence' : forall R S f g, R ⊆ domm f × domm g -> partial_bijection R -> partial_bijection S -> (forall x x', R x x' -> getm f x `≡_α^S getm g x') -> forall t u, t ≡_α^R u -> ⦇f⦈ t ≡_α^S ⦇g⦈ u. Proof. introv HRtype HRinj HSinj HRα Hα. eapply substitution'_preserves_α_congruence'; eauto. apply HFresh. Qed. #[program] Corollary substitution'_preserves_α_congruence_identity : forall Fresh f g, Fresh_correct Fresh -> (forall x, x ∈ domm f ∩ domm g -> getm f x `≡_α^(1__(codomm_Tm_set f ∩ codomm_Tm_set g)) getm g x) -> forall t u, t ≡_α^(1__(domm f ∩ domm g)) u -> `⦇f⦈ Fresh t ≡_α^(1__(codomm_Tm_set f ∩ codomm_Tm_set g)) `⦇g⦈ Fresh u. Proof. introv HFresh Hα Htαu. eapply substitution'_preserves_α_congruence'; eauto; try apply partial_bijection_identity; try apply (rwP is_subset_ofP); intros x y Hxy; rewrite /fmap_to_Prop identityE in_fsetI in Hxy; destruct (x ∈ domm f) eqn:Hfx; inverts Hxy as Hxy; destruct (x ∈ domm g) eqn:Hgx; inverts Hxy as Hxy. - rewrite Hgx //. - apply Hα. rewrite /= in_fsetI Hgx Hfx //. Qed. #[program] Corollary substitution_preserves_α_congruence_identity : forall f g, (forall x, x ∈ domm f ∩ domm g -> getm f x `≡_α^(1__(codomm_Tm_set f ∩ codomm_Tm_set g)) getm g x) -> forall t u, t ≡_α^(1__(domm f ∩ domm g)) u -> ⦇f⦈ t ≡_α^(1__(codomm_Tm_set f ∩ codomm_Tm_set g)) ⦇g⦈ u. Proof. introv Hα Htαu. eapply substitution'_preserves_α_congruence_identity; eauto. apply HFresh. Qed. (** Page 5: "Clearly, the preservation property arises as a special case by setting R = 1X and S = 1Y." *) #[program] Theorem substitution'_preserves_α_congruence : forall Fresh f g, Fresh_correct Fresh -> (forall x, x ∈ domm f ∩ domm g -> getm f x `≡_α getm g x) -> forall t u, t ∈ Tm (domm f ∩ domm g) -> t ≡_α u -> `⦇f⦈ Fresh t ≡_α `⦇g⦈ Fresh u. Proof. introv HFresh Hα Hfgt Htαu. eapply α_equivalent'_implies_α_equivalent. exists (codomm_Tm_set f ∩ codomm_Tm_set g). apply substitution'_preserves_α_congruence_identity; auto. - introv Hfgx. pose proof Hfgx as H'fgx. rewrite in_fsetI in Hfgx. apply (rwP andP) in Hfgx as [Hfx Hgx]. apply Hα in H'fgx. 
apply (rwP dommP) in Hfx as [y__f Hfx], Hgx as [y__g Hgx]. rewrite Hfx Hgx /=. rewrite Hfx Hgx /= in H'fgx. apply α_equivalent'_supermap with (R__sub := 1__(FV y__f)); auto. introv Hkv. rewrite /fmap_to_Prop identityE in Hkv. destruct (k ∈ FV y__f) eqn:Hky__f; inverts Hkv. rewrite /fmap_to_Prop identityE in_fsetI. assert (v ∈ codomm_Tm_set f) as Hℛfv. { apply (rwP codomm_Tm_setP). exists y__f. split; auto. apply (rwP codommP). eauto. } assert (v ∈ codomm_Tm_set g) as Hℛgv. { apply (rwP codomm_Tm_setP). exists y__g. apply FV_respects_α_equivalence in H'fgx. rewrite H'fgx. split; auto. apply (rwP codommP). eauto. } rewrite Hℛfv Hℛgv //. - apply α_equivalent'_supermap with (R__sub := 1__(FV t)); auto. introv Hkv. rewrite /fmap_to_Prop identityE in Hkv. destruct (k ∈ FV t) eqn:Hkt; inverts Hkv. apply (rwP fsubsetP) in Hfgt. apply Hfgt in Hkt. rewrite /fmap_to_Prop identityE Hkt //. Qed. #[program] Theorem substitution_preserves_α_congruence : forall f g, (forall x, x ∈ domm f ∩ domm g -> getm f x `≡_α getm g x) -> forall t u, t ∈ Tm (domm f ∩ domm g) -> t ≡_α u -> ⦇f⦈ t ≡_α ⦇g⦈ u. Proof. introv Hα Hfgt Htαu. eapply substitution'_preserves_α_congruence; eauto. apply HFresh. Qed. (** Page 5: "A consequence of proposition 4 is that substitution is an operation on α-equivalence classes." *) Theorem lift_substitution'_respects_α_equivalence : forall Fresh f t u, Fresh_correct Fresh -> t ∈ Tm (domm f) -> t ≡_α u -> `⦇f⦈ Fresh t ≡_α `⦇f⦈ Fresh u. Proof. introv HFresh Hft Hα. eapply substitution'_preserves_α_congruence; eauto. - reflexivity. - rewrite fsetIid //. Qed. Theorem lift_substitution_respectsα_equivalence : forall f t u, t ∈ Tm (domm f) -> t ≡_α u -> ⦇f⦈ t ≡_α ⦇f⦈ u. Proof. introv Hft Hα. apply lift_substitution'_respects_α_equivalence; auto. apply HFresh. Qed. Lemma codomm_Tm_set_mapm_variable : forall R, codomm_Tm_set (mapm variable R) = codomm R. Proof. introv. apply eq_fset. intros t. apply Bool.eq_iff_eq_true. split; introv HR't. - apply (rwP codomm_Tm_setP) in HR't as (x & Hxt & HR'x). apply (rwP codommP) in HR'x as [k HR'k]. rewrite mapmE in HR'k. destruct (getm R k) eqn:HRk; inverts HR'k. rewrite in_fset1 in Hxt. apply (rwP eqP) in Hxt. subst. apply (rwP codommP). eauto. - apply (rwP codommP) in HR't as [k HRk]. apply (rwP codomm_Tm_setP). exists (variable t). split. + rewrite in_fset1 eq_refl //. + apply (rwP codommP). exists k. rewrite mapmE HRk //. Qed. (** Page 6: Lemma 7. *) Lemma lemma7' : forall Fresh (f : {fmap 𝒱 → 𝒱}) t, Fresh_correct Fresh -> partial_bijection f -> t ∈ Tm (domm f) -> `⦇mapm variable f⦈ Fresh t ≡_α^(f ᵒ) t. Proof. introv HFresh Hfinj Hft. apply (rwP fsubsetP) in Hft. gen f. induction t; introv Hfinj Hft; simpl in *. - rewrite /= /update_substitution -mapm_setm -/update_substitution -update_converse //. rewrite codomm_Tm_set_mapm_variable. replace (setm f s (Fresh0 (codomm f))) with (f⦅s,Fresh0 (codomm f)⦆); cycle 1. { apply eq_fmap. intros x. rewrite updateE setmE /=. destruct (x =P s); subst; auto. destruct (getm f x) eqn:Hfx; auto. destruct (Fresh0 (codomm f) =P s0); subst; auto. assert (Fresh0 (codomm f) ∈ codomm f) as HFreshℛf. { apply (rwP codommP). eauto. } pose proof HFresh (codomm f) as HnFresh. rewrite HFreshℛf // in HnFresh. } apply IHt; auto. + apply partial_bijection_update. auto. + introv Hxt. apply (rwP dommP). rewrite updateE. destruct (x =P s); subst; simpl; eauto. assert (x ∈ FV t :\ s) as Hxtns. { apply (introF eqP) in n. rewrite in_fsetD in_fset1 n Hxt //. } apply Hft, (rwP dommP) in Hxtns as [v Hfx]. rewrite Hfx /=. 
destruct (Fresh0 (codomm f) =P v); subst; simpl; eauto. assert (Fresh0 (codomm f) ∈ codomm f) as HFreshℛf. { apply (rwP codommP). eauto. } pose proof HFresh (codomm f) as HnFresh. rewrite HFreshℛf // in HnFresh. - rewrite <- (rwP andP). split. + apply IHt1; auto. introv Hxt1. apply Hft. rewrite in_fsetU Hxt1 //. + apply IHt2; auto. introv Hxt2. apply Hft. rewrite in_fsetU Hxt2 orbT //. - apply α_equivalent'_converse; auto. rewrite /= mapmE. assert (s ∈ fset1 s) as Hss. { rewrite in_fset1 eq_refl //. } apply Hft, (rwP dommP) in Hss as [v Hfs]. rewrite Hfs /=. apply (rwP getmP). auto. Qed. Lemma lemma7 : forall (f : {fmap 𝒱 → 𝒱}) t, partial_bijection f -> t ∈ Tm (domm f) -> ⦇mapm variable f⦈ t ≡_α^(f ᵒ) t. Proof. introv Hfinj Hft. apply lemma7'; auto. apply HFresh. Qed. (** Page 6: "η(x) = x." *) Definition η__ X : {fmap 𝒱 → term} := 1__X. Lemma ηE : forall X x, getm (η__ X) x = if x ∈ X then Some (variable x) else None. Proof. introv. rewrite mapmE identityE. destruct (x ∈ X) eqn:HxX; auto. Qed. (** Page 6: "ηX ∈ X ⟶ ...." *) Lemma domm_η : forall X, domm (η__ X) = X. Proof. introv. rewrite domm_map. apply domm_identity. Qed. (** Page 6: "ηX ∈ ... ⟶ Tm^α(X)." *) Lemma codomm_Tm_set_η : forall X, codomm_Tm_set (η__ X) = X. Proof. introv. apply eq_fset. intros x. apply Bool.eq_iff_eq_true. split; introv HxX. - apply (rwP codomm_Tm_setP) in HxX as (t & Hxt & Hℛηt). apply (rwP codommP) in Hℛηt as [x' Hℛηt]. rewrite mapmE identityE in Hℛηt. destruct (x' ∈ X) eqn:Hx'X; inverts Hℛηt. rewrite in_fset1 in Hxt. apply (rwP eqP) in Hxt. subst. auto. - apply (rwP codomm_Tm_setP). exists (variable x). split. { rewrite /= in_fset1 eq_refl //. } apply (rwP codommP). exists x. rewrite ηE HxX //. Qed. Lemma update_substitution_overwrite : forall f x y y', f[x,variable y][x,variable y'] = f[x, variable y']. Proof. introv. apply eq_fmap. intros x'. rewrite !setmE. destruct (x' =P x); subst; auto. Qed. Lemma update_substitution_reorder : forall f x x' y y', x <> x' -> f[x,variable y][x',variable y'] = f[x',variable y'][x,variable y]. Proof. introv Hnxx'. apply eq_fmap. intros z. rewrite !setmE. destruct (z =P x); subst; auto. apply (introF eqP) in Hnxx'. rewrite Hnxx' //. Qed. Lemma α_equivalent_update' : forall R t u x y, x ∉ FV t -> y ∉ FV u -> t ≡_α^R u -> t ≡_α^(R⦅x,y⦆) u. Proof. introv Hnxt Hnyu Hα. apply α_equivalent'_with_behaviorally_identical_maps' with (R := R); auto. intros x' y' HRx' Hx't Hy'u. rewrite /fmap_to_Prop updateE. destruct (x' =P x); subst. { rewrite Hx't // in Hnxt. } rewrite HRx'. destruct (y =P y'); subst; auto. rewrite Hy'u // in Hnyu. Qed. Lemma FV_lift_substitution' : forall Fresh f t, Fresh_correct Fresh -> t ∈ Tm (domm f) -> FV (`⦇f⦈ Fresh t) = ⋃_(u ∈ pimfset (getm f) (FV t)) (FV u). Proof. introv HFresh Hft. apply (rwP fsubsetP) in Hft. apply eq_fset. intros x. rewrite in_bigcup. apply Bool.eq_iff_eq_true. split; introv H. - apply (rwP hasP). gen f. induction t; intros; simpl in *. + rewrite in_fsetD in_fset1 in H. apply (rwP andP) in H as [HnxFresh Hℛfx]. apply IHt in Hℛfx as [y Hℛfy Hxy]. * apply (rwP pimfsetP) in Hℛfy as [k Hkt Hf'k]. rewrite setmE in Hf'k. destruct (k =P s); subst. { inverts Hf'k. rewrite in_fset1 in Hxy. rewrite Hxy // in HnxFresh. } exists y; auto. apply (rwP pimfsetP). exists k; auto. apply (introF eqP) in n. rewrite in_fsetD in_fset1 n Hkt //. * intros y Hyt. rewrite domm_update_substitution in_fsetU in_fset1 orbC. destruct (y =P s); subst; auto. apply (introF eqP) in n. apply Hft. rewrite in_fsetD in_fset1 n Hyt //. + rewrite in_fsetU in H. 
apply (rwP orP) in H as [Hf'x|Hf'x]. * apply IHt1 in Hf'x as [k Hf'k Hxk]; cycle 1. { intros k Hkt1. apply Hft. rewrite in_fsetU Hkt1 //. } apply (rwP pimfsetP) in Hf'k as [y Hyt1 Hfy]. exists k; auto. apply (rwP pimfsetP). exists y; auto. rewrite in_fsetU Hyt1 //. * apply IHt2 in Hf'x as [k Hf'k Hxk]; cycle 1. { intros k Hkt2. apply Hft. rewrite in_fsetU Hkt2 orbT //. } apply (rwP pimfsetP) in Hf'k as [y Hyt2 Hfy]. exists k; auto. apply (rwP pimfsetP). exists y; auto. rewrite in_fsetU Hyt2 orbT //. + assert (s ∈ fset1 s) as Hss. { rewrite in_fset1 eq_refl //. } apply Hft, (rwP dommP) in Hss as [v Hfs]. exists v. * apply (rwP (@pimfsetP _ _ (getm f) (fset1 s) v)). exists s; auto. rewrite in_fset1 eq_refl //. * rewrite Hfs // in H. - apply (rwP hasP) in H as [t' Hft' Hxt']. apply (rwP pimfsetP) in Hft' as [x' Hx't Hfx']. gen f. induction t; introv Hftns Hfx'; simpl in *. + rewrite in_fsetD in_fset1 in Hx't. apply (rwP andP) in Hx't as [Hnx's Hx't]. rewrite in_fsetD in_fset1. assert (x ∈ codomm_Tm_set f) as Hℛfx. { apply (rwP codomm_Tm_setP). exists t'. split; auto. apply (rwP codommP). eauto. } pose proof HFresh (codomm_Tm_set f) as HFreshℛf. destruct (x =P Fresh0 (codomm_Tm_set f)); subst. { rewrite Hℛfx // in HFreshℛf. } apply IHt; auto. * intros y Hyt. rewrite domm_set in_fsetU in_fset1. destruct (y =P s); subst; auto. apply (introF eqP) in n0. apply Hftns. rewrite in_fsetD in_fset1 n0 Hyt //. * apply negbTE in Hnx's. rewrite setmE Hnx's //. + rewrite in_fsetU. rewrite in_fsetU in Hx't. apply (rwP orP) in Hx't as [Hx't1|Hx't2]. * eapply IHt1 in Hx't1; eauto. -- rewrite Hx't1 //. -- intros y Hyt1. apply Hftns. rewrite in_fsetU Hyt1 //. * eapply IHt2 in Hx't2; eauto. -- rewrite Hx't2 orbT //. -- intros y Hyt2. apply Hftns. rewrite in_fsetU Hyt2 orbT //. + rewrite in_fset1 in Hx't. apply (rwP eqP) in Hx't. subst. rewrite Hfx' //. Qed. Lemma FV_lift_substitution : forall f t, t ∈ Tm (domm f) -> FV (⦇f⦈ t) = ⋃_(u ∈ pimfset (getm f) (FV t)) (FV u). Proof. introv Htf. apply FV_lift_substitution'; auto. apply HFresh. Qed. (** Page 4: "⦇f⦈ ∈ Tm(X) ⟶ Tm(Y)." *) Lemma lift_substitution_type : forall f t, t ∈ Tm (domm f) -> ⦇f⦈ t ∈ Tm (codomm_Tm_set f). Proof. introv Hft. rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hf'x. rewrite FV_lift_substitution // in_bigcup in Hf'x. apply (rwP hasP) in Hf'x as [t' Hf't' Hxt']. apply (rwP pimfsetP) in Hf't' as [x' Hx't Hfx']. apply (rwP codomm_Tm_setP). exists t'. split; auto. apply (rwP codommP). eauto. Qed. #[program] Lemma lift_substitution'_indistinguishable_substitutions' : forall Fresh R f g t, Fresh_correct Fresh -> t ∈ Tm (domm f ∩ domm g) -> (forall x, x ∈ FV t -> getm f x `≡_α^R getm g x) -> `⦇f⦈ Fresh t ≡_α^R `⦇g⦈ Fresh t. Proof. introv HFresh Hfgt Hα. apply (rwP fsubsetP) in Hfgt. gen R f g. induction t; intros. - apply IHt; simpl; introv Hxt. + rewrite in_fsetI !domm_set !in_fsetU !in_fset1. destruct (x =P s); subst; auto. apply (introF eqP) in n. assert (x ∈ FV t :\ s) as Hxtns. { rewrite in_fsetD in_fset1 n Hxt //. } apply Hfgt in Hxtns. rewrite /= in_fsetI in Hxtns. apply (rwP andP) in Hxtns as [Hfx Hgx]. rewrite Hfx Hgx //. + rewrite !setmE. destruct (x =P s); subst. { apply (rwP getmP). rewrite /= updateE eq_refl //. } apply (introF eqP) in n. assert (x ∈ FV t :\ s) as Hxtns. { rewrite in_fsetD in_fset1 n Hxt //. } pose proof Hxtns as H'xtns. apply Hα in H'xtns. apply Hfgt in Hxtns. rewrite in_fsetI in Hxtns. apply (rwP andP) in Hxtns as [Hfx Hgx]. apply (rwP dommP) in Hfx as [t__f Hfx], Hgx as [t__g Hgx]. 
apply α_equivalent_update'; eauto; apply negbT, Bool.not_true_iff_false; introv HFreshℛf; rewrite ?Hfx ?Hgx /= in HFreshℛf. * pose proof HFresh (codomm_Tm_set f) as HFreshf. rewrite <- (rwP codomm_Tm_setPn) in HFreshf. apply (HFreshf t__f). split; auto. apply (rwP codommP). eauto. * pose proof HFresh (codomm_Tm_set g) as HFreshg. rewrite <- (rwP codomm_Tm_setPn) in HFreshg. apply (HFreshg t__g). split; auto. apply (rwP codommP). eauto. - simpl. rewrite <- (rwP andP). split; (apply IHt1 || apply IHt2); introv Hxt; (apply Hfgt || apply Hα); rewrite /= in_fsetU Hxt ?orbT //. - apply Hα. rewrite /= in_fset1 eq_refl //. Qed. #[program] Lemma lift_substitution'_indistinguishable_substitutions : forall Fresh f g t, Fresh_correct Fresh -> t ∈ Tm (domm f ∩ domm g) -> (forall x, x ∈ FV t -> getm f x `≡_α getm g x) -> `⦇f⦈ Fresh t ≡_α `⦇g⦈ Fresh t. Proof. introv HFresh Hfgt Hα. apply lift_substitution'_indistinguishable_substitutions'; auto. introv Hxt. apply (rwP fsubsetP) in Hfgt. pose proof Hxt as H'xt. pose proof Hxt as H''xt. apply Hα in Hxt. apply Hfgt in H'xt. rewrite /= in_fsetI in H'xt. apply (rwP andP) in H'xt as [Hfx Hgx]. apply (rwP dommP) in Hfx as [t__f Hfx]. eapply α_equivalent'_supermap; cycle 1. { apply Hxt. } introv Hf'k. rewrite /fmap_to_Prop identityE Hfx in Hf'k. inverts Hf'k as Hf'k. destruct (k ∈ FV t__f) eqn:Hkt__f; inverts Hf'k. rewrite /fmap_to_Prop identityE. cut (v ∈ FV (`⦇f⦈ Fresh0 t) : Prop). { introv Hf'v. rewrite Hf'v //. } rewrite FV_lift_substitution' //; cycle 1. { rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros x' Hx't. apply Hfgt in Hx't. rewrite /= in_fsetI in Hx't. apply (rwP andP) in Hx't as [Hfx' Hgx']. auto. } rewrite in_bigcup. apply (rwP hasP). exists t__f; auto. apply (rwP pimfsetP). eauto. Qed. #[program] Lemma lift_substitution_indistinguishable_substitutions : forall f g t, t ∈ Tm (domm f ∩ domm g) -> (forall x, x ∈ FV t -> getm f x `≡_α getm g x) -> ⦇f⦈ t ≡_α ⦇g⦈ t. Proof. introv Hfgt Hα. apply lift_substitution'_indistinguishable_substitutions; auto. apply HFresh. Qed. (** Page 7: "We have to show ⦇f[[z0 = z1]]⦈ ∘ g[[x = z0]](v) ≡α (⦇f⦈ ∘ g)[[x = z1]](v)." *) #[program] Lemma lift_update_substitution'_compose_substitution_update : forall Fresh f g x z0 z1, Fresh_correct Fresh -> codomm_Tm_set g ⊆ domm f -> z1 ∉ codomm_Tm_set f -> z0 ∉ codomm_Tm_set g -> forall v, v ∈ (domm g ∪ {x}) -> getm (`⦇f[z0,variable z1]⦈ Fresh ∘ g[x,variable z0]) v `≡_α getm ((`⦇f⦈ Fresh ∘ g)[x,variable z1]) v. Proof. introv HFresh Hℛgf Hnℛfz1 Hnℛgz0 Hg'v. apply (rwP fsubsetP) in Hℛgf. rewrite !setmE !mapmE /= !setmE. rewrite in_fsetU in_fset1 in Hg'v. apply (rwP orP) in Hg'v as [Hgv|Hvx]; cycle 1. { rewrite Hvx /= setmE eq_refl. reflexivity. } destruct (v =P x); subst. { rewrite /= setmE eq_refl. reflexivity. } apply (rwP dommP) in Hgv as [t Hgv]. rewrite Hgv /=. apply lift_substitution'_indistinguishable_substitutions; auto. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros x' Hx't. rewrite domm_set in_fsetI in_fsetU in_fset1 orbC. destruct (x' ∈ domm f) eqn:Hfx'; auto. assert (x' ∈ codomm_Tm_set g) as Hℛgx'. { apply (rwP codomm_Tm_setP). exists t. split; auto. apply (rwP codommP). eauto. } apply Hℛgf in Hℛgx'. rewrite /= Hfx' // in Hℛgx'. - intros x' Hx't. rewrite setmE. destruct (x' =P z0); subst. + assert (z0 ∈ codomm_Tm_set g) as Hℛgz0. { apply (rwP codomm_Tm_setP). exists t. split; auto. apply (rwP codommP). eauto. } rewrite Hℛgz0 // in Hnℛgz0. + reflexivity. Qed. 
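(** Our gloss (not a quote from the paper): [lift_substitution'_indistinguishable_substitutions] states that ⦇f⦈ t and ⦇g⦈ t are α-equivalent as soon as f and g send every free variable of t to α-equivalent terms, so a lifted substitution observes its argument only through FV t. [lift_update_substitution'_compose_substitution_update] just above is the commutation step quoted from page 7; it is applied in the proof of Proposition 6.3 ([monad_substitution'3]) below. *)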
#[program] Lemma lift_update_substitution_compose_substitution_update : forall f g x z0 z1, codomm_Tm_set g ⊆ domm f -> z1 ∉ codomm_Tm_set f -> z0 ∉ codomm_Tm_set g -> forall v, v ∈ (domm g ∪ {x}) -> getm (⦇f[z0,variable z1]⦈ ∘ g[x,variable z0]) v `≡_α getm ((⦇f⦈ ∘ g)[x,variable z1]) v. Proof. introv Hℛgf Hnℛfz1 Hnℛgz0 Hg'v. apply lift_update_substitution'_compose_substitution_update; auto. apply HFresh. Qed. Lemma FV_lift_substitution'_η : forall Fresh X t, Fresh_correct Fresh -> t ∈ Tm X -> FV (`⦇η__ X⦈ Fresh t) = FV t. Proof. introv HFresh HtX. apply (rwP fsubsetP) in HtX. rewrite FV_lift_substitution'; auto; cycle 1. { rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hxt. rewrite domm_map domm_mkfmapf in_fset. apply HtX. auto. } apply eq_fset. intros x. rewrite in_bigcup. apply Bool.eq_iff_eq_true. split; introv Hxt. - apply (rwP hasP) in Hxt as [x' Hx't Hxx']. apply (rwP pimfsetP) in Hx't as [y Hyt Hηy]. rewrite mapmE identityE in Hηy. destruct (y ∈ X) eqn:HyX; inverts Hηy. rewrite in_fset1 in Hxx'. apply (rwP eqP) in Hxx'. subst. auto. - apply (rwP hasP). exists (variable x). + apply (rwP pimfsetP). exists x; auto. apply HtX in Hxt. rewrite ηE Hxt //. + rewrite in_fset1 eq_refl //. Qed. (** Page 6: Proposition 6.1. *) Proposition monad_substitution'1 : forall Fresh X t, Fresh_correct Fresh -> t ∈ Tm X -> `⦇η__ X⦈ Fresh t ≡_α t. Proof. introv HFresh HtX. apply (rwP fsubsetP) in HtX. transitivity (`⦇η__(FV t)⦈ Fresh0 t). { apply lift_substitution'_indistinguishable_substitutions; auto. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hxt. rewrite in_fsetI !domm_η Hxt andbT. auto. - introv Hxt. rewrite !ηE Hxt. apply HtX in Hxt. rewrite Hxt. reflexivity. } rewrite /α_equivalent -converse_identity. rewrite FV_lift_substitution'_η //. apply lemma7'; auto. - apply partial_bijection_identity. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hxt. rewrite domm_mkfmapf in_fset Hxt //. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hxt. auto. Qed. Proposition monad_substitution1 : forall X t, t ∈ Tm X -> ⦇η__ X⦈ t ≡_α t. Proof. introv HtX. apply monad_substitution'1; auto. apply HFresh. Qed. (** Page 6: Proposition 6.2. *) #[program] Proposition monad_substitution2 : forall f x, x ∈ domm f -> getm (⦇f⦈ ∘ η__(domm f)) x `≡_α getm f x. Proof. introv Hfx. rewrite !mapmE identityE Hfx. reflexivity. Qed. #[program] Lemma codomm_Tm_set_mapm_lift_substitution' : forall Fresh f g, Fresh_correct Fresh -> codomm_Tm_set g ⊆ domm f -> codomm_Tm_set (mapm (`⦇f⦈ Fresh) g) = ⋃_(x ∈ codomm_Tm_set g) (FV (odflt (variable _) (getm f x))). Proof. introv HFresh Hfℛg. apply (rwP fsubsetP) in Hfℛg. apply eq_fset. intros x. rewrite in_bigcup. apply Bool.eq_iff_eq_true. split; introv Hfgx. - apply (rwP codomm_Tm_setP) in Hfgx as (t & Hxt & Hfgt). apply (rwP codommP) in Hfgt as [t' Hfgt']. rewrite mapmE in Hfgt'. destruct (getm g t') eqn:Hgt'; inverts Hfgt'. rewrite FV_lift_substitution' in Hxt; auto; cycle 1. { rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros x' Hx't0. apply Hfℛg, (rwP codomm_Tm_setP). exists t0. split; auto. apply (rwP codommP). eauto. } rewrite in_bigcup in Hxt. apply (rwP hasP) in Hxt as [x' Hfx' Hxx']. apply (rwP pimfsetP) in Hfx' as [y Hyt0 Hfy]. apply (rwP hasP). exists y. { apply (rwP codomm_Tm_setP). exists t0. split; auto. apply (rwP codommP). eauto. } rewrite Hfy //. - apply (rwP hasP) in Hfgx as [x' Hℛgx' Hfx]. pose proof Hℛgx' as H'ℛgx'. apply Hfℛg, (rwP dommP) in H'ℛgx' as [t Hfx']. rewrite Hfx' /= in Hfx. 
apply (rwP codomm_Tm_setP) in Hℛgx' as (t' & Hx't' & Hg't'). apply (rwP codommP) in Hg't' as [y Hg'y]. apply (rwP codomm_Tm_setP). exists (`⦇f⦈ Fresh0 t'). split. { rewrite FV_lift_substitution'; auto; cycle 1. { rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros y' Hy't'. apply Hfℛg, (rwP codomm_Tm_setP). exists t'. split; auto. apply (rwP codommP). eauto. } rewrite /= in_bigcup. apply (rwP hasP). exists t; auto. apply (rwP pimfsetP). exists x'; auto. } apply (rwP codommP). exists y. rewrite mapmE Hg'y //. Qed. #[program] Lemma codomm_Tm_set_mapm_lift_substitution : forall f g, codomm_Tm_set g ⊆ domm f -> codomm_Tm_set (mapm ⦇f⦈ g) = ⋃_(x ∈ codomm_Tm_set g) (FV (odflt (variable _) (getm f x))). Proof. introv Hfℛg. apply codomm_Tm_set_mapm_lift_substitution'; auto. apply HFresh. Qed. (** Page 6: Proposition 6.3. *) Proposition monad_substitution'3 : forall Fresh f g t, Fresh_correct Fresh -> codomm_Tm_set g ⊆ domm f -> t ∈ Tm (domm g) -> (`⦇f⦈ Fresh ∘ `⦇g⦈ Fresh) t ≡_α `⦇`⦇f⦈ Fresh ∘ g⦈ Fresh t. Proof. introv HFresh Hfℛg Hgt. apply (rwP fsubsetP) in Hfℛg, Hgt. gen f g. induction t; intros. - set (z0 := Fresh0 (codomm_Tm_set g)). set (z1 := Fresh0 (codomm_Tm_set f)). set (X := FV (`⦇f[z0,variable z1]⦈ Fresh0 (`⦇g[s,variable z0]⦈ Fresh0 t))). assert (forall k v : 𝒱, getm (1__X) k = Some v -> getm (1__(X :\ z1 ∪ {z1})) k = Some v) as H. { introv Hkv. rewrite identityE in Hkv. rewrite identityE in_fsetU in_fsetD !in_fset1 orbC. destruct (k =P z1); subst; auto. destruct (z1 ∈ X) eqn:Hz1X; inverts Hkv. auto. } transitivity (`⦇f⦈ Fresh0 (abstraction z0 (`⦇g[s,variable z0]⦈ Fresh0 t))). { rewrite /α_equivalent/= update_identity -/z0 -/z1 -/X. apply α_equivalent'_supermap with (R__sub := 1__X); auto. apply α_equivalent'_identity. rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hfgx. auto. } transitivity (abstraction z1 ((`⦇f[z0,variable z1]⦈ Fresh0 ∘ `⦇g[s,variable z0]⦈ Fresh0) t)). { rewrite /α_equivalent /= update_identity -/z0 -/z1 -/X. apply α_equivalent'_supermap with (R__sub := 1__X); auto. apply α_equivalent'_identity. rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hfgx. auto. } assert (`⦇f[z0,variable z1]⦈ Fresh0 (`⦇g[s,variable z0]⦈ Fresh0 t) ≡_α `⦇`⦇f[z0,variable z1]⦈ Fresh0 ∘ g[s,variable z0]⦈ Fresh0 t) as H'. { apply IHt; introv Hg'x; rewrite domm_set in_fsetU in_fset1. - destruct (x =P z0); subst; auto. apply (rwP codomm_Tm_setP) in Hg'x as (t' & Hxt' & Hg't'). apply (rwP codommP) in Hg't' as [x' Hg't']. rewrite setmE in Hg't'. destruct (x' =P s); subst. { inverts Hg't'. rewrite in_fset1 in Hxt'. apply (rwP eqP) in Hxt'. subst. contradiction. } apply Hfℛg, (rwP codomm_Tm_setP). exists t'. split; auto. apply (rwP codommP). eauto. - destruct (x =P s); subst; auto. apply (introF eqP) in n. apply Hgt. rewrite /= in_fsetD in_fset1 n Hg'x //. } transitivity (abstraction z1 (`⦇`⦇f[z0,variable z1]⦈ Fresh0 ∘ g[s,variable z0]⦈ Fresh0 t)). { rewrite /α_equivalent /= update_identity -/z0 -/z1 -/X. apply α_equivalent'_supermap with (R__sub := 1__X); auto. } transitivity (abstraction z1 (`⦇(`⦇f⦈ Fresh0 ∘ g)[s,variable z1]⦈ Fresh0 t)). { apply FV_respects_α_equivalence in H'. rewrite /α_equivalent /= update_identity -/z0 -/z1 H' -/X. apply α_equivalent'_supermap with (R__sub := 1__X); auto. rewrite /X -H'. apply lift_substitution'_indistinguishable_substitutions; auto. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hxt. rewrite in_fsetI domm_set !domm_map domm_set !in_fsetU in_fset1 Bool.andb_diag. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply Hgt. 
rewrite /= in_fsetD in_fset1 n Hxt //. - introv Hxt. apply lift_update_substitution'_compose_substitution_update; auto; try apply HFresh. + apply (rwP fsubsetP). auto. + rewrite in_fsetU in_fset1 orbC. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply Hgt. rewrite /= in_fsetD in_fset1 n Hxt //. } rewrite /α_equivalent /=. apply substitution'_preserves_α_congruence' with (R := 1__(FV t)); auto. { rewrite !domm_set !domm_map. apply (rwP is_subset_ofP). introv Hxy. split; rewrite /= !in_fsetU !in_fset1; rewrite /fmap_to_Prop identityE in Hxy; destruct (x ∈ FV t) eqn:Hxt; inverts Hxy; destruct (y =P s); subst; auto; apply (introF eqP) in n; apply Hgt; rewrite /= in_fsetD in_fset1 n Hxt //. } { apply partial_bijection_identity. } { apply partial_bijection_update, partial_bijection_identity. } { introv Hxx'. rewrite /fmap_to_Prop identityE in Hxx'. destruct (x ∈ FV t) eqn:Hxt; inverts Hxx'. rewrite !setmE !mapmE. destruct (x' =P s); subst. { apply (rwP getmP). rewrite /= updateE eq_refl //. } apply (introF eqP) in n. assert (x' ∈ FV t :\ s) as Hx'tns. { rewrite in_fsetD in_fset1 n Hxt //. } apply Hgt, (rwP dommP) in Hx'tns as [t' Hgx']. rewrite Hgx' /=. assert {subset FV t' <= codomm_Tm_set g} as Ht'ℛg. { introv Hxt'. apply (rwP codomm_Tm_setP). exists t'. split; auto. apply (rwP codommP). eauto. } assert {subset FV (`⦇f⦈ Fresh0 t') <= codomm_Tm_set (mapm (`⦇f⦈ Fresh0) g)} as Hf'ℛfg. { introv Hf'x. rewrite FV_lift_substitution' in Hf'x; auto; cycle 1. { rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros y Hyt'. apply Hfℛg, Ht'ℛg, Hyt'. } rewrite in_bigcup in Hf'x. apply (rwP hasP) in Hf'x as [u Hf'u Hxu]. apply (rwP pimfsetP) in Hf'u as [y Hyt' Hfy]. rewrite /= codomm_Tm_set_mapm_lift_substitution'; auto; cycle 1. { apply (rwP fsubsetP). intros y' Hℛgy'. auto. } rewrite in_bigcup. apply (rwP hasP). exists y. { apply Ht'ℛg. auto. } rewrite /= Hfy //. } assert {subset FV (`⦇f⦈ Fresh0 t') <= codomm_Tm_set f} as Hfℛf. { introv Hf't'. rewrite FV_lift_substitution' in Hf't'; auto; cycle 1. { rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros y Hyt'. apply Hfℛg, (rwP codomm_Tm_setP). exists t'. split; auto. apply (rwP codommP). eauto. } rewrite in_bigcup in Hf't'. apply (rwP hasP) in Hf't' as [y Hf'y' Hxy]. apply (rwP pimfsetP) in Hf'y' as [y' Hy't' Hfy']. apply (rwP codomm_Tm_setP). exists y. split; auto. apply (rwP codommP). eauto. } assert (Fresh0 (codomm_Tm_set (mapm (`⦇f⦈ Fresh0) g)) ∉ FV (`⦇f⦈ Fresh0 t')) as Hℛfgnf'. { pose proof HFresh (codomm_Tm_set (mapm (`⦇f⦈ Fresh0) g)) as HnFreshℛfg. apply negbT, Bool.not_true_iff_false. introv HFreshℛfg. apply Hf'ℛfg in HFreshℛfg. rewrite HFreshℛfg // in HnFreshℛfg. } assert (z1 ∉ FV (`⦇f⦈ Fresh0 t')) as Hz1nf'. { subst z1. pose proof HFresh (codomm_Tm_set f) as HnFreshℛf. apply negbT, Bool.not_true_iff_false. introv HFreshℛf. apply Hfℛf in HFreshℛf. rewrite HFreshℛf // in HnFreshℛf. } apply α_equivalent_update'; auto. apply α_equivalent'_supermap with (R__sub := 1__(FV (`⦇f⦈ Fresh0 t'))); cycle 1. { apply α_equivalent_reflexive. } introv Hf'k. rewrite /fmap_to_Prop identityE in Hf'k. rewrite /fmap_to_Prop identityE in_fsetD in_fset1. destruct (k ∈ FV (`⦇f⦈ Fresh0 t')) eqn:H'f'k; inverts Hf'k. destruct (v =P z1); subst. { rewrite H'f'k // in Hz1nf'. } rewrite FV_lift_substitution'; auto; cycle 1. { rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv H'xt. rewrite domm_set domm_map in_fsetU in_fset1. destruct (x =P s); subst; auto. apply (introF eqP) in n1. apply Hgt. rewrite in_fsetD in_fset1 n1 H'xt //. } rewrite in_bigcup /=. 
cut (has (fun i => v ∈ FV i) (pimfset (getm ((mapm (`⦇f⦈ Fresh0) g)[s,variable z1])) (FV t)) : Prop). { introv Hfg'v. rewrite Hfg'v //. } apply (rwP hasP). exists (`⦇f⦈ Fresh0 t'); auto. rewrite <- (rwP (@pimfsetP _ _ (getm ((mapm (`⦇f⦈ Fresh0) g)[s,variable z1])) (FV t) (`⦇f⦈ Fresh0 t'))). exists x'; auto. rewrite setmE mapmE n Hgx' //. } apply α_equivalent'_identity. rewrite /Tm /in_mem /=. apply (rwP fsubsetP). introv Hxt. auto. - rewrite /α_equivalent /=. rewrite <- (rwP andP). split. + apply α_equivalent'_supermap with (R__sub := 1__(FV (`⦇f⦈ Fresh0 (`⦇g⦈ Fresh0 t1)))); cycle 1. { apply IHt1; auto. introv Hxt1. apply Hgt. rewrite in_fsetU Hxt1 //. } introv Hfg'k. rewrite /fmap_to_Prop identityE in Hfg'k. destruct (k ∈ FV (`⦇f⦈ Fresh0 (`⦇g⦈ Fresh0 t1))) eqn:H'fg'k; inverts Hfg'k. rewrite /fmap_to_Prop identityE in_fsetU H'fg'k //. + apply α_equivalent'_supermap with (R__sub := 1__(FV (`⦇f⦈ Fresh0 (`⦇g⦈ Fresh0 t2)))); cycle 1. { apply IHt2; auto. introv Hxt2. apply Hgt. rewrite in_fsetU Hxt2 orbT //. } introv Hfg'k. rewrite /fmap_to_Prop identityE in Hfg'k. destruct (k ∈ FV (`⦇f⦈ Fresh0 (`⦇g⦈ Fresh0 t2))) eqn:H'fg'k; inverts Hfg'k. rewrite /fmap_to_Prop identityE in_fsetU H'fg'k orbT //. - assert (s ∈ fset1 s) as Hss. { rewrite in_fset1 //. } apply Hgt, (rwP dommP) in Hss as [x Hgs]. rewrite /= mapmE Hgs. reflexivity. Qed. Proposition monad_substitution3 : forall f g t, codomm_Tm_set g ⊆ domm f -> t ∈ Tm (domm g) -> (⦇f⦈ ∘ ⦇g⦈) t ≡_α ⦇⦇f⦈ ∘ g⦈ t. Proof. introv Hfℛg Hgt. apply monad_substitution'3; auto. apply HFresh. Qed. Notation "t '`[' x '=' u ']' Fresh" := (`⦇(1__(FV t))[x,u]⦈ Fresh t) (at level 10, x at next level, u at next level, format "t `[ x '=' u ] Fresh"). Notation "t '[' x '=' u ']'" := (t`[x=u]Fresh) (at level 10, x at next level, u at next level, format "t [ x '=' u ]"). #[local] Notation "t '`[' x '⟵' u ']' Fresh" := (t`[x=u]Fresh) (at level 10, x at next level, u at next level, format "t `[ x '⟵' u ] Fresh"). #[local] Notation "t '[' x '⟵' u ']'" := (t[x=u]) (at level 10, x at next level, u at next level, format "t [ x '⟵' u ]"). (** Page 5: "To show that substitution is well behaved, i.e. laws such as...." *) Lemma substitution'_law1 : forall Fresh t u x, Fresh_correct Fresh -> x ∉ FV t -> t`[x⟵u] Fresh ≡_α t. Proof. introv HFresh Hnxt. transitivity (`⦇η__(FV t)⦈ Fresh0 t). - apply lift_substitution'_indistinguishable_substitutions; auto. + rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros y Hyt. rewrite !domm_map !domm_set !domm_map !domm_mkfmapf in_fsetI in_fsetU !in_fset Hyt orbT //. + intros y Hyt. rewrite setmE mapmE. destruct (y =P x); subst. { rewrite Hyt // in Hnxt. } reflexivity. - apply monad_substitution'1; auto. rewrite /Tm /in_mem /= fsubsetxx //. Qed. Lemma substitution_law1 : forall t u x, x ∉ FV t -> t`[x⟵u] Fresh ≡_α t. Proof. introv Hnxt. apply substitution'_law1; auto. apply HFresh. Qed. Lemma codomm_update_substitution : forall f x t, codomm_Tm_set (f[x,t]) = codomm_Tm_set (remm f x) ∪ FV t. Proof. introv. apply eq_fset. intros k. rewrite in_fsetU. apply Bool.eq_iff_eq_true. split; introv Hf'k. - apply (rwP codomm_Tm_setP) in Hf'k as (t' & Hkt' & Hℛf't'). apply (rwP codommP) in Hℛf't' as [k' Hf'k']. rewrite setmE in Hf'k'. destruct (k' =P x); subst. { inverts Hf'k'. rewrite Hkt' orbT //. } apply (rwP orP). left. apply (rwP codomm_Tm_setP). exists t'. split; auto. apply (rwP codommP). exists k'. apply (introF eqP) in n. rewrite remmE n Hf'k' //. - apply (rwP codomm_Tm_setP). apply (rwP orP) in Hf'k as [Hℛf'k|Hkt]. 
+ apply (rwP codomm_Tm_setP) in Hℛf'k as (t' & Hkt' & Hℛf't'). apply (rwP codommP) in Hℛf't' as [k' Hℛf't']. rewrite remmE in Hℛf't'. destruct (k' =P x); subst. { inverts Hℛf't'. } exists t'. split; auto. apply (rwP codommP). exists k'. apply (introF eqP) in n. rewrite setmE n Hℛf't' //. + exists t. split; auto. apply (rwP codommP). exists x. rewrite setmE eq_refl //. Qed. Lemma domm_identity' : forall X, domm (1__X : {fmap 𝒱 → term}) = X. Proof. introv. rewrite domm_map domm_identity //. Qed. Lemma codomm_identity' : forall X, codomm (1__X : {fmap 𝒱 → term}) = variable @: X. Proof. introv. apply eq_fset. intros x. apply Bool.eq_iff_eq_true. split; introv HxX. - apply (rwP codommP) in HxX as [v HxX]. rewrite ηE in HxX. apply (rwP imfsetP). destruct (v ∈ X) eqn:HvX; inverts HxX. eauto. - apply (rwP imfsetP) in HxX as [y HyX Hxy]. subst. apply (rwP codommP). exists y. rewrite ηE HyX //. Qed. Lemma FV_after_substitute' : forall Fresh t u x, Fresh_correct Fresh -> x ∈ FV t -> FV (t`[x⟵u]Fresh) = (FV t :\ x) ∪ FV u. Proof. introv HFresh Hxt. replace (FV t :\ x) with (codomm_Tm_set (remm (1__(FV t)) x)); cycle 1. { apply eq_fset. intros k. rewrite in_fsetD in_fset1. destruct (k =P x); subst. - apply negbTE, (rwP codomm_Tm_setPn). intros t' [Hxt' Htt']. apply (rwP codommP) in Htt' as [x' Hxx']. rewrite remmE mapmE identityE in Hxx'. destruct (x' =P x); subst. { inverts Hxx'. } destruct (x' ∈ FV t) eqn:Hx't; inverts Hxx'. rewrite in_fset1 in Hxt'. apply (rwP eqP) in Hxt'. subst. auto. - destruct (k ∈ FV t) eqn:Hkt. + apply (rwP codomm_Tm_setP). exists (variable k). split. * rewrite in_fset1 eq_refl //. * apply (rwP codommP). exists k. apply (introF eqP) in n. rewrite remmE ηE n Hkt //. + apply negbTE, (rwP codomm_Tm_setPn). intros t' [Hkt' Htt']. apply (rwP codommP) in Htt' as [x' Hxx']. rewrite remmE ηE in Hxx'. destruct (x' =P x); subst. { inverts Hxx'. } destruct (x' ∈ FV t) eqn:Hx't; inverts Hxx'. rewrite in_fset1 in Hkt'. apply (rwP eqP) in Hkt'. subst. rewrite Hx't // in Hkt. } rewrite FV_lift_substitution'; auto. - apply eq_fset. intros k. apply Bool.eq_iff_eq_true. split; introv Hk. + rewrite in_fsetU. rewrite in_bigcup in Hk. apply (rwP hasP) in Hk as [t' Htt' Hkt']. apply (rwP pimfsetP) in Htt' as [y Hyt Hty]. rewrite setmE ηE in Hty. destruct (y =P x); subst. { inverts Hty. simpl in *. rewrite Hkt' orbT //. } rewrite Hyt in Hty. inverts Hty. rewrite in_fset1 in Hkt'. apply (rwP eqP) in Hkt'. subst. apply (rwP orP). left. apply (rwP codomm_Tm_setP). exists (variable y). split. * rewrite /= in_fset1 eq_refl //. * apply (rwP codommP). exists y. apply (introF eqP) in n. rewrite remmE ηE n Hyt //. + rewrite in_bigcup. apply (rwP hasP). rewrite in_fsetU in Hk. apply (rwP orP) in Hk as [Hkt|Hku]. * apply (rwP codomm_Tm_setP) in Hkt as (t' & Hkt' & Htt'). apply (rwP codommP) in Htt' as [y Hxy]. rewrite remmE ηE in Hxy. destruct (y =P x); subst. { inverts Hxy. } destruct (y ∈ FV t) eqn:Hyt; inverts Hxy. rewrite in_fset1 in Hkt'. apply (rwP eqP) in Hkt'. subst. exists (variable y). -- apply (rwP pimfsetP). exists y; auto. apply (introF eqP) in n. rewrite setmE ηE n Hyt //. -- rewrite /= in_fset1 eq_refl //. * exists u; auto. apply (rwP pimfsetP). exists x; auto. rewrite setmE ηE eq_refl //. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros y Hyt. rewrite domm_set domm_map domm_mkfmapf in_fsetU in_fset Hyt orbT //. Qed. Lemma FV_after_substitute : forall t u x, x ∈ FV t -> FV (t[x⟵u]) = (FV t :\ x) ∪ FV u. Proof. introv Hxt. apply FV_after_substitute'; auto. apply HFresh. Qed. 
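(** A concrete instance (ours) of [FV_after_substitute]: substituting at a bare variable leaves exactly the free variables of the replacement term, since FV (variable x) :\ x = ∅. *)
Example FV_after_substitute_variableE : forall u x,
  FV ((variable x)[x⟵u]) = (FV (variable x) :\ x) ∪ FV u.
Proof. intros. apply FV_after_substitute. rewrite /= in_fset1 eq_refl //. Qed.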
Lemma FV_noop_substitute' : forall Fresh t u x, Fresh_correct Fresh -> x ∉ FV t -> FV (t`[x⟵u]Fresh) = FV t. Proof. introv HFresh Hnxt. apply FV_respects_α_equivalence. symmetry. apply substitution'_law1; auto. Qed. Lemma FV_noop_substitute : forall t u x, x ∉ FV t -> FV (t[x⟵u]) = FV t. Proof. introv Hnxt. apply FV_noop_substitute'; auto. apply HFresh. Qed. Lemma domm_update_identity : forall t u x, domm ((1__(FV t))[x, u]) = FV t ∪ {x}. Proof. introv. rewrite domm_update_substitution domm_map domm_mkfmapf fsvalK //. Qed. Lemma codomm_Tm_set_update_identity : forall X u x, codomm_Tm_set ((1__X)[x, u]) = (X :\ x) ∪ FV u. Proof. introv. rewrite codomm_update_substitution. repeat f_equal. apply eq_fset. intros k. rewrite in_fsetD in_fset1. apply Bool.eq_iff_eq_true. split; introv Hxk. + apply (rwP codomm_Tm_setP) in Hxk as (y & Hky & Hxy). apply (rwP codommP) in Hxy as [v Hxy]. rewrite remmE ηE in Hxy. destruct (v =P x); subst. { inverts Hxy. } destruct (v ∈ X) eqn:HvX; inverts Hxy. rewrite in_fset1 in Hky. apply (rwP eqP) in Hky. subst. apply (introF eqP) in n. rewrite n HvX //. + apply (rwP andP) in Hxk as [Hknx HkX]. apply (rwP codomm_Tm_setP). exists (variable k). split. * rewrite /= in_fset1 eq_refl //. * apply (rwP codommP). exists k. apply negbTE in Hknx. rewrite remmE ηE Hknx HkX //. Qed. (** Page 5: "To show that substitution is well behaved, i.e. laws such as...." *) Lemma substitution_law2 : forall t u (v : term) x y, x <> y -> x ∉ FV v -> (* See Exercise 2.2 in http://www.cse.chalmers.se/research/group/logic/TypesSS05/Extra/geuvers.pdf. *) t[x⟵u][y⟵v] ≡_α t[y⟵v][x⟵u[y⟵v]]. Proof. introv Hxny Hxnv. symmetry. transitivity (⦇⦇(1__(FV(⦇(1__(FV t))[y,v]⦈ t)))[x,⦇(1__(FV u))[y,v]⦈ u]⦈ ∘ (1__(FV t))[y,v]⦈ t). { destruct (y ∈ FV t) eqn:Hyt. (* TODO Can we remove the [destruct]s of this form? *) - apply monad_substitution3; try rewrite /Tm /in_mem /=; apply (rwP fsubsetP); intros k Hkt; rewrite domm_set domm_map domm_mkfmapf in_fsetU in_fset. + rewrite FV_after_substitute // in_fsetU in_fsetD !in_fset1. destruct (k =P x); subst; auto. apply (rwP codomm_Tm_setP) in Hkt as (t' & Hkt' & Htt'). apply (rwP codommP) in Htt' as [k' Htk']. rewrite setmE ηE in Htk'. destruct (k' =P y); subst. { inverts Htk'. rewrite Hkt' orbT //. } apply (introF eqP) in n0. destruct (k' ∈ FV t) eqn:Hk't; inverts Htk'. rewrite in_fset1 in Hkt'. apply (rwP eqP) in Hkt'. subst. rewrite n0 Hk't //. + rewrite Hkt orbT //. - transitivity (⦇(1__(FV(⦇(1__(FV t))[y,v]⦈ t)))[x,⦇(1__(FV u))[y,v]⦈ u]⦈ t). { apply lift_substitution_respectsα_equivalence. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. rewrite domm_set domm_map domm_mkfmapf in_fsetU in_fset Hkt orbT //. - apply substitution_law1. rewrite Hyt //. } apply lift_substitution_indistinguishable_substitutions. + rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. rewrite !domm_set !domm_map !domm_set !domm_map !domm_mkfmapf in_fsetI !in_fsetU !in_fset !in_fset1 Hkt orbT andbT. destruct (k =P x); subst; auto. rewrite FV_noop_substitute //. rewrite Hyt //. + intros k Hkt. rewrite !setmE !mapmE !setmE !ηE !identityE Hkt. destruct (k =P x); subst. { apply (introF eqP) in Hxny. rewrite /lift_substitution /= Hxny /= setmE mapmE eq_refl. reflexivity. } apply (introF eqP) in n. rewrite FV_noop_substitute; cycle 1. { rewrite Hyt //. } rewrite Hkt /=. destruct (k =P y); subst. { rewrite /= Hkt // in Hyt. } rewrite /lift_substitution /= setmE ηE n Hkt. reflexivity. } symmetry. transitivity (⦇⦇(1__(FV (⦇(1__(FV t))[x,u]⦈ t)))[y,v]⦈ ∘ (1__(FV t))[x,u]⦈ t). 
{ destruct (x ∈ FV t) eqn:Hxt. - apply monad_substitution3; try rewrite /Tm /in_mem /=; apply (rwP fsubsetP); intros y' Hy't; rewrite domm_set domm_map domm_mkfmapf in_fsetU in_fset1 in_fset. + rewrite FV_after_substitute // in_fsetU in_fsetD !in_fset1. destruct (y' =P y); subst; auto. apply (rwP codomm_Tm_setP) in Hy't as (t' & Hy't' & Htt'). apply (rwP codommP) in Htt' as [k Hkt]. rewrite setmE ηE in Hkt. destruct (k =P x); subst. { inverts Hkt. rewrite Hy't' !orbT //. } apply (introF eqP) in n0. destruct (k ∈ FV t) eqn:H'kt; inverts Hkt. rewrite in_fset1 in Hy't'. apply (rwP eqP) in Hy't'. subst. rewrite n0 H'kt //. + rewrite Hy't orbT //. - transitivity (⦇(1__(FV (⦇(1__(FV t))[x,u]⦈ t)))[y,v]⦈ t). { apply lift_substitution_respectsα_equivalence; cycle 1. { apply substitution_law1. rewrite Hxt //. } rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. rewrite domm_set domm_map domm_mkfmapf in_fsetU in_fset in_fset1 Hkt orbT //. } apply lift_substitution_indistinguishable_substitutions. + rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. rewrite !domm_set !domm_map !domm_set !domm_map !domm_mkfmapf in_fsetI !in_fsetU !in_fset !in_fset1 Hkt orbT andbT FV_noop_substitute; cycle 1. { rewrite Hxt //. } destruct (k =P y); subst; auto. + intros k Hkt. rewrite !mapmE !setmE !ηE Hkt. destruct (k =P y); subst. { apply not_eq_sym, (introF eqP) in Hxny. rewrite /lift_substitution Hxny /= setmE eq_refl. reflexivity. } apply (introF eqP) in n. destruct (k =P x); subst. { rewrite /= Hkt // in Hxt. } rewrite /lift_substitution /= setmE ηE n FV_noop_substitute; cycle 1. { rewrite Hxt //. } rewrite Hkt. reflexivity. } apply lift_substitution_indistinguishable_substitutions. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. rewrite !domm_map !domm_set !domm_map !domm_mkfmapf in_fsetI !in_fsetU !in_fset !in_fset1 Hkt !orbT //. - intros k Hkt. rewrite !mapmE !setmE !ηE Hkt. destruct (k =P x); subst. + apply (introF eqP) in Hxny. rewrite /lift_substitution Hxny /= setmE mapmE identityE eq_refl /=. apply lift_substitution_indistinguishable_substitutions. * rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k H'kt. rewrite domm_set domm_map domm_set domm_map !domm_mkfmapf in_fsetI !in_fsetU !in_fset1 !in_fset H'kt orbT andbT. destruct (k =P y); subst; auto. rewrite /= FV_after_substitute // in_fsetU in_fsetD !in_fset1 H'kt !orbT //. * intros k Hku. rewrite !setmE !ηE Hku. destruct (k =P y); subst. { reflexivity. } rewrite FV_after_substitute // in_fsetU in_fsetD !in_fset1 Hku orbT. reflexivity. + destruct (k =P y); subst. * rewrite /lift_substitution /= setmE mapmE eq_refl FV_after_substitute //. transitivity (⦇1__(FV v)⦈ v). { symmetry. apply monad_substitution1. rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k H'kt. auto. } apply lift_substitution_indistinguishable_substitutions. -- rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k H'kt. rewrite domm_set !domm_map !domm_mkfmapf in_fsetI in_fsetU in_fset in_fset in_fsetU in_fsetD !in_fset1 H'kt !orbT //. -- intros k Hkv. rewrite setmE !ηE in_fsetU in_fsetD in_fset1 Hkv orbT. destruct (k =P x); subst. { rewrite Hkv // in Hxnv. } reflexivity. * apply (introF eqP) in n, n0. rewrite /lift_substitution /= !setmE !ηE n n0. destruct (x ∈ FV t) eqn:Hxt. -- rewrite FV_after_substitute // in_fsetU in_fsetD in_fset1 Hkt andbT n /=. destruct (y ∈ FV t) eqn:Hyt. ++ rewrite FV_after_substitute // in_fsetU in_fsetD in_fset1 Hkt n0. reflexivity. ++ rewrite FV_noop_substitute; cycle 1. { rewrite Hyt //. } rewrite Hkt. reflexivity. 
-- rewrite FV_noop_substitute; cycle 1. { rewrite Hxt //. } rewrite Hkt. destruct (y ∈ FV t) eqn:Hyt. ++ rewrite FV_after_substitute // in_fsetU in_fsetD in_fset1 Hkt andbT n0. reflexivity. ++ rewrite FV_noop_substitute; cycle 1. { rewrite Hyt //. } rewrite Hkt. reflexivity. Qed. (** Page 7: "A monad gives rise to its Kleisli-category...." *) (** TODO Explicitly formalize the resulting Kleisli-category? *) Implicit Types (c d i j n : nat) (ϕ ψ : {fmap 𝒱 → nat}). Definition nat_to_pred n i : bool := i < n. (** Page 7: "Here we identify n ∈ Nat with the set {i ∈ Nat | i < n}." *) Canonical nat_predType := PredType nat_to_pred. Inductive de_Bruijn_term : Type := | de_Bruijn_abstraction : de_Bruijn_term -> de_Bruijn_term | de_Bruijn_application : de_Bruijn_term -> de_Bruijn_term -> de_Bruijn_term | de_Bruijn_variable : nat -> de_Bruijn_term. #[local] Coercion de_Bruijn_variable : nat >-> de_Bruijn_term. Implicit Types (dBt dBu : de_Bruijn_term). Fixpoint de_Bruijn_Tm n dBt : bool := match dBt with | de_Bruijn_abstraction dBt => dBt ∈ de_Bruijn_Tm (S n) | de_Bruijn_application dBt dBu => (dBt ∈ de_Bruijn_Tm n) && (dBu ∈ de_Bruijn_Tm n) | de_Bruijn_variable i => i ∈ n end. #[local] Notation "'Tm^db'" := de_Bruijn_Tm. (** Page 7: "For any n ∈ Nat we define the set Tm^db(n) of de Bruijn terms with at most n free Variables inductively by the following rules:...." *) Section in_de_Bruijn_Tm. Reserved Notation "x '∈' 'Tm^db' n" (at level 40). Inductive in_de_Bruijn_Tm : nat -> de_Bruijn_term -> Prop := | de_Bruijn_Tm_variable : forall n i, i ∈ n -> i ∈ Tm^db n | de_Bruijn_Tm_application : forall n dBt dBu, dBt ∈ Tm^db n -> dBu ∈ Tm^db n -> de_Bruijn_application dBt dBu ∈ Tm^db n | de_Bruijn_Tm_abstraction : forall n dBt, dBt ∈ Tm^db (n + 1) -> de_Bruijn_abstraction dBt ∈ Tm^db n where "t '∈' 'Tm^db' n" := (in_de_Bruijn_Tm n t). End in_de_Bruijn_Tm. Lemma de_Bruijn_TmP : forall n dBt, reflect (in_de_Bruijn_Tm n dBt) (dBt ∈ Tm^db n). Proof. rewrite /in_mem /=. introv. gen n. induction dBt; simpl; intros; rewrite /in_mem /=. - destruct (IHdBt n.+1); repeat constructor. + rewrite addn1 //. + introv HndBt. apply n0. inverts HndBt as HndBt. rewrite addn1 // in HndBt. - destruct (IHdBt1 n); repeat constructor. + destruct (IHdBt2 n); repeat constructor; auto. introv HndBt1t2. apply n0. inverts HndBt1t2. auto. + introv HndBt1t2. inverts HndBt1t2. auto. - rewrite /nat_to_pred. gen n0. induction n; intros; destruct n0; repeat constructor; intros_all; try solve [inverts H; inverts H2]. replace (n.+1 < n0.+1) with (n < n0) by auto. (pose proof (IHn n0) as Hn0); inverts Hn0; repeat constructor; auto. introv HSn0Sn1. inverts HSn0Sn1 as HSn0Sn1. rewrite /in_mem /= /nat_to_pred in HSn0Sn1. replace (n.+1 < n0.+1) with (n < n0) in HSn0Sn1 by auto. rewrite HSn0Sn1 // in H. Qed. Lemma de_Bruijn_Tm_subset : forall n n' dBt, n <= n' -> dBt ∈ Tm^db n -> dBt ∈ Tm^db n'. Proof. rewrite /in_mem /=. introv Hnn' HndBt. gen n n'. induction dBt; intros; simpl in *. - apply IHdBt with (n.+1); auto. - apply (rwP andP) in HndBt as [HndBt1 HndBt2]. eapply IHdBt1 with (n' := n') in HndBt1; auto. eapply IHdBt2 with (n' := n') in HndBt2; auto. rewrite /in_mem /= HndBt1 HndBt2 //. - apply leq_trans with n0; auto. Qed. Definition update_ϕ x ϕ : {fmap 𝒱 → nat} := setm (mapm S ϕ) x 0. #[local] Notation "ϕ '^+' x" := (update_ϕ x ϕ). Definition codomm_𝐍 ϕ : nat := S (\max_(i <- codomm ϕ) i). Lemma ϕ_type : forall ϕ n, n ∈ codomm ϕ -> n ∈ codomm_𝐍 ϕ. Proof. introv Hnℛϕ. rewrite /codomm_𝐍 -maxE. apply maximum_correct. auto. Qed.
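(** Two quick computations (ours; the example names are our own). First, the de Bruijn representation of the identity, λ.0, is closed: membership in [Tm^db 0] computes down to 0 < 1. Second, [ϕ^+x] sends the freshly bound x to index 0, matching the page-8 equation restated by [update_ϕ_correct] below. *)
Example de_Bruijn_identityE : de_Bruijn_abstraction (de_Bruijn_variable 0) ∈ Tm^db 0.
Proof. reflexivity. Qed.

Example update_ϕ_binderE : forall ϕ x, getm (ϕ^+x) x = Some 0.
Proof. intros. rewrite setmE eq_refl. reflexivity. Qed.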
Lemma domm_update_ϕ : forall ϕ x, domm (ϕ^+x) = domm ϕ ∪ {x}. Proof. introv. rewrite domm_set domm_map fsetUC //. Qed. Lemma codomm_𝐍_update_ϕ : forall ϕ x, codomm_𝐍 (ϕ^+x) <= S (codomm_𝐍 ϕ). Proof. unfold codomm_𝐍. introv. rewrite codomm_setmE remm_mapm codomm_mapm big_idem_fsetU1 /=; try apply maxnn. rewrite max0n big_idem_imfset /=; try apply maxnn. pose proof codomm_rem ϕ x as Hxℛϕ. apply (rwP fsubsetP), bigmax_subset in Hxℛϕ. change (\max_(i <- codomm (remm ϕ x)) i.+1 <= (\max_(i <- codomm ϕ) i).+1). apply leq_trans with ((\max_(i <- codomm (remm ϕ x)) i).+1); auto. apply S_bigmax. Qed. Lemma codomm_update_ϕ : forall ϕ x, {subset codomm (ϕ^+x) <= S (codomm_𝐍 ϕ)}. Proof. intros ? ? v Hvℛϕ'. apply (rwP codommP) in Hvℛϕ' as [k Hkϕ']. rewrite setmE mapmE in Hkϕ'. destruct (k =P x); subst. { inverts Hkϕ'. auto. } destruct (getm ϕ k) eqn:Hϕk; inverts Hkϕ'. apply ϕ_type. apply (rwP codommP). eauto. Qed. (** Page 8: "where ϕ^+x(y) = ...." *) Lemma update_ϕ_correct : forall ϕ x y, y ∈ domm ϕ ∪ {x} -> getm (ϕ^+x) y = if y == x then Some 0 else omap S (getm ϕ y). Proof. introv Hyϕ'. rewrite setmE mapmE. rewrite /= in_fsetU in_fset1 in Hyϕ'. apply (rwP orP) in Hyϕ' as [Hyϕ|Hyx]. - destruct (y =P x); auto. - rewrite Hyx //. Qed. (** Page 8: "Note that ϕ^+x is an injection, if ϕ is." *) Lemma injective_update_ϕ : forall ϕ x, is_injective ϕ -> is_injective (ϕ^+x). Proof. introv Hϕinj. apply (rwP injectivemP) in Hϕinj. apply (rwP (@injectivemP _ _ (ϕ^+x))). intros k Hϕ'k k' Hkk'. apply (rwP dommP) in Hϕ'k as [v Hϕ'k]. rewrite setmE mapmE in Hϕ'k. rewrite !setmE !mapmE in Hkk'. destruct (k =P x); subst. - inverts Hϕ'k. destruct (k' =P x); subst; auto. destruct (getm ϕ k') eqn:Hϕk'; inverts Hkk'. - destruct (k' =P x); subst; destruct (getm ϕ k) eqn:Hϕk; inverts Hϕ'k as Hϕ'k. { inverts Hkk'. } + destruct (getm ϕ k') eqn:Hϕk'; inverts Hkk'. rewrite -Hϕk' in Hϕk. apply Hϕinj in Hϕk; auto. rewrite Hϕk' in Hϕk. apply (rwP dommP). eauto. Qed. #[local] Reserved Notation "t '^' ϕ" (at level 30, ϕ at level 30, format "t '^' ϕ"). (** Pages 7-8: "we assign to any t ∈ Tm(X) a de Bruijn term t^ϕ ∈ Tm^db(n) by...." *) Fixpoint to_de_Bruijn t ϕ : de_Bruijn_term := match t with | variable x => de_Bruijn_variable (odflt 0 (getm ϕ x)) | application t u => de_Bruijn_application (t^ϕ) (u^ϕ) | abstraction x t => de_Bruijn_abstraction (t^(ϕ^+x)) end where "t '^' ϕ" := (to_de_Bruijn t ϕ). (** Page 8: "t^ϕ ∈ Tm^db(n)". *) Lemma to_de_Bruijn_type : forall ϕ t, t ∈ Tm (domm ϕ) -> t^ϕ ∈ Tm^db (codomm_𝐍 ϕ). Proof. rewrite /in_mem /= /Tm. introv Hϕt. apply (rwP fsubsetP) in Hϕt. gen ϕ. induction t; intros; simpl in *. - apply de_Bruijn_Tm_subset with (codomm_𝐍 (ϕ^+s)). + apply codomm_𝐍_update_ϕ. + apply IHt. intros x Hxt. rewrite domm_set domm_map in_fsetU in_fset1. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply Hϕt. rewrite in_fsetD in_fset1 n Hxt //. - apply (rwP (@andP (Tm^db _ _) (Tm^db _ _))). split; (apply IHt1 || apply IHt2); intros x Hxt; apply Hϕt; rewrite in_fsetU Hxt ?orbT //. - assert (s ∈ fset1 s) as Hss. { rewrite in_fset1 eq_refl //. } apply Hϕt, (rwP dommP) in Hss as [v Hss]. rewrite Hss. apply ϕ_type, (rwP codommP). eauto. Qed. (** Page 8: "where R is the pullback of ϕ and ψ, i.e. ...." *) Definition is_pullback R ϕ ψ : Prop := forall x y, R x y <-> (x ∈ domm ϕ /\ getm ϕ x = getm ψ y). Lemma lemma9' : forall R ϕ ψ x y, R ⊆ domm ϕ × domm ψ -> is_injective ϕ -> is_injective ψ -> is_pullback R ϕ ψ -> is_pullback (R⦅x,y⦆) (ϕ^+x) (ψ^+y). Proof. simpl. intros ? ? ? ? ? HRtype Hϕinj Hψinj HRϕψ x' y'. 
rewrite /fmap_to_Prop updateE !setmE !mapmE /=. split. - introv HR'x'. destruct (x' =P x); subst. { inverts HR'x'. rewrite eq_refl. split; auto. apply (rwP dommP). rewrite setmE mapmE eq_refl. eauto. } destruct (getm R x') eqn:HRx'; cycle 1. { inverts HR'x'. } destruct (y =P s); subst; inverts HR'x'. pose proof HRx' as H'Rx'. rewrite <- (rwP is_subset_ofP) in HRtype. apply HRtype in HRx' as [Hϕx' Hψy']. apply (rwP dommP) in Hϕx' as [n__ϕ Hϕx']. apply (rwP dommP) in Hψy' as [n__ψ Hψy']. apply not_eq_sym, (introF eqP) in n0. apply HRϕψ in H'Rx' as [H'ϕx' Hϕψ]. rewrite Hϕx' Hψy' in Hϕψ. inverts Hϕψ. rewrite Hϕx' Hψy' n0. split; auto. apply (rwP dommP). apply (introF eqP) in n. rewrite setmE mapmE n Hϕx' /=. eauto. - intros [Hϕ'x' Hϕψ]. destruct (x' =P x); subst. + destruct (y' =P y); subst; auto. destruct (getm ψ y') eqn:Hψy'; inverts Hϕψ. + destruct (getm R x') eqn:HRx'. * pose proof HRx' as H'Rx'. rewrite <- (rwP is_subset_ofP) in HRtype. apply HRtype in HRx' as [Hϕx' Hψs]. apply (rwP dommP) in Hϕx' as [v__ϕ Hϕx']. apply (rwP dommP) in Hψs as [v__ψ Hψs]. rewrite Hϕx' in Hϕψ. destruct (y' =P y); subst. { inverts Hϕψ. } destruct (getm ψ y') eqn:Hψy'; inverts Hϕψ. assert (R x' y') as HRx'. { apply HRϕψ. rewrite Hϕx' Hψy' //. split; auto. apply (rwP dommP). eauto. } rewrite HRx' in H'Rx'. inverts H'Rx'. destruct (y =P s); subst; auto. contradiction. * destruct (getm ϕ x') eqn:Hϕx'; destruct (y' =P y); subst; inverts Hϕψ as Hϕψ. -- destruct (getm ψ y') eqn:Hψy'; inverts Hϕψ. rewrite -Hψy' in Hϕx'. assert (x' ∈ domm ϕ) as H'ϕx'. { apply (rwP dommP). rewrite Hϕx' Hψy'. eauto. } assert (x' ∈ domm ϕ /\ getm ϕ x' = getm ψ y') as H by auto. apply HRϕψ in H. rewrite H // in HRx'. -- destruct (getm ψ y') eqn:Hψy'; inverts Hϕψ. rewrite -Hψy' in Hϕx'. apply (rwP dommP) in Hϕ'x' as [v Hϕ'x']. apply (introF eqP) in n. rewrite setmE mapmE n Hϕx' Hψy' // in Hϕ'x'. Qed. (** Page 8: Lemma 9. *) Lemma lemma9 : forall R ϕ ψ t u, R ⊆ domm ϕ × domm ψ -> is_injective ϕ -> is_injective ψ -> is_pullback R ϕ ψ -> t ∈ Tm (domm ϕ) -> u ∈ Tm (domm ψ) -> t ≡_α^R u <-> t^ϕ = u^ψ. Proof. introv HRtype Hϕinj Hψinj HRϕψ Hϕt Hψu. apply (rwP fsubsetP) in Hϕt, Hψu. gen R ϕ ψ u. induction t; intros; split; intros; destruct u; inverts H; simpl in *. - f_equal. eapply IHt; eauto. + apply injective_update_ϕ. auto. + intros x Hxt. rewrite domm_update_ϕ in_fsetU in_fset1 orbC. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply Hϕt. rewrite in_fsetD in_fset1 n Hxt //. + rewrite !domm_update_ϕ. eapply update_type; eauto. + apply injective_update_ϕ. auto. + apply lemma9'; eauto. + intros x Hxu. rewrite domm_update_ϕ in_fsetU in_fset1 orbC. destruct (x =P s0); subst; auto. apply (introF eqP) in n. apply Hψu. rewrite in_fsetD in_fset1 n Hxu //. - eapply IHt in H1; eauto. + apply injective_update_ϕ. auto. + intros x Hxt. rewrite domm_update_ϕ in_fsetU in_fset1 orbC. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply Hϕt. rewrite in_fsetD in_fset1 n Hxt //. + rewrite !domm_update_ϕ. eapply update_type; eauto. + apply injective_update_ϕ. auto. + apply lemma9'; eauto. + intros x Hxu. rewrite domm_update_ϕ in_fsetU in_fset1 orbC. destruct (x =P s0); subst; auto. apply (introF eqP) in n. apply Hψu. rewrite in_fsetD in_fset1 n Hxu //. - apply (rwP andP) in H1 as [Hα1 Hα2]. eapply IHt1 with (ϕ := ϕ) (ψ := ψ) in Hα1; cycle 1; intros_all; eauto. { apply Hϕt. rewrite in_fsetU H0 //. } { apply Hψu. rewrite in_fsetU H0 //. } eapply IHt2 with (ϕ := ϕ) (ψ := ψ) in Hα2; cycle 1; intros_all; eauto. { apply Hϕt. 
rewrite in_fsetU H0 orbT //. } { apply Hψu. rewrite in_fsetU H0 orbT //. } rewrite Hα1 Hα2 //. - eapply IHt1 in H1; cycle 1; intros_all; eauto. { apply Hϕt. rewrite in_fsetU H0 //. } { apply Hψu. rewrite in_fsetU H0 //. } eapply IHt2 in H2; cycle 1; intros_all; eauto. { apply Hϕt. rewrite in_fsetU H0 orbT //. } { apply Hψu. rewrite in_fsetU H0 orbT //. } rewrite H1 H2 //. - apply (rwP getmP) in H1. apply HRϕψ in H1 as [Hϕs Hϕψ]. apply (rwP dommP) in Hϕs as [v Hϕs]. rewrite Hϕs in Hϕψ. rewrite Hϕs -Hϕψ //. - rewrite <- (rwP getmP). assert (s ∈ fset1 s) as Hss. { rewrite in_fset1 //. } apply Hϕt, (rwP dommP) in Hss as [v Hϕs]. assert (s0 ∈ fset1 s0) as Hs0s0. { rewrite in_fset1 //. } apply Hψu, (rwP dommP) in Hs0s0 as [v' Hψs0]. rewrite Hϕs Hψs0 /= in H1. subst. apply HRϕψ. split. + apply (rwP dommP). eauto. + rewrite Hϕs Hψs0 //. Qed. Lemma identity_is_pullback : forall ϕ, is_injective ϕ -> is_pullback (1__(domm ϕ)) ϕ ϕ. Proof. introv Hϕinj. repeat (split; intros). - rewrite /fmap_to_Prop identityE in H. destruct (x ∈ domm ϕ) eqn:Hϕx; inverts H. auto. - rewrite /fmap_to_Prop identityE in H. destruct (x ∈ domm ϕ) eqn:Hϕx; inverts H. auto. - destruct H as [Hϕx Hϕxy]. rewrite /fmap_to_Prop identityE Hϕx. apply (rwP injectivemP) in Hϕxy; auto. subst. auto. Qed. (** Page 7: Proposition 8. *) Proposition to_de_Bruijn_chooses_canonical_representations : forall t u ϕ, is_injective ϕ -> t ∈ Tm (domm ϕ) -> u ∈ Tm (domm ϕ) -> t ≡_α u <-> t^ϕ = u^ϕ. Proof. introv Hϕinj Htϕ Huϕ. split; introv H. - apply lemma9 with (R := 1__(domm ϕ)); auto. + apply identity_type. + apply identity_is_pullback. auto. + apply α_equivalent'_supermap with (R__sub := 1__(FV t)); auto. introv Hkv. rewrite /fmap_to_Prop identityE in Hkv. destruct (k ∈ FV t) eqn:Hkt; inverts Hkv. apply (rwP fsubsetP) in Htϕ. apply Htϕ in Hkt. rewrite /fmap_to_Prop identityE Hkt //. - eapply lemma9 with (R := 1__(domm ϕ)) in H; auto. + apply α_equivalent'_implies_α_equivalent. eauto. + apply identity_type. + apply identity_is_pullback. auto. Qed. Lemma codomm_Tm_set_identity : forall X, codomm_Tm_set (1__X) = X. Proof. introv. apply eq_fset. intros x. apply Bool.eq_iff_eq_true. split; simpl; introv H. - apply (rwP codomm_Tm_setP) in H as (t & Hxt & HtX). apply (rwP codommP) in HtX as [y HtX]. rewrite ηE in HtX. destruct (y ∈ X) eqn:HyX; inverts HtX. rewrite in_fset1 in Hxt. apply (rwP eqP) in Hxt. subst. auto. - apply (rwP codomm_Tm_setP). exists (variable x). split. + rewrite /= in_fset1 eq_refl //. + rewrite codomm_identity'. apply (rwP imfsetP). exists x; auto. Qed. Lemma variable_substitution_as_α_equivalent' : forall t x y, y ∉ FV t -> t[x⟵variable y] ≡_α^(1__(FV t :\ x))⦅y,x⦆ t. Proof. introv Hnyt. replace ((1__(FV t :\ x))⦅y,x⦆) with (((1__(FV t :\ x))⦅x,y⦆)ᵒ); cycle 1. { rewrite update_converse. - rewrite converse_identity //. - apply partial_bijection_identity. } simpl. replace ((1__(FV t))[x,variable y]) with (mapm variable ((1__(FV t))⦅x,y⦆)); cycle 1. { apply eq_fmap. intros k. rewrite setmE mapmE updateE ηE. destruct (k =P x); subst; auto. rewrite identityE. destruct (k ∈ FV t) eqn:Hkt; auto. destruct (y =P k); subst; auto. rewrite Hkt // in Hnyt. } replace ((1__(FV t :\ x))⦅x,y⦆ᵒ) with ((1__(FV t))⦅x,y⦆ᵒ); cycle 1. { apply eq_fmap. intros k. rewrite !update_converse. - rewrite !updateE. destruct (k =P y); subst; auto. rewrite !converse_identity !identityE !in_fsetD !in_fset1. destruct (k =P x); subst; auto. destruct (x ∈ FV t) eqn:Hxt; auto. rewrite eq_refl //. - apply partial_bijection_identity. - apply partial_bijection_identity. 
} apply lemma7. - apply partial_bijection_update, partial_bijection_identity. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. apply (rwP dommP). rewrite updateE identityE. destruct (k =P x); subst; simpl; eauto. rewrite Hkt. destruct (y =P k); subst; simpl; eauto. rewrite Hkt // in Hnyt. Qed. Lemma update_as_update_ϕ : forall t u x y ϕ, is_injective ϕ -> t ∈ Tm (domm ϕ ∪ {y}) -> u ∈ Tm (domm ϕ ∪ {x}) -> t ≡_α^(1__(domm ϕ))⦅y,x⦆ u -> t^ϕ^+y = u^ϕ^+x. Proof. unfold Tm. introv Hϕinj Hϕ't Hϕ'u Hα. apply (rwP fsubsetP) in Hϕ't, Hϕ'u. apply lemma9 with (R := (1__(domm ϕ))⦅y,x⦆); auto. - rewrite !domm_set ![_ |: _]fsetUC. apply update_type. apply (rwP is_subset_ofP). intros k v Hϕk. rewrite /fmap_to_Prop identityE in Hϕk. destruct (k ∈ domm ϕ) eqn:H'ϕk; inverts Hϕk. rewrite domm_map H'ϕk //. - apply injective_update_ϕ. auto. - apply injective_update_ϕ. auto. - eapply lemma9'; eauto. + apply identity_type. + eapply identity_is_pullback; eauto. - rewrite /Tm /in_mem domm_set domm_map /= fsetUC. apply (rwP fsubsetP). intros k Hkt. apply Hϕ't. auto. - rewrite /Tm /in_mem domm_set domm_map /= fsetUC. apply (rwP fsubsetP). intros k Hkt. apply Hϕ'u. auto. Qed. Lemma to_de_Bruijn_with_indistinguishable_ϕ : forall ϕ ψ t, (forall x, x ∈ FV t -> getm ϕ x = getm ψ x) -> t^ϕ = t^ψ. Proof. introv Hϕψ. gen ϕ ψ. induction t; intros; simpl in *; f_equal. - apply IHt. intros x Hxt. rewrite !setmE !mapmE. destruct (x =P s); subst; auto. f_equal. apply Hϕψ. apply (introF eqP) in n. rewrite in_fsetD Hxt in_fset1 n //. - apply IHt1. intros x Hxt1. apply Hϕψ. rewrite in_fsetU Hxt1 //. - apply IHt2. intros x Hxt2. apply Hϕψ. rewrite in_fsetU Hxt2 orbT //. - f_equal. apply Hϕψ. rewrite in_fset1 eq_refl //. Qed. Lemma update_ϕ_remm : forall ϕ x, (remm ϕ x)^+x = ϕ^+x. Proof. introv. apply eq_fmap. intros k. rewrite !setmE !mapmE remmE. destruct (k =P x); subst; auto. Qed. Lemma substitution_id : forall t x, t[x⟵variable x] ≡_α t. Proof. introv. destruct (x ∈ FV t) eqn:Hxt; cycle 1. { apply substitution_law1. rewrite Hxt //. } transitivity (⦇η__(FV t)⦈ t); cycle 1. { apply monad_substitution1. rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. auto. } apply lift_substitution_indistinguishable_substitutions. - rewrite /in_mem /Tm /=. apply (rwP fsubsetP). intros k Hkt. rewrite in_fsetI !domm_set domm_map !domm_mkfmapf !in_fsetU !in_fset1 !in_fset orbC Hkt //. - intros k Hkt. rewrite setmE ηE Hkt. destruct (k =P x); subst; reflexivity. Qed. Lemma injective_remm_ϕ : forall ϕ x, is_injective ϕ -> is_injective (remm ϕ x). Proof. simpl. introv Hϕinj. rewrite <- (rwP injectivemP). intros k Hϕ'k v Hkv. rewrite domm_rem in_fsetD in_fset1 in Hϕ'k. apply (rwP andP) in Hϕ'k as [Hknx Hϕk]. apply negbTE in Hknx. rewrite !remmE Hknx in Hkv. destruct (v =P x); subst. - apply (rwP dommP) in Hϕk as [v Hϕk]. rewrite Hϕk // in Hkv. - apply (rwP injectivemP) in Hϕinj. apply Hϕinj in Hkv; auto. Qed. Lemma substitution_as_update_ϕ : forall ϕ t x y, is_injective ϕ -> t ∈ Tm (domm ϕ) -> y ∉ FV t -> (t[x⟵variable y])^ϕ^+y = t^ϕ^+x. Proof. unfold Tm. introv Hϕinj Hϕt Hnyt. apply (rwP fsubsetP) in Hϕt. destruct (x =P y); subst. { apply to_de_Bruijn_chooses_canonical_representations. - apply injective_update_ϕ. auto. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. rewrite FV_noop_substitute // in Hkt. apply Hϕt in Hkt. rewrite domm_set domm_map in_fsetU in_fset1 orbC Hkt //. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. apply Hϕt in Hkt. rewrite domm_set domm_map in_fsetU in_fset1 orbC Hkt //. 
- apply substitution_id. } eapply update_as_update_ϕ; eauto. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. rewrite in_fsetU in_fset1 orbC. destruct (k =P y); subst; auto. apply Hϕt. destruct (x ∈ FV t) eqn:Hxt. + rewrite FV_after_substitute // in_fsetU in_fsetD !in_fset1 in Hkt. apply (rwP orP) in Hkt as [H|Hky]. * apply (rwP andP) in H as [Hknx Hkt]. auto. * apply (rwP eqP) in Hky. subst. contradiction. + rewrite FV_noop_substitute // Hxt // in Hkt. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. apply Hϕt in Hkt. rewrite in_fsetU in_fset1 Hkt //. - apply α_equivalent'_supermap with (R__sub := (1__(FV t :\ x))⦅y,x⦆). + introv Ht'k. rewrite /fmap_to_Prop updateE identityE in_fsetD in_fset1 /= in Ht'k. rewrite /fmap_to_Prop updateE identityE. destruct (k =P y); subst; auto. destruct (k =P x); subst. { inverts Ht'k. } destruct (k ∈ FV t) eqn:Hkt; cycle 1. { inverts Ht'k. } simpl in *. destruct (x =P k); subst; inverts Ht'k. apply Hϕt in Hkt. apply not_eq_sym, (introF eqP) in n1. rewrite Hkt n1 //. + apply variable_substitution_as_α_equivalent'. auto. Qed. Lemma substitution_noop_if_shadow : forall t u x, (abstraction x t)[x⟵u] ≡_α abstraction x t. Proof. introv. apply substitution_law1. rewrite /= in_fsetD in_fset1 eq_refl //. Qed. Lemma old_index_after_update_ϕ : forall ϕ x i, is_injective ϕ -> getm ϕ x = Some i -> forall y, ~ getm (ϕ^+x) y = Some (S i). Proof. introv Hϕinj Hϕx Hϕ'y. rewrite setmE mapmE in Hϕ'y. destruct (y =P x); subst. { inverts Hϕ'y. } destruct (getm ϕ y) eqn:Hϕy; inverts Hϕ'y. rewrite -Hϕy in Hϕx. apply (rwP injectivemP) in Hϕinj. apply Hϕinj in Hϕx; auto. apply (rwP dommP). exists i. rewrite Hϕx Hϕy //. Qed. Lemma update_substitution_reorder' : forall f x x' t t', x <> x' -> f[x,t][x',t'] = f[x',t'][x,t]. Proof. introv Hxnx'. apply eq_fmap. intros k. rewrite !setmE. destruct (k =P x); subst; auto. apply (introF eqP) in Hxnx'. rewrite Hxnx' //. Qed. Fixpoint term_depth t : nat := S match t with | variable _ => 0 | application t u => maxn (term_depth t) (term_depth u) | abstraction _ t => term_depth t end. Lemma term_depth_respects_α_equivalent' : forall R t u, t ≡_α^R u -> term_depth t = term_depth u. Proof. introv Hα. gen R u. induction t; intros; destruct u; inverts Hα; simpl; eauto. f_equal. apply (rwP andP) in H0 as [Hα1 Hα2]. rewrite (IHt1 _ _ Hα1) (IHt2 _ _ Hα2) //. Qed. Lemma term_depth_respects_α_equivalent : forall t u, t ≡_α u -> term_depth t = term_depth u. Proof. introv Hα. eapply term_depth_respects_α_equivalent'; eauto. Qed. Add Parametric Morphism : term_depth with signature α_equivalent ==> eq as term_depth_morph. Proof. apply term_depth_respects_α_equivalent. Qed. Implicit Type bound : {fset 𝒱}. Fixpoint has_shadowing' bound t : bool := match t with | abstraction x t => (x ∈ bound) || has_shadowing' (bound ∪ {x}) t | application t1 t2 => has_shadowing' bound t1 || has_shadowing' bound t2 | variable x => false end. Definition has_shadowing : term -> bool := has_shadowing' ∅. Lemma has_shadowing'_sub : forall bound__sub bound__super t, bound__sub ⊆ bound__super -> has_shadowing' bound__sub t -> has_shadowing' bound__super t. Proof. introv Hsub Ht. gen bound__sub bound__super. induction t; intros; simpl in *. - apply (rwP orP). apply (rwP orP) in Ht as [Ht|Ht]. + apply (rwP fsubsetP) in Hsub. apply Hsub in Ht. auto. + right. apply IHt with (bound__sub := bound__sub ∪ {s}); auto. rewrite fsetSU //. - apply (rwP orP). apply (rwP orP) in Ht as [Ht|Ht]; eauto. - apply (rwP fsubsetP) in Hsub. auto. Qed. 
Fixpoint bound_variables t : {fset 𝒱} := match t with | abstraction x t => bound_variables t ∪ {x} | application t1 t2 => bound_variables t1 ∪ bound_variables t2 | variable _ => ∅ end. Lemma has_shadowing'_fsetU : forall bound bound' t, fdisjoint bound' (bound_variables t) -> has_shadowing' bound t = has_shadowing' (bound ∪ bound') t. Proof. unfold has_shadowing. introv Hdisj. gen bound. induction t; intros; auto; simpl in *. - rewrite in_fsetU. destruct (s ∈ bound) eqn:Hsbound; auto; simpl. assert (s ∈ bound' = false) as Hsnbound'. { apply negbTE, (rwP negP). introv Hsnbound'. rewrite <- (rwP fdisjointP) in Hdisj. apply Hdisj in Hsnbound'. rewrite in_fsetU in_fset1 eq_refl orbT // in Hsnbound'. } rewrite Hsnbound' /=. rewrite IHt. + f_equal. rewrite fsetUC fsetUA. f_equal. rewrite fsetUC //. + rewrite fdisjointUr in Hdisj. apply (rwP andP) in Hdisj as [Hdisj _]. auto. - rewrite fdisjointUr in Hdisj. apply (rwP andP) in Hdisj as [Hdisj1 Hdisj2]. rewrite IHt1 // IHt2 //. Qed. Fixpoint have_same_structure t u : bool := match t, u with | abstraction _ t, abstraction _ u => have_same_structure t u | application t1 t2, application u1 u2 => have_same_structure t1 u1 && have_same_structure t2 u2 | variable _, variable _ => true | _, _ => false end. Lemma have_same_structure_reflexive : forall t, have_same_structure t t. Proof. intros. induction t; simpl; auto. rewrite IHt1 IHt2 //. Qed. Lemma have_same_structure_symmetric : forall t u, have_same_structure t u -> have_same_structure u t. Proof. introv Htu. gen u. induction t; intros; destruct u; inverts Htu; simpl in *; auto. apply (rwP andP) in H0 as [Htu1 Htu2]. apply IHt1 in Htu1. apply IHt2 in Htu2. rewrite Htu1 Htu2 //. Qed. Lemma have_same_structure_transitive : forall t u (v : term), have_same_structure t u -> have_same_structure u v -> have_same_structure t v. Proof. introv Htu Huv. gen u v. induction t; intros; destruct u, v; inverts Htu; inverts Huv; simpl in *; eauto. apply (rwP andP) in H0 as [Htu1 Htu2], H1 as [Huv1 Huv2]. apply IHt1 in Huv1; auto. apply IHt2 in Huv2; auto. rewrite Huv1 Huv2 //. Qed. #[global] Instance have_same_structure_Equivalence : Equivalence have_same_structure. Proof. split; intros t. - apply have_same_structure_reflexive. - apply have_same_structure_symmetric. - apply have_same_structure_transitive. Qed. Definition preserves_structure f : Prop := forall t, t ∈ codomm f -> exists x, t = variable x. Lemma identity_preserves_structure : forall X, preserves_structure (1__X). Proof. intros X t Htf. rewrite codomm_identity' in Htf. apply (rwP imfsetP) in Htf as [u Hu]. eauto. Qed. Lemma preserves_structure_update_substitution : forall f x y, preserves_structure f -> preserves_structure (f[x,variable y]). Proof. introv Hf Htf. apply (rwP codommP) in Htf as [k Hk]. rewrite setmE in Hk. destruct (k =P x); subst. - inverts Hk. eauto. - apply Hf, (rwP codommP). eauto. Qed. Lemma preserves_structure_correct' : forall f g t u, FV t ⊆ domm f -> FV u ⊆ domm g -> preserves_structure f -> preserves_structure g -> have_same_structure t u -> have_same_structure (⦇f⦈ t) (⦇g⦈ u). Proof. introv Htf Hug Hf Hg Htu. gen f g u. induction t; intros; destruct u; inverts Htu; simpl in *. - apply (rwP fsubsetP) in Htf, Hug. apply IHt; auto. + apply (rwP fsubsetP). intros x Hxt. rewrite domm_set in_fsetU in_fset1. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply Htf. rewrite in_fsetD in_fset1 n Hxt //. + apply preserves_structure_update_substitution. auto. + apply preserves_structure_update_substitution. auto. 
+ apply (rwP fsubsetP). intros x Hxu. rewrite domm_set in_fsetU in_fset1. destruct (x =P s0); subst; auto. apply (introF eqP) in n. apply Hug. rewrite in_fsetD in_fset1 n Hxu //. - apply (rwP andP) in H0 as [Htu1 Htu2]. rewrite !fsubUset in Htf, Hug. apply (rwP andP) in Htf as [Ht1f Ht2f], Hug as [Hu1g Hu2g]. rewrite -(rwP andP). fold (lift_substitution f) (lift_substitution g). auto. - rewrite !fsub1set in Htf, Hug. apply (rwP dommP) in Htf as [t Ht], Hug as [u Hu]. rewrite /lift_substitution /= Ht Hu /=. assert (t ∈ codomm f) as Htf. { apply (rwP codommP). eauto. } apply Hf in Htf as [x Hx]. assert (u ∈ codomm g) as Hug. { apply (rwP codommP). eauto. } apply Hg in Hug as [y Hy]. subst. auto. Qed. Lemma α_equivalent'_implies_same_structure : forall R t u, t ≡_α^R u -> have_same_structure t u. Proof. introv Hα. gen R u. induction t; intros; destruct u; inverts Hα; simpl in *; eauto. apply (rwP andP) in H0 as [Htu1 Htu2]. rewrite -(rwP andP). split; eauto. Qed. Lemma α_equivalent_implies_same_structure : forall t u, t ≡_α u -> have_same_structure t u. Proof. introv Hα. eapply α_equivalent'_implies_same_structure; eauto. Qed. Lemma preserves_structure_correct : forall f t, FV t ⊆ domm f -> preserves_structure f -> have_same_structure t (⦇f⦈ t). Proof. introv Htf Hf. transitivity (⦇1__(FV t)⦈ t). - apply α_equivalent_implies_same_structure. symmetry. apply monad_substitution1. rewrite /Tm /in_mem /= fsubsetxx //. - apply preserves_structure_correct'; auto. + apply (rwP fsubsetP). intros x Hxt. rewrite domm_identity' //. + apply identity_preserves_structure. + reflexivity. Qed. Lemma term_depth_lift_variable_substitution : forall f t, preserves_structure f -> term_depth (⦇f⦈ t) = term_depth t. Proof. introv Hf. gen f. induction t; intros; simpl in *; auto. - f_equal. apply IHt. intros u Hf'. apply (rwP codommP) in Hf' as [x Hx]. rewrite setmE in Hx. destruct (x =P s); subst. + inverts Hx. eauto. + apply Hf, (rwP codommP). eauto. - fold (lift_substitution f). auto. - rewrite /lift_substitution /=. destruct (getm f s) eqn:Hfs; fold (lift_substitution f); auto. assert (t ∈ codomm f) as Hft. { apply (rwP codommP). eauto. } apply Hf in Hft as [x Ht]. subst. auto. Qed. Lemma has_shadowing'_overlap : forall bound t, ~ fdisjoint bound (bound_variables t) -> has_shadowing' bound t. Proof. introv Hdisj. apply (rwP negP) in Hdisj. gen bound. induction t; intros; simpl in *. - rewrite fdisjointUr fdisjoints1 negb_and negbK in Hdisj. apply (rwP orP) in Hdisj as [Hdisj|Hdisj]. + apply IHt in Hdisj. erewrite has_shadowing'_sub; eauto. * rewrite orbT //. * rewrite fsubsetUl //. + rewrite Hdisj //. - rewrite fdisjointUr negb_and in Hdisj. apply (rwP orP) in Hdisj as [Hdisj|Hdisj]. + apply IHt1 in Hdisj. rewrite Hdisj //. + apply IHt2 in Hdisj. rewrite Hdisj orbT //. - rewrite fdisjoints0 // in Hdisj. Qed. Lemma has_shadowing_only_with_extra_bound_variables : forall bound t, ~ has_shadowing t -> has_shadowing' bound t -> ~ fdisjoint bound (bound_variables t). Proof. unfold has_shadowing. introv Ht H Hdisj. rewrite -(rwP fdisjointP) in Hdisj. gen bound. induction t; intros; simpl in *; auto. - apply (rwP orP) in H as [H|H]. { apply Hdisj in H. rewrite in_fsetU in_fset1 eq_refl orbT // in H. } destruct (s ∈ bound) eqn:Hs. { apply Hdisj in Hs. rewrite in_fsetU negb_or in_fset1 eq_refl andbF // in Hs. } rewrite fset0U in Ht. destruct (fdisjoint (fset1 s) (bound_variables t)) eqn:Hst. + apply has_shadowing'_fsetU with (bound := bound) in Hst. rewrite -Hst in H. assert (~ has_shadowing t) as Hnt. { introv Ht'. 
eapply has_shadowing'_sub in Ht'; eauto. rewrite fsub0set //. } eapply IHt in Hnt; eauto. introv Hx. apply Hdisj in Hx. rewrite in_fsetU in_fset1 negb_or in Hx. apply (rwP andP) in Hx as [Hxnt Hxns]. auto. + apply Ht, has_shadowing'_overlap. introv Hsnt. rewrite Hst // in Hsnt. - apply (rwP negP) in Ht. rewrite negb_or in Ht. apply (rwP andP) in Ht as [Ht1 Ht2]. apply (rwP negP) in Ht1, Ht2. apply (rwP orP) in H as [H|H]; (apply IHt1 with bound in Ht1 + apply IHt2 with bound in Ht2); auto; introv Hx; apply Hdisj in Hx; rewrite in_fsetU negb_or in Hx; apply (rwP andP) in Hx as [Hx1 Hx2]; auto. Qed. Lemma term_depth_ind : forall P : term -> Prop, (forall t, (forall u, term_depth u < term_depth t -> P u) -> P t) -> forall t, P t. Proof. introv H. assert (forall t u, term_depth u <= term_depth t -> P u) as IH. { introv H'. gen u. induction t; intros; simpl in *. - apply H; intros v Hv. apply IHt, leq_trans with (n := (term_depth u).-1); destruct (term_depth u) eqn:?; auto. - apply H; intros v Hv. rewrite -maxnSS leq_max in H'. apply (rwP orP) in H' as [H'|H']. + apply IHt1. apply leq_trans with (n := (term_depth u).-1); destruct (term_depth u) eqn:?; auto. + apply IHt2. apply leq_trans with (n := (term_depth u).-1); destruct (term_depth u) eqn:?; auto. - apply H. intros v Hv. destruct u. + repeat (destruct u; inverts H'). + rewrite /= gtn_max in H'. apply (rwP andP) in H' as [Hu1 Hu2]. rewrite /= ltnS leq_max in Hv. apply (rwP orP) in Hv as [Hv|Hv]; destruct u1, u2; inverts Hu1; inverts Hu2. + destruct v; inverts Hv. } eauto. Qed. Lemma lift_substitution_cannot_decrease_term_depth : forall f t, t ∈ Tm (domm f) -> term_depth t <= term_depth (⦇f⦈ t). Proof. introv Htf. rewrite /Tm /in_mem /= in Htf. apply (rwP fsubsetP) in Htf. gen f. induction t; intros; simpl in *. - apply IHt. introv Hxt. rewrite domm_set in_fsetU in_fset1. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply Htf. rewrite in_fsetD in_fset1 Hxt n //. - rewrite gtn_max !ltnS !leq_max. rewrite <- (rwP andP). split. + rewrite IHt1 //. introv Hx. apply Htf. rewrite in_fsetU Hx //. + rewrite IHt2 ?orbT //. introv Hx. apply Htf. rewrite in_fsetU Hx orbT //. - rewrite /lift_substitution /=. destruct (getm f s) eqn:Hfs; auto. destruct t; auto. Qed. Lemma variable_substitution'_as_α_equivalent'' : forall Fresh t x y, Fresh_correct Fresh -> y ∉ FV t -> t`[x⟵variable y]Fresh ≡_α^(1__(FV (t`[x⟵variable y]Fresh) :\ y))⦅y,x⦆ t. Proof. introv HFresh Hnyt. replace ((1__(FV (t`[x⟵variable y]Fresh0) :\ y))⦅y,x⦆) with (((1__(FV t))⦅x,y⦆)ᵒ); cycle 1. { rewrite update_converse. - rewrite converse_identity //. apply eq_fmap. intros k. rewrite !updateE !identityE /= !in_fsetD !in_fset1. destruct (x ∈ FV t) eqn:Hxt. + rewrite FV_after_substitute' // !in_fsetU !in_fsetD !in_fset1. destruct (k =P y); subst; auto. destruct (k =P x); subst. { rewrite Hxt eq_refl //. } destruct (k ∈ FV t) eqn:Hkt; auto. + rewrite FV_noop_substitute'; auto; cycle 1. { rewrite Hxt //. } destruct (k =P y); subst; auto. - apply partial_bijection_identity. } replace ((1__(FV t))[x,variable y]) with (mapm variable ((1__(FV t))⦅x,y⦆)); cycle 1. { apply eq_fmap. intros k. rewrite /fmap_to_Prop setmE !ηE mapmE updateE identityE. destruct (k =P x); subst; auto. destruct (k ∈ FV t) eqn:Hkt; auto. destruct (y =P k); subst; auto. rewrite Hkt // in Hnyt. } apply lemma7'; auto. { apply partial_bijection_update, partial_bijection_identity. } rewrite /Tm /in_mem /mem /=. apply (rwP fsubsetP). intros k Hkt. apply (rwP dommP). rewrite updateE identityE Hkt /=. 
destruct (k =P x); subst; simpl; eauto. destruct (y =P k); subst; simpl; eauto. rewrite Hkt // in Hnyt. Qed. Lemma variable_substitution_as_α_equivalent'' : forall t x y, y ∉ FV t -> t[x⟵variable y] ≡_α^(1__(FV (t[x⟵variable y]) :\ y))⦅y,x⦆ t. Proof. introv Hnyt. apply variable_substitution'_as_α_equivalent''; auto. apply HFresh. Qed. Lemma variable_substitution'_as_α_equivalent''' : forall Fresh t x y, Fresh_correct Fresh -> y ∉ FV t -> t`[x⟵variable y]Fresh ≡_α^(1__(FV (t`[x⟵variable y]Fresh)))⦅y,x⦆ t. Proof. introv HFresh Hnyt. apply α_equivalent'_supermap with (R__sub := (1__(FV (t`[x⟵variable y]Fresh0) :\ y))⦅y,x⦆); cycle 1. { apply variable_substitution'_as_α_equivalent''; auto. } introv Hkv. rewrite /fmap_to_Prop updateE identityE in Hkv. rewrite /fmap_to_Prop updateE identityE. destruct (k =P y); subst; auto. apply (introF eqP) in n. destruct (x ∈ FV t) eqn:Hxt. - rewrite FV_after_substitute' //. rewrite FV_after_substitute' // in Hkv. rewrite in_fsetD in_fsetU in_fsetD !in_fset1 n /= in Hkv. rewrite in_fsetU in_fsetD !in_fset1 orbC n /=. destruct (k =P x); subst; auto. destruct (k ∈ FV t) eqn:Hkt; auto. - rewrite FV_noop_substitute' //; cycle 1. { rewrite Hxt //. } rewrite FV_noop_substitute' // in Hkv; cycle 1. { rewrite Hxt //. } rewrite in_fsetD in_fset1 n /= in Hkv. destruct (k ∈ FV t) eqn:Hkt; auto. Qed. Lemma variable_substitution_as_α_equivalent''' : forall t x y, y ∉ FV t -> t`[x⟵variable y]Fresh ≡_α^(1__(FV (t`[x⟵variable y]Fresh)))⦅y,x⦆ t. Proof. introv Hnyt. apply variable_substitution'_as_α_equivalent'''; auto. apply HFresh. Qed. (* TODO Use throughout this file? *) (** See https://www.sciencedirect.com/science/article/pii/S1571066116300354. *) Definition α_compatible_predicate (P : term -> Prop) : Prop := forall t u, t ≡_α u -> P t -> P u. (** See https://www.sciencedirect.com/science/article/pii/S1571066116300354. *) Theorem term_α_ind : forall P : term -> Prop, α_compatible_predicate P -> (forall x, P (variable x)) -> (forall t u, P t -> P u -> P (application t u)) -> (exists s : {fset 𝒱}, forall t x, x ∉ s -> P t -> P (abstraction x t)) -> forall t, P t. Proof. intros P HP Hvar Happ [s Habs] t. induction t using term_depth_ind; destruct t; auto. - pose proof HFresh (s ∪ FV t) as HFresh. rewrite in_fsetU negb_or in HFresh. apply (rwP andP) in HFresh as [HsFresh HtFresh]. apply HP with (abstraction (Fresh (s ∪ FV t)) (t[s0⟵variable (Fresh (s ∪ FV t))])). + apply variable_substitution_as_α_equivalent''. auto. + apply Habs, H; auto. rewrite /= term_depth_lift_variable_substitution //. apply preserves_structure_update_substitution, identity_preserves_structure. - apply Happ; apply H; rewrite /= ltnS leq_max leqnn ?orbT //. Qed. #[local] Reserved Infix "=_α" (at level 40). Inductive trad_α : term -> term -> Prop := | trad_α_var : forall x, variable x =_α variable x | trad_α_abs : forall x t u, t =_α u -> abstraction x t =_α abstraction x u | trad_α_app : forall t1 t2 u1 u2, t1 =_α u1 -> t2 =_α u2 -> application t1 t2 =_α application u1 u2 | trad_α_renaming : forall v v' t, v' ∉ FV t -> abstraction v t =_α abstraction v' (t[v⟵variable v']) | trad_α_trans : forall t u (v : term), t =_α u -> u =_α v -> t =_α v where "x '=_α' y" := (trad_α x y). #[local] Hint Constructors trad_α : core. Lemma α_equivalent'_remove_noop_update' : forall R t u x y, x ∉ FV t -> y ∉ FV u -> t ≡_α^(R⦅x,y⦆) u -> t ≡_α^R u. Proof. introv HnRx HnR'y Hα. apply α_equivalent'_with_behaviorally_identical_maps with (R := R⦅x,y⦆); auto. intros x' y' HR'x' Hx't. rewrite /fmap_to_Prop updateE in HR'x'. 
rewrite /fmap_to_Prop. destruct (x' =P x); subst. { rewrite Hx't // in HnRx. } destruct (getm R x') eqn:HRx'; auto. destruct (y =P s); subst; inverts HR'x'. auto. Qed. Lemma α_equivalent'_update_FV : forall R t u x y, partial_bijection R -> t ≡_α^(R⦅x,y⦆) u -> (x ∈ FV t) = (y ∈ FV u). Proof. introv HRinj Hα. apply α_equivalent'_implies_related_FV in Hα; cycle 1. { apply partial_bijection_update. auto. } rewrite {}Hα. destruct (x ∈ FV t) eqn:Hxt; symmetry. - apply (rwP pimfsetP). exists x; auto. rewrite updateE eq_refl //. - apply negbTE, (introN pimfsetP). intros [k Hk]. rewrite updateE in H. destruct (k =P x); subst. { rewrite Hk // in Hxt. } destruct (getm R k) eqn:HRk. + destruct (y =P s); subst; inverts H. auto. + inverts H. Qed. Lemma α_equivalent'_remove_noop_update : forall R t u x y, partial_bijection R -> t ≡_α^(R⦅x,y⦆) u -> x ∉ FV t -> t ≡_α^R u. Proof. introv HRinj Hα HnRx. eapply α_equivalent'_remove_noop_update'; eauto. erewrite <- α_equivalent'_update_FV; eauto. Qed. Lemma α_equivalent_abstractions : forall x y t u, abstraction x t ≡_α abstraction y u -> t[x⟵variable y] ≡_α u. Proof. introv Hα. destruct (x =P y); subst; auto. { transitivity t. - rewrite substitution_id //. - rewrite /α_equivalent /= update_identity fsetU_after_fsetD in Hα. apply α_equivalent'_implies_α_equivalent. eauto. } apply not_eq_sym, (introF eqP) in n. assert (y ∉ FV t) as Hynt. { apply FV_respects_α_equivalence in Hα. cut (y ∉ FV t ∪ {x} = true). { intros H. rewrite in_fsetU in_fset1 negb_or in H. apply (rwP andP) in H as [Hynt Hynx]. auto. } simpl in Hα. rewrite -fsetU_after_fsetD -Hα in_fsetU in_fsetD !in_fset1 eq_refl n //. } destruct (x ∈ FV t) eqn:Hxt; cycle 1. - assert (y ∉ FV u) as Hynu. { rewrite /α_equivalent /= in Hα. apply α_equivalent'_implies_related_FV in Hα; cycle 1. { apply partial_bijection_update, partial_bijection_identity. } rewrite Hα. apply (introN pimfsetP). intros [z Hzt]. rewrite updateE identityE in_fsetD in_fset1 in H. destruct (z =P x); subst. { rewrite Hzt // in Hxt. } rewrite Hzt /= in H. destruct (y =P z); subst; inverts H; auto. } transitivity t. { rewrite substitution_law1 // Hxt //. } rewrite /α_equivalent /= in Hα. apply α_equivalent'_remove_noop_update in Hα; auto; cycle 1. { apply partial_bijection_identity. } { rewrite Hxt //. } apply α_equivalent'_supermap with (R__sub := 1__(FV t :\ x)); auto. intros k v Hkv. rewrite /fmap_to_Prop identityE in_fsetD in_fset1 in Hkv. rewrite /fmap_to_Prop identityE. destruct (k =P x); subst; auto. rewrite Hxt in Hkv; inverts Hkv. - rewrite /α_equivalent /= in Hα. rewrite /α_equivalent FV_after_substitute //=. replace (1__(FV t :\ x ∪ {y})) with (((1__(FV t :\ x))⦅y,x⦆);;((1__(FV t :\ x))⦅x,y⦆)); cycle 1. { apply eq_fmap. intros k. rewrite composeE updateE !identityE in_fsetU !in_fsetD !in_fset1 /=. destruct (k =P y); subst. { rewrite /= orbT updateE identityE in_fsetD in_fset1 eq_refl //. } apply not_eq_sym, (introF eqP) in n0. destruct (k =P x); subst; auto. apply not_eq_sym, (introF eqP) in n1. destruct (k ∈ FV t) eqn:Hkt; auto. rewrite /= n1 updateE identityE in_fsetD in_fset1 eq_sym/n1 n1 Hkt /= n0 //. } apply α_equivalent'_compose with (u := t); auto. apply variable_substitution_as_α_equivalent'. auto. Qed. Lemma α_equivalent_applications : forall t1 t2 u1 u2, application t1 t2 ≡_α application u1 u2 -> t1 ≡_α u1 /\ t2 ≡_α u2. Proof. introv Hα. rewrite /α_equivalent /= in Hα. apply (rwP andP) in Hα as [Hα1 Hα2]. 
split; eapply α_equivalent'_with_behaviorally_identical_maps; try apply Hα1; try apply Hα2; introv Hxy Hxt; rewrite /fmap_to_Prop identityE in_fsetU Hxt ?orbT in Hxy; inverts Hxy; rewrite /fmap_to_Prop identityE Hxt //. Qed. Lemma α_equivalent_variables : forall x y, variable x ≡_α variable y -> x = y. Proof. introv Hα. rewrite /α_equivalent /= in Hα. apply (rwP getmP) in Hα. rewrite identityE in_fset1 eq_refl in Hα. inverts Hα. auto. Qed. Theorem α_equivalent_correct : forall t u, t ≡_α u <-> t =_α u. Proof. introv. split; introv Hα. - gen u. induction t using term_depth_ind; intros; destruct t, u; inverts Hα; auto. + destruct (s =P s0); subst. { apply α_equivalent_abstractions in H1. constructor. apply H; auto. transitivity (t[s0⟵variable s0]); auto. symmetry. apply substitution_id. } apply not_eq_sym, (introF eqP) in n. assert (s0 ∉ FV t) as Hs0nt. { apply FV_respects_α_equivalence in H1. simpl in H1. cut (s0 ∉ FV t ∪ {s} = true). { intros Hgoal. rewrite in_fsetU in_fset1 negb_or in Hgoal. apply (rwP andP) in Hgoal as [Hs0nt Hs0ns]. auto. } rewrite -fsetU_after_fsetD -H1 in_fsetU in_fsetD !in_fset1 eq_refl n //. } apply α_equivalent_abstractions in H1. apply H in H1; cycle 1. { rewrite term_depth_lift_variable_substitution //. apply preserves_structure_update_substitution, identity_preserves_structure. } apply trad_α_trans with (abstraction s0 (t[s⟵variable s0])); auto. + apply α_equivalent_applications in H1 as [Ht Hu]. constructor; apply H; auto; rewrite ltnS leq_max leqnn ?orbT //. + apply α_equivalent_variables in H1. subst. auto. - induction Hα; simpl. + reflexivity. + rewrite /α_equivalent /= update_identity fsetU_after_fsetD. apply α_equivalent'_supermap with (R__sub := 1__(FV t)); auto. introv Hkv. rewrite /fmap_to_Prop identityE in Hkv. rewrite /fmap_to_Prop identityE in_fsetU in_fset1. destruct (k ∈ FV t) eqn:Hkt; inverts Hkv; auto. + rewrite /α_equivalent /=. rewrite <- (rwP andP). split; eapply α_equivalent'_supermap with (R__sub := 1__(FV _)); eauto; introv Hkv; rewrite /fmap_to_Prop identityE in Hkv; rewrite /fmap_to_Prop identityE in_fsetU; destruct (k ∈ FV t1) eqn:Hkt1, (k ∈ FV t2) eqn:Hkt2; rewrite ?Hkt1 ?Hkt2 // in Hkv. + destruct (v ∈ FV t) eqn:Hvt. * symmetry. rewrite /α_equivalent /=. rewrite FV_after_substitute // fsetD_after_fsetU. replace (FV t :\ v :\ v') with (FV t :\ v); cycle 1. { apply eq_fset. intros x. rewrite !in_fsetD !in_fset1. destruct (x =P v); subst. - rewrite andbF //. - destruct (x ∈ FV t) eqn:Hxt. + destruct (x =P v'); subst; auto. rewrite Hxt // in H. + destruct (x =P v'); subst; auto. } apply variable_substitution_as_α_equivalent'. auto. * transitivity (abstraction v' t); rewrite /α_equivalent /=. -- apply α_equivalent'_supermap with (R__sub := 1__(FV t)); cycle 1. { apply α_equivalent_reflexive. } intros k x Hkx. rewrite /fmap_to_Prop identityE in Hkx. rewrite /fmap_to_Prop updateE identityE in_fsetD !in_fset1 /=. destruct (k =P v); subst. { rewrite Hvt // in Hkx. } destruct (k ∈ FV t) eqn:Hkt; inverts Hkx. simpl. destruct (v' =P x); subst; auto. rewrite Hkt // in H. -- rewrite /α_equivalent /= update_identity fsetU_after_fsetD. apply α_equivalent'_supermap with (R__sub := 1__(FV t)); cycle 1. { apply α_equivalent_symmetric, substitution_law1. rewrite Hvt //. } intros k x Hkx. rewrite /fmap_to_Prop identityE in Hkx. rewrite /fmap_to_Prop identityE in_fsetU in_fset1. destruct (k ∈ FV t) eqn:Hkt; inverts Hkx. destruct (x =P v'); subst; auto. + transitivity u; auto. Qed. Lemma trad_α_reflexive : forall t, t =_α t. Proof. introv. 
apply α_equivalent_correct. reflexivity. Qed. Lemma trad_α_symmetric : forall t u, t =_α u -> u =_α t. Proof. intros t u Hα. apply α_equivalent_correct. symmetry. apply α_equivalent_correct. auto. Qed. Lemma FV_lift_substitution'_subset_codomm_Tm_set : forall Fresh f t, Fresh_correct Fresh -> FV t ⊆ domm f -> FV (`⦇f⦈ Fresh t) ⊆ codomm_Tm_set f. Proof. introv HFresh Htf. apply (rwP fsubsetP). introv Hxt. rewrite FV_lift_substitution' // in_bigcup in Hxt. apply (rwP hasP) in Hxt as [u Hu]. apply (rwP pimfsetP) in Hu as [k Hk]. apply (rwP codomm_Tm_setP). exists u. split; auto. apply (rwP codommP). eauto. Qed. Lemma FV_lift_substitution_subset_codomm_Tm_set : forall f t, FV t ⊆ domm f -> FV (⦇f⦈ t) ⊆ codomm_Tm_set f. Proof. introv Htf. apply FV_lift_substitution'_subset_codomm_Tm_set; auto. apply HFresh. Qed. Lemma lift_substitution'_disjoint_update_substitution : forall Fresh f x y t, Fresh_correct Fresh -> FV t ⊆ domm f -> getm f x = Some (variable x) -> x ∉ codomm_Tm_set (remm f x) -> y ∉ codomm_Tm_set f -> `⦇f[x,variable y]⦈ Fresh t ≡_α (`⦇f⦈ Fresh t)`[x⟵variable y]Fresh. Proof. introv HFresh Htf Hfx Hxnℛf Hynℛf. transitivity ((`⦇(1__(codomm_Tm_set f))[x,variable y]⦈ Fresh0 ∘ `⦇f⦈ Fresh0) t : term); cycle 1. { apply lift_substitution'_indistinguishable_substitutions; auto. { rewrite /Tm /in_mem /= !domm_update_substitution !domm_identity' fsubsetI !fsubsetU //. - rewrite fsubsetxx //. - rewrite FV_lift_substitution'_subset_codomm_Tm_set //. } intros z Hzft. rewrite /update_substitution !setmE !ηE Hzft. destruct (z =P x); subst. { reflexivity. } pose proof @FV_lift_substitution'_subset_codomm_Tm_set Fresh0 f t HFresh Htf as Hft. apply (rwP fsubsetP) in Hft. apply Hft in Hzft. rewrite Hzft. reflexivity. } transitivity (`⦇`⦇(1__(codomm_Tm_set f))[x,variable y]⦈ Fresh0 ∘ f⦈ Fresh0 t : term); cycle 1. { symmetry. apply monad_substitution'3; auto. rewrite domm_update_substitution domm_identity' fsubsetU // fsubsetxx //. } apply lift_substitution'_indistinguishable_substitutions; auto. { rewrite /Tm /in_mem /= domm_map !domm_update_substitution fsubsetI fsubsetU Htf //. } intros z Hzt. rewrite /update_substitution mapmE setmE. destruct (z =P x); subst. { apply (rwP fsubsetP) in Htf. apply Htf, (rwP dommP) in Hzt as [u Hxu]. rewrite Hxu in Hfx. inverts Hfx. rewrite Hxu /= setmE eq_refl. reflexivity. } destruct (getm f z) eqn:Hfz; cycle 1. { reflexivity. } transitivity (`⦇η__(FV t0)⦈ Fresh0 t0). { symmetry. apply monad_substitution'1; auto. rewrite /Tm /in_mem /= fsubsetxx //. } apply lift_substitution'_indistinguishable_substitutions; auto. - rewrite /Tm /in_mem /= domm_set !domm_map !domm_mkfmapf fsubsetI fsubsetU fsvalK. { rewrite fsubsetxx //. } apply (rwP orP). right. apply (rwP fsubsetP). intros k Hk. apply (rwP codomm_Tm_setP). exists t0. split; auto. apply (rwP codommP). eauto. - intros k Hk. rewrite setmE !mapmE !identityE Hk /=. destruct (k =P x); subst. { rewrite <- (rwP codomm_Tm_setPn) in Hxnℛf. exfalso. apply Hxnℛf with t0. split; auto. apply (rwP codommP). exists z. apply (introF eqP) in n. rewrite remmE n //. } assert (k ∈ codomm_Tm_set f) as Hkℛf. { apply (rwP codomm_Tm_setP). exists t0. split; auto. apply (rwP codommP). eauto. } rewrite Hkℛf /=. reflexivity. Qed. Lemma lift_substitution'_disjoint_update_substitution' : forall Fresh f x y t, Fresh_correct Fresh -> (FV t :\ x) ⊆ domm f -> x ∉ codomm_Tm_set f -> y ∉ codomm_Tm_set f -> x <> y -> `⦇f[x,variable y]⦈ Fresh t ≡_α (`⦇f[x,variable x]⦈ Fresh t)`[x⟵variable y]Fresh. Proof. introv HFresh Ht'f Hxnℛf Hynℛf Hxny. 
replace (f[x,variable y]) with (f[x,variable x][x,variable y]); cycle 1. { rewrite update_substitution_overwrite //. } apply lift_substitution'_disjoint_update_substitution; auto. - rewrite domm_update_substitution. apply (rwP fsubsetP). intros k Hkt. rewrite in_fsetU in_fset1 orbC. destruct (k =P x); subst; auto. apply (introF eqP) in n. apply (rwP fsubsetP) in Ht'f. apply Ht'f. rewrite in_fsetD in_fset1 n Hkt //. - rewrite setmE eq_refl //. - apply (rwP codomm_Tm_setPn). intros u [Hxu Hf'u]. apply (rwP codommP) in Hf'u as [k Hf'k]. rewrite remmE setmE in Hf'k. destruct (k =P x); subst. { inverts Hf'k. } rewrite <- (rwP codomm_Tm_setPn) in Hxnℛf. apply Hxnℛf with u. split; auto. apply (rwP codommP). eauto. - apply (rwP codomm_Tm_setPn). intros u [Hyu Hf'u]. rewrite codomm_setmE in_fsetU in_fset1 in Hf'u. apply (rwP orP) in Hf'u as [Hu|Hf'u]. + apply (rwP eqP) in Hu. subst. rewrite in_fset1 in Hyu. apply (rwP eqP) in Hyu. subst. auto. + apply (rwP codommP) in Hf'u as [k Hf'k]. rewrite remmE in Hf'k. destruct (k =P x); subst. { inverts Hf'k. } rewrite <- (rwP codomm_Tm_setPn) in Hynℛf. apply Hynℛf with u. split; auto. apply (rwP codommP). eauto. Qed. Lemma lift_substitution_disjoint_update_substitution' : forall f x y t, (FV t :\ x) ⊆ domm f -> x ∉ codomm_Tm_set f -> y ∉ codomm_Tm_set f -> x <> y -> ⦇f[x,variable y]⦈ t ≡_α (⦇f[x,variable x]⦈ t)[x⟵variable y]. Proof. introv Ht'f Hxnℛf Hynℛf Hxny. apply lift_substitution'_disjoint_update_substitution'; auto. apply HFresh. Qed. Lemma α_equivalent_abstractions_respects_α_equivalence : forall t u x, t ≡_α u -> abstraction x t ≡_α abstraction x u. Proof. introv Hα. apply α_equivalent_correct. constructor. apply α_equivalent_correct. auto. Qed. Lemma α_equivalent'_morphl : forall R t u u', t ≡_α u -> t ≡_α^R u' = u ≡_α^R u'. Proof. intros R. assert (forall t t' u, t ≡_α t' -> t ≡_α^R u -> t' ≡_α^R u) as H. { introv Hα' Hα. replace R with ((1__(domm R));;R); cycle 1. { apply compose_identity_left. } apply α_equivalent'_compose with (u := t); auto. apply α_equivalent'_supermap with (R__sub := 1__(FV t')). - introv Hkv. rewrite /fmap_to_Prop identityE in Hkv. rewrite /fmap_to_Prop identityE. destruct (k ∈ FV t') eqn:Hkt; inverts Hkv. apply α_equivalent'_bijection_includes_all_FV in Hα; auto. rewrite /Tm /in_mem /= in Hα. apply (rwP fsubsetP) in Hα. apply FV_respects_α_equivalence in Hα'. rewrite Hα' in Hkt. apply Hα in Hkt. rewrite Hkt //. - apply α_equivalent_symmetric. auto. } intros t t' Hα u. apply Bool.eq_iff_eq_true; split; introv Hα'; eapply H; eauto. symmetry. auto. Qed. Lemma α_equivalent'_morphr : forall R t u u', partial_bijection R -> u ≡_α u' -> t ≡_α^R u = t ≡_α^R u'. Proof. introv HRinj Hα'. apply Bool.eq_iff_eq_true. split; introv Hα; (apply α_equivalent_symmetric in Hα' + idtac); rewrite α_equivalent'_converse' //; erewrite α_equivalent'_morphl; eauto; apply α_equivalent'_converse; auto. Qed. Add Parametric Morphism R (HRinj : partial_bijection R) : (α_equivalent' R) with signature α_equivalent ==> α_equivalent ==> eq as α_equivalent'_morph. Proof. intros t u Hα t' u' Hα'. apply Bool.eq_iff_eq_true. split; introv Hαt'. - setoid_rewrite α_equivalent'_morphl with (u := t); auto; cycle 1. { symmetry. auto. } setoid_rewrite α_equivalent'_morphr with (t := t); eauto. symmetry. auto. - setoid_rewrite α_equivalent'_morphr with (t := t); eauto. setoid_rewrite α_equivalent'_morphl with (u := u); eauto. Qed. 
Lemma lift_substitution'_abstractions_wedge : forall Fresh f t x y z, Fresh_correct Fresh -> FV t ⊆ (domm f ∪ {z}) -> x ∉ codomm_Tm_set f -> y ∉ codomm_Tm_set f -> abstraction x (`⦇f[z,variable x]⦈ Fresh t) ≡_α abstraction y (`⦇f[z,variable y]⦈ Fresh t). Proof. introv HFresh Htfz Hx Hy. destruct (x =P y); subst. { reflexivity. } rewrite /α_equivalent /=. apply (rwP fsubsetP) in Htfz. apply lift_substitution'_indistinguishable_substitutions'; auto. - rewrite /Tm /in_mem /=. apply (rwP fsubsetP). intros k Hkt. rewrite in_fsetI !domm_update_substitution Bool.andb_diag Htfz //. - intros k Hkt. rewrite !setmE. pose proof Hkt as Hkt'. apply Htfz in Hkt. rewrite in_fsetU in_fset1 orbC in Hkt. destruct (k =P z); subst. { apply (rwP getmP). rewrite updateE eq_refl //. } apply (introF eqP) in n0. apply (rwP dommP) in Hkt as [u Hfk]. rewrite Hfk /=. assert (forall x, x ∉ codomm_Tm_set f -> x ∉ FV u) as Hnu. { intros x' Hx'. rewrite <- (rwP codomm_Tm_setPn) in Hx'. apply (rwP negP). introv Hx'u. apply Hx' with u. split; auto. apply (rwP codommP). eauto. } apply Hnu in Hx, Hy. apply α_equivalent_update'; auto. apply α_equivalent'_supermap with (R__sub := 1__(FV u)); cycle 1. { apply α_equivalent_reflexive. } intros k' v Hk'v. rewrite /fmap_to_Prop identityE in Hk'v. rewrite /fmap_to_Prop identityE in_fsetD in_fset1. destruct (k' ∈ FV u) eqn:Hk'u; inverts Hk'v. destruct (v =P x); subst. { rewrite Hk'u // in Hx. } destruct (v ∈ FV (`⦇f[z,variable x]⦈ Fresh0 t)) eqn:Hvf't; auto. rewrite FV_lift_substitution' // in Hvf't; cycle 1. { rewrite /Tm /in_mem /= domm_update_substitution. apply (rwP fsubsetP). auto. } rewrite in_bigcup in Hvf't. apply negbT in Hvf't. apply (rwP hasPn) in Hvf't. assert (u ∈ pimfset (getm (f[z,variable x])) (FV t)) as Hvnu. { apply (rwP pimfsetP). exists k; auto. rewrite setmE n0 //. } apply Hvf't in Hvnu. rewrite Hk'u // in Hvnu. Qed. Lemma lift_substitution'_independent_of_Fresh' : forall Fresh' f t, Fresh_correct Fresh' -> FV t ⊆ domm f -> `⦇f⦈ Fresh' t ≡_α ⦇f⦈ t. Proof. introv HFresh' Hft. gen t f. elim/term_depth_ind; intros; destruct t; simpl in *. - destruct (Fresh' (codomm_Tm_set f) =P Fresh (codomm_Tm_set f)). + rewrite e. apply α_equivalent_abstractions_respects_α_equivalence. apply H; auto. rewrite domm_update_substitution. rewrite <- (rwP fsubsetP). intros x Hx. rewrite in_fsetU in_fset1 orbC. destruct (x =P s); subst; auto. apply (introF eqP) in n. apply (rwP fsubsetP) in Hft. apply Hft. rewrite in_fsetD in_fset1 Hx n //. + assert (FV t ⊆ (domm f ∪ {s})) as Htfs. { apply (rwP fsubsetP) in Hft. rewrite <- (rwP fsubsetP). intros x Hxt. rewrite in_fsetU in_fset1 orbC. destruct (x =P s); subst; auto. apply (introF eqP) in n0. apply Hft. rewrite in_fsetD in_fset1 Hxt n0 //. } rewrite /lift_substitution /=. transitivity (abstraction (Fresh' (codomm_Tm_set f)) (⦇f[s,variable (Fresh' (codomm_Tm_set f))]⦈ t)). { apply α_equivalent_abstractions_respects_α_equivalence, H; auto. rewrite domm_update_substitution //. } apply lift_substitution'_abstractions_wedge; auto; apply HFresh. - rewrite /α_equivalent /=. rewrite <- (rwP andP). split. + apply α_equivalent'_supermap with (R__sub := 1__(FV (`⦇f⦈ Fresh' t1))). * introv Hkv. rewrite /fmap_to_Prop identityE in Hkv. rewrite /fmap_to_Prop identityE in_fsetU. destruct (k ∈ FV (`⦇f⦈ Fresh' t1)) eqn:Hkft; inverts Hkv. auto. * apply H. -- rewrite ltnS leq_max leqnn //. -- rewrite fsubUset in Hft. apply (rwP andP) in Hft as [Ht1f Ht2f]. auto. + apply α_equivalent'_supermap with (R__sub := 1__(FV (`⦇f⦈ Fresh' t2))). * introv Hkv. 
rewrite /fmap_to_Prop identityE in Hkv. rewrite /fmap_to_Prop identityE in_fsetU. destruct (k ∈ FV (`⦇f⦈ Fresh' t2)) eqn:Hkft; inverts Hkv. rewrite orbC //. * apply H. -- rewrite ltnS leq_max leqnn orbC //. -- rewrite fsubUset in Hft. apply (rwP andP) in Hft as [Ht1f Ht2f]. auto. - rewrite fsub1set in Hft. apply (rwP dommP) in Hft as [k Hk]. rewrite /lift_substitution /= Hk. reflexivity. Qed. Lemma lift_substitution'_independent_of_Fresh : forall Fresh1 Fresh2 f t, Fresh_correct Fresh1 -> Fresh_correct Fresh2 -> FV t ⊆ domm f -> `⦇f⦈ Fresh1 t ≡_α `⦇f⦈ Fresh2 t. Proof. introv HFresh1 HFresh2 Hft. transitivity (⦇f⦈ t). { apply lift_substitution'_independent_of_Fresh'; auto. } symmetry. apply lift_substitution'_independent_of_Fresh'; auto. Qed. Inductive Lift (A : Type) : Type := | new | old (x : A). Arguments new {_}. Arguments old {_} _. Definition Lift_map (A B : Type) (f : A -> B) (x : Lift A) : Lift B := match x with | new => new | old x => old (f x) end. Inductive Lam (A : Type) : Type := | var (x : A) : Lam A | app (t : Lam A) (u : Lam A) : Lam A | abs (t : Lam (Lift A)) : Lam A. Fixpoint Lam_map (X Y : Type) (f : X -> Y) (t : Lam X) : Lam Y := match t with | var x => var (f x) | app t u => app (Lam_map f t) (Lam_map f u) | abs t => abs (Lam_map (Lift_map f) t) end. Definition lift (X Y : Type) (f : X -> Lam Y) (x : Lift X) : Lam (Lift Y) := match x with | new => var new | old x => Lam_map old (f x) end. Fixpoint bind (X Y : Type) (f : X -> Lam Y) (t : Lam X) : Lam Y := match t with | var x => f x | app s t => app (bind f s) (bind f t) | abs t => abs (bind (lift f) t) end. Lemma Lift_map_id : forall A : Type, @Lift_map A A id = id. Proof. intros. apply functional_extensionality. intros. destruct x; auto. Qed. Lemma Lift_map_linear : forall (A B C : Type) (f : A -> B) (g : B -> C), Lift_map (g ∘ f) = Lift_map g ∘ Lift_map f. Proof. intros. apply functional_extensionality. destruct x; auto. Qed. Lemma Lam_map_id : forall (A : Type) (t : Lam A), Lam_map id t = t. Proof. intros. induction t; simpl; f_equal; auto. rewrite Lift_map_id //. Qed. Lemma Lam_map_linear : forall (A B C : Type) (f : A -> B) (g : B -> C), Lam_map (g ∘ f) = Lam_map g ∘ Lam_map f. Proof. intros. apply functional_extensionality. intros. gen B C f g. induction x; intros; simpl; f_equal. - rewrite IHx1 //. - rewrite IHx2 //. - rewrite Lift_map_linear IHx //. Qed. Lemma bind_var : forall (X : Type) (t : Lam X), bind (@var X) t = t. Proof. intros. induction t; simpl; auto. - rewrite IHt1 IHt2 //. - rewrite <- IHt at 2. repeat f_equal. apply functional_extensionality. destruct x; auto. Qed. Lemma bind_comp1 : forall (A B C : Type) (f : B -> C) (g : A -> Lam B), Lam_map f ∘ bind g = bind (Lam_map f ∘ g). Proof. intros. apply functional_extensionality. intros. simpl. gen B C f g. induction x; intros; simpl; f_equal; auto. rewrite IHx. f_equal. apply functional_extensionality. intros y. destruct y as [|y]; simpl; auto. change ((Lam_map (Lift_map f) ∘ Lam_map old) (g y) = Lam_map old (Lam_map f (g y))). rewrite -Lam_map_linear. change (Lam_map (Lift_map f ∘ old) (g y) = (Lam_map old ∘ Lam_map f) (g y)). rewrite -Lam_map_linear //. Qed. Lemma bind_comp2 : forall (A B C : Type) (f : B -> Lam C) (g : A -> B), bind f ∘ Lam_map g = bind (f ∘ g). Proof. intros. apply functional_extensionality. intros. simpl. gen B C f g. induction x; intros; simpl; f_equal; auto. rewrite IHx. f_equal. apply functional_extensionality. intros y. destruct y as [|y]; simpl; auto. Qed. 
Lemma bind_comp3 : forall (A B : Type) (g : A -> Lam B), bind (lift g) ∘ Lam_map old = Lam_map old ∘ bind g. Proof. intros. rewrite bind_comp1 bind_comp2 //. Qed. Lemma bind_comp : forall (A B C : Type) (f : A -> Lam B) (g : B -> Lam C), bind g ∘ bind f = bind (bind g ∘ f). Proof. intros. apply functional_extensionality. intros. simpl. gen B C f g. induction x; intros; simpl; f_equal; auto. rewrite IHx. f_equal. apply functional_extensionality. intros y. destruct y as [|y]; simpl; auto. change (bind (lift g) (lift f (old y)) = (Lam_map old ∘ bind g) (f y)). rewrite -bind_comp3 //. Qed. Definition subst (A : Type) (t : Lam (Lift A)) (s : Lam A) : Lam A := bind (fun t : Lift A => match t with | new => s | old x => var x end) t. Definition weak (A : Type) : Lam A -> Lam (Lift A) := bind (@var (Lift A) ∘ old). Lemma subst_weak : forall (A : Type) (t u : Lam A), subst (weak t) u = t. Proof. rewrite /subst /weak /=. intros. change ((bind (fun t : Lift A => match t with | new => u | old x => var x end) ∘ bind (@var (Lift A) ∘ old)) t = t). rewrite bind_comp /= bind_var //. Qed. (* See https://hackage.haskell.org/package/bound-2.0.5/docs/src/Bound.Term.html#substitute. *) Definition subst' (A : eqType) (x : A) (s : Lam A) : Lam A -> Lam A := bind (fun y : A => if x == y then s else var y). Fixpoint dbt (t : de_Bruijn_term) : Lam nat := match t with | de_Bruijn_abstraction t => abs (weak (dbt t)) | de_Bruijn_application t1 t2 => app (dbt t1) (dbt t2) | de_Bruijn_variable x => var x end. (** Page 8: "I leave it to the reader to show that -^ϕ preserves substitution, i.e. it maps substitutions to named terms as given here to substitution on de Bruijn terms." This is the only main result not yet formalized. *) Lemma TAPL_6_2_8 : forall ϕ t u x, (FV t ∪ FV u ∪ {x}) ⊆ domm ϕ -> is_injective ϕ -> dbt ((t[x⟵u])^ϕ) = subst' _ (odflt 0 (getm ϕ x)) (dbt (u^ϕ)) (dbt (t^ϕ)). Proof. unfold subst'. intros. gen ϕ u. induction t using term_depth_ind; destruct t; intros; simpl in *; f_equal. - unfold weak. change (bind (fun a : nat => var (old a)) (dbt (`⦇((1__(FV t :\ s))[x,u])[s,(variable (Fresh (codomm_Tm_set ((1__(FV t :\ s))[x,u]))))]⦈ Fresh t^ ϕ ^+ Fresh (codomm_Tm_set ((1__(FV t :\ s))[x,u])))) = (bind (lift (fun y : nat => if odflt 0 (getm ϕ x) == y then dbt (u^ϕ) else var y)) ∘ bind (fun a : nat => var (old a))) (dbt (t^ϕ ^+ s))). rewrite bind_comp /=. induction t; intros; simpl; unfold weak; simpl; f_equal; auto. - change (bind (fun a : nat => var (old a)) (dbt (`⦇((1__(FV t :\ s))[x,u])[s,(variable (Fresh (codomm_Tm_set ((1__(FV t :\ s))[x,u]))))]⦈ Fresh t^ ϕ ^+ Fresh (codomm_Tm_set ((1__(FV t :\ s))[x,u])))) = (bind (lift (fun y : nat => if odflt 0 (getm ϕ x) == y then dbt (u^ϕ) else var y)) ∘ bind (fun a : nat => var (old a))) (dbt (t^ϕ ^+ s))). rewrite bind_comp. simpl. destruct (getm ϕ x) eqn:?; simpl; auto. (* The remaining goal is the unformalized part of this result noted above, so the proof is left admitted rather than ending with an empty bullet and [Qed.]. *) Admitted. End AlphaFacts.
Rebol [ Title: "RSP Preprocessor" Author: "Christopher Ross-Gill" Date: 13-Aug-2013 File: %rsp.r Version: 0.4.1 Purpose: "Rebol-based hypertext pre-processor" Rights: http://opensource.org/licenses/Apache-2.0 Type: 'module Name: 'rgchris.rsp Exports: [sanitize load-rsp render render-each] History: [ 17-Jan-2017 0.4.1 "Updated Unicode/UTF8 handling" 13-Aug-2013 0.4.0 "Extracted from the QuarterMaster web framework" ] Notes: { <% ... "evaluate Rebol code" [ ... %><% ... ] %> <%= ... "evaluate Rebol code and emit the product" ... %> <%== ... "evaluate Rebol code, sanitize and emit the product" ... %> <%! ... "pass contents to COMPOSE then BUILD-TAG and emit" ... %> } ] sanitize: use [html* utf-8 decode-utf][ ascii: exclude charset ["^/^-" #"^(20)" - #"^(7E)"] charset {&<>"} html*: exclude ascii charset {&<>"} utf-8: use [utf-2 utf-3 utf-4 utf-b][ ; probably need to adapt the rule from below... utf-2: charset [#"^(C2)" - #"^(DF)"] utf-3: charset [#"^(E0)" - #"^(EF)"] utf-4: charset [#"^(F0)" - #"^(F4)"] utf-b: charset [#"^(80)" - #"^(BF)"] [utf-2 utf-b | utf-3 2 utf-b | utf-4 3 utf-b] ] decode-utf8: use [ utf-2 utf-3 utf-3-low utf-4 utf-4-low utf-4-high utf-b utf-x1 utf-x2 utf-x3 bounds out ][ ; U+000080..U+0007FF _____________ C2..DF 80..BF ; U+000800..U+000FFF __________ E0 A0..BF 80..BF ; U+001000..U+00FFFF ______ E1..EF 80..BF 80..BF ; U+010000..U+03FFFF ___ F0 90..BF 80..BF 80..BF ; U+040000..U+0FFFFF F1..F3 80..BF 80..BF 80..BF ; U+100000..U+10FFFF ___ F4 80..8F 80..BF 80..BF utf-2: charset [#"^(C2)" - #"^(DF)"] utf-3-low: charset [#"^(A0)" - #"^(BF)"] utf-3: charset [#"^(E1)" - #"^(EF)"] utf-4-low: charset [#"^(90)" - #"^(BF)"] utf-4-high: charset [#"^(80)" - #"^(8F)"] utf-4: charset [#"^(F1)" - #"^(F3)"] utf-b: charset [#"^(80)" - #"^(BF)"] utf-x1: charset [#"^(A0)" - #"^(BF)"] utf-x2: charset [#"^(90)" - #"^(AF)"] utf-x3: charset [#"^(8F)" #"^(9F)" #"^(AF)" #"^(BF)"] func [char [string! binary!] /strict][ bounds: [0 0] out: -1 any [ all [ any [ parse/all char: as-binary char [ ; Test for invalid sequences first [ ; invalid U+D800 - U+DFFF ; UTF-8 Surrogates #"^(ED)" utf-x1 utf-b | ; invalid U+FDD0 - U+FDEF ; ??? #"^(EF)" #"^(B7)" utf-x2 | ; invalid U+nFFFE - U+nFFFF ; Troublesome UTF-16 sequences [#"^(EF)" | [#"^(F0)" | utf-4] utf-x3] #"^(BF)" [#"^(BE)" | #"^(BF)"] ] | utf-2 utf-b ( bounds: [127 2048] out: char/1 xor 192 * 64 + (char/2 xor 128) ) | [ #"^(E0)" utf-3-low utf-b (bounds: [2047 4096]) | utf-3 2 utf-b (bounds: [4095 65534]) ] ( out: char/1 xor 224 * 4096 + (char/2 xor 128 * 64) + (char/3 xor 128) ) | [ #"^(F0)" utf-4-low 2 utf-b (bounds: [65535 262144]) | utf-4 3 utf-b (bounds: [262143 1048576]) | #"^(F4)" utf-4-high 2 utf-b (bounds: [1048575 1114112]) ] ( out: char/1 xor 240 * 262144 + (char/2 xor 128 * 4096) + (char/3 xor 128 * 64) + (char/4 xor 128) ) ] not strict ] out > bounds/1 out < bounds/2 out ] 65533 ; Unknown character ] ] ] sanitize: func [text [any-string!] /local char][ parse/all copy text [ copy text any [ text: some html* | #"&" (text: change/part text "&amp;" 1) :text | #"<" (text: change/part text "&lt;" 1) :text | #">" (text: change/part text "&gt;" 1) :text | #"^"" (text: change/part text "&quot;" 1) :text | #"^M" (remove text) :text | copy char utf-8 (text: change/part text rejoin ["&#" decode-utf8 char ";"] length? 
char) | skip (text: change/part text rejoin ["#(" to-integer text/1 ")"] 1) :text ; | skip (text: change text "#") :text ] ] any [text ""] ] ] load-rsp: use [prototype to-set-block][ prototype: context [ out*: "" prin: func [val][repend out* val] print: func [val][prin val prin newline] ] to-set-block: func [block [block! object!] /local word][ either object? block [block: third block][ parse copy block [ (block: copy []) any [set word word! (repend block [to-set-word word get/any word])] ] ] block ] func [[catch] body [string!] /local code mk][ code: make string! length? body append code "^/out*: make string! {}^/" parse/all body [ any [ end (append code "out*") break | "<%" [ "==" copy mk to "%>" (repend code ["prin sanitize form (" mk "^/)^/"]) | "=" copy mk to "%>" (repend code ["prin (" mk "^/)^/"]) | [#":" | #"!"] copy mk to "%>" (repend code ["prin build-tag [" mk "^/]^/"]) | copy mk to "%>" (repend code [mk newline]) | (throw make error! "Expected '%>'") ] 2 skip | copy mk [to "<%" | to end] (repend code ["prin " mold mk "^/"]) ] ] func [args [block! object!]] compose/only [ args: make prototype to-set-block args do bind/copy (throw-on-error [load code]) args ] ] ] render: use [depth*][ depth*: 0 ;-- to break recursion func [ [catch] rsp [file! url! string!] /with locals [block! object!] ][ if depth* > 20 [return ""] depth*: depth* + 1 rsp: case/all [ file? rsp [rsp: read rsp] url? rsp [rsp: read rsp] binary? rsp [rsp: to string! rsp] string? rsp [ throw-on-error [rsp: load-rsp rsp] throw-on-error [rsp any [locals []]] ] ] depth*: depth* - 1 rsp ] ] render-each: func [ 'items [word! block!] source [series!] body [file! url! string!] /with locals /local out ][ out: copy "" locals: append any [locals []] items: compose [(items)] foreach :items source compose/only [ append out render/with body (locals) ] return out ]
lemma isCont_inverse_function: fixes f g :: "real \<Rightarrow> real" assumes d: "0 < d" and inj: "\<And>z. \<bar>z-x\<bar> \<le> d \<Longrightarrow> g (f z) = z" and cont: "\<And>z. \<bar>z-x\<bar> \<le> d \<Longrightarrow> isCont f z" shows "isCont g (f x)"
[STATEMENT] lemma pickEff_the[code]: "pickEff r s = the (eff' r s)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. pickEff r s = the (eff' r s) [PROOF STEP] unfolding pickEff_def enabled_def effG_def eff_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. (if \<exists>sl. eff' r s = Some sl then SOME sl. eff' r s = Some sl else the None) = the (eff' r s) [PROOF STEP] by auto
module Stride

export stride_run, stride_update_path, stride_parse, exists_in_path

type Stride_guts
    stride_path :: AbstractString
    delete_tmp_files :: Bool
end

"""
SSDataResidue holds the information related to the secondary structure
associated with a residue. The fields are as follows:

| Field Name | Field Type | Description |
| --- | --- | --- |
| name | AbstractString | The 3 letter residue name |
| ss_type | Char | The one letter secondary structure |
| ss_type_full | AbstractString | Full SS name |
| phi | Float64 | The phi dihedral angle |
| psi | Float64 | The psi dihedral angle |
| area | Float64 | Solvent accessible area |

The secondary structure codes follow the
[DSSP8 standard](https://zhanglab.ccmb.med.umich.edu/literature/dssp.pdf)
and are defined as follows:

| Code | Secondary Structure |
| --- | --- |
| H | Alpha Helix |
| B | Beta Bridge |
| E | Strand |
| T | H Bounded Turn |
| I | Pi Helix |
| G | 3/10 Helix |
| S | Bend |
| C | Random Coil |
"""
type SSDataResidue
    name :: AbstractString
    ss_type :: Char
    ss_type_full :: AbstractString
    phi :: Float64
    psi :: Float64
    area :: Float64
end

"""
SSData holds the information related to a given protein
"""
type SSData
    chain :: Array{SSDataResidue, 1}
end

stride_state = Stride_guts("", true)

"""
    stride_run(in_path :: AbstractString; out_path::AbstractString="")

Runs Stride for a given PDB file. If out_path is set, then the temporary
Stride file generated will be kept. The function returns an SSData object.
"""
function stride_run(in_path :: AbstractString; out_path::AbstractString="")
    if length(stride_state.stride_path) == 0
        if exists_in_path("stride")
            stride_path = "stride"
        else
            error("Stride not found in PATH!\nPlease add stride to PATH" *
                  " or run stride_update_path(\"pathtostride\")")
        end
    else
        stride_path = stride_state.stride_path
    end
    if out_path == ""
        out_path = tempname()
    else
        stride_state.delete_tmp_files = false
    end
    args = [in_path, "-f$(out_path)"]
    cmd_string = `$(stride_path) $args`
    run(cmd_string)
    ss = stride_parse(out_path)
    if stride_state.delete_tmp_files
        rm(out_path)
    end
    return ss
end

"""
    stride_update_path(path :: AbstractString)

stride_update_path can be used to change the path where the package will
look for the Stride binary.
"""
function stride_update_path(path :: AbstractString)
    stride_state.stride_path = path
end

"""
    stride_delete_tmp_files(delete :: Bool)

stride_delete_tmp_files receives a boolean value that determines whether the
temporary files will be deleted after use. The default option is to delete them.
"""
function stride_delete_tmp_files(delete :: Bool)
    stride_state.delete_tmp_files = delete
end

"""
stride_parse is responsible for parsing the output of Stride into an SSData object
"""
function stride_parse(path :: AbstractString)
    data = readdlm(path)
    nlines, ncolumns = size(data)
    ss = SSData([])
    for i in 1:nlines
        if data[i] == "REM"
            continue
        elseif data[i] == "LOC"
            continue
        elseif data[i] == "ASG"
            push!(ss.chain, SSDataResidue(data[i, 2],
                                          data[i, 6][1],
                                          data[i, 7],
                                          data[i, 8],
                                          data[i, 9],
                                          data[i, 10]))
        end
    end
    return ss
end

"""
    exists_in_path(prog::String)

Determine whether the given program name or path is executable using the
current user's permissions. This is roughly equivalent to querying
`which program` at the command line and checking that a result is found,
but no shelling out occurs.
""" function exists_in_path(prog::String) # Code taken from Alex Arslan with permision X_OK = 1 << 0 # Taken from unistd.h # If prog has a slash, we know the user wants to determine whether the given # file exists and is executable if '/' in prog isfile(prog) || return false return ccall(:access, Cint, (Ptr{UInt8}, Cint), prog, X_OK) == 0 end path = get(ENV, "PATH", "") # Something is definitely wrong if the user's path is empty... @assert !isempty(path) sep = ':' for dir in split(path, sep), file in readdir(dir) if file == prog p = joinpath(dir, file) @assert isfile(p) return ccall(:access, Cint, (Ptr{UInt8}, Cint), p, X_OK) == 0 end end false end isexecutable(prog::String) = isexecutable(Sys.KERNEL, prog) end
# Introduction to numerical optimization methods (Yu. E. Nesterov, Introduction to Convex Optimization, ch. 1 $\S$ 1.1)

1. Overview of the spring semester material
2. Problem statement
3. General solution scheme
4. Comparison of optimization methods

## Overview of the spring semester material

Also available on the [course page](https://github.com/amkatrutsa/MIPT-Opt/blob/master/Spring2021/README.md).

1. Methods for **unconstrained** optimization problems
    - Gradient descent and ways to accelerate it
    - Newton's method
    - Quasi-Newton methods
    - Conjugate gradient method
    - Solving the least-squares problem
    - Derivative-free methods
    - Stochastic methods
2. Methods for **constrained** optimization problems
    - Gradient projection and conditional gradient methods
    - Proximal methods
    - Penalty and barrier function methods
    - Augmented Lagrangian method

## Problem statement

\begin{equation}
\begin{split}
& \min_{x \in S} f_0(x)\\
\text{s.t. } & f_j(x) = 0, \; j = 1,\ldots,m\\
& g_k(x) \leq 0, \; k = 1,\ldots,p
\end{split}
\end{equation}

where $S \subseteq \mathbb{R}^n$, $f_j: S \rightarrow \mathbb{R}, \; j = 0,\ldots,m$, $g_k: S \rightarrow \mathbb{R}, \; k=1,\ldots,p$.

All functions are at least continuous.

<span style="color:red">Important fact</span>: **nonlinear** optimization problems in their most general form are **numerically unsolvable**!

## Analytical results

- First-order necessary condition: if $x^*$ is a local minimum point of a differentiable function $f(x)$, then

$$
f'(x^*) = 0
$$

- Second-order necessary condition: if $x^*$ is a local minimum point of a twice differentiable function $f(x)$, then

$$
f'(x^*) = 0 \quad \text{and} \quad f''(x^*) \succeq 0
$$

- Sufficient condition: let $f(x)$ be a twice differentiable function, and let the point $x^*$ satisfy the conditions

$$
f'(x^*) = 0 \quad f''(x^*) \succ 0,
$$

then $x^*$ is a strict local minimum point of $f(x)$.

**Remark**: make sure you understand how to prove these results!

## Specifics of the numerical solution

1. Solving the problem exactly is impossible in principle because of the errors of machine arithmetic
2. A criterion for detecting a solution has to be specified
3. One has to decide what information about the problem to use

## General iterative scheme

Given: an initial guess $x$ and a required accuracy $\varepsilon$.

```python
def GeneralScheme(x, epsilon):
    while StopCriterion(x) > epsilon:
        OracleResponse = RequestOracle(x)
        UpdateInformation(I, x, OracleResponse)
        x = NextPoint(I, x)
    return x
```

### Questions

1. What stopping criteria are possible?
2. What is an oracle and why is it needed?
3. What is the information model?
4. How is the next point computed?

#### Stopping criteria

1. Convergence in argument:

$$
\| x_k - x^* \|_2 < \varepsilon
$$

2. Convergence in function value:

$$
\| f_k - f^* \|_2 < \varepsilon
$$

3. Satisfaction of the necessary condition:

$$
\| f'(x_k) \|_2 < \varepsilon
$$

But $x^*$ is unknown! Then

\begin{align*}
& \|x_{k+1} - x_k \| = \|x_{k+1} - x_k + x^* - x^* \| \leq \\
& \|x_{k+1} - x^* \| + \| x_k - x^* \| \leq 2\varepsilon
\end{align*}

Similarly for convergence in function value; however, $f^*$ can sometimes be estimated!

**Remark**: it is better to use relative changes of these quantities! For example, $\dfrac{\|x_{k+1} - x_k \|_2}{\| x_k \|_2}$
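To make these stopping criteria concrete, here is a minimal sketch of a combined relative-change test; the function name `stopped` and the way the criteria are combined are illustrative assumptions, not part of the lecture code:

```python
import numpy as np

def stopped(x_new, x_old, grad, epsilon):
    # Relative convergence in argument (the max(..., 1.0) guards against ||x_old|| ~ 0)
    arg_conv = np.linalg.norm(x_new - x_old) <= epsilon * max(np.linalg.norm(x_old), 1.0)
    # First-order necessary condition
    grad_conv = np.linalg.norm(grad) <= epsilon
    return arg_conv or grad_conv
```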
#### What is an oracle?

**Definition**: an oracle is an abstract device that answers the method's successive queries.

An analogy from OOP:

- the oracle is a virtual method of a base class
- each problem is a derived class
- the oracle is defined for each problem separately, following the general definition in the base class

**The black-box concept**

1. The only information the iterative method receives while running is the oracle's answers
2. The oracle's answers are *local*

#### Information about the problem

1. Each oracle answer gives **local** information about the behavior of the function at a point
2. By aggregating all the oracle answers received, we update our information about the **global** shape of the objective function:
    - curvature
    - descent direction
    - etc.

#### Computing the next point

$$
x_{k+1} = x_{k} + \alpha_k h_k
$$

- **Line search**: a direction $h_k$ is fixed, and an "optimal" value of $\alpha_k$ is sought along this direction
- **Trust-region method**: an admissible size of the *region* in some norm $\| \cdot \| \leq \alpha$ is fixed, together with a *model* of the objective function that approximates it well in the chosen region. One then searches for a direction $h_k$ that minimizes the model of the objective function and does not take the point $x_k + h_k$ outside the trust region

#### Questions

1. How to choose $\alpha_k$?
2. How to choose $h_k$?
3. How to choose the model?
4. How to choose the region?
5. How to choose the size of the region?

<span style="color:red">Only line search is considered in this course!</span> However, the trust-region concept will be used several times.

## How to compare optimization methods?

For a given class of problems, the following quantities are compared:

1. Complexity
    - analytical: the number of oracle calls needed to solve the problem to accuracy $\varepsilon$
    - arithmetic: the total number of all computations needed to solve the problem to accuracy $\varepsilon$
2. Convergence rate
3. Experiments
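To tie together the black-box concept and the notion of analytical complexity above, here is a minimal first-order oracle that answers only local queries and counts how many times it is called. This is an illustrative sketch; the class name `FirstOrderOracle` and its interface are assumptions, not part of the lecture code:

```python
import numpy as np

class FirstOrderOracle:
    def __init__(self, f, grad):
        self.f, self.grad = f, grad
        self.calls = 0  # analytical complexity = number of oracle calls

    def __call__(self, x):
        self.calls += 1
        # Only local information at the query point is returned
        return self.f(x), self.grad(x)

# Example: f(x) = 0.5 * x^T A x, so f'(x) = A x
A = np.array([[2.0, 0.0], [0.0, 10.0]])
oracle = FirstOrderOracle(lambda x: 0.5 * x @ A @ x, lambda x: A @ x)
value, gradient = oracle(np.ones(2))
print(oracle.calls)  # -> 1
```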
### Convergence rates

_1._ Sublinear

$$
\| x_{k+1} - x^* \|_2 \leq C k^{\alpha},
$$

where $\alpha < 0$ and $0 < C < \infty$

_2._ Linear (geometric progression)

$$
\| x_{k+1} - x^* \|_2 \leq Cq^k,
$$

where $q \in (0, 1)$ and $0 < C < \infty$

_3._ Superlinear

$$
\| x_{k+1} - x^* \|_2 \leq Cq^{k^p},
$$

where $q \in (0, 1)$, $0 < C < \infty$ and $p > 1$

_4._ Quadratic

$$
\| x_{k+1} - x^* \|_2 \leq C\| x_k - x^* \|^2_2, \qquad \text{or} \qquad \| x_{k+1} - x^* \|_2 \leq C q^{2^k}
$$

where $q \in (0, 1)$ and $0 < C < \infty$

```python
%matplotlib inline
import matplotlib.pyplot as plt

USE_COLAB = False
if not USE_COLAB:
    plt.rc("text", usetex=True)

import numpy as np

C = 10
alpha = -0.5
q = 0.9
num_iter = 10

sublinear = np.array([C * k**alpha for k in range(1, num_iter + 1)])
linear = np.array([C * q**k for k in range(1, num_iter + 1)])
superlinear = np.array([C * q**(k**2) for k in range(1, num_iter + 1)])
quadratic = np.array([C * q**(2**k) for k in range(1, num_iter + 1)])

plt.figure(figsize=(12,8))
plt.semilogy(np.arange(1, num_iter+1), sublinear, label=r"Sublinear, $\alpha = -0.5$", linewidth=5)
plt.semilogy(np.arange(1, num_iter+1), superlinear, linewidth=5, label=r"Superlinear, $q = 0.9, p=2$")
plt.semilogy(np.arange(1, num_iter+1), linear, label=r"Linear, $q = 0.9$", linewidth=5)
plt.semilogy(np.arange(1, num_iter+1), quadratic, label=r"Quadratic, $q = 0.9$", linewidth=5)
plt.xlabel("Number of iterations, $k$", fontsize=28)
plt.ylabel("Error rate upper bound", fontsize=28)
plt.legend(loc="best", fontsize=26)
plt.xticks(fontsize = 28)
_ = plt.yticks(fontsize = 28)
```

### The value of convergence theorems (B. T. Polyak, Introduction to Optimization, ch. 1, $\S$ 6)

1. What convergence theorems give us
    - the class of problems for which one can count on the method being applicable (it is important not to overstate the assumptions!)
        - convexity
        - smoothness
    - the qualitative behavior of the method
        - whether the initial guess matters
        - in which functional the convergence holds
    - an estimate of the convergence rate
        - a theoretical estimate of the method's behavior without running experiments
        - identification of the factors that affect convergence (conditioning, dimension, etc.)
        - sometimes the number of iterations needed to reach a given accuracy can be chosen in advance
2. What convergence theorems do **NOT** give us
    - convergence of a method **says nothing** about whether applying it is worthwhile
    - convergence estimates depend on unknown constants (a non-constructive character)
    - accounting for rounding errors and for the accuracy of solving auxiliary problems

**Moral**: one should exercise reasonable caution and common sense!

## Classification of problems

1. Unconstrained optimization
    - the objective function is Lipschitz
    - the gradient of the objective function is Lipschitz
2. Constrained optimization
    - a polytope
    - a set of simple structure
    - a set of general form

## Classification of methods

### How much history has to be stored for the update?

1. One-step methods

$$
x_{k+1} = \Phi(x_k)
$$

2. Multi-step methods

$$
x_{k+1} = \Phi(x_k, x_{k-1}, ...)
$$

### What order of derivatives has to be computed?

1. Zeroth-order methods: the oracle returns only the function value $f(x)$
2. First-order methods: the oracle returns the function value $f(x)$ and its gradient $f'(x)$
3. Second-order methods: the oracle returns the function value $f(x)$, its gradient $f'(x)$ and its Hessian $f''(x)$.

**Q**: do methods of even higher order exist?

**A**: [Implementable tensor methods in unconstrained convex optimization](https://link.springer.com/content/pdf/10.1007/s10107-019-01449-1.pdf) by Y. Nesterov, 2019
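The rates defined earlier can also be checked numerically: for a linearly convergent sequence, the ratio of successive errors tends to the factor $q$. A small sketch with a synthetic error sequence (illustrative, not part of the lecture code):

```python
import numpy as np

q = 0.9
errors = np.array([10 * q**k for k in range(1, 21)])  # linear rate by construction

# For linear convergence, e_{k+1} / e_k -> q
ratios = errors[1:] / errors[:-1]
print(ratios.round(3))  # all ~0.9, recovering the factor q
```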
The sequence $x_k$ is generated by the rule

$$
x_{k+1} = x_k + \alpha_k h_k
$$

so that

$$
f(x_{k+1}) < f(x_k)
$$

The direction $h_k$ is called a *descent direction*.

**Remark**: there exist methods that do not require the function to decrease monotonically from iteration to iteration.

```python
def DescentMethod(f, x0, epsilon, **kwargs):
    # Schematic template: StopCriterion, ComputeDescentDirection and
    # SelectStepSize are problem-specific and defined elsewhere.
    x = x0
    while StopCriterion(x, f, **kwargs) > epsilon:
        h = ComputeDescentDirection(x, f, **kwargs)
        alpha = SelectStepSize(x, h, f, **kwargs)
        x = x + alpha * h
    return x
```

## Approach 1: a descent direction

Consider the linear approximation of a differentiable function $f$ along some descent direction $h$, $\|h\|_2 = 1$:

$$
f(x + \alpha h) = f(x) + \alpha \langle f'(x), h \rangle + o(\alpha)
$$

From the decrease condition

$$
f(x) + \alpha \langle f'(x), h \rangle + o(\alpha) < f(x),
$$

passing to the limit as $\alpha \rightarrow 0$:

$$
\langle f'(x), h \rangle \leq 0
$$

Also, by the Cauchy-Bunyakovsky-Schwarz inequality,

$$
\langle f'(x), h \rangle \geq -\| f'(x) \|_2 \| h \|_2 = -\| f'(x) \|_2
$$

Hence the anti-gradient direction

$$
h = -\dfrac{f'(x)}{\|f'(x)\|_2}
$$

gives the direction of **steepest local** decrease of the function $f$. The resulting method is

$$
x_{k+1} = x_k - \alpha f'(x_k)
$$

## Approach 2: the Euler scheme for solving an ODE

Consider the ordinary differential equation

$$
\frac{dx}{dt} = -f'(x(t))
$$

and discretize it on a uniform grid with step $\alpha$:

$$
\frac{x_{k+1} - x_k}{\alpha} = -f'(x_k),
$$

where $x_k \equiv x(t_k)$ and $\alpha = t_{k+1} - t_k$ is the grid step. This yields the expression for $x_{k+1}$

$$
x_{k+1} = x_k - \alpha f'(x_k),
$$

which coincides exactly with the gradient descent step. This scheme is called the explicit (forward) Euler scheme.

**Q:** which scheme is called implicit (backward)?

## Approach 3: minimizing a quadratic upper bound
#### (A. V. Gasnikov, "Universal gradient descent", https://arxiv.org/abs/1711.00394)

A global upper bound on the function $f$ at the point $x_k$:

$$
f(y) \leq f(x_k) + \langle f'(x_k), y - x_k \rangle + \frac{L}{2} \|y - x_k \|_2^2 = g(y),
$$

where $\lambda_{\max}(f''(x)) \leq L$ for all admissible $x$. The right-hand side is a quadratic form whose minimizer has a closed-form expression:

\begin{align*}
& g'(y^*) = 0 \\
& f'(x_k) + L (y^* - x_k) = 0 \\
& y^* = x_k - \frac{1}{L}f'(x_k) = x_{k+1}
\end{align*}

This approach yields the step-size estimate $\frac{1}{L}$. However, the constant $L$ is often unknown.

## Bottom line: gradient descent is cheap and cheerful

```python
def GradientDescentMethod(f, x0, epsilon, **kwargs):
    # Schematic template: StopCriterion, ComputeGradient and SelectStepSize
    # are problem-specific and defined elsewhere.
    x = x0
    while StopCriterion(x, f, **kwargs) > epsilon:
        h = ComputeGradient(x, f, **kwargs)
        alpha = SelectStepSize(x, h, f, **kwargs)
        x = x - alpha * h
    return x
```

## How should the step $\alpha_k$ be chosen? (J. Nocedal, S. Wright, Numerical Optimization, § 3.1)
A list of approaches:

- Constant step

$$
\alpha_k = \overline{\alpha}
$$

- A sequence fixed a priori, for example

$$
\alpha_k = \dfrac{\overline{\alpha}}{\sqrt{k+1}}
$$

- Steepest descent

$$
\alpha_k = \arg\min_{\alpha \geq 0} f(x_k - \alpha f'(x_k))
$$

- The **sufficient** decrease requirement, the **essential** decrease requirement and the curvature condition: for some $\beta_1, \beta_2$ with $0 < \beta_1 < \beta_2 < 1$, find $x_{k+1}$ such that
    - Sufficient decrease: $f(x_{k+1}) \leq f(x_k) + \beta_1 \alpha_k \langle f'(x_k), h_k \rangle$, or equivalently $f(x_k) - f(x_{k+1}) \geq -\beta_1 \alpha_k \langle f'(x_k), h_k \rangle$
    - Essential decrease: $f(x_{k+1}) \geq f(x_k) + \beta_2 \alpha_k \langle f'(x_k), h_k \rangle$, or equivalently $f(x_k) - f(x_{k+1}) \leq -\beta_2 \alpha_k \langle f'(x_k), h_k \rangle$
    - Curvature condition: $\langle f'(x_{k+1}), h_k \rangle \geq \beta_2 \langle f'(x_k), h_k \rangle$

The coefficients are usually chosen as $\beta_1 \in (0, 0.3)$ and $\beta_2 \in (0.9, 1)$.

### Analysis and motivation of the step-size strategies

- Constant step: the simplest and least efficient choice
- A priori sequence: only slightly better than a constant step
- Steepest descent: the best choice, but applicable only if the auxiliary one-dimensional problem can be solved analytically or veeeery quickly. That is, it is almost never applicable :)
- Sufficient decrease, essential decrease and the curvature condition:
    - the sufficient decrease requirement guarantees that the function value at $x_{k+1}$ does not exceed the linear approximation with slope coefficient $\beta_1$
    - the essential decrease requirement guarantees that the function at $x_{k+1}$ does not decrease by more than the linear approximation with slope coefficient $\beta_2$
    - the curvature condition guarantees that the slope of the tangent at $x_{k+1}$ is no smaller than the slope of the tangent at $x_k$ multiplied by $\beta_2$

The essential decrease requirement and the curvature condition both control the decrease along the chosen direction $h_k$ (they rule out steps that are too short); usually one of the two is used.

#### Alternative names

- Sufficient decrease requirement $\equiv$ Armijo rule
- Sufficient decrease + curvature condition $\equiv$ Wolfe conditions
- Sufficient decrease + essential decrease $\equiv$ Goldstein conditions

## Why is the essential decrease condition needed?
```python
%matplotlib notebook
import matplotlib.pyplot as plt
plt.rc("text", usetex=True)

import ipywidgets as ipywidg
import numpy as np
import liboptpy.unconstr_solvers as methods
import liboptpy.step_size as ss
from tqdm import tqdm
```

```python
f = lambda x: np.power(x, 2)
gradf = lambda x: 2 * x

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)

def update(x0, step):
    gd = methods.fo.GradientDescent(f, gradf, ss.ConstantStepSize(step))
    _ = gd.solve(np.array([x0]), max_iter=10)
    x_hist = gd.get_convergence()
    x = np.linspace(-5, 5)
    ax.clear()
    ax.plot(x, f(x), color="r", label="$f(x) = x^2$")
    y_hist = np.array([f(x) for x in x_hist])
    x_hist = np.array(x_hist)
    plt.quiver(x_hist[:-1], y_hist[:-1], x_hist[1:]-x_hist[:-1], y_hist[1:]-y_hist[:-1],
               scale_units='xy', angles='xy', scale=1, width=0.005, color="green", label="Descent path")
    ax.legend()
    fig.canvas.draw()

step_slider = ipywidg.FloatSlider(value=0.8, min=0, max=1.2, step=0.1, description="Step")
x0_slider = ipywidg.FloatSlider(value=1.5, min=-4, max=4, step=0.1, description="Initial point")
_ = ipywidg.interact(update, x0=x0_slider, step=step_slider)
```

```python
def plot_alpha(f, grad, x, h, alphas, beta1, beta2):
    df = np.zeros_like(alphas)
    for i, alpha in enumerate(alphas):
        df[i] = f(x + alpha * h)
    upper_bound = f(x) + beta1 * alphas * grad(x) * h
    lower_bound = f(x) + beta2 * alphas * grad(x) * h
    plt.plot(alphas, df, label=r"$f(x + \alpha h)$")
    plt.plot(alphas, upper_bound, label="Upper bound")
    plt.plot(alphas, lower_bound, label="Lower bound")
    plt.xlabel(r"$\alpha$", fontsize=18)
    plt.legend(loc="best", fontsize=18)
```

```python
f = lambda x: x**2
grad = lambda x: 2 * x
beta1 = 0.1
beta2 = 0.9
x0 = 0.5
plot_alpha(f, grad, x0, -grad(x0), np.linspace(1e-3, 1.01, 10), beta1, beta2)
```

## $f(x) = x\log x$

```python
x_range = np.linspace(1e-10, 4)
plt.plot(x_range, x_range * np.log(x_range))
```

```python
x0 = 1
f = lambda x: x * np.log(x)
grad = lambda x: np.log(x) + 1
beta1 = 0.3
beta2 = 0.7
plot_alpha(f, grad, x0, -grad(x0), np.linspace(1e-3, 0.9, 10), beta1, beta2)
```

### Backtracking

```python
def SelectStepSize(x, f, grad_f, h, rho, alpha0, beta1, beta2):
    # 0 < rho < 1
    # alpha0 - initial guess of the step size
    # beta1 and beta2 - constants from the conditions above
    # h is a descent direction, so the trial point is x + alpha * h
    alpha = alpha0
    # Shrink the step while the sufficient decrease and curvature
    # conditions are both violated
    while (f(x + alpha * h) >= f(x) + beta1 * alpha * grad_f(x).dot(h)) and \
          (grad_f(x + alpha * h).dot(h) <= beta2 * grad_f(x).dot(h)):
        alpha *= rho
    return alpha
```

## Convergence theorems (B. T. Polyak, Introduction to Optimization, ch. 1, § 4, ch. 3, § 1; Yu. E. Nesterov, Introduction to Convex Optimization, § 2.2)

From the general to the particular:

**Theorem 1.** Assume that

- $f(x)$ is differentiable on $\mathbb{R}^n$,
- the gradient of $f(x)$ is Lipschitz with constant $L$,
- $f(x)$ is bounded below,
- $\alpha = const$ and $0 < \alpha < \frac{2}{L}$.

Then the gradient method satisfies

$$
\lim\limits_{k \to \infty} f'(x_k) = 0,
$$

and the function decreases monotonically: $f(x_{k+1}) < f(x_k)$.
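A quick numerical sanity check of Theorem 1, on a hypothetical toy quadratic that is not part of the original notes: for any constant step $0 < \alpha < 2/L$ the function value should decrease at every iteration and the gradient norm should be driven to zero.

```python
import numpy as np

# Toy quadratic f(x) = 0.5 * x^T A x with A = diag(1, 10), so L = 10.
# Theorem 1 promises monotone decrease and vanishing gradients for
# any constant step 0 < alpha < 2/L.
A = np.diag([1.0, 10.0])
f = lambda x: 0.5 * x @ A @ x
grad = lambda x: A @ x

alpha = 1.9 / 10.0          # just below 2/L
x = np.array([5.0, 5.0])
for k in range(200):
    f_prev = f(x)
    x = x - alpha * grad(x)
    assert f(x) < f_prev    # monotone decrease, as the theorem states

print(np.linalg.norm(grad(x)))   # close to zero
```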
**Theorem 2.** Assume that

- $f(x)$ is differentiable on $\mathbb{R}^n$,
- $f(x)$ is convex,
- $f'(x)$ is Lipschitz with constant $L$,
- $\alpha = \dfrac{1}{L}$.

Then

$$
f(x_k) - f^* \leq \dfrac{2L \| x_0 - x^*\|^2_2}{k+4}
$$

**Theorem 3.** Assume that

- $f(x)$ is twice differentiable and $\mu\mathbf{I} \preceq f''(x) \preceq L\mathbf{I}$ for all $x$,
- $\alpha = const$ and $0 < \alpha < \frac{2}{L}$.

Then

$$
\| x_k - x^*\|_2 \leq \|x_0 - x^*\|_2 q^k, \qquad q = \max(|1 - \alpha \mu|, |1 - \alpha L|) < 1,
$$

and the smallest value $q^* = \dfrac{L - \mu}{L + \mu}$ is attained at $\alpha^* = \dfrac{2}{L + \mu}$

### What does $q^*$ depend on, and how can this be used?

From Theorem 3 we have

$$
q^* = \dfrac{L - \mu}{L + \mu} = \dfrac{L/\mu - 1}{L/\mu + 1} = \dfrac{M - 1}{M + 1},
$$

where $M$ is an estimate of the condition number of $f''(x)$.

**Question**: what is the condition number of a matrix?

- For $M \gg 1$, $q^* \to 1 \Rightarrow$ veeeery **slow** convergence of the gradient method. For example, for $M = 100$: $q^* \approx 0.98$
- For $M \simeq 1$, $q^* \to 0 \Rightarrow$ **accelerated** convergence of the gradient method. For example, for $M = 4$: $q^* = 0.6$

**Question**: what is the geometry of this requirement?

**Moral**: the estimate $M$ should be made as close to 1 as possible! How to do this is something you will be asked to think about in the homework :)

## Computational aspects and experiments

1. Each step of the method needs to store only the current point and the gradient vector: $O(n)$ memory
2. Finding $\alpha_k$:
- given a priori
- obtained from the analytic solution of the steepest descent subproblem
- terminates in a finite number of steps
3. Each step of the method computes a linear combination of vectors: $O(n)$ operations + high-performance implementations

### An implementation of gradient descent

```python
def GradientDescent(f, gradf, x0, epsilon, num_iter, line_search,
                    disp=False, callback=None, **kwargs):
    x = x0.copy()
    iteration = 0
    opt_arg = {"f": f, "grad_f": gradf}
    for key in kwargs:
        opt_arg[key] = kwargs[key]
    while True:
        gradient = gradf(x)
        alpha = line_search(x, -gradient, **opt_arg)
        x = x - alpha * gradient
        if callback is not None:
            callback(x)
        iteration += 1
        if disp:
            print("Current function val =", f(x))
            print("Current gradient norm = ", np.linalg.norm(gradf(x)))
        if np.linalg.norm(gradf(x)) < epsilon:
            break
        if iteration >= num_iter:
            break
    res = {"x": x, "num_iter": iteration, "tol": np.linalg.norm(gradf(x))}
    return res
```

### Choosing the step

Implementations of the various step-size rules are available [here](https://github.com/amkatrutsa/liboptpy/blob/master/step_size.py)

### Dependence on the conditioning of $f''(x)$

Consider the problem

$$
\min f(x),
$$

where

$$
f(x) = \frac{1}{2} x^{\top}Ax, \; A = \begin{bmatrix} 1 & 0\\ 0 & \gamma \end{bmatrix}
$$

$$
f'(x) = Ax
$$

```python
def my_f(x, A):
    return 0.5 * x.dot(A.dot(x))

def my_gradf(x, A):
    return A.dot(x)
```

```python
plt.rc("text", usetex=True)

gammas = [0.1, 0.5, 1, 2, 3, 4, 5, 10, 20, 50, 100, 1000, 5000, 10000]
# gammas = [1]
num_iter_converg = []
for g in gammas:
    A = np.array([[1, 0], [0, g]], dtype=np.float64)
    f = lambda x: my_f(x, A)
    gradf = lambda x: my_gradf(x, A)
    # x0 = np.random.rand(A.shape[0])
    # x0 = np.sort(x0)
    # x0 = x0[::-1]
    x0 = np.array([g, 1], dtype=np.float64)
    # print x0[1] / x0[0]
    gd = methods.fo.GradientDescent(f, gradf, ss.ExactLineSearch4Quad(A))
    x = gd.solve(x0, tol=1e-7, max_iter=100)
    num_iter_converg.append(len(gd.get_convergence()))

plt.figure(figsize=(8, 6))
plt.loglog(gammas, num_iter_converg)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel(r"$\gamma$", fontsize=20)
plt.ylabel(r"Number of iterations with $\varepsilon = 10^{-7}$", fontsize=20)
```
- With an unlucky initial guess, convergence on an ill-conditioned problem is very slow
- With a random initial guess, convergence can be much faster than the theoretical estimates suggest

## An experiment on a multidimensional problem

Let $A \in \mathbb{R}^{m \times n}$. Consider the system of linear inequalities $Ax \leq 1$ subject to $|x_i| \leq 1$ for all $i$.

**Definition.** The analytic center of the system of inequalities $Ax \leq 1$ subject to $|x_i| \leq 1$ is the solution of the problem

$$
f(x) = - \sum_{i=1}^m \log(1 - a_i^{\top}x) - \sum_{i = 1}^n \log (1 - x^2_i) \to \min_x
$$

$$
f'(x) - ?
$$

### Exact solution with CVXPy

```python
import numpy as np
import cvxpy as cvx

n = 1000
m = 2000
A = np.random.rand(n, m)

x = cvx.Variable(n)
obj = cvx.Minimize(cvx.sum(-cvx.log(1 - A.T @ x)) - cvx.sum(cvx.log(1 - cvx.square(x))))
prob = cvx.Problem(obj)
prob.solve(solver="SCS", verbose=True)
x = x.value
print("Optimal value =", prob.value)
```

### Solution with gradient descent

```python
print(cvx.installed_solvers())
# !pip install jax
# !pip install jaxlib
import jax.numpy as jnp
import jax
# from jax.config import config
# config.update("jax_enable_x64", True)

A = jnp.array(A)
print(A.dtype)
x0 = jnp.zeros(n)
f = lambda x: -jnp.sum(jnp.log(1 - A.T@x)) - jnp.sum(jnp.log(1 - x*x))
grad_f = lambda x: jnp.sum(A @ (jnp.diagflat(1 / (1 - A.T @ x))), axis=1) + 2 * x / (1 - jnp.power(x, 2))
grad_f_jax = jax.grad(f)
print(jnp.linalg.norm(grad_f(x0) - grad_f_jax(x0)))
```

More about jax, its capabilities and quirks can be found, for example, [here](https://github.com/amkatrutsa/MIPT-Opt/blob/master/Fall2020/03-MatrixCalculus/jax_autodiff_tutorial.ipynb)

```python
gd = methods.fo.GradientDescent(f, grad_f_jax, ss.Backtracking("Armijo", rho=0.5, beta=0.1, init_alpha=1.))
x = gd.solve(x0, tol=1e-5, max_iter=100, disp=True)
x_conv = gd.get_convergence()
grad_conv = [jnp.linalg.norm(grad_f_jax(x)) for x in x_conv]
plt.figure(figsize=(8, 6))
plt.semilogy(grad_conv, label=r"$\| f'(x_k) \|_2$")
plt.semilogy([np.linalg.norm(x - np.array(x_k)) for x_k in x_conv], label=r"$\|x_k - x^*\|_2$")
plt.semilogy([np.linalg.norm(prob.value - f(np.array(x_k))) for x_k in x_conv], label=r"$\|f(x_k) - f^*\|_2$")
plt.semilogy([np.linalg.norm(np.array(x_conv[i]) - np.array(x_conv[i+1])) for i in range(len(x_conv) - 1)], label=r"$\|x_k - x_{k+1}\|_2$")
plt.semilogy([np.linalg.norm(f(np.array(x_conv[i])) - f(np.array(x_conv[i+1]))) for i in range(len(x_conv) - 1)], label=r"$\|f(x_k) - f(x_{k+1})\|_2$")
plt.xlabel(r"Number of iteration, $k$", fontsize=20)
plt.ylabel(r"Convergence rate", fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend(loc="best", fontsize=20)
plt.tight_layout()
```

## Pro & Contra

Pro

- easy to implement
- converges at least to a stationary point
- the step-size parameters do not affect convergence too strongly
- has numerous variations

Contra

- linear convergence for strongly convex functions
- depends very strongly on the condition number of $f''(x)$; choosing the initial guess well can help
- is not optimal for convex functions with Lipschitz gradient or for strongly convex functions (see [Nesterov acceleration](https://blogs.princeton.edu/imabandit/2013/04/01/acceleratedgradientdescent/) and the sketch after the summary below)

## Summary

1. Descent methods
2. Descent directions
3. The gradient descent method
4. Step-size rules
5. Convergence theorems
6. Experiments
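The last Contra item refers to Nesterov acceleration. Below is a minimal sketch, not part of the original lecture, comparing plain gradient descent with a constant-step Nesterov scheme on a hypothetical ill-conditioned quadratic; the momentum coefficient $k/(k+3)$ is one standard choice for $L$-smooth convex problems.

```python
import numpy as np

# Minimal sketch (hypothetical example): plain gradient descent vs.
# Nesterov's accelerated gradient on an ill-conditioned quadratic
# f(x) = 0.5 * x^T A x - b^T x with mu = 1, L = 100.
np.random.seed(0)
d = 50
A = np.diag(np.linspace(1.0, 100.0, d))
b = np.random.randn(d)
f = lambda x: 0.5 * x @ A @ x - b @ x
grad = lambda x: A @ x - b
L = 100.0

x_gd = np.zeros(d)
x_nest = np.zeros(d)
y = x_nest.copy()
for k in range(300):
    x_gd = x_gd - grad(x_gd) / L                 # plain GD with step 1/L
    x_new = y - grad(y) / L                      # gradient step from the extrapolated point
    y = x_new + k / (k + 3) * (x_new - x_nest)   # momentum extrapolation
    x_nest = x_new

x_star = np.linalg.solve(A, b)
print("GD gap:      ", f(x_gd) - f(x_star))
print("Nesterov gap:", f(x_nest) - f(x_star))    # typically much smaller
```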
/-
Copyright (c) 2021 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import algebra.homology.single
import tactic.linarith

/-!
# Augmentation and truncation of `ℕ`-indexed (co)chain complexes.
-/

noncomputable theory

open category_theory
open category_theory.limits
open homological_complex

universes v u

variables {V : Type u} [category.{v} V]

namespace chain_complex

/-- The truncation of a `ℕ`-indexed chain complex, deleting the object at `0` and shifting everything else down. -/
@[simps]
def truncate [has_zero_morphisms V] : chain_complex V ℕ ⥤ chain_complex V ℕ :=
{ obj := λ C,
  { X := λ i, C.X (i+1),
    d := λ i j, C.d (i+1) (j+1),
    shape' := λ i j w, by { apply C.shape, simpa }, },
  map := λ C D f,
  { f := λ i, f.f (i+1), }, }

/-- There is a canonical chain map from the truncation of a chain complex `C` to the "single object" chain complex consisting of the truncated object `C.X 0` in degree 0. The components of this chain map are `C.d 1 0` in degree 0, and zero otherwise. -/
def truncate_to [has_zero_object V] [has_zero_morphisms V] (C : chain_complex V ℕ) :
  truncate.obj C ⟶ (single₀ V).obj (C.X 0) :=
(to_single₀_equiv (truncate.obj C) (C.X 0)).symm ⟨C.d 1 0, by tidy⟩

-- PROJECT when `V` is abelian (but not generally?)
-- `[∀ n, exact (C.d (n+2) (n+1)) (C.d (n+1) n)] [epi (C.d 1 0)]` iff `quasi_iso (C.truncate_to)`

variables [has_zero_morphisms V]

/-- We can "augment" a chain complex by inserting an arbitrary object in degree zero (shifting everything else up), along with a suitable differential. -/
def augment (C : chain_complex V ℕ) {X : V} (f : C.X 0 ⟶ X) (w : C.d 1 0 ≫ f = 0) :
  chain_complex V ℕ :=
{ X := λ i, match i with
  | 0 := X
  | (i+1) := C.X i
  end,
  d := λ i j, match i, j with
  | 1, 0 := f
  | (i+1), (j+1) := C.d i j
  | _, _ := 0
  end,
  shape' := λ i j s, begin
    simp at s,
    rcases i with _|_|i; cases j; unfold_aux; try { simp },
    { simpa using s, },
    { rw [C.shape], simpa [← ne.def, nat.succ_ne_succ] using s },
  end,
  d_comp_d' := λ i j k hij hjk, begin
    rcases i with _|_|i; rcases j with _|_|j; cases k; unfold_aux; try { simp },
    cases i,
    { exact w, },
    { rw [C.shape, zero_comp], simpa using i.succ_succ_ne_one.symm },
  end, }

@[simp] lemma augment_X_zero (C : chain_complex V ℕ) {X : V} (f : C.X 0 ⟶ X) (w : C.d 1 0 ≫ f = 0) :
  (augment C f w).X 0 = X := rfl

@[simp] lemma augment_X_succ (C : chain_complex V ℕ) {X : V} (f : C.X 0 ⟶ X) (w : C.d 1 0 ≫ f = 0) (i : ℕ) :
  (augment C f w).X (i+1) = C.X i := rfl

@[simp] lemma augment_d_one_zero (C : chain_complex V ℕ) {X : V} (f : C.X 0 ⟶ X) (w : C.d 1 0 ≫ f = 0) :
  (augment C f w).d 1 0 = f := rfl

@[simp] lemma augment_d_succ_succ (C : chain_complex V ℕ) {X : V} (f : C.X 0 ⟶ X) (w : C.d 1 0 ≫ f = 0) (i j : ℕ) :
  (augment C f w).d (i+1) (j+1) = C.d i j :=
by { dsimp [augment], rcases i with _|i, refl, refl, }

/-- Truncating an augmented chain complex is isomorphic (with components the identity) to the original complex. -/
def truncate_augment (C : chain_complex V ℕ) {X : V} (f : C.X 0 ⟶ X) (w : C.d 1 0 ≫ f = 0) :
  truncate.obj (augment C f w) ≅ C :=
{ hom := { f := λ i, 𝟙 _, },
  inv := { f := λ i, by { exact 𝟙 _, },
    comm' := λ i j, by { cases j; { dsimp, simp, }, }, },
  hom_inv_id' := by { ext i, cases i; { dsimp, simp, }, },
  inv_hom_id' := by { ext i, cases i; { dsimp, simp, }, }, }.
@[simp] lemma truncate_augment_hom_f (C : chain_complex V ℕ) {X : V} (f : C.X 0 ⟶ X) (w : C.d 1 0 ≫ f = 0) (i : ℕ) :
  (truncate_augment C f w).hom.f i = 𝟙 (C.X i) := rfl

@[simp] lemma truncate_augment_inv_f (C : chain_complex V ℕ) {X : V} (f : C.X 0 ⟶ X) (w : C.d 1 0 ≫ f = 0) (i : ℕ) :
  (truncate_augment C f w).inv.f i = 𝟙 ((truncate.obj (augment C f w)).X i) := rfl

@[simp] lemma chain_complex_d_succ_succ_zero (C : chain_complex V ℕ) (i : ℕ) :
  C.d (i+2) 0 = 0 :=
by { rw C.shape, simpa using i.succ_succ_ne_one.symm }

/-- Augmenting a truncated complex with the original object and morphism is isomorphic (with components the identity) to the original complex. -/
def augment_truncate (C : chain_complex V ℕ) :
  augment (truncate.obj C) (C.d 1 0) (C.d_comp_d _ _ _) ≅ C :=
{ hom := { f := λ i, by { cases i; exact 𝟙 _, },
    comm' := λ i j, by { rcases i with _|_|i; cases j; { dsimp, simp, }, }, },
  inv := { f := λ i, by { cases i; exact 𝟙 _, },
    comm' := λ i j, by { rcases i with _|_|i; cases j; { dsimp, simp, }, }, },
  hom_inv_id' := by { ext i, cases i; { dsimp, simp, }, },
  inv_hom_id' := by { ext i, cases i; { dsimp, simp, }, }, }.

@[simp] lemma augment_truncate_hom_f_zero (C : chain_complex V ℕ) :
  (augment_truncate C).hom.f 0 = 𝟙 (C.X 0) := rfl

@[simp] lemma augment_truncate_hom_f_succ (C : chain_complex V ℕ) (i : ℕ) :
  (augment_truncate C).hom.f (i+1) = 𝟙 (C.X (i+1)) := rfl

@[simp] lemma augment_truncate_inv_f_zero (C : chain_complex V ℕ) :
  (augment_truncate C).inv.f 0 = 𝟙 (C.X 0) := rfl

@[simp] lemma augment_truncate_inv_f_succ (C : chain_complex V ℕ) (i : ℕ) :
  (augment_truncate C).inv.f (i+1) = 𝟙 (C.X (i+1)) := rfl

/-- A chain map from a chain complex to a single object chain complex in degree zero can be reinterpreted as a chain complex. This is the inverse construction of `truncate_to`. -/
def to_single₀_as_complex [has_zero_object V] (C : chain_complex V ℕ) (X : V) (f : C ⟶ (single₀ V).obj X) :
  chain_complex V ℕ :=
let ⟨f, w⟩ := to_single₀_equiv C X f in augment C f w

end chain_complex

namespace cochain_complex

/-- The truncation of a `ℕ`-indexed cochain complex, deleting the object at `0` and shifting everything else down. -/
@[simps]
def truncate [has_zero_morphisms V] : cochain_complex V ℕ ⥤ cochain_complex V ℕ :=
{ obj := λ C,
  { X := λ i, C.X (i+1),
    d := λ i j, C.d (i+1) (j+1),
    shape' := λ i j w, by { apply C.shape, simpa }, },
  map := λ C D f,
  { f := λ i, f.f (i+1), }, }

/-- There is a canonical chain map from the truncation of a cochain complex `C` to the "single object" cochain complex consisting of the truncated object `C.X 0` in degree 0. The components of this chain map are `C.d 0 1` in degree 0, and zero otherwise. -/
def to_truncate [has_zero_object V] [has_zero_morphisms V] (C : cochain_complex V ℕ) :
  (single₀ V).obj (C.X 0) ⟶ truncate.obj C :=
(from_single₀_equiv (truncate.obj C) (C.X 0)).symm ⟨C.d 0 1, by tidy⟩

variables [has_zero_morphisms V]

/-- We can "augment" a cochain complex by inserting an arbitrary object in degree zero (shifting everything else up), along with a suitable differential. -/
def augment (C : cochain_complex V ℕ) {X : V} (f : X ⟶ C.X 0) (w : f ≫ C.d 0 1 = 0) :
  cochain_complex V ℕ :=
{ X := λ i, match i with
  | 0 := X
  | (i+1) := C.X i
  end,
  d := λ i j, match i, j with
  | 0, 1 := f
  | (i+1), (j+1) := C.d i j
  | _, _ := 0
  end,
  shape' := λ i j s, begin
    simp at s,
    rcases j with _|_|j; cases i; unfold_aux; try { simp },
    { simpa using s, },
    { rw [C.shape], simp only [complex_shape.up_rel], contrapose!
      s, rw ←s },
  end,
  d_comp_d' := λ i j k hij hjk, begin
    rcases k with _|_|k; rcases j with _|_|j; cases i; unfold_aux; try { simp },
    cases k,
    { exact w, },
    { rw [C.shape, comp_zero],
      simp only [nat.nat_zero_eq_zero, complex_shape.up_rel, zero_add],
      exact (nat.one_lt_succ_succ _).ne },
  end, }

@[simp] lemma augment_X_zero (C : cochain_complex V ℕ) {X : V} (f : X ⟶ C.X 0) (w : f ≫ C.d 0 1 = 0) :
  (augment C f w).X 0 = X := rfl

@[simp] lemma augment_X_succ (C : cochain_complex V ℕ) {X : V} (f : X ⟶ C.X 0) (w : f ≫ C.d 0 1 = 0) (i : ℕ) :
  (augment C f w).X (i+1) = C.X i := rfl

@[simp] lemma augment_d_zero_one (C : cochain_complex V ℕ) {X : V} (f : X ⟶ C.X 0) (w : f ≫ C.d 0 1 = 0) :
  (augment C f w).d 0 1 = f := rfl

@[simp] lemma augment_d_succ_succ (C : cochain_complex V ℕ) {X : V} (f : X ⟶ C.X 0) (w : f ≫ C.d 0 1 = 0) (i j : ℕ) :
  (augment C f w).d (i+1) (j+1) = C.d i j := rfl

/-- Truncating an augmented cochain complex is isomorphic (with components the identity) to the original complex. -/
def truncate_augment (C : cochain_complex V ℕ) {X : V} (f : X ⟶ C.X 0) (w : f ≫ C.d 0 1 = 0) :
  truncate.obj (augment C f w) ≅ C :=
{ hom := { f := λ i, 𝟙 _, },
  inv := { f := λ i, by { exact 𝟙 _, },
    comm' := λ i j, by { cases j; { dsimp, simp, }, }, },
  hom_inv_id' := by { ext i, cases i; { dsimp, simp, }, },
  inv_hom_id' := by { ext i, cases i; { dsimp, simp, }, }, }.

@[simp] lemma truncate_augment_hom_f (C : cochain_complex V ℕ) {X : V} (f : X ⟶ C.X 0) (w : f ≫ C.d 0 1 = 0) (i : ℕ) :
  (truncate_augment C f w).hom.f i = 𝟙 (C.X i) := rfl

@[simp] lemma truncate_augment_inv_f (C : cochain_complex V ℕ) {X : V} (f : X ⟶ C.X 0) (w : f ≫ C.d 0 1 = 0) (i : ℕ) :
  (truncate_augment C f w).inv.f i = 𝟙 ((truncate.obj (augment C f w)).X i) := rfl

@[simp] lemma cochain_complex_d_succ_succ_zero (C : cochain_complex V ℕ) (i : ℕ) :
  C.d 0 (i+2) = 0 :=
by { rw C.shape, simp only [complex_shape.up_rel, zero_add],
  exact (nat.one_lt_succ_succ _).ne }

/-- Augmenting a truncated complex with the original object and morphism is isomorphic (with components the identity) to the original complex. -/
def augment_truncate (C : cochain_complex V ℕ) :
  augment (truncate.obj C) (C.d 0 1) (C.d_comp_d _ _ _) ≅ C :=
{ hom := { f := λ i, by { cases i; exact 𝟙 _, },
    comm' := λ i j, by { rcases j with _|_|j; cases i; { dsimp, simp, }, }, },
  inv := { f := λ i, by { cases i; exact 𝟙 _, },
    comm' := λ i j, by { rcases j with _|_|j; cases i; { dsimp, simp, }, }, },
  hom_inv_id' := by { ext i, cases i; { dsimp, simp, }, },
  inv_hom_id' := by { ext i, cases i; { dsimp, simp, }, }, }.

@[simp] lemma augment_truncate_hom_f_zero (C : cochain_complex V ℕ) :
  (augment_truncate C).hom.f 0 = 𝟙 (C.X 0) := rfl

@[simp] lemma augment_truncate_hom_f_succ (C : cochain_complex V ℕ) (i : ℕ) :
  (augment_truncate C).hom.f (i+1) = 𝟙 (C.X (i+1)) := rfl

@[simp] lemma augment_truncate_inv_f_zero (C : cochain_complex V ℕ) :
  (augment_truncate C).inv.f 0 = 𝟙 (C.X 0) := rfl

@[simp] lemma augment_truncate_inv_f_succ (C : cochain_complex V ℕ) (i : ℕ) :
  (augment_truncate C).inv.f (i+1) = 𝟙 (C.X (i+1)) := rfl

/-- A chain map from a single object cochain complex in degree zero to a cochain complex can be reinterpreted as a cochain complex. This is the inverse construction of `to_truncate`. -/
def from_single₀_as_complex [has_zero_object V] (C : cochain_complex V ℕ) (X : V) (f : (single₀ V).obj X ⟶ C) :
  cochain_complex V ℕ :=
let ⟨f, w⟩ := from_single₀_equiv C X f in augment C f w

end cochain_complex
program IfstTC;
var
  ch : char;
  int : integer;
  boolx, booly : boolean;
begin
  boolx := true;
  booly := false;
  ch := 'a';
  int := 66;
  write(integer(ch));
  write(integer(int));
  write(integer(boolx), integer(booly));
  writeln;
  writeln(char(ch), char(int));
  writeln(boolean(ch), integer(int), integer(boolx), integer(booly));
end.
! Copyright (c) 2015 Alex Kramer <[email protected]>
! See the LICENSE.txt file in the top-level directory of this distribution
!
! This module implements basic string conversions.
module string

  use globvars, only: dp, dp_format, sp, sp_format, ip, ip_format

  implicit none

  private

  public :: string_val
  public :: string_to_val

  interface string_val
     ! Get string representation of values
     module procedure string_dp_real
     module procedure string_sp_real
     module procedure string_int
     module procedure string_logical
  end interface string_val

  interface string_to_val
     ! Convert string representations to values
     module procedure string_to_dp_real
     module procedure string_to_sp_real
     module procedure string_to_int
     module procedure string_to_logical
  end interface string_to_val

contains

  pure function string_dp_real(num, fmt) result(val)
    ! Get string representation of double precision real
    !
    ! num :: double precision real value to convert
    ! fmt :: optional custom format string
    character(:), allocatable :: val
    real(dp), intent(in) :: num
    character(*), optional, intent(in) :: fmt

    character(120) :: tmp

    if (present(fmt)) then
       write(tmp, fmt) num
    else
       write(tmp, dp_format) num
    end if

    val = trim(adjustl(tmp))
  end function string_dp_real

  pure function string_sp_real(num, fmt) result(val)
    ! Get string representation of single precision real
    !
    ! num :: single precision real value to convert
    ! fmt :: optional custom format string
    character(:), allocatable :: val
    real(sp), intent(in) :: num
    character(*), optional, intent(in) :: fmt

    character(120) :: tmp

    if (present(fmt)) then
       write(tmp, fmt) num
    else
       write(tmp, sp_format) num
    end if

    val = trim(adjustl(tmp))
  end function string_sp_real

  pure function string_int(num, fmt) result(val)
    ! Get string representation of integer
    !
    ! num :: integer value to convert
    ! fmt :: optional custom format string
    character(:), allocatable :: val
    integer(ip), intent(in) :: num
    character(*), optional, intent(in) :: fmt

    character(120) :: tmp

    if (present(fmt)) then
       write(tmp, fmt) num
    else
       write(tmp, ip_format) num
    end if

    val = trim(adjustl(tmp))
  end function string_int

  pure function string_logical(bool, full) result(val)
    ! Get string representation of logical value
    !
    ! bool :: logical value to convert
    ! full :: toggle whether only first letter (T/F) or full word is used
    character(:), allocatable :: val
    logical, intent(in) :: bool
    logical, intent(in), optional :: full

    logical :: full_str

    if (present(full)) then
       full_str = full
    else
       full_str = .true.
    end if

    if (bool) then
       if (full_str) then
          val = "True"
       else
          val = "T"
       end if
    else
       if (full_str) then
          val = "False"
       else
          val = "F"
       end if
    end if
  end function string_logical

  subroutine string_to_int(str, val)
    ! Get integer value from string
    !
    ! str :: string to convert
    ! val :: output value
    character(*), intent(in) :: str
    integer(ip), intent(out) :: val

    read(str, *) val
  end subroutine string_to_int

  subroutine string_to_dp_real(str, val)
    ! Get double precision real value from string
    !
    ! str :: string to convert
    ! val :: output value
    character(*), intent(in) :: str
    real(dp), intent(out) :: val

    read(str, *) val
  end subroutine string_to_dp_real

  subroutine string_to_sp_real(str, val)
    ! Get single precision real value from string
    !
    ! str :: string to convert
    ! val :: output value
    character(*), intent(in) :: str
    real(sp), intent(out) :: val

    read(str, *) val
  end subroutine string_to_sp_real

  subroutine string_to_logical(str, val)
    ! Get logical value from string
    !
    ! str :: string to convert
    ! val :: output value
    character(*), intent(in) :: str
    logical, intent(out) :: val

    select case(trim(adjustl(str)))
    case ("T", "True", "true", ".true.")
       val = .true.
    case ("F", "False", "false", ".false.")
       val = .false.
    end select
  end subroutine string_to_logical

end module string
abstract type CNFLayer <: Function end
Flux.trainable(m::CNFLayer) = (m.p,)

"""
Constructs a continuous-time recurrent neural network, also known as a neural
ordinary differential equation (neural ODE), with fast gradient calculation
via adjoints [1] and specialized for density estimation based on continuous
normalizing flows (CNF) [2] with a direct computation of the trace of the
dynamics' jacobian. At a high level this corresponds to the following steps:

1. Parameterize the variable of interest x(t) as a function f(z,θ,t) of a base variable z(t) with known density p_z;
2. Use the transformation of variables formula to predict the density p_x as a function of the density p_z and the trace of the Jacobian of f;
3. Choose the parameter θ to minimize a loss function of p_x (usually the negative likelihood of the data).

After these steps one may use the NN model and the learned θ to predict the density p_x for new values of x.

```julia
DeterministicCNF(model, tspan, args...; p = nothing, basedist = nothing, kwargs...)
```

Arguments:
- `model`: A Chain neural network that defines the dynamics of the model.
- `basedist`: Distribution of the base variable. Set to the unit normal by default.
- `tspan`: The timespan to be solved on.
- `kwargs`: Additional arguments splatted to the ODE solver. See the [Common Solver Arguments](https://diffeq.sciml.ai/dev/basics/common_solver_opts/) documentation for more details.

References:
[1] L. S. Pontryagin, Mathematical Theory of Optimal Processes. CRC Press, 1987.
[2] R. T. Q. Chen, Y. Rubanova, J. Bettencourt, D. Duvenaud. Neural Ordinary Differential Equations. arXiv preprint arXiv:1806.07366, 2019.
[3] W. Grathwohl, R. T. Q. Chen, J. Bettencourt, I. Sutskever, D. Duvenaud. FFJORD: Free-Form Continuous Dynamics for Scalable Reversible Generative Models. arXiv preprint arXiv:1810.01367, 2018.
"""
struct DeterministicCNF{M,P,RE,Distribution,T,A,K} <: CNFLayer
    model::M
    p::P
    re::RE
    basedist::Distribution
    tspan::T
    args::A
    kwargs::K

    function DeterministicCNF(model, tspan, args...; p = nothing, basedist = nothing, kwargs...)
        _p, re = Flux.destructure(model)
        if p === nothing
            p = _p
        end
        if basedist === nothing
            size_input = size(model[1].W)[2]
            basedist = MvNormal(zeros(size_input), I + zeros(size_input, size_input))
        end
        new{typeof(model),typeof(p),typeof(re),typeof(basedist),typeof(tspan),typeof(args),typeof(kwargs)}(
            model, p, re, basedist, tspan, args, kwargs)
    end
end

"""
Constructs a continuous-time recurrent neural network, also known as a neural
ordinary differential equation (neural ODE), with fast gradient calculation
via adjoints [1] and specialized for density estimation based on continuous
normalizing flows (CNF) [2] with a stochastic approach [3] for the computation
of the trace of the dynamics' jacobian. At a high level this corresponds to
the following steps:

1. Parameterize the variable of interest x(t) as a function f(z,θ,t) of a base variable z(t) with known density p_z;
2. Use the transformation of variables formula to predict the density p_x as a function of the density p_z and the trace of the Jacobian of f;
3. Choose the parameter θ to minimize a loss function of p_x (usually the negative likelihood of the data).

After these steps one may use the NN model and the learned θ to predict the density p_x for new values of x.

```julia
FFJORD(model, tspan, args...; p = nothing, basedist = nothing, kwargs...)
```

Arguments:
- `model`: A Chain neural network that defines the dynamics of the model.
- `basedist`: Distribution of the base variable. Set to the unit normal by default.
- `tspan`: The timespan to be solved on.
- `kwargs`: Additional arguments splatted to the ODE solver. See the [Common Solver Arguments](https://diffeq.sciml.ai/dev/basics/common_solver_opts/) documentation for more details.

References:
[1] L. S. Pontryagin, Mathematical Theory of Optimal Processes. CRC Press, 1987.
[2] R. T. Q. Chen, Y. Rubanova, J. Bettencourt, D. Duvenaud. Neural Ordinary Differential Equations. arXiv preprint arXiv:1806.07366, 2019.
[3] W. Grathwohl, R. T. Q. Chen, J. Bettencourt, I. Sutskever, D. Duvenaud. FFJORD: Free-Form Continuous Dynamics for Scalable Reversible Generative Models. arXiv preprint arXiv:1810.01367, 2018.
"""
struct FFJORD{M,P,RE,Distribution,T,A,K} <: CNFLayer
    model::M
    p::P
    re::RE
    basedist::Distribution
    tspan::T
    args::A
    kwargs::K

    function FFJORD(model, tspan, args...; p = nothing, basedist = nothing, kwargs...)
        _p, re = Flux.destructure(model)
        if p === nothing
            p = _p
        end
        if basedist === nothing
            size_input = size(model[1].W)[2]
            basedist = MvNormal(zeros(size_input), I + zeros(size_input, size_input))
        end
        new{typeof(model),typeof(p),typeof(re),typeof(basedist),typeof(tspan),typeof(args),typeof(kwargs)}(
            model, p, re, basedist, tspan, args, kwargs)
    end
end

function jacobian_fn(f, x::AbstractVector)
    y::AbstractVector, back = Zygote.pullback(f, x)
    ȳ(i) = [i == j for j = 1:length(y)]
    vcat([transpose(back(ȳ(i))[1]) for i = 1:length(y)]...)
end

function cnf(du, u, p, t, re)
    z = @view u[1:end-1]
    m = re(p)
    J = jacobian_fn(m, z)
    trace_jac = length(z) == 1 ? sum(J) : tr(J)
    du[1:end-1] = m(z)
    du[end] = -trace_jac
end

function ffjord(du, u, p, t, re, e, monte_carlo, regularize)
    m = re(p)
    if regularize
        z = @view u[1:end-3]
        _, back = Zygote.pullback(m, z)
        eJ = back(e)[1]
        if monte_carlo
            trace_jac = (eJ .* e)[1]
        else
            J = jacobian_fn(m, z)
            trace_jac = length(z) == 1 ? sum(J) : tr(J)
        end
        du[1:end-3] = m(z)
        du[end-2] = -trace_jac
        du[end-1] = sum(abs2, m(z))
        du[end] = norm(eJ)^2
    else
        z = @view u[1:end-1]
        _, back = Zygote.pullback(m, z)
        eJ = back(e)[1]
        if monte_carlo
            trace_jac = (eJ .* e)[1]
        else
            J = jacobian_fn(m, z)
            trace_jac = length(z) == 1 ? sum(J) : tr(J)
        end
        du[1:end-1] = m(z)
        du[end] = -trace_jac
    end
end

function (n::DeterministicCNF)(x, p = n.p)
    cnf_ = (du, u, p, t) -> cnf(du, u, p, t, n.re)
    prob = ODEProblem{true}(cnf_, vcat(x, 0f0), n.tspan, p)
    sense = InterpolatingAdjoint(autojacvec = false)
    pred = solve(prob, n.args...; sensealg = sense, n.kwargs...)[:, end]
    pz = n.basedist
    z = pred[1:end-1]
    delta_logp = pred[end]
    logpz = logpdf(pz, z)
    logpx = logpz .- delta_logp
    return logpx[1]
end

function (n::FFJORD)(x, p = n.p, regularize = false, monte_carlo = true)
    e = randn(Float32, length(x))
    pz = n.basedist
    sense = InterpolatingAdjoint(autojacvec = false)
    if regularize
        ffjord_ = (du, u, p, t) -> ffjord(du, u, p, t, n.re, e, monte_carlo, regularize)
        prob = ODEProblem{true}(ffjord_, vcat(x, 0f0, 0f0, 0f0), n.tspan, p)
        pred = solve(prob, n.args...; sensealg = sense, n.kwargs...)[:, end]
        z = pred[1:end-3]
        delta_logp = pred[end-2]
        λ₁ = pred[end-1]
        λ₂ = pred[end]
        logpz = logpdf(pz, z)
        logpx = logpz .- delta_logp
        return logpx[1], λ₁, λ₂
    else
        ffjord_ = (du, u, p, t) -> ffjord(du, u, p, t, n.re, e, monte_carlo, regularize)
        prob = ODEProblem{true}(ffjord_, vcat(x, 0f0), n.tspan, p)
        pred = solve(prob, n.args...; sensealg = sense, n.kwargs...)[:, end]
        z = pred[1:end-1]
        delta_logp = pred[end]
        logpz = logpdf(pz, z)
        logpx = logpz .- delta_logp
        return logpx[1]
    end
end
{-# OPTIONS --without-K #-}

open import Base

module Homotopy.Pointed where

open import Integers
open import Homotopy.Truncation
open import Homotopy.Connected

record pType (i : Level) : Set (suc i) where
  constructor ⋆[_,_]
  field
    ∣_∣ : Set i  -- \|
    ⋆ : ∣_∣  -- \*
open pType public

pType₀ : Set₁
pType₀ = pType zero

_→⋆_ : ∀ {i j} → (pType i → pType j → pType (max i j))
_→⋆_ A B = ⋆[ Σ (∣ A ∣ → ∣ B ∣) (λ f → f (⋆ A) ≡ ⋆ B) , ((λ _ → ⋆ B) , refl) ]

τ⋆ : ∀ {i} → (ℕ₋₂ → pType i → pType i)
τ⋆ n ⋆[ X , x ] = ⋆[ τ n X , proj x ]

is-contr⋆ : ∀ {i} → (pType i → Set i)
is-contr⋆ ⋆[ X , x ] = is-contr X

is-connected⋆ : ∀ {i} → ℕ₋₂ → (pType i → Set i)
is-connected⋆ n ⋆[ X , x ] = is-connected n X

connected⋆-lt : ∀ {i} (k n : ℕ) (lt : k < S n) (X : pType i)
  → (is-connected⋆ ⟨ n ⟩ X → is-contr⋆ (τ⋆ ⟨ k ⟩ X))
connected⋆-lt .n n <n X p = p
connected⋆-lt k O (<S ()) X p
connected⋆-lt k (S n) (<S lt) X p =
  connected⋆-lt k n lt X (connected-S-is-connected ⟨ n ⟩ p)

_≃⋆_ : ∀ {i j} → (pType i → pType j → Set (max i j))
⋆[ X , x ] ≃⋆ ⋆[ Y , y ] = Σ (X ≃ Y) (λ f → π₁ f x ≡ y)

id-equiv⋆ : ∀ {i} (X : pType i) → X ≃⋆ X
id-equiv⋆ ⋆[ X , x ] = (id-equiv X , refl)

equiv-compose⋆ : ∀ {i j k} {A : pType i} {B : pType j} {C : pType k}
  → (A ≃⋆ B → B ≃⋆ C → A ≃⋆ C)
equiv-compose⋆ (f , pf) (g , pg) = (equiv-compose f g , (ap (π₁ g) pf ∘ pg))

pType-eq-raw : ∀ {i} {X Y : pType i} (p : ∣ X ∣ ≡ ∣ Y ∣)
  (q : transport (λ X → X) p (⋆ X) ≡ ⋆ Y) → X ≡ Y
pType-eq-raw {i} {⋆[ X , x ]} {⋆[ .X , .x ]} refl refl = refl

pType-eq : ∀ {i} {X Y : pType i} → (X ≃⋆ Y → X ≡ Y)
pType-eq (e , p) = pType-eq-raw (eq-to-path e) (trans-id-eq-to-path e _ ∘ p)
context("itabulate iterator") test_that("itabulate functions properly with default values", { it <- itabulate(f=function(x) x + 1) expect_equal(take(it, 4), as.list(2:5)) }) test_that("itabulate functions properly with a given start value", { it <- itabulate(f=function(x) x^2, start=-3) expect_equal(take(it, 6), list(9, 4, 1, 0, 1, 4)) }) test_that("itabulate functions properly with given start and step values", { it <- itabulate(abs, start=-5, step=2) expect_equal(take(it, 6), list(5, 3, 1, 1, 3, 5)) }) test_that("itabulate functions properly with a decreasing sequence", { it <- itabulate(exp, start=6, step=-2) expect_equal(take(it, 4), as.list(exp(seq(6, 0, by=-2)))) })
[STATEMENT]
lemma opt_mru_guard_imp_mru_guard:
  assumes invs: "s \<in> Vinv1" "s \<in> SV_inv3"
  and c_guard: "opt_mru_guard (process_mru (votes s)) Q v"
  shows "mru_guard s Q v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. mru_guard s Q v
[PROOF STEP]
using c_guard
[PROOF STATE]
proof (prove)
using this:
opt_mru_guard (process_mru (votes s)) Q v
goal (1 subgoal):
 1. mru_guard s Q v
[PROOF STEP]
by(simp add: opt_mru_vote_mru_of_set[OF invs] opt_mru_guard_def mru_guard_def Let_def)
# Computational and Numerical Methods

## Group 16

### Set 11 (08-10-2018): Nonlinear Systems and the Newton Method

#### Vidhin Parmar 201601003
#### Parth Shah 201601086

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy
import scipy.linalg as la
from sympy import symbols, diff
from sympy import *
import mpmath
```

$x^2+4y^2-9=0$

$18y - 14x^2 + 45 = 0$

```python
th = np.arange(0, 2*np.pi + np.pi/50, np.pi/50)
plt.plot(3*np.cos(th), 1.5*np.sin(th), label="x^2+4y^2=9")
t = np.arange(-5, 5 + 0.01, 0.01)
plt.plot((9/14)*t, (-2.5 + ((9/28)*(t**2))), label="18y - 14x^2 + 45 = 0")
plt.legend(loc="best")
plt.grid(True)
plt.show()
```

```python
x, y = symbols('x y')
f = lambda a, b: a**2 + 4*b**2 - 9
g = lambda a, b: -14*a**2 + 18*b + 45
fx = lambdify((x, y), diff(x**2 + 4*y**2 - 9, x), "numpy")
fy = lambdify((x, y), diff(x**2 + 4*y**2 - 9, y), "numpy")
gx = lambdify((x, y), diff(-14*x**2 + 18*y + 45, x), "numpy")
gy = lambdify((x, y), diff(-14*x**2 + 18*y + 45, y), "numpy")

def newton_root(x0, y0, num_iter=25):
    # Newton's method for the 2x2 system: solve J * delta = -F, then update
    for i in range(num_iter):
        mat1 = np.array([[fx(x0, y0), fy(x0, y0)], [gx(x0, y0), gy(x0, y0)]], dtype=np.float32)
        mat2 = np.array([[-f(x0, y0)], [-g(x0, y0)]], dtype=np.float32)
        ans = la.solve(mat1, mat2)
        x0, y0 = x0 + ans[0, 0], y0 + ans[1, 0]
    return x0, y0

x0, y0 = newton_root(1, 1)
"Root1 : ", x0, y0
```

    ('Root1 : ', 2.1372167369636323, 1.0526519628113704)

```python
x0, y0 = newton_root(-1, 1)
"Root2 : ", x0, y0
```

    ('Root2 : ', -2.1372167369636323, 1.0526519628113704)

```python
x0, y0 = newton_root(-1, -1)
"Root3 : ", x0, y0
```

    ('Root3 : ', -1.2031669633477737, -1.3740805342399418)

```python
x0, y0 = newton_root(1, -1)
"Root4 : ", x0, y0
```

    ('Root4 : ', 1.2031669633477737, -1.3740805342399418)
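As a sanity check, the same system can be handed to SciPy's general-purpose root finder. The cell below is a sketch that reuses the equations and the four initial guesses from the cells above:

```python
from scipy.optimize import fsolve

# Cross-check the four Newton roots with scipy.optimize.fsolve
def system(v):
    x, y = v
    return [x**2 + 4*y**2 - 9, -14*x**2 + 18*y + 45]

for guess in [(1, 1), (-1, 1), (-1, -1), (1, -1)]:
    print(fsolve(system, guess))
```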
#order format: msgtype, symbol, price, quantity, side, ordtype, orderID, time
#execution message format: orderID, Execstatus, symbol, quantity, avg price, side, time
#Execstatus can be the following: filled, replaced, cancelled, replacereject?, cancelreject
#order book format: orderID, time, symbol, price, quantity, side, ordtype
#trade matrix format: time, symbol, side, quantity, price, open/close, pnl
#position matrix: time, asset(symbol), #of shares, book value, market value,

Con_output_dir = "outputs/"

Con_FieldName_MsgType = "Msgtype"
Con_FieldName_Sym = "Symbol"
Con_FieldName_Price = "Price"
Con_FieldName_Qty = "Quantity"
Con_FieldName_Side = "Side"
Con_FieldName_OrdType = "OrdType"
Con_FieldName_OrdID = "OrdID"
Con_FieldName_Time = "Timestamp"
Con_FieldName_ExecStatus = "ExecStatus"
Con_FieldName_AvgPrice = "AvgPrice"
Con_FieldName_BookVal = "BookValue"
Con_FieldName_MktVal = "MarketValue"
Con_FieldName_OpenClose = "Open/Close"
Con_FieldName_Pnl = "PnL"
Con_FieldName_CurrentBid = "CurrentBid"
Con_FieldName_CurrentAsk = "CurrentAsk"
Con_FieldName_CurrentTick = "CurrentTick"
Con_FieldName_LastHighestBid = "HighestBid"
Con_FieldName_LastLowestAsk = "LowestAsk"
Con_Data_ColName_LastNumTicks = "NumTicks"
Con_Data_ColName_LastVolume = "Volume"
Con_Data_ColName_LastValue = "Value"

Con_ExecStatus_filled <- 2
Con_Side_Buy <- 1
Con_Side_Sell <- 2
Con_MsgType_New <- "D"
Con_MsgType_Replace <- "G"
Con_MsgType_Cancel <- "F"
Con_OrdType_Mkt <- 1
Con_OrdType_Limit <- 2
Con_Sym_Cash <- "Cash"
Con_Sym_Portfolio <- "Portfolio"
Con_OpenClose_Open <- "Open"
Con_OpenClose_Close <- "Close"
Con_PriceCol <- 2

Con_GlobalVarName_LOB <- "PendingOrderBook"
Con_GlobalVarName_PositionBook <- "positionbook"
Con_GlobalVarName_TradesBook <- "tradesbook"
Con_GlobalVarName_MktPrice <- "market_price"
Con_GlobalVarName_BidPrice <- "bid_price"
Con_GlobalVarName_AskPrice <- "ask_price"
Con_GlobalVarName_ListDates <- "list_dates"

Con_Data_Tick_Suffix <- "_tick"
Con_Data_Ask_Suffix <- "_ask"
Con_Data_Bid_Suffix <- "_bid"
Con_Data_ColName_Date <- "Date"
Con_Data_ColName_Open <- "OPEN"
Con_Data_ColName_High <- "HIGH"
Con_Data_ColName_Low <- "LOW"
Con_Data_ColName_LastPrice <- "LAST_PRICE"
Con_Data_ColName_NumTicks <- "NUMBER_TICKS"
Con_Data_ColName_Volume <- "VOLUME"
Con_Data_ColName_Value <- "VALUE"

order_msg_spec <- c(Con_FieldName_MsgType, Con_FieldName_Sym, Con_FieldName_Price,
                    Con_FieldName_Qty, Con_FieldName_Side, Con_FieldName_OrdType,
                    Con_FieldName_OrdID, Con_FieldName_Time)
exec_msg_spec <- c(Con_FieldName_OrdID, Con_FieldName_ExecStatus, Con_FieldName_Sym,
                   Con_FieldName_Qty, Con_FieldName_AvgPrice, Con_FieldName_Side,
                   Con_FieldName_Time)
orderbook_spec <- c(Con_FieldName_OrdID, Con_FieldName_Time, Con_FieldName_Sym,
                    Con_FieldName_Price, Con_FieldName_Qty, Con_FieldName_Side,
                    Con_FieldName_OrdType)
positionbook_spec <- c(Con_FieldName_Sym, Con_FieldName_Qty, Con_FieldName_BookVal,
                       Con_FieldName_MktVal)
tradesbook_spec <- c(Con_FieldName_Time, Con_FieldName_Sym, Con_FieldName_Side,
                     Con_FieldName_Qty, Con_FieldName_Price, Con_FieldName_OpenClose,
                     Con_FieldName_Pnl)
mkt_quote_spec <- c(Con_FieldName_Sym, Con_FieldName_CurrentBid, Con_FieldName_CurrentAsk,
                    Con_FieldName_CurrentTick, Con_FieldName_LastHighestBid,
                    Con_FieldName_LastLowestAsk, Con_Data_ColName_LastNumTicks,
                    Con_Data_ColName_LastVolume, Con_Data_ColName_LastValue)
r=359.89 https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7dw2j/media/images/d7dw2j-003/svc:tesseract/full/full/359.89/default.jpg Accept:application/hocr+xml
Our annual Moonlight Race and BBQ attracted 15 entries in all this year, including a number of our junior sailors. The evening began with a BYO barbecue cooked on our newly purchased BBQ. Thanks to Rear Commodore Andy Bennett, and also to Ed Pepper, for assembling the BBQ just in time for the hungry hordes to descend upon it! Race Officer Jon Jowett then briefed competitors on the modified start procedure, which used coloured lights in place of flags, and handed out mini Mars bar and Milky Way snacks to all those competing. With only a light breeze at the start, competitors made a great sight for all the bar spectators, with various fairy lights being sported on rigs, making the job of choosing the best-lit boat quite a task. Unfortunately the wind completely died part way through the race, but every competitor was given a finish based on the number of laps they had achieved during the time allocated. Congratulations to Tim Hore on winning the race, and also to Brian Hennessey for winning the best-lit boat with his spectacular ‘disco light’.
// boost/endian/conversion.hpp --------------------------------------------------------//

// Copyright Beman Dawes 2010, 2011, 2014

// Distributed under the Boost Software License, Version 1.0.
// http://www.boost.org/LICENSE_1_0.txt

#ifndef BOOST_ENDIAN_CONVERSION_HPP
#define BOOST_ENDIAN_CONVERSION_HPP

#include <boost/endian/detail/endian_reverse.hpp>
#include <boost/endian/detail/endian_load.hpp>
#include <boost/endian/detail/endian_store.hpp>
#include <boost/endian/detail/order.hpp>
#include <boost/type_traits/is_class.hpp>
#include <boost/type_traits/is_integral.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/type_traits/integral_constant.hpp>
#include <boost/predef/other/endian.h>
#include <boost/static_assert.hpp>
#include <boost/cstdint.hpp>
#include <boost/config.hpp>

//------------------------------------- synopsis ---------------------------------------//

namespace boost
{
namespace endian
{

//--------------------------------------------------------------------------------------//
//                             return-by-value interfaces                                //
//                             suggested by Phil Endecott                                //
//                                                                                      //
//                             user-defined types (UDTs)                                 //
//                                                                                      //
//  All return-by-value conversion function templates are required to be implemented in //
//  terms of an unqualified call to "endian_reverse(x)", a function returning the       //
//  value of x with endianness reversed. This provides a customization point for any    //
//  UDT that provides a "endian_reverse" free-function meeting the requirements.        //
//  It must be defined in the same namespace as the UDT itself so that it will be found //
//  by argument dependent lookup (ADL).                                                  //
//--------------------------------------------------------------------------------------//

// reverse byte order
// requires T to be a non-bool integral type
// in detail/endian_reverse.hpp
template<class T> inline T endian_reverse( T x ) BOOST_NOEXCEPT;

// reverse byte order unless native endianness is big
template <class EndianReversible >
  inline EndianReversible big_to_native(EndianReversible x) BOOST_NOEXCEPT;
  // Returns: x if native endian order is big, otherwise endian_reverse(x)
template <class EndianReversible >
  inline EndianReversible native_to_big(EndianReversible x) BOOST_NOEXCEPT;
  // Returns: x if native endian order is big, otherwise endian_reverse(x)

// reverse byte order unless native endianness is little
template <class EndianReversible >
  inline EndianReversible little_to_native(EndianReversible x) BOOST_NOEXCEPT;
  // Returns: x if native endian order is little, otherwise endian_reverse(x)
template <class EndianReversible >
  inline EndianReversible native_to_little(EndianReversible x) BOOST_NOEXCEPT;
  // Returns: x if native endian order is little, otherwise endian_reverse(x)

// generic conditional reverse byte order
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To,
  class EndianReversible>
  inline EndianReversible conditional_reverse(EndianReversible from) BOOST_NOEXCEPT;
  // Returns: If From and To have the same value, from.
  //          Otherwise endian_reverse(from).
  // Remarks: The From == To test, and as a consequence which form the return takes,
  //          is determined at compile time.

// runtime conditional reverse byte order
template <class EndianReversible >
  inline EndianReversible conditional_reverse(EndianReversible from,
    BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order)
    BOOST_NOEXCEPT;
  // Returns: from_order == to_order ? from : endian_reverse(from).
//------------------------------------------------------------------------------------//

// Q: What happened to bswap, htobe, and the other synonym functions based on names
//    popularized by BSD, OS X, and Linux?
// A: Turned out these may be implemented as macros on some systems. Ditto POSIX names
//    for such functionality. Since macros would cause endless problems with functions
//    of the same names, and these functions are just synonyms anyhow, they have been
//    removed.

//------------------------------------------------------------------------------------//
//                            reverse in place interfaces                              //
//                                                                                    //
//                             user-defined types (UDTs)                               //
//                                                                                    //
//  All reverse in place function templates are required to be implemented in terms   //
//  of an unqualified call to "endian_reverse_inplace(x)", a function reversing       //
//  the endianness of x, which is a non-const reference. This provides a              //
//  customization point for any UDT that provides a "reverse_inplace" free-function   //
//  meeting the requirements. The free-function must be declared in the same          //
//  namespace as the UDT itself so that it will be found by argument-dependent        //
//  lookup (ADL).                                                                      //
//------------------------------------------------------------------------------------//

// reverse in place
// in detail/endian_reverse.hpp
template <class EndianReversible>
  inline void endian_reverse_inplace(EndianReversible& x) BOOST_NOEXCEPT;
  // Effects: x = endian_reverse(x)

// reverse in place unless native endianness is big
template <class EndianReversibleInplace>
  inline void big_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
  // Effects: none if native byte-order is big, otherwise endian_reverse_inplace(x)
template <class EndianReversibleInplace>
  inline void native_to_big_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
  // Effects: none if native byte-order is big, otherwise endian_reverse_inplace(x)

// reverse in place unless native endianness is little
template <class EndianReversibleInplace>
  inline void little_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
  // Effects: none if native byte-order is little, otherwise endian_reverse_inplace(x);
template <class EndianReversibleInplace>
  inline void native_to_little_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
  // Effects: none if native byte-order is little, otherwise endian_reverse_inplace(x);

// generic conditional reverse in place
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To,
  class EndianReversibleInplace>
  inline void conditional_reverse_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;

// runtime reverse in place
template <class EndianReversibleInplace>
  inline void conditional_reverse_inplace(EndianReversibleInplace& x,
    BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order)
    BOOST_NOEXCEPT;

//----------------------------------- end synopsis -------------------------------------//

namespace detail
{

template<class T> struct is_endian_reversible: boost::integral_constant<bool,
    boost::is_class<T>::value ||
    ( boost::is_integral<T>::value && !boost::is_same<T, bool>::value )>
{
};

} // namespace detail

template <class EndianReversible>
inline EndianReversible big_to_native( EndianReversible x ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible<EndianReversible>::value );

#if BOOST_ENDIAN_BIG_BYTE
    return x;
#else
    return endian_reverse(x);
#endif
}

template <class EndianReversible>
inline EndianReversible native_to_big( EndianReversible x ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT(
        detail::is_endian_reversible<EndianReversible>::value );

#if BOOST_ENDIAN_BIG_BYTE
    return x;
#else
    return endian_reverse(x);
#endif
}

template <class EndianReversible>
inline EndianReversible little_to_native( EndianReversible x ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible<EndianReversible>::value );

#if BOOST_ENDIAN_LITTLE_BYTE
    return x;
#else
    return endian_reverse(x);
#endif
}

template <class EndianReversible>
inline EndianReversible native_to_little( EndianReversible x ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible<EndianReversible>::value );

#if BOOST_ENDIAN_LITTLE_BYTE
    return x;
#else
    return endian_reverse(x);
#endif
}

namespace detail
{

template<class EndianReversible>
inline EndianReversible conditional_reverse_impl( EndianReversible x, boost::true_type ) BOOST_NOEXCEPT
{
    return x;
}

template<class EndianReversible>
inline EndianReversible conditional_reverse_impl( EndianReversible x, boost::false_type ) BOOST_NOEXCEPT
{
    return endian_reverse( x );
}

} // namespace detail

// generic conditional reverse
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To, class EndianReversible>
inline EndianReversible conditional_reverse( EndianReversible x ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible<EndianReversible>::value );
    return detail::conditional_reverse_impl( x, boost::integral_constant<bool, From == To>() );
}

// runtime conditional reverse
template <class EndianReversible>
inline EndianReversible conditional_reverse( EndianReversible x,
    BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible<EndianReversible>::value );
    return from_order == to_order? x: endian_reverse( x );
}

//--------------------------------------------------------------------------------------//
//                            reverse-in-place implementation                            //
//--------------------------------------------------------------------------------------//

namespace detail
{

template<class T> struct is_endian_reversible_inplace: boost::integral_constant<bool,
    boost::is_class<T>::value ||
    ( boost::is_integral<T>::value && !boost::is_same<T, bool>::value )>
{
};

} // namespace detail

#if BOOST_ENDIAN_BIG_BYTE

template <class EndianReversibleInplace>
inline void big_to_native_inplace( EndianReversibleInplace& ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value );
}

#else

template <class EndianReversibleInplace>
inline void big_to_native_inplace( EndianReversibleInplace& x ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value );
    endian_reverse_inplace( x );
}

#endif

#if BOOST_ENDIAN_BIG_BYTE

template <class EndianReversibleInplace>
inline void native_to_big_inplace( EndianReversibleInplace& ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value );
}

#else

template <class EndianReversibleInplace>
inline void native_to_big_inplace( EndianReversibleInplace& x ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value );
    endian_reverse_inplace( x );
}

#endif

#if BOOST_ENDIAN_LITTLE_BYTE

template <class EndianReversibleInplace>
inline void little_to_native_inplace( EndianReversibleInplace& ) BOOST_NOEXCEPT
{
    BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value );
}

#else

template <class EndianReversibleInplace>
inline void
little_to_native_inplace( EndianReversibleInplace& x ) BOOST_NOEXCEPT { BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value ); endian_reverse_inplace( x ); } #endif #if BOOST_ENDIAN_LITTLE_BYTE template <class EndianReversibleInplace> inline void native_to_little_inplace( EndianReversibleInplace& ) BOOST_NOEXCEPT { BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value ); } #else template <class EndianReversibleInplace> inline void native_to_little_inplace( EndianReversibleInplace& x ) BOOST_NOEXCEPT { BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value ); endian_reverse_inplace( x ); } #endif namespace detail { template<class EndianReversibleInplace> inline void conditional_reverse_inplace_impl( EndianReversibleInplace&, boost::true_type ) BOOST_NOEXCEPT { } template<class EndianReversibleInplace> inline void conditional_reverse_inplace_impl( EndianReversibleInplace& x, boost::false_type ) BOOST_NOEXCEPT { endian_reverse_inplace( x ); } } // namespace detail // generic conditional reverse in place template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To, class EndianReversibleInplace> inline void conditional_reverse_inplace( EndianReversibleInplace& x ) BOOST_NOEXCEPT { BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value ); detail::conditional_reverse_inplace_impl( x, boost::integral_constant<bool, From == To>() ); } // runtime reverse in place template <class EndianReversibleInplace> inline void conditional_reverse_inplace( EndianReversibleInplace& x, BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order ) BOOST_NOEXCEPT { BOOST_STATIC_ASSERT( detail::is_endian_reversible_inplace<EndianReversibleInplace>::value ); if( from_order != to_order ) { endian_reverse_inplace( x ); } } } // namespace endian } // namespace boost #endif // BOOST_ENDIAN_CONVERSION_HPP
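//--------------------------------------------------------------------------------------//
//  A minimal usage sketch (not part of the header above). It exercises the value-
//  returning conversions and the ADL customization point described in the synopsis;
//  the UDT `pixel` and namespace `app` are hypothetical.
//--------------------------------------------------------------------------------------//

#include <boost/endian/conversion.hpp>
#include <cstdint>

namespace app
{
    struct pixel { std::uint16_t r, g, b; };  // hypothetical UDT

    // ADL-visible free functions satisfying the EndianReversible and
    // EndianReversibleInplace requirements.
    inline pixel endian_reverse( pixel p ) BOOST_NOEXCEPT
    {
        p.r = boost::endian::endian_reverse( p.r );
        p.g = boost::endian::endian_reverse( p.g );
        p.b = boost::endian::endian_reverse( p.b );
        return p;
    }

    inline void endian_reverse_inplace( pixel& p ) BOOST_NOEXCEPT
    {
        p = endian_reverse( p );
    }
}

int main()
{
    std::uint32_t x = 0x12345678;
    std::uint32_t wire = boost::endian::native_to_big( x );  // no-op on big-endian hosts

    boost::endian::conditional_reverse_inplace<boost::endian::order::little,
                                               boost::endian::order::big>( x );

    app::pixel px = { 1, 2, 3 };
    boost::endian::native_to_big_inplace( px );  // finds app::endian_reverse_inplace via ADL

    (void)wire;
}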
If $a$ and $b$ are relatively prime, then there exist $x$ and $y$ such that $a x + b y = 1$.
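A concrete instance: for $a = 3$ and $b = 5$, taking $x = 2$ and $y = -1$ gives $a x + b y = 3 \cdot 2 + 5 \cdot (-1) = 1$.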
function L = co_reggui(action,varargin) % GUI for Manual co-registration of different image modalities to go with % CERR % - Detailed Description: this is a manual co-registration routine for two % sets of images (2d/3d) assuming that a possible resampling and affine % transformation are sufficient for alignment. The quality of registration is measured by % using the mutual information criterion (MI) % % Written By: Issam El Naqa Date: 08/28/03 % Revised by: Date: % % Copyright 2010, Joseph O. Deasy, on behalf of the CERR development team. % % This file is part of The Computational Environment for Radiotherapy Research (CERR). % % CERR development has been led by: Aditya Apte, Divya Khullar, James Alaly, and Joseph O. Deasy. % % CERR has been financially supported by the US National Institutes of Health under multiple grants. % % CERR is distributed under the terms of the Lesser GNU Public License. % % This version of CERR is free software: you can redistribute it and/or modify % it under the terms of the GNU General Public License as published by % the Free Software Foundation, either version 3 of the License, or % (at your option) any later version. % % CERR is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; % without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. % See the GNU General Public License for more details. % % You should have received a copy of the GNU General Public License % along with CERR. If not, see <http://www.gnu.org/licenses/>. if nargin<1, action='Initializeco_reggui'; end; switch action case 'Initializeco_reggui' %Initialization Initializeco_reggui; case 'RegisterMode' mode = get(findobj('Tag', 'dispregpopupmenu'),'Value'); switch mode case 1 %manual case 2 %ctrl points case 3 %auto control points end case 'LoadRefImage' set(findobj(gcbf, 'Tag', 'StatusText'), 'String', 'Loading images...'); h = get(gcbf,'Userdata'); pathname = h.pathname; [filename, pathname] = uigetfile('*.*', 'Load reference image'); load([pathname,filename]); x = cropImage(x); h.refimage=min_maxnorm(x, 0:255); set(gcbf,'Userdata',h); DispImage('axref',x); drawnow case 'LoadPlanCUnregImage' case 'LoadPlanCRefImage' case 'LoadUnregImage' set(findobj(gcbf, 'Tag', 'StatusText'), 'String', 'Loading images...'); h = get(gcbf,'Userdata'); pathname = h.pathname; [filename, pathname] = uigetfile('*.*', 'Load unregistered image'); load([pathname,filename]); x = cropImage(x); h.unregimage=min_maxnorm(x, 0:255); h.regimage=h.unregimage; % initialize registered image to the unregistered one!
set(gcbf,'Userdata',h); DispImage('axunreg',x); drawnow set(findobj(gcbf, 'Tag', 'StatusText'), 'String', 'Set registration parameters...'); case 'EditRegParam' h = get(gcbf,'Userdata'); h.Tx=str2num(get(findobj(gcbf,'Tag','TXed'),'String')); h.Ty=str2num(get(findobj(gcbf,'Tag','TYed'),'String')); h.Tz=str2num(get(findobj(gcbf,'Tag','TZed'),'String')); h.rot=str2num(get(findobj(gcbf,'Tag','Roted'),'String')); h.Sx=str2num(get(findobj(gcbf,'Tag','SXed'),'String')); h.Sy=str2num(get(findobj(gcbf,'Tag','SYed'),'String')); h.Sz=str2num(get(findobj(gcbf,'Tag','SZed'),'String')); h.samp=str2num(get(findobj(gcbf,'Tag','Resamped'),'String')); set(findobj(gcbf, 'Tag', 'regpushbutton'), 'Enable', 'on'); set(findobj(gcbf, 'Tag', 'dispmodepopupmenu'), 'Enable', 'on'); set(findobj(gcbf, 'Tag', 'StatusText'), 'String', 'Press register button for processing...'); set(gcbf,'UserData',h); case 'DisplayMode' setDisplayMode; case 'enlargeOverlay' h = get(gcbf,'Userdata'); displayOverlays(h.refimage, h.regimage); case 'ApplyRegistration' h = get(gcbf,'Userdata'); mode = get(findobj('Tag', 'dispregpopupmenu'),'Value'); switch mode case 1 %manual tempimage=imtranslate2d(h.unregimage,h.Tx,h.Ty); tempimage=imrotate2d(tempimage,h.rot); tempimage=imscale2d(tempimage,h.Sx,h.Sy); tempimage=imresample2d(tempimage,h.samp); h.regimage=tempimage; case 2 %ctrl points getControlPoints('init'); getControlPoints('load', h.refimage, h.unregimage); waitfor(findobj('Tag', 'RegGui'), 'Tag', 'RegGuiDone'); [ref_pts, target_pts] = getControlPoints('getpoints', h.refimage, h.unregimage); delete(findobj('Tag', 'RegGuiDone')); [tempimage,A]=compute_aff_transform(h.unregimage, ref_pts, target_pts); h.regimage=tempimage; case 3 getControlPoints('init'); getControlPoints('load', h.refimage, h.unregimage); waitfor(findobj('Tag', 'RegGui'), 'Tag', 'RegGuiDone'); [ref_pts, target_pts] = getControlPoints('getpoints', h.refimage, h.unregimage); delete(findobj('Tag', 'RegGuiDone')); [tempimage,A]=compute_perspect_transform(h.unregimage, ref_pts, target_pts); h.regimage=tempimage; case 4 %auto control points numPoints = 20; sigma = 1; get_control_points(h.unregimage, h.refimage, numPoints, sigma); end % make joint histogram plot [h.mi, h.jhist]=get_mutualinfo(h.refimage,h.regimage); %set(findobj(gcbf, 'Tag', 'mitext'), 'Visible', 'on'); %set(findobj(gcbf, 'Tag', 'mivalue'), 'Visible', 'on'); % make a cross-correlation image h.xcorr=fxcorr(h.refimage,h.regimage); % save current data set(gcbf,'UserData',h); % display images setDisplayMode; DisplayJHist(h.jhist); set(findobj(gcbf, 'Tag', 'mivalue'), 'String', num2str(h.mi)); DispImage('axcorr',h.xcorr); drawnow case 'info' helpwin co_reggui; case 'close' close(gcbf); end return % supplementary routines function setDisplayMode() % set display mode for registration h = get(gcbf,'Userdata'); h.displaymode = get(findobj(gcbf,'Tag','dispmodepopupmenu'),'Value'); switch h.displaymode case 1 % Single registered DispImage('axreg',h.regimage); case 2 % Overlayed images alpha=0.8; % transparency factor alternate_time=0.5; % refresh time AlternateOverlayed(h.refimage,h.regimage,'axreg',alpha,alternate_time); case 3 % Sliceomatic end set(gcbf,'UserData',h); return % image display function function DispImage(imTag,x) set(gcbf,'CurrentAxes',findobj(gcbf,'Tag',imTag)); cla, h=imagesc(x), axis image, colormap('hot'), axis ij, axis off set(h, 'ButtonDownFcn', ['co_reggui(''Ctrl_' imTag ''');']); return % image translation function function fh=imtranslate2d(f,xoff,yoff) [h,w]=size(f);
[x,y]=meshgrid([1:1:h],[1:1:w]); xd=x(:)+xoff; yd=y(:)+yoff; fh=reshape(bilinear_interpolation(f,xd,yd),w,h)'; return % image rotation function around the middle of the image function fh=imrotate2d(f,ang) [h,w]=size(f); phi = ang*pi/180; % Convert to radians vx=[-floor(h/2):ceil(h/2)-1]; % center around the middle vy=[-floor(w/2):ceil(w/2)-1]; [x,y]=meshgrid(vx,vy); x=x(:); y=y(:); xd=x*cos(phi)+y*sin(phi)+floor(h/2)+1; yd=-x*sin(phi)+y*cos(phi)+floor(w/2)+1; fh=reshape(bilinear_interpolation(f,xd,yd),w,h)'; return % image scaling function (expansion/shrinking) function fh=imscale2d(f,Sx,Sy) [h,w]=size(f); [x,y]=meshgrid([1:1:h],[1:1:w]); xd=x(:)*Sx; yd=y(:)*Sy; fh=reshape(bilinear_interpolation(f,xd,yd),w,h)'; return % image resampling function function fh=imresample2d(f,q) [h,w]=size(f); % use a binomial filter of order 5 for smoothing d=[1 4 6 4 1]/16; B=d'*d; fh=f; if q==1 return elseif q>1 % upsample N=round(q); y = zeros(N*h,N*w); y(1:N:end,1:N:end)=fh; fh=conv2(y,B,'same'); elseif (q<1 & q>0) % downsample D=round(1/q); fh=conv2(fh,B,'same'); fh=fh(1:D:end,1:D:end); else errordlg('The resampling factor should be a positive number!', 'co_reggui Error', 'replace'); end % perform fast cross-correlation in frequency domain function c=fxcorr(x,y) Fsize=size(x)+size(y)-1; Fx = fft2(rot90(x,2),Fsize(1),Fsize(2)); Fy = fft2(y,Fsize(1),Fsize(2)); c = real(ifft2(Fx .* Fy)); return % compute the mutual information using the joint histogram function [mi, histxy]=get_mutualinfo(x,y) % x, y : the two images, siz=min([size(x);size(y)]); % if sizes are different nbits=8; ngray=2^nbits; % assume 256 levels is a sufficient approximation! x=double(uint8(double(x)+1)); y=double(uint8(double(y)+1)); % convert to 8 bits histxy=zeros(ngray,ngray); [iM,jM] = meshgrid(1:siz(1),1:siz(2)); indV = (jM(:) - 1) * siz(1) + iM(:); xV =double(x(indV)); yV = double(y(indV)); ind2V = (yV - 1) * ngray + xV; for i=1:length(ind2V) histxy(ind2V(i)) = histxy(ind2V(i)) + 1; end %for i=1:siz(1) % for j=1:siz(2) % histxy(x(i,j),y(i,j))= histxy(x(i,j),y(i,j))+1; % end %end histxy=histxy/sum(histxy(:)); % normalize % compute marginal distributions histx=sum(histxy,2); histy=sum(histxy,1); % by integrating out the joint mi=sum(sum(histxy.*log2(histxy./(histx*histy+eps)+eps))); return % joint histogram display function DisplayJHist(jhist) set(gcbf,'CurrentAxes',findobj(gcbf,'Tag','axjhist')); set(findobj(gcbf,'Tag','axjhist'),'Box','off'); cla, view(-37.5,30), colormap('hot'), mesh(jhist); return % display transparent overlays in a cyclic fashion function DisplayOverlayed(x,y,imTag,alpha) % try alphas, or linear combinations...
%mask = ones(size(y)); %mask(find(y<1)) = alpha; set(gcbf,'CurrentAxes',findobj(gcbf,'Tag',imTag)); sx=size(x); sy=size(y); % linear combination siz=max([sx;sy]); xa=zeros(siz); xa(1:sx(1),1:sx(2))=x; ya=zeros(siz); ya(1:sy(1),1:sy(2))=y; cla, imagesc(double(xa)+0.75*double(ya), 'ButtonDownFcn', 'co_reggui(''enlargeOverlay'');'), axis image, colormap('hot'), axis ij, axis off % hold on % hi=imagesc(); % axis image, colormap('hot'), axis ij, axis off; %set(hi,'AlphaData',mask); return function AlternateOverlayed(x,y,imTag,alpha, atime) DisplayOverlayed(x,y,imTag,alpha); % pause(atime); % DisplayOverlayed(y,x,imTag,alpha); return function x = cropImage(x) minCol = 1; minRow = 1; [maxCol, maxRow] = size(x); rows = find(~max(x')); cols = find(~max(x)); for i=1:length(rows) if rows(i) ~= i minRow = rows(i-1)+1; maxRow = rows(i)-1; break; end end for i=1:length(cols) if cols(i) ~= i minCol = cols(i-1)+1; maxCol = cols(i)-1; break; end end x = imcrop(x,[minCol, minRow, maxCol-minCol, maxRow-minRow]);
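% --- Illustrative usage sketch (not part of co_reggui itself): exercising the
% transform and similarity helpers above on a synthetic image. It assumes the
% helpers have been extracted to standalone files on the MATLAB path, that
% CERR's bilinear_interpolation is available, and that the Image Processing
% Toolbox provides phantom.
f = phantom(128);                              % synthetic test image
g = imrotate2d(imtranslate2d(f, 3, -2), 5);    % translate by (3,-2), then rotate 5 degrees
[mi, jhist] = get_mutualinfo(f, g);            % mutual information between the pair
c = fxcorr(f, g);                              % fast cross-correlation in the frequency domain
fprintf('MI between original and transformed image: %.3f\n', mi);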
```python from sympy import sieve print([i for i in sieve.primerange(2, 1000)]) ``` ```python #188 Given "Merry Christmas" as input, print #M #Me #Mer ... and so on until the whole sentence is completed sentence = input("Enter the Sentence") for i in range(0,len(sentence)): for j in range(0, i+1): print(sentence[j], end="") print() ``` Enter the SentenceMerry Christmas M Me Mer Merr Merry Merry Merry C Merry Ch Merry Chr Merry Chri Merry Chris Merry Christ Merry Christm Merry Christma Merry Christmas ```python #188 Given "Merry Christmas" as input, print #M #Me #Mer ... and so on until the whole sentence is completed sentence = "Merry Christmas" for i in range(0, len(sentence)): for j in range(0, i+1): print(sentence[j], end= "") print() ``` M Me Mer Merr Merry Merry Merry C Merry Ch Merry Chr Merry Chri Merry Chris Merry Christ Merry Christm Merry Christma Merry Christmas ```python a = "Merry Christmas" for i in range(len(a)): for j in range(i+1): print(a[j], end = "") print() ``` ```python # even shorter sentence = input("Enter the sentence") for i in range(len(sentence)): print(sentence[0:i+1]) ``` ```python #191 Todo: draw stars ``` * *** ***** ******* ********* *********** *********** *********** *********** *********** *********** ```python #191 answer for i in range(11): if i < 6: print("*" + "*" * 2 * i) else: print("*" + "*" * 2 * 5) ``` ```python #191 a = "*" for i in range(11): if i <= 5: print(2 * a * i + a) else: print(a*11) ``` ```python #192 Todo ``` * *** ***** ******* ********* *********** ************* *************** ***************** ******************* ********************* ```python #192 answer for i in range(11): print(" " * (11-i), "*" * (2*i+1), " " * i) ``` ```python #196 There is a number that is the product of two primes. Find which two primes were multiplied #ex) 143 => 11 * 13 num = int(input("Enter the number : ")) WN = False for i in range(2, num): isTrue1 = True for j in range(2, i): if i % j == 0: isTrue1 = False if isTrue1 == True: for j in range(2, i): isTrue2 = True for k in range(2,j): if j % k == 0: isTrue2 = False if isTrue2 == True: if num == i * j: print(num, "=", max(i,j),"X", min(i,j)) WN = True break if WN == False: print("Wrong number") ``` Enter the number : 21 21 = 7 X 3 21 = 7 X 3 21 = 7 X 3 21 = 7 X 3 21 = 7 X 3 ```python #196 There is a number that is the product of two primes.
Find which two primes were multiplied (gave up) #ex) 143 => 11 * 13 import sympy num = int(input("Enter the number : ")) if sympy.isprime(num) == True: ``` Enter the number : 3 True ```python #196(2) def prime_factors(n): i = 2 factors = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(i) if n > 1: factors.append(n) return factors prime_factors(143) ``` [11, 13] ```python n = 20 i = 2 while i * i < n: while n%i == 0: n = n / i i = i + 1 print (n) ``` 5.0 ```python #196(4) %time def prime_factors(n): i = 2 factors = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(i) if n > 1: factors.append(n) return factors prime_factors(600851475143) ``` CPU times: user 2 µs, sys: 1 µs, total: 3 µs Wall time: 5.25 µs [71, 839, 1471, 6857] ```python class is_prime: def prime_number(n): a = [] for i in range(2,n): if n % i == 0: a.append(n) if len(a) == 0: return True print("{} is a prime number!".format(n)) else: return False print("{} is not a prime number".format(n)) def prime_mul(self, n): a = [] for i in range(2, n): if n % i == 0: a.append(i) b = [] for i in a: if prime_number(i) == True: b.append(i) c = [] for i in range(len(b)): for j in range(len(b)): if b[i] * b[j] == n: c.append(b[i]) return c ``` ```python a = is_prime() a.prime_mul(143) ``` ```python %time from sympy import primefactors primefactors(600851475143) ``` CPU times: user 2 µs, sys: 1e+03 ns, total: 3 µs Wall time: 4.77 µs [71, 839, 1471, 6857] ```python #197 For the clock times 00:00 ~ 23:59, count every minute display in which the digit 3 appears clock = 0 for i in range(24): for j in range(60): three = str(i) + str(j) if '3' in three: clock += 1 clock ``` ```python #198 For natural numbers a, b, c, count the combinations of three natural numbers satisfying a+b+c = 100; (30,40,30) and (40,30,30) are duplicates ### natural numbers!!!, 100 myset = set() for a in range(1, 100): for b in range(1, 100): for c in range(1, 100): if a + b + c == 100: myset.update({a, b, c}) print(len(myset)) ``` 98 ```python #200 A program to find the 50th number when the five-digit numbers formed by using each of the digits 1 to 5 exactly once are listed in increasing order. num = [1, 2, 3, 4, 5] num_list = [] for i in num: for j in num: for k in num: for l in num: for m in num: nstr = str(i)+str(j)+str(k)+str(l)+str(m) if len({i,j,k,l,m}) == 5: # need len() so that only 5-element sets count num_list.append(int(nstr)) num_list.sort() print(num_list) ``` [12345, 12354, 12435, 12453, 12534, 12543, 13245, 13254, 13425, 13452, 13524, 13542, 14235, 14253, 14325, 14352, 14523, 14532, 15234, 15243, 15324, 15342, 15423, 15432, 21345, 21354, 21435, 21453, 21534, 21543, 23145, 23154, 23415, 23451, 23514, 23541, 24135, 24153, 24315, 24351, 24513, 24531, 25134, 25143, 25314, 25341, 25413, 25431, 31245, 31254, 31425, 31452, 31524, 31542, 32145, 32154, 32415, 32451, 32514, 32541, 34125, 34152, 34215, 34251, 34512, 34521, 35124, 35142, 35214, 35241, 35412, 35421, 41235, 41253, 41325, 41352, 41523, 41532, 42135, 42153, 42315, 42351, 42513, 42531, 43125, 43152, 43215, 43251, 43512, 43521, 45123, 45132, 45213, 45231, 45312, 45321, 51234, 51243, 51324, 51342, 51423, 51432, 52134, 52143, 52314, 52341, 52413, 52431, 53124, 53142, 53214, 53241, 53412, 53421, 54123, 54132, 54213, 54231, 54312, 54321] ```python if 20 % 3: print("possible") ``` possible ```python ``` ```python bin(40) ``` '0b101000' ```python ```
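A compact alternative for #196 (a sketch; it relies on sympy.factorint, and sympy is already used elsewhere in this notebook):

```python
# Sketch: check that n is a product of exactly two primes and report them.
from sympy import factorint

def two_prime_product(n):
    f = factorint(n)  # mapping {prime: multiplicity}
    if sum(f.values()) != 2:
        return None  # not a product of exactly two primes
    p, q = sorted(p for p, m in f.items() for _ in range(m))
    return q, p  # larger prime first, matching the "11 * 13" style above

print(two_prime_product(143))  # expected: (13, 11)
```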
using Test using ColorTypes: RGB, Gray, N0f8 using FileIO, ImageCore, Dates, Statistics using Statistics, StatsBase import VideoIO createmode = false testdir = dirname(@__FILE__) videodir = joinpath(testdir, "..", "videos") VideoIO.TestVideos.available() VideoIO.TestVideos.download_all() swapext(f, new_ext) = "$(splitext(f)[1])$new_ext" isarm() = Base.Sys.ARCH in (:arm,:arm32,:arm7l,:armv7l,:arm8l,:armv8l,:aarch64,:arm64) #@show Base.Sys.ARCH @noinline function isblank(img) all(c->green(c) == 0, img) || all(c->blue(c) == 0, img) || all(c->red(c) == 0, img) || maximum(rawview(channelview(img))) < 0xcf end # Helper functions function test_compare_frames(test_frame, ref_frame) if isarm() @test_skip test_frame == ref_frame else @test test_frame == ref_frame end end # uses read! function get_first_frame!(img, v) seekstart(v) read!(v, img) while isblank(img) read!(v, img) end end @testset "Reading of various example file formats" begin for name in VideoIO.TestVideos.names() @testset "Reading $name" begin first_frame_file = joinpath(testdir, swapext(name, ".png")) !createmode && (first_frame = load(first_frame_file)) f = VideoIO.testvideo(name) v = VideoIO.openvideo(f) time_seconds = VideoIO.gettime(v) @test time_seconds == 0 if !createmode && (size(first_frame, 1) > v.height) first_frame = first_frame[1+size(first_frame,1)-v.height:end,:] end # Find the first non-trivial image img = read(v) i=1 while isblank(img) read!(v, img) i += 1 end # println("$name vs. $first_frame_file - First non-blank frame: $i") # for debugging if createmode save(first_frame_file,img) else test_compare_frames(img, first_frame) end for i in 1:50 read!(v,img) end fiftieth_frame = img timebase = v.avin.video_info[1].stream.time_base tstamp = v.aVideoFrame[1].pkt_dts video_tstamp = v.avin.video_info[1].stream.first_dts fiftytime = (tstamp-video_tstamp)/(convert(Float64,timebase.den)/convert(Float64,timebase.num)) while !eof(v) read!(v, img) end seek(v,float(fiftytime)) read!(v,img) @test img == fiftieth_frame # read first frames again, and compare get_first_frame!(img, v) createmode || test_compare_frames(img, first_frame) # make sure read! works with both PermutedDimsArray and Array # The above tests already use read! for PermutedDimsArray, so just test the type of img @test typeof(img) <: PermutedDimsArray img_p = parent(img) @assert typeof(img_p) <: Array # img is a view of img_p, so calling read! on img_p should alter img # # first, zero img out to be sure we get the desired result from calls to read on img_p! fill!(img, zero(eltype(img))) # Then get the first frame, which uses read! get_first_frame!(img_p, v) # Finally compare the result to make sure it's right createmode || test_compare_frames(img, first_frame) # Skipping & frame counting VideoIO.seekstart(v) VideoIO.skipframe(v) VideoIO.skipframes(v, 10) @test VideoIO.counttotalframes(v) == VideoIO.TestVideos.videofiles[name].numframes close(v) end end end @testset "IO reading of various example file formats" begin for name in VideoIO.TestVideos.names() # TODO: fix me? 
(startswith(name, "ladybird") || startswith(name, "NPS")) && continue @testset "Testing $name" begin first_frame_file = joinpath(testdir, swapext(name, ".png")) first_frame = load(first_frame_file) filename = joinpath(videodir, name) v = VideoIO.openvideo(VideoIO.open(filename)) if size(first_frame, 1) > v.height first_frame = first_frame[1+size(first_frame,1)-v.height:end,:] end img = read(v) # Find the first non-trivial image while isblank(img) read!(v, img) end if isarm() @test_skip img == first_frame else @test img == first_frame end while !eof(v) read!(v, img) end # Iterator interface VT = typeof(v) @test Base.IteratorSize(VT) === Base.SizeUnknown() @test Base.IteratorEltype(VT) === Base.EltypeUnknown() VideoIO.seekstart(v) i = 0 local first_frame local last_frame for frame in v i += 1 if i == 1 first_frame = frame end last_frame = frame end @test i == VideoIO.TestVideos.videofiles[name].numframes # test that the frames returned by the iterator have distinct storage if i > 1 @test first_frame !== last_frame end ## Test that iterator is mutable, and continues where iteration last ## stopped. @test iterate(v) === nothing end end VideoIO.testvideo("ladybird") # coverage testing @test_throws ErrorException VideoIO.testvideo("rickroll") @test_throws ErrorException VideoIO.testvideo("") end @testset "Reading video metadata" begin @testset "Reading Storage Aspect Ratio: SAR" begin # currently, the SAR of all the test videos is 1, we should get another video with a valid SAR that is not equal to 1 vids = Dict("ladybird.mp4" => 1, "black_hole.webm" => 1, "crescent-moon.ogv" => 1, "annie_oakley.ogg" => 1) @test all(VideoIO.aspect_ratio(VideoIO.openvideo(joinpath(videodir, k))) == v for (k,v) in vids) end @testset "Reading video duration, start date, and duration" begin # tesing the duration and date & time functions: file = joinpath(videodir, "annie_oakley.ogg") @test VideoIO.get_duration(file) == 24224200/1e6 @test VideoIO.get_start_time(file) == DateTime(1970, 1, 1) @test VideoIO.get_time_duration(file) == (DateTime(1970, 1, 1), 24224200/1e6) @test VideoIO.get_number_frames(file) === nothing end @testset "Reading the number of frames from container" begin file = joinpath(videodir, "ladybird.mp4") @test VideoIO.get_number_frames(file) == 398 @test VideoIO.get_number_frames(file, 0) == 398 @test_throws ArgumentError VideoIO.get_number_frames(file, -1) @test_throws ErrorException VideoIO.get_number_frames("Not_a_file") end end @testset "Encoding video across all supported colortypes" begin for el in [UInt8, RGB{N0f8}] @testset "Encoding $el imagestack" begin n = 100 imgstack = map(x->rand(el,100,100),1:n) props = [:priv_data => ("crf"=>"22","preset"=>"medium")] encodedvideopath = VideoIO.encodevideo("testvideo.mp4",imgstack,framerate=30,AVCodecContextProperties=props, silent=true) @test stat(encodedvideopath).size > 100 f = VideoIO.openvideo(encodedvideopath) @test VideoIO.counttotalframes(f) == n-4 # videos encoded with crf > 0 have 4 fewer frames close(f) rm(encodedvideopath) end end end @testset "Encoding video with rational frame rates" begin n = 100 fr = 59 // 2 # 29.5 target_dur = 3.39 @testset "Encoding with frame rate $(float(fr))" begin imgstack = map(x->rand(UInt8,100,100),1:n) props = [:priv_data => ("crf"=>"22","preset"=>"medium")] encodedvideopath = VideoIO.encodevideo("testvideo.mp4",imgstack, framerate=fr, AVCodecContextProperties=props, silent=true) @test stat(encodedvideopath).size > 100 measured_dur_str = VideoIO.FFMPEG.exe(`-v error -show_entries format=duration -of 
default=noprint_wrappers=1:nokey=1 $(encodedvideopath)`, command = VideoIO.FFMPEG.ffprobe, collect = true) @test parse(Float64, measured_dur_str[1]) == target_dur rm(encodedvideopath) end end @testset "Encoding video with float frame rates" begin n = 100 fr = 29.5 # 59 // 2 target_dur = 3.39 @testset "Encoding with frame rate $(float(fr))" begin imgstack = map(x->rand(UInt8,100,100),1:n) props = [:priv_data => ("crf"=>"22","preset"=>"medium")] encodedvideopath = VideoIO.encodevideo("testvideo.mp4",imgstack, framerate=fr, AVCodecContextProperties=props, silent=true) @test stat(encodedvideopath).size > 100 measured_dur_str = VideoIO.FFMPEG.exe(`-v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 $(encodedvideopath)`, command = VideoIO.FFMPEG.ffprobe, collect = true) @test parse(Float64, measured_dur_str[1]) == target_dur rm(encodedvideopath) end end @testset "Video encode/decode accuracy (read, encode, read, compare)" begin file = joinpath(videodir, "annie_oakley.ogg") f = VideoIO.openvideo(file) imgstack_rgb = [] imgstack_gray = [] while !eof(f) img = collect(read(f)) img_gray = convert(Array{Gray{N0f8}},img) push!(imgstack_rgb,img) push!(imgstack_gray,img_gray) end @testset "Lossless Grayscale encoding" begin file_lossless_gray_copy = joinpath(videodir, "annie_oakley_lossless_gray.mp4") prop = [:color_range=>2, :priv_data => ("crf"=>"0","preset"=>"medium")] codec_name="libx264" VideoIO.encodevideo(file_lossless_gray_copy,imgstack_gray,codec_name=codec_name,AVCodecContextProperties=prop, silent=true) fcopy = VideoIO.openvideo(file_lossless_gray_copy,target_format=VideoIO.AV_PIX_FMT_GRAY8) imgstack_gray_copy = [] while !eof(fcopy) push!(imgstack_gray_copy,collect(read(fcopy))) end close(f) @test eltype(imgstack_gray) == eltype(imgstack_gray_copy) @test length(imgstack_gray) == length(imgstack_gray_copy) @test size(imgstack_gray[1]) == size(imgstack_gray_copy[1]) @test !any(.!(imgstack_gray .== imgstack_gray_copy)) end @testset "Lossless RGB encoding" begin file_lossless_rgb_copy = joinpath(videodir, "annie_oakley_lossless_rgb.mp4") prop = [:priv_data => ("crf"=>"0","preset"=>"medium")] codec_name="libx264rgb" VideoIO.encodevideo(file_lossless_rgb_copy,imgstack_rgb,codec_name=codec_name,AVCodecContextProperties=prop, silent=true) fcopy = VideoIO.openvideo(file_lossless_rgb_copy) imgstack_rgb_copy = [] while !eof(fcopy) img = collect(read(fcopy)) push!(imgstack_rgb_copy,img) end close(f) @test eltype(imgstack_rgb) == eltype(imgstack_rgb_copy) @test length(imgstack_rgb) == length(imgstack_rgb_copy) @test size(imgstack_rgb[1]) == size(imgstack_rgb_copy[1]) @test !any(.!(imgstack_rgb .== imgstack_rgb_copy)) end @testset "UInt8 accuracy during read & lossless encode" begin # Test that reading truth video has one of each UInt8 value pixels (16x16 frames = 256 pixels) f = VideoIO.openvideo(joinpath(testdir,"precisiontest_gray_truth.mp4"),target_format=VideoIO.AV_PIX_FMT_GRAY8) frame_truth = collect(rawview(channelview(read(f)))) h_truth = fit(Histogram, frame_truth[:], 0:256) @test h_truth.weights == fill(1,256) #Test that reading is precise # Test that encoding new test video has one of each UInt8 value pixels (16x16 frames = 256 pixels) img = Array{UInt8}(undef,16,16) for i in 1:256 img[i] = UInt8(i-1) end imgstack = [] for i=1:24 push!(imgstack,img) end props = [:color_range=>2, :priv_data => ("crf"=>"0","preset"=>"medium")] VideoIO.encodevideo(joinpath(testdir,"precisiontest_gray_test.mp4"), imgstack, AVCodecContextProperties = props,silent=true) f = 
VideoIO.openvideo(joinpath(testdir,"precisiontest_gray_test.mp4"), target_format=VideoIO.AV_PIX_FMT_GRAY8) frame_test = collect(rawview(channelview(read(f)))) h_test = fit(Histogram, frame_test[:], 0:256) @test h_test.weights == fill(1,256) #Test that encoding is precise (if above passes) @test VideoIO.counttotalframes(f) == 24 end @testset "Correct frame order when reading & encoding" begin @testset "Frame order when reading ground truth video" begin # Test that a video with frame-incremental pixel values is read back in order f = VideoIO.openvideo(joinpath(testdir,"ordertest_gray_truth.mp4"),target_format=VideoIO.AV_PIX_FMT_GRAY8) frame_ids_truth = [] while !eof(f) img = collect(rawview(channelview(read(f)))) push!(frame_ids_truth,img[1,1]) end @test frame_ids_truth == collect(0:255) #Test that reading is in correct frame order @test VideoIO.counttotalframes(f) == 256 end @testset "Frame order when encoding, then reading video" begin # Test that writing and then reading a video with frame-incremental pixel values preserves frame order imgstack = [] img = Array{UInt8}(undef,16,16) for i in 0:255 push!(imgstack,fill(UInt8(i),(16,16))) end props = [:color_range=>2, :priv_data => ("crf"=>"0","preset"=>"medium")] VideoIO.encodevideo(joinpath(testdir,"ordertest_gray_test.mp4"), imgstack, AVCodecContextProperties = props,silent=true) f = VideoIO.openvideo(joinpath(testdir,"ordertest_gray_test.mp4"), target_format=VideoIO.AV_PIX_FMT_GRAY8) frame_ids_test = [] while !eof(f) img = collect(rawview(channelview(read(f)))) push!(frame_ids_test,img[1,1]) end @test frame_ids_test == collect(0:255) #Test that reading is in correct frame order @test VideoIO.counttotalframes(f) == 256 end end end @testset "c api memory leak test" begin # Issue https://github.com/JuliaIO/VideoIO.jl/issues/246 if(Sys.islinux()) # TODO: find a method to get cross platform memory usage, see: https://discourse.julialang.org/t/how-to-get-current-julia-process-memory-usage/41734/4 function get_memory_usage() open("/proc/$(getpid())/statm") do io split(read(io, String))[1] end end file = joinpath(videodir, "annie_oakley.ogg") @testset "open file test" begin check_size = 10 usage_vec = Vector{String}(undef, check_size) for i in 1:check_size f = VideoIO.openvideo(file) close(f) GC.gc() usage_vec[i] = get_memory_usage() end println(usage_vec) @test usage_vec[end-1] == usage_vec[end] end @testset "open and read file test" begin check_size = 10 usage_vec = Vector{String}(undef, check_size) for i in 1:check_size f = VideoIO.openvideo(file) img = read(f) close(f) GC.gc() usage_vec[i] = get_memory_usage() end println(usage_vec) @test usage_vec[end-1] == usage_vec[end] end end end #VideoIO.TestVideos.remove_all()
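# A minimal read/re-encode sketch distilled from the tests above (it assumes the
# same VideoIO API the test suite exercises and the `videodir` defined earlier);
# left commented out so it does not run as part of the test suite:
#
# using VideoIO
# io = VideoIO.openvideo(joinpath(videodir, "annie_oakley.ogg"))
# frames = [copy(img) for img in io]       # the iterator interface yields frames
# close(io)
# props = [:priv_data => ("crf" => "0", "preset" => "medium")]
# VideoIO.encodevideo("roundtrip.mp4", frames, framerate=30,
#                     AVCodecContextProperties=props, silent=true)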
\chapter{Modeling and Model Validation} \label{modeling} \begin{figure}[thpb] \centering \includegraphics[width=0.8\columnwidth]{tex/img/superball_roverscape2_cropped.jpg} \caption{SUPERball, fully assembled, in the NASA Ames Research Center Roverscape.} \label{fig:SB} \end{figure} As part of our research for the NASA Innovative Advanced Concepts (NIAC) program, we are developing the \SB{} (Spherical Underactuated Planetary Exploration Robot), which is a compliant icosahedron tensegrity robot designed for planetary landing and exploration, seen in figure \ref{fig:SB}. Tensegrity robots are soft machines which are uniquely able to compliantly absorb forces and interact with unstructured environments. However, instead of engineering a single new robot, we have chosen to develop a fundamentally reusable component for tensegrity robots by creating a modular robotic tensegrity strut which contains an integrated system of power, sensing, actuation, and communications. The purpose is to enable the exploration of the wide range of possible tensegrity robotic morphologies by simply combining the robotic struts into new systems. Though there is much prior work in a variety of theoretical areas for tensegrities, engineering knowledge of constructing practical tensegrity robots is limited. Since a staggering variety of different tensegrity structures can be constructed from collections of simple sticks and strings, we have made it a priority to develop self-contained robotic tensegrity struts which can be used to explore and build a wide range of tensegrity robots simply by combining them into novel structures. Our designs are driven by experimental results obtained from a previous prototype, ReCTeR (Reservoir Compliant Tensegrity Robot), in combination with simulation results from our validated tensegrity simulator NTRT (NASA Tensegrity Robotics Toolkit)~\cite{2917079,Caluwaerts2013rsif}. To move beyond ReCTeR's design limitations, and to meet our lab's need for rapid experimentation with various tensegrity configurations and morphologies, we came up with a modular tensegrity platform for researching large-scale robotic tasks; e.g. a tensegrity planetary probe to explore Saturn's moon Titan. %Our lab obtained design requirements through an iterative %approach involving NTRT and ReCTeR. As we validated our NTRT simulator by experimental validation %with ReCTeR~\cite{Caluwaerts2013rsif} and can now quickly evaluate various Our lab obtained design requirements through an iterative approach in which we validated our NTRT simulator by experimental comparison with ReCTeR~\cite{Caluwaerts2013rsif}. We can now quickly evaluate various tensegrity configurations in simulation to find optimal mechanical design goals. In conjunction with the NTRT solver, we also incorporated results obtained with our (open source) Euler Lagrange solver based on Skelton's work~\cite{Skelton2009} and measurements on ReCTeR. The initial design requirements obtained from the NTRT simulations, refined designs after a first prototype build, and how these compare to other tensegrity robotic systems are given in Table \ref{design_req}.
\begin{table*}[ht] %\begin{minipage}[t][\linewidth]{ \caption{\SB{} and Related Robots Design Overview.} \label{design_req} \begin{center}% \resizebox{\columnwidth}{!}{% \begin{tabular}{lrrrcrrrrrrr} %\hline &$\bm{l_{strut}}$ & $\bm{\Delta l_{act}}$ & $\bm{k_{passive}}$ & \bf{tethered?} & \bf{control} & $\bm{f_{act}}$ & \bf{\#act.} & \bf{mass} & \bf{sensors} &\bf{actuators} &\bf{ref.} \\ \hline \hline \bf{Pneumatic}&\SI{.57}{m} & - & - & Y & open loop & \SI{800}{\newton} & \num{24} & \SI{3.3}{\kg}& none & McKibben& \cite{Koizumi2012b} \\ \bf{ReCTeR}&\SI{1}{m} & \SI{0.3}{\metre} & \SI{28.4}{\newton\per\metre} & N & closed loop & \SI{12}{\newton} & \num{6} & \SI{1.1}{\kg} & F, L, IMU & DC & \cite{Caluwaerts2013rsif} \\ \bf{Rapid Proto Kit}&\SI{.69}{m} & \SI{0.005}{\metre} & \SI{1193}{\newton\per\metre} & N & open loop & $<$\SI{45}{\newton} & \num{24} & \SI{2.7}{\kg}& none & linear DC &\cite{kim2014rapid} \\ \bf{\SB{} 2014}&\SI{1.5}{m} & \SI{0.2}{\metre} & \SI{613}{\newton\per\metre} & N & closed loop & \SI{140}{\newton} & \num{12} & \SI{12}{\kg}& F, L, $\tau$, IMU & BLDC& \\ \bf{\SB{} 2015}&\SI{1.7}{m} & \SI{0.42}{\metre} & \SI{998}{\newton\per\metre} & N & closed loop & \SI{250}{\newton} & \num{12} & \SI{21}{\kg} & F, L, $\tau$, IMU & BLDC& %Tensegrity Kit %\SB{} ICRA 2014 &$1.5\mbox{m}$ & $0.26 {\mbox{m}}/{\mbox{s}}$ & $500 \mbox{N}/\mbox{m}$ & $100\mbox{Hz}$ & $3 \mbox{Nm}$ \\ %\SB{} ICRA 2015 %\hline \end{tabular} } \end{center} \bigskip \fontsize{10pt}{12pt}\selectfont The variable $l_{strut}$ indicates the length of a strut, $\Delta l_{act}$ is the nominal spring-cable retraction length in tension, $k_{passive}$ is the linear stiffness coefficient of a passive spring-cable (or active spring-cable if fully actuated), tethered indicates whether the robot is powered externally or by internal systems, control indicates whether sensor feedback is used, $f_{act}$ is the nominal actuated spring-cable tension and \#act. is the number of actuators. In the sensors column, F represents a linear force sensor (for cables), L is a cable length sensor (in the form of motor encoders), $\tau$ represents a torque sensor for motors, and IMU represents an accelerometer/gyroscope inertial motion sensing unit. Actuators are specified as DC motors or brushless DC (BLDC) motors. The SUPERball 2014 values are revised original design requirements based on NTRT simulations, and changed to the 2015 values after additional detail design. % \vspace{-0.2cm} %\end{minipage} \end{table*} The work presented here verifies our in-house tensegrity simulators. In order to achieve this, the group decided to use a \SB{}-like structure with a center payload. This is believed to be closer to the proposed build profile of a real tensegrity probe, where the main science modules will be contained within the payload. Protecting this science payload is the main goal for an EDL (entry, descent, and landing) scenario. Figure \ref{fig:NTRT_SB} shows a 3-D representation of \SB{} with a payload generated within NTRT. \begin{figure}[thpb] \centering \includegraphics[width=0.8\columnwidth]{tex/img/1.png} \caption{SUPERball with a payload modeled within NTRT.} \label{fig:NTRT_SB} \end{figure} \section{Euler-Lagrange Model} In order to verify the simulation results produced by our NTRT simulator, we decided to compare the behavior of the NTRT to a published analytic model for tensegrity systems. We chose to use Skelton's dynamic equations because they form a well-accepted and widely used model.
It may be found in his \emph{Tensegrity Systems} book \cite{skelton_tensegrity_2009}, which is based on his work in \cite{skelton2005dynamics}. In order to solve the dynamic equations with interactions with the environment, an Euler-Lagrange approach is used as well as Skelton's constrained class one structure. The Lagrange function for a constrained rod is given by \begin{equation} \label{eq:lagrange} L = T - V - c \end{equation} where \begin{align} \mathbf{b} &= l^{-1}(\mathbf{n}_{j}-\mathbf{n}_{i})\label{eq:normalizedVector}\\ c &= \frac{\mathbf{J}\xi}{2}(\mathbf{b}^{T}\mathbf{b}-1)\label{eq:constraint} \end{align} Equation \eqref{eq:normalizedVector} is the normalized vector of a rod with \(\mathbf{n}_{i,j}\) the nodal positions in \(R^3\), and equation \eqref{eq:constraint} contains the Lagrange multiplier \(\xi\) that keeps \eqref{eq:normalizedVector} constrained. \(\mathbf{J}\) is defined as the inertia matrix for a one-dimensional rod in three-dimensional space. In order to define the system of \(k\) rods we need to define a combined Lagrangian as \begin{equation} \mathbf{L} = \sum_{i=1}^{k} L_{i}\label{eq:combinedLagrangian} \end{equation} where \(L_{i}\) is the Lagrange function for each rod. Using the approach outlined in Skelton's book for deriving the equations of motion, we can then derive the configuration matrix \begin{equation} \mathbf{Q} = \begin{bmatrix} \mathbf{R} & \mathbf{B} \end{bmatrix}\label{eq:configMatrix} \end{equation} where \(\mathbf{R}\) and \(\mathbf{B}\) are matrices containing the translational and rotational vectors, respectively. They have the form \begin{align} \mathbf{R} &= \begin{bmatrix} \mathbf{r}_{1} & \cdots & \mathbf{r}_{k} \end{bmatrix}\label{eq:transR}\\ \mathbf{B} &= \begin{bmatrix} \mathbf{b}_{1} & \cdots & \mathbf{b}_{k} \end{bmatrix}\label{eq:rotB} \end{align} Also using the procedure for deriving generalized forces within Skelton's book, the system's generalized force equations are computed as \begin{equation} \mathbf{F}_{\mathbf{Q}} = \begin{bmatrix} \mathbf{F}_{\mathbf{R}} & \mathbf{F}_{\mathbf{B}} \end{bmatrix}\label{eq:generalizedForce} \end{equation} with \begin{align} \mathbf{F}_{\mathbf{R}} &= \begin{bmatrix} \mathbf{f}_{\mathbf{r}_{1}} & \cdots & \mathbf{f}_{\mathbf{r}_{k}} \end{bmatrix}\label{eq:gForceR}\\ \mathbf{F}_{\mathbf{B}} &= \begin{bmatrix} \mathbf{f}_{\mathbf{b}_{1}} & \cdots & \mathbf{f}_{\mathbf{b}_{k}} \end{bmatrix}\label{eq:gForceB} \end{align} Finally, we can define the resulting equations of motion in a compact form as \begin{equation} (\ddot{\mathbf{Q}} + \mathbf{Q}\mathbf{\Xi})\mathbf{M} = \mathbf{F}_{\mathbf{Q}}\label{eq:compactForm} \end{equation} where \begin{align} \mathbf{\Xi} &= diag\begin{bmatrix} 0,\cdots,0,\xi_{1},\cdots,\xi_{k} \end{bmatrix}\label{eq:lagrangeMatrix}\\ \mathbf{M} &= diag\begin{bmatrix} m_{1},\cdots,m_{k},J_{1},\cdots,J_{k} \end{bmatrix}\label{massMatrix} \end{align} This approach was then implemented in Python, using a fourth-order Runge-Kutta method to solve the system of ordinary differential equations. In order to implement a gravitational field, a force distribution function is applied along the length of each rod and calculated as a nodal force depending on the given density of the rod. This external force is then applied to the nodes during each time step, simulating a gravitational field.
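For concreteness, a minimal sketch of such an integration step (our illustration, not the solver's actual code) is given below; it assumes the equations of motion \eqref{eq:compactForm} have been rearranged into the first-order form $\dot{y} = f(t, y)$ with state $y = (\mathbf{Q}, \dot{\mathbf{Q}})$:

\begin{verbatim}
import numpy as np

def rk4_step(f, t, y, h):
    # classical fourth-order Runge-Kutta step for y' = f(t, y)
    k1 = f(t, y)
    k2 = f(t + h / 2, y + h / 2 * k1)
    k3 = f(t + h / 2, y + h / 2 * k2)
    k4 = f(t + h, y + h * k3)
    return y + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)

def nodal_gravity(rho, length, g=9.81):
    # distribute a rod's weight equally onto its two end nodes
    m = rho * length
    return np.array([0.0, 0.0, -m * g / 2.0])
\end{verbatim}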
\section{Detailed Impact Simulations and Cross-Validation Using Two Simulators} The NTRT simulator is the most general purpose, allowing us to explore control algorithms and complex environmental interactions, but it is an iterative discrete solver that we were concerned might not be providing accurate answers. The E-L solver, on the other hand, has a much stronger analytical basis and should provide very accurate answers, but is limited because some of the nodes (rod ends) must be constrained and locked into place. This is unrealistic for the deformation caused during landing, and makes it an inappropriate choice for mobility and controls research. If ground contact forces were incorporated into the E-L solver and code optimizations implemented, it could be used in conjunction with an unscented Kalman filter for state estimation propagation. This tool could then be used to develop online learning algorithms for mobility research. In this section, we compare the NTRT simulator and the E-L solver at the moment of impact with the ground. The comparison is made at the moment of impact because our implementation of the analytic E-L solver requires select nodes to be constrained. We set up the structure so that it is barely in contact with the ground and is in equilibrium at time zero. In both simulations, we add an initial velocity equal to the terminal velocity on Titan, and compare the vertical trajectory, vertical velocity, and vertical acceleration of the payload. Since the structure's horizontal speed is zero at the beginning and the structure is symmetrical, the payload's horizontal components of position, velocity and acceleration are zero. As can be seen in Figures \ref{fig:vsPosition} and \ref{fig:vsVelocity}, both simulators closely match and generate the same results for position and velocity, with an error margin close to zero. Comparing the accelerations generated by the two simulators (Figure \ref{fig:vsAccelerations}), a larger difference can be seen. The reason behind this difference is that NTRT uses Bullet, which is a discrete-time simulator, and accelerations are calculated using two-point estimates from the velocities at the previous timestep. Yet, even with these differences in accelerations, our conclusion at the end of the comparison is that both simulators showed the same basic dynamics, and their results were close enough that we could move forward using the more general purpose NTRT simulator for our controls, mobility, and landing experiments. \begin{figure}[htb] \centering \includegraphics[width=0.8\columnwidth]{tex/images/landing/bulletVsEL/SimVsEL} \caption{NTRT vs EL: Vertical Position} \label{fig:vsPosition} \end{figure} \begin{figure}[htb] \centering \includegraphics[width=0.8\columnwidth]{tex/images/landing/bulletVsEL/Velocities} \caption{NTRT vs EL: Vertical Velocity} \label{fig:vsVelocity} \end{figure} \begin{figure}[htb] \centering \includegraphics[width=0.8\columnwidth]{tex/images/landing/bulletVsEL/VelocityDerivatives_SimVsEL} \caption{NTRT vs EL: Vertical Acceleration} \label{fig:vsAccelerations} \end{figure} \section{Simulated Drop Tests and Payload Protection} Finally, we performed extensive analysis on drop tests and the protection provided to a payload. As expected, we found that by varying the rod lengths, which determine the stroke distance over which the payload decelerates, we could control the maximum deceleration experienced by the payload while ensuring that it did not collide with the ground or structure.
For example, with rods of 1.5 meters in length, our payload experienced a max deceleration of 21.4G when landing at 15 m/s. In figure \ref{fig:rodvsG} we show the results of a series of drop tests with different rod lengths and show the resulting maximum deceleration and forces experienced in the tension members. As can be seen from these graphs, even for reasonable rod lengths, the maximum G's are acceptable for most instruments, and the maximum forces experienced by the cables are easily within ranges that can be engineered for. In all tests we kept the total system mass constant, at 100kg (which is 70kg for the payload and 5kg per rod) in order to highlight the impact of structural geometry and rod length. For the tension members we used spring constants of 44 kN/m for the cables around the perimeter and 10 kN/m for the cables attached to the payload. Also, the results in Figure \ref{fig:rodvsG} were found using the landing orientation of 35 degrees around the X axis and 45 degrees around the Z axis, which we selected from our orientation studies discussed below. \begin{figure}[htbp] \centering \includegraphics[width=0.8\columnwidth]{tex/images/rodvsG_fixed2} \caption{{\em {\bf Landing Forces Study}. This shows how rod length impacts maximum deceleration of the payload and the maximum forces experienced by the tension cables. All tests were conducted with a landing velocity of 15 m/s onto a hard surface.}} \label{fig:rodvsG} \end{figure} A very interesting point to consider is that the mass of our system will grow linearly with the length of the rods, while providing increasing payload protection. On the other hand, the mass of airbags increases with the square of the radius, which is one of the reasons that the MSL rover, with its increased size and mass, had to switch from the airbag approach to the more complex Sky Crane approach. While this study has focused on small light-weight mission concepts, we expect that there are compelling advantages to scaling up to handle larger payloads and we look forward to studying this further in the future. \section{Landing Orientation Studies} In order to study how landing orientation affects payload decelerations and impact events, we conducted a systematic study of landing orientations. Since we wanted to get meaningful data, even for bad orientations, we used a larger tensegrity with 4 meter rods so the data wouldn't saturate. Our success criteria for this study were that the decelerations had to stay under an upper limit of 25G deceleration of the payload, and the payload had to avoid collision with the ground or parts of the tensegrity structure. Figure \ref{fig:landingHeatMapRot} shows the orientations that were safely within these criteria (black) or failed one or both of the criteria (colored). By using a simple trailing streamer during descent it would be possible to control landing at an optimal orientation and enable the use of smaller structures with shorter rods because the orientation control would maximize the available stroke for the payload to decelerate within the structure. Conversely, we can use these studies to know what the worst possible landing scenario will be and choose a structure size which will allow safe landing at any orientation. \begin{figure}[htbp] \centering \includegraphics[width=0.8\columnwidth]{tex/images/landing/landingHeatMapRot.png} \caption{\em Heat map of the maximum acceleration that the payload encounters for all possible landing orientations.
Black areas are safe, colored areas are where the payload does not meet one or both success criteria.} \label{fig:landingHeatMapRot} \end{figure} \section{Conclusions from Simulation Experiments} In our landing analysis we developed and cross-validated two different simulation methods that allowed us to explore the capabilities of a tensegrity structure to absorb the forces of landing and to simultaneously protect a delicate payload. This analysis confirmed that it is indeed possible to do so using a 6-bar tensegrity probe while keeping the maximum decelerations experienced by the instrument-containing payload below 25G, despite the structure landing at 15 m/s (which is greater than terminal velocity on Titan). Comparing this to the Huygens probe's landing acceleration of \(32G\) \cite{lorenz1994huygens}, the tensegrity probe will have a \(43\% \) reduction in \(G\) forces experienced by the scientific payload, despite the Huygens probe's use of parachutes to land at 1/3 of the speed of our tensegrity probe.
function toString(b::SymbolicType) b = Basic(b) a = ccall((:basic_str_julia, libsymengine), Cstring, (Ptr{Basic}, ), &b) string = unsafe_string(a) ccall((:basic_str_free, libsymengine), Void, (Cstring, ), a) return string end Base.show(io::IO, b::SymbolicType) = print(io, toString(b)) " show symengine logo " type AsciiArt x end function ascii_art() out = ccall((:ascii_art_str, libsymengine), Ptr{UInt8}, ()) AsciiArt(unsafe_string(out)) end Base.show(io::IO, x::AsciiArt) = print(io, x.x)
[STATEMENT] lemma some_the_fst_netgmap: assumes "i \<in> net_ips s" shows "Some (the (fst (netgmap sr s) i)) = fst (netgmap sr s) i" [PROOF STATE] proof (prove) goal (1 subgoal): 1. Some (the (fst (netgmap sr s) i)) = fst (netgmap sr s) i [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: i \<in> net_ips s goal (1 subgoal): 1. Some (the (fst (netgmap sr s) i)) = fst (netgmap sr s) i [PROOF STEP] by (metis domIff dom_fst_netgmap option.collapse)
{- Types Summer School 2007 Bertinoro Aug 19 - 31, 2007 Agda Ulf Norell -} -- This is where the fun begins. -- Unleashing datatypes, pattern matching and recursion. module Datatypes where {- Simple datatypes. -} -- Let's define natural numbers. data Nat : Set where zero : Nat suc : Nat -> Nat -- A simple function. pred : Nat -> Nat pred zero = zero pred (suc n) = n -- Now let's do recursion. _+_ : Nat -> Nat -> Nat zero + m = m suc n + m = suc (n + m) infixl 60 _+_ -- An aside on infix operators: -- Any name containing _ can be used as a mixfix operator. -- The arguments simply go in place of the _. For instance: data Bool : Set where true : Bool false : Bool if_then_else_ : {A : Set} -> Bool -> A -> A -> A if true then x else y = x -- if false then x else y = y if_then_else_ false x y = y {- Parameterised datatypes -} data List (A : Set) : Set where [] : List A _::_ : A -> List A -> List A -- The parameters are implicit arguments to the constructors. nil : (A : Set) -> List A nil A = [] {A} map : {A B : Set} -> (A -> B) -> List A -> List B map f [] = [] map f (x :: xs) = f x :: map f xs {- Empty datatypes -} -- A very useful guy is the empty datatype. data False : Set where -- When pattern matching on an element of an empty type, something -- interesting happens: elim-False : {A : Set} -> False -> A elim-False () -- Look Ma, no right hand side! -- The pattern () is called an absurd pattern and matches elements -- of an empty type. {- What's next? -} -- The Curry-Howard isomorphism. -- CurryHoward.agda
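-- A small exercise in the same style (our addition, not part of the original
-- lecture notes): the length of a list, combining List and Nat.
length : {A : Set} -> List A -> Nat
length [] = zero
length (x :: xs) = suc (length xs)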
\subsection{Cloud Computing} Cloud computing is a form of distributed computing that turns compute infrastructure, programming platforms and software systems into scalable utility services. By exposing various compute and programming resources as utility services, cloud computing promotes resource sharing at scale via the Internet. Depending on the type of resources offered as services, cloud computing platforms can be grouped into three main categories: \begin{description} \item [Infrastructure-as-a-Service clouds (IaaS)] Offers low-level compute, storage and networking resources as a service. Compute resources are typically provided in the form of on-demand virtual machines with specific CPU, memory and disk configurations (e.g. Amazon EC2, Google Compute Engine, Eucalyptus). \item [Platform-as-a-Service clouds (PaaS)] Offers a programming platform as a service that can be used to develop and deploy applications at scale (e.g. Google App Engine, AppScale, Heroku, Amazon Elastic Beanstalk). \item [Software-as-a-Service clouds (SaaS)] Offers a collection of software applications and tools as a service that can be directly consumed by application end users (e.g. Salesforce, Workday, Citrix go2meeting). This can be thought of as a new way of delivering software to end users. Instead of prompting the users to download and install any software, SaaS enables the users to consume software via the Internet. \end{description} Due to the benefits associated with cloud computing (scalability, high availability, productivity enhancement etc.), many developers and organizations have adopted the cloud as their preferred means of developing, deploying and delivering software applications. Such cloud-hosted applications expose one or more web application programming interfaces (web APIs) through which users can remotely interact with the applications. A cloud-hosted application may also consume web APIs exposed by other cloud-hosted applications. Thus, cloud-hosted applications form an intricate graph of inter-dependencies among them, where each application can service a set of applications, while being dependent on a set of other applications. However, in general, each cloud-hosted application directly depends on the core services offered by the underlying cloud platform for compute power, storage, network connectivity and scalability. In the next subsection we take a closer look at a specific type of cloud platform -- Platform-as-a-Service clouds. We use PaaS clouds as a case study and a testbed in a number of our explorations. \subsection{Platform-as-a-Service Clouds} PaaS clouds provide a managed application programming platform, where an application developer can simply write some code, and run it at scale at the push of a button. It relieves the developer from having to install or configure any hardware resources, virtual machines or operating systems. Moreover, PaaS clouds do not require the developers to set up any utility services their applications might require such as a database or a distributed cache. Everything an application requires is provisioned and managed by the PaaS cloud. \begin{figure} \centering \includegraphics[scale=0.35]{cloud_app_model} \caption{Applications deployed in a PaaS cloud: (a) An external client making requests to an application via the web API; (b) A PaaS-hosted application invoking another in the same cloud. \label{fig:cloud_app_model} } \vspace{-0.2in} \end{figure} Figure~\ref{fig:cloud_app_model} provides a graphical overview of a PaaS cloud.
The cloud platform provides a base framework, called cloud SDK (software development kit), on which new applications can be developed. The cloud SDK is a set of high level programming APIs, with abstractions for common application services such as data storage, caching, user management and more. The developer uses these abstractions to implement his/her application logic, and packages it as a web application. The service implementations for the cloud SDK are highly scalable, highly available (have SLAs associated with them), and automatically managed by the platform. Developers then upload their applications to the cloud for deployment. Once deployed, the applications and any web APIs exported by them can be accessed via HTTP/S requests by external or co-located clients. PaaS clouds are specifically built for deploying and running applications -- applications that are directly consumed by end users and other client applications. This means, all the problems outlined in the previous section, such as poor development practices, performance SLAs and performance debugging directly impact PaaS clouds. Therefore PaaS clouds are ideal candidates for implementing the type of governance systems proposed in this work. We use PaaS clouds in our research extensively both as case studies and experimental platforms. Specifically, we use Google App Engine and AppScale as test environments to experiment with our new governance systems. App Engine is a highly scalable public PaaS cloud hosted and managed by Google in their data centers. While it is open for anyone to deploy and run web applications, it is not open source software, and its internal deployment details are not commonly known. AppScale is open source software that can be used to set up a private cloud platform on one's own physical or virtual hardware. AppScale is API compatible with App Engine (i.e. it supports the same cloud SDK), and hence any web application developed for App Engine can be deployed on AppScale without any code changes. In our experiments, we typically deploy AppScale over a small cluster of physical machines, or over a set of virtual machines provided by an IaaS cloud such as Eucalyptus. By experimenting with real world PaaS clouds we demonstrate the practical feasibility and the effectiveness of the systems we design and implement. Furthermore, there are currently over a million applications deployed in App Engine, with a significant proportion of them being open source applications. Therefore we have access to a large number of real world PaaS applications to experiment with. PaaS-hosted applications are typically developed and tested outside the cloud (on a developer's workstation), and then later uploaded to the cloud. Therefore PaaS-hosted applications typically undergo three phases during their life-cycle: \begin{description} \item[Development-time] The application is being developed and tested on a developer's workstation \item[Deployment-time] The finished application is being uploaded to the PaaS cloud for deployment \item[Run-time] Application is running, and processing user requests \end{description} We explore ways to use these different phases to our advantage so as to minimize the governance overhead on running applications. 
\subsection{Governance}
\subsubsection{IT and SOA Governance}
Traditionally, information technology (IT) governance~\cite{brown2005framing} has been a branch of corporate governance, focused on improving performance and managing the risks associated with the use of information technology. The primary goals of IT governance are threefold:
\begin{itemize}
\item Assure that the use of IT generates business value
\item Oversee the performance of IT usage and management
\item Mitigate the risks of using IT
\end{itemize}
A number of frameworks, models and even certification systems have emerged over time to help organizations implement IT governance.

When the software engineering community started gravitating towards web services and service-oriented computing (SOC)~\cite{1254461, what-is-soa}, a new type of digital asset rose to prominence within corporate IT infrastructures -- ``services''. Aggregates of loosely coupled, reusable, modular services soon replaced large monolithic software installations. Services required new forms of governance for managing their performance and risks. Hence the notion of service-oriented architecture (SOA) governance came into existence. Multiple definitions of SOA governance are in circulation, but most of them agree that the purpose of SOA governance is to exercise control over services and their associated processes (service development, testing, monitoring, etc.). A commonly used definition of SOA governance is ensuring and validating that service artifacts within the architecture are operating as expected and maintaining a certain level of quality~\cite{gartner-soa-gov}. Consequently, a number of tools that help organizations implement SOA governance have also evolved~\cite{Schepers:2008:LAS:1363686.1363932,4730489,6478236,5577268}. Since web services are the most widely used form of services in SOA-driven systems, most of these SOA governance tools have a strong focus on controlling web services.

Policies play a crucial role in all forms of governance. A policy is a specification of the acceptable behavior and the life-cycle of some entity. The entity could be a department, a software system, a service or a human process such as developing a new application. In SOA governance, policies state how services should be developed, how they are to be deployed, how to secure them, and what level of quality-of-service to maintain while a service is in operation. SOA governance tools enable administrators to specify acceptable service behavior and life-cycle as policies, and a software policy enforcement agent automatically enacts those policies to control various aspects of the services~\cite{5976827,4483228,4279691}.

\subsubsection{Governance for Cloud-hosted Applications}
Cloud computing can be thought of as a heightened version of service-oriented computing. While classic SOC strives to offer data and application functionality as services, cloud computing offers a variety of additional computing resources as services, including hardware infrastructure (compute power, storage space and networking) and programming platforms. Moreover, the applications deployed on cloud platforms are typically implemented as services, with separate implementation and interface components. The web APIs exposed by these applications play the role of the service interface. Much like classic services, each cloud-hosted application can be a dependency for another co-located cloud application, or for an application running elsewhere (e.g. a mobile agent).
Due to this resemblance, many concepts related to SOA governance are directly applicable to cloud platforms and cloud-hosted applications. For instance, we can envision an automated policy enforcement agent enacting a set of policies that control the behavior and the life-cycle of cloud-hosted applications. More specifically, we are interested in using policies to control the following aspects of cloud-hosted applications (an illustrative policy is sketched at the end of this subsection):
\begin{enumerate}
\item Development and deployment conventions (including dependency management, naming and packaging standards, and backward compatibility)
\item Performance SLAs offered to the users
\item Diagnosing performance bugs and bottlenecks
\end{enumerate}
A cloud-hosted application comprises two parts -- an implementation and an API. The implementation contains the functionality of the application; it primarily consists of code that implements various application features. The API is the interface that exposes the application to the network. It enables remote users and client applications to interact with the application by sending HTTP/S requests. The responses generated by an API could be based on HTML (for display in a web browser), or on a data format such as XML or JSON (for machine-to-machine interaction). Regardless of the technology used to implement an API, it is the part of the application that is visible to the users. Therefore governance mechanisms should pay special attention to exercising control over APIs -- a notion that we refer to as \textit{API governance}~\cite{6903538}. A poorly implemented API may render the whole application unusable. A backward-incompatible change to an API can break any downstream applications that depend on it. Therefore it is important to be able to configure and enforce governance policies at the granularity of APIs.

Our research focuses on using automated policy enforcement, diagnostics and other related governance features to ensure that cloud-hosted applications (and their APIs) adhere to developer best practices and administrative conventions, while consistently meeting the performance expectations of developers and users. We strive to design governance mechanisms that scale up well to handle cloud workloads -- i.e. large numbers of applications and policies. We build on existing SOC and cloud computing research, and introduce novel methods where existing governance and detection methods fall short.
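As a simple illustration of the first of these categories, consider a deployment-time policy that validates naming and versioning conventions before an application is admitted to the cloud. The policy API below is purely hypothetical -- a sketch of the idea rather than the policy language developed in this work:

\begin{verbatim}
# Hypothetical deployment-time policy (illustrative API).
def policy(app):
    # Enforce a lower-case naming convention for applications.
    assert app.name.islower(), 'application names must be lower case'
    # Require an explicit version string, so that downstream
    # applications can depend on a known API version.
    assert app.version is not None, 'applications must declare a version'
\end{verbatim}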
Formerly the DANR Lab, the ANR Lab describes itself on its website: The ANR Analytical Laboratory performs analyses on selected chemical constituents of soil, plant, water and waste water, and feed in support of statewide research and extension activities. ANR Analytical Laboratory clients are County Advisors, County Directors, Extension Specialists, and University of California faculty. The Lab is also able to accept samples from government agencies and other educational institutions on a limited basis. In addition to its analytical services, the Lab has an educational role by providing training to students and research staff in the operation of a number of analytical instruments. Related: ANR Building
Formal statement is: lemma plus_absorb2: "g \<in> o[F](f) \<Longrightarrow> L F (\<lambda>x. f x + g x) = L F (f)" Informal statement is: If $g \in o(f)$ along the filter $F$, then the Landau set generated by $f + g$ is the same as the one generated by $f$; that is, adding a term of strictly smaller asymptotic order to $f$ does not change its Landau class.
\documentclass[10pt]{beamer}
\mode<presentation>
{
\usetheme{Boadilla}
\pagestyle{empty}
\setbeamerfont*{frametitle}{size=\normalsize,series=\bfseries}
\setbeamerfont*{block}{size=\normalsize,series=\bfseries}
%\setbeamertemplate{blocks}[rounded][shadow=true]
}
\definecolor{links}{HTML}{2A1B81}
\hypersetup{colorlinks,linkcolor=,urlcolor=links}
\usepackage{etex}
% \usepackage{helvet}
\usepackage{amsmath, amssymb}
\usepackage{color}
\usepackage{asymptote}
\usepackage{mathrsfs}
\usepackage{dsfont}
\usepackage{makeidx}
\usepackage{multido}
\usepackage{pst-sigsys,pst-plot,pstricks-add}
%\usepackage{auto-pst-pdf}
\usepackage{pst-pdf}
\def\nn{\nonumber}
\newcommand{\fs}[2]{#2}
\title[]{LTI systems and the convolution operation}
\author[\textcolor{blue}{Systems and Circuits}]{\textcolor{darkblue}{Pablo M. Olmos} ([email protected])\\ \textcolor{darkblue}{Emilio Parrado} ([email protected])}
\institute{\textcolor{white}{UC3M}}
\definecolor{darkblue}{rgb}{0.0, 0.0, 0.40}
\setbeamercolor{title}{fg=darkblue}
\setbeamercolor{frametitle}{fg=darkblue}
\definecolor{darkgreen}{rgb}{0.0, 0.4, 0.0}
\AtBeginSection[]
{
\begin{frame}<beamer>{Index}
\tableofcontents[currentsection,currentsubsection]
\end{frame}
}
\begin{document}
\frame{
\titlepage
\thispagestyle{empty}
\begin{center}
\includegraphics[scale=0.05]{Figures/uc3m-logo.pdf}
\end{center}
}
\section{Introduction to LTI systems}
\frame{
\frametitle{Motivation: Image Filtering}
\begin{figure}
\includegraphics[scale=0.3]{Filtro_1.png}
\end{figure}
}
\frame{
\frametitle{Motivation: Image Filtering}
\begin{figure}
\includegraphics[scale=1.0]{2D_1.pdf}
\end{figure}
\begin{align*}
Y[1,1]&=X[1,1]*K[1,1]+X[1,2]*K[1,2]+X[1,3]*K[1,3]\\
&+X[2,1]*K[2,1]+X[2,2]*K[2,2]+X[2,3]*K[2,3]\\
&+X[3,1]*K[3,1]+X[3,2]*K[3,2]+X[3,3]*K[3,3]
\end{align*}
}
\frame{
\frametitle{Motivation: Image Filtering}
\begin{figure}
\includegraphics[scale=1.0]{2D_2.pdf}
\end{figure}
\begin{align*}
Y[1,2]&=X[1,2]*K[1,1]+X[1,3]*K[1,2]+X[1,4]*K[1,3]\\
&+X[2,2]*K[2,1]+X[2,3]*K[2,2]+X[2,4]*K[2,3]\\
&+X[3,2]*K[3,1]+X[3,3]*K[3,2]+X[3,4]*K[3,3]
\end{align*}
}
\frame{
\frametitle{Motivation: Image Filtering}
\begin{figure}
\includegraphics[scale=1.0]{2D_1.pdf}
\end{figure}
\begin{align*}
Y[i,j]&=\sum_{k_1=1}^{n}\sum_{k_2=1}^{n} X[k_1,k_2]K[i-k_1,j-k_2], ~~~ K[u,q]=0 \text{ for } u>3, q>3
\end{align*}
{\bf Linear operator, the result does not depend on the position of the image}
}
\frame{
\frametitle{Motivation: Image Filtering}
\begin{figure}
\includegraphics[scale=0.3]{Filtro_1.png}
\end{figure}
}
\frame{
\frametitle{Motivation: Image Filtering}
\begin{figure}
\includegraphics[scale=0.3]{Filtro_2.png}
\end{figure}
}
\frame{
\frametitle{Motivation: Image Filtering}
\begin{figure}
\includegraphics[scale=0.3]{Filtro_3.png}
\end{figure}
}
\frame{
\begin{itemize}
\item The theory of LTI systems has direct applications in a wide set of technical areas:
\begin{itemize}
\item Nuclear magnetic resonance spectroscopy
\item Seismology
\item Electric circuit design
\item Control Theory
\item Any application that involves Signal Processing
\end{itemize}
\item Our goal in Systems and Circuits: predict the output of a given LTI system for a given input.
\item In future courses you will face the design of LTI systems according to certain specifications.
\end{itemize}
}
\frame{
\frametitle{Time Invariance}
\begin{block}{}
A system is time-invariant if a time shift in the input signal causes a time shift in the output signal.
\end{block}
\vspace{0.5cm}
\begin{itemize}
\item Given $y[n]=f(x[n])$, the system is time-invariant if $f(x[n-n_0])=y[n-n_0]$ $\forall~n_0$.
\item Given $y(t)=f(x(t))$, the system is time-invariant if $f(x(t-t_0))=y(t-t_0)$ $\forall~t_0$.
\end{itemize}
}
\frame{
\frametitle{Linearity}
Linear systems possess the important property of superposition.
\begin{exampleblock}{}
For any system, consider two arbitrary inputs and their respective outputs:
\begin{align}\nn
x_1(t)\rightarrow y_1(t)\\\nn
x_2(t)\rightarrow y_2(t),
\end{align}
the system is linear if
\begin{align}\nn
ax_1(t)+bx_2(t)\rightarrow ay_1(t)+by_2(t)
\end{align}
for any two complex constants $a,b\in\mathbb{C}$.
\end{exampleblock}
\begin{block}{Linear discrete-time signals}
\begin{align}\nn
ax_1[n]+bx_2[n]\rightarrow ay_1[n]+by_2[n]
\end{align}
\end{block}
}
\frame{
\frametitle{Analysis of LTI systems}
\begin{figure}
\centering\includegraphics[scale=0.3]{P6.pdf}
\end{figure}
\vspace{0.5cm}
\begin{itemize}
\item Decompose the input signal as a linear combination of simpler signals
\end{itemize}
$$x(t) = \dots + a_{-2} \phi_{-2} (t) + a_{-1} \phi_{-1} (t) + a_{0} \phi_{0} (t) + a_{1} \phi_{1} (t) + a_{2} \phi_{2} (t) + \ldots $$
\begin{itemize}
\item These ``basic'' signals are chosen to provide a certain degree of analytical convenience, so we can analyze the system's properties and its response to arbitrary input signals:
\begin{itemize}
\item Delayed Impulses $\Rightarrow$ \textbf{\textcolor{darkblue}{Convolution}}
\item Complex exponential signals $\Rightarrow$ \textbf{\textcolor{darkblue}{Fourier analysis}}
\end{itemize}
\end{itemize}
}
\section{Discrete-time LTI systems}
\frame{
\begin{figure}
\centering\includegraphics[scale=0.3]{P1.pdf}
\end{figure}
\vspace{0.5cm}
Remember that any discrete-time sequence $x[n]$ can be decomposed as a linear combination of unit impulses:
\begin{block}{}
\begin{align}\nn
x[n]&=\sum_{k=-\infty}^{\infty}x[k]\delta[n-k]\\\nn
&=\ldots+x[-20]\delta[n+20]+x[-19]\delta[n+19]+\ldots\\\nn
&+x[-1]\delta[n+1]+x[0]\delta[n]+x[1]\delta[n-1]+\ldots
\end{align}
\end{block}
}
\frame{
\textbf{If the system is linear} and we are able to compute
\vspace{0.5cm}
\begin{figure}
\centering\includegraphics[scale=0.3]{P2.pdf}
\end{figure}
then we can make use of the superposition property!
}
\frame{
\begin{figure}
\centering\includegraphics[scale=0.3]{P3.pdf}
\end{figure}
\vspace{0.5cm}
\begin{exampleblock}{}
\begin{align}\nn
y[n]=\sum_{k=-\infty}^{\infty}x[k]h_{k}[n]
\end{align}
\end{exampleblock}
\vspace{0.5cm}
\begin{itemize}
\item This is just a consequence of the system's linearity.
\item The problem is reduced to evaluating the system's output for any $\delta[n-k]$.
\item The problem can be further reduced by exploiting that the system is \textbf{time-invariant}.
\end{itemize}
}
\frame{
\begin{figure}
\centering\includegraphics[scale=0.3]{P4.pdf}
\end{figure}
Therefore, for any input $x[n]$, if $h[n]$ is known then the system output is given by
\begin{exampleblock}{}
\begin{align}\nn
y[n]=\sum_{k=-\infty}^{\infty}x[k]h_{k}[n]=\sum_{k=-\infty}^{\infty}x[k]h[n-k]
\end{align}
\end{exampleblock}
}
\frame{
\begin{itemize}
\item $h[n]$ is the system's \textbf{impulse response}.
\item Any LTI system is \textbf{completely defined} by $h[n]$!
\vspace{0.3cm} \begin{figure} \centering\includegraphics[scale=0.3]{P5.pdf} \end{figure} \end{itemize} \begin{block}{Convolution} Given two discrete-time signals $x[n]$ and $h[n]$: \begin{align}\nn \sum_{k=-\infty}^{\infty}x[k]h[n-k]=x[n]\ast h[n] \end{align} is the convolution operation between them. \end{block} } % %\frame{ %\frametitle{Example I} % %\psset{xunit=0.75cm,yunit=0.75cm} % %\begin{tabular}{|c|c|} %\hline %\begin{pspicture}[showgrid](-2,-1)(4,3) % \rput(0,0){\psaxeslabels(0,0)(-2,0)(4,0){$n$}{} % \rput[tl](-2,3){$x[n]$} % \psstem[style=Stem,linecolor=blue](-1,1) % {0,1,2,0}} %\end{pspicture} %& %\begin{pspicture}[showgrid](-1,-1)(5,3) % \rput(0,0){\psaxeslabels(0,0)(-1,0)(5,0){$n$}{} % \rput[tl](-1,3){$h[n]$} % \psstem[style=Stem,linecolor=blue](0,1) % {0,1,1,1,0}} %\end{pspicture}\\ %\hline %\end{tabular} % % %} % %\frame{ %%\frametitle{Example I} % %\begin{center} %\begin{pspicture}[](5,2) % \pssignal(0,1){x}{$x[n]$} % \psblock(2,1){a}{$h[n]$} % %\psblock(4,1){b}{$h[n], H(z)$} % \pssignal(4,1){y}{$y[n]$} % %----------------- % \psset{arrows=->} % \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} %\end{pspicture} %\end{center} % %\psset{xunit=0.75cm,yunit=0.75cm} % %\begin{center} % %\begin{pspicture}[showgrid](0,-1)(6,3) % \rput(0,0){\psaxeslabels(0,0)(0,0)(6,0){$n$}{} % \rput[tl](0,3){$y[n]$} % \psstem[style=Stem,linecolor=red](1,1) % {1,3,3,2,0}} %\end{pspicture} % %\end{center} % %} %\frame{ %\frametitle{Example} % %\psset{xunit=0.75cm,yunit=0.75cm} % %\begin{center} % %\begin{tabular}{|c|c|} %\hline %\begin{pspicture}[showgrid](-1,-1)(4,5) % \rput(0,0){\psaxeslabels(0,0)(-1,0)(4,0){$n$}{} % \rput[tl](-1,3){$x[n]$} % \psstem[style=Stem,linecolor=blue](-1,1) % {0,4,1,2,5}} %\end{pspicture} %& %\begin{pspicture}[showgrid](-2,-1)(3,3) % \rput(0,0){\psaxeslabels(0,0)(-2,0)(3,0){$n$}{} % \rput[tl](-2,3){$h[n]$} % \psstem[style=Stem,linecolor=blue](-2,1) % {0,1,2,-1}} %\end{pspicture}\\ %\hline %\end{tabular} % %\end{center} % % % %} % %\frame{ %\frametitle{Example} % %\begin{center} %\begin{pspicture}[](5,2) % \pssignal(0,1){x}{$x[n]$} % \psblock(2,1){a}{$h[n]$} % %\psblock(4,1){b}{$h[n], H(z)$} % \pssignal(4,1){y}{$y[n]$} % %----------------- % \psset{arrows=->} % \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} %\end{pspicture} %\end{center} % %\psset{xunit=0.35cm,yunit=0.35cm} % %\begin{center} % %\begin{pspicture}[showgrid](-2,-5)(7,10) % \rput(0,0){\psaxeslabels(0,0)(-2,0)(7,0){$n$}{} % \rput[tl](-2,10){$y[n]$} % \psstem[style=Stem,linecolor=red](-2,1) % {0,4,9,0,8,8,-5,0}} %\end{pspicture} % %\end{center} % %} \frame{ \frametitle{Problem} \begin{itemize} \item $x[n]=\alpha^{n}u[n]$ with $\alpha\in(0,1)$ \item $h[n]=u[n]$ \end{itemize} \begin{figure} \centering\includegraphics[scale=0.3]{P5.pdf} \end{figure} } \frame{ \frametitle{Sol.} \begin{align*} y[n]=\left\{\begin{array}{cc} 0 & n<0\\\\ \frac{1-\alpha^{n+1}}{1-\alpha} & n\geq 0 \end{array} \right. 
\end{align*} } \section{Continuous-time LTI systems} \frame{ \textbf{\color{red}{The discussion for continuous-time LTI systems is just a generalization of the discrete-time case.}} \begin{figure} \centering\includegraphics[scale=0.3]{P6.pdf} \end{figure} \vspace{0.5cm} Remember that any continuous-time signal $x(t)$ can be decomposed as a linear combination of an infinite number of impulses: \begin{block}{} \begin{align}\nn x(t)=\int_{-\infty}^{\infty}x(\tau)\delta(t-\tau)d\tau \end{align} \end{block} } %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % EPH 20140222 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \frame{ \textbf{{\color {red} Approximate} $x(t)$ by a combination of scaled and equally spaced versions $\delta_{\Delta}(t)$} \begin{figure} \centering\includegraphics[scale=0.4]{Figures/Signal32.pdf} \end{figure} } \frame{ \[ x(t) \approx x_{\Delta}(t) = \sum_{k=-\infty}^{\infty}{x(k\Delta){\color{blue}\delta_{\Delta}(t-k\Delta)}\Delta} \] \begin{figure} \centering\includegraphics[scale=0.3]{P7.pdf} \end{figure} \begin{figure} \centering\includegraphics[scale=0.3]{Figures/Signal33.pdf} \end{figure} Then applying the superposition property \begin{figure} \centering\includegraphics[scale=0.3]{P8.pdf} \end{figure} } \frame{ %El resultado anterior todavía es inmanejable porque depende de conocer infinitas $h_{k\Delta}(t)$ % %\vspace{0.5cm} If the system is also {\bf time-invariant}: \begin{figure} \centering\includegraphics[scale=0.3]{P9.pdf} \end{figure} \vspace{0.5cm} Taking the limit $\Delta \rightarrow 0$: \begin{itemize} \item $k\Delta \rightarrow \tau$ \item $\sum \rightarrow \int$ \item $\Delta \rightarrow d\tau$ \item $h_0(t) = h(t)$ \end{itemize} \begin{exampleblock}{} \begin{align}\nn y(t) = \lim_{\Delta \rightarrow 0} y_{\Delta}(t) = \int_{-\infty}^{\infty}{x(\tau)h(t-\tau)d\tau} \end{align} \end{exampleblock} } %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % FIN EPH 20140222 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % %\frame{ % %If the system's output is known for $\delta(t-\tau)$ for any $\tau\in\mathbb{R}$: % %\begin{center} %\begin{pspicture}[](5,2) % \pssignal(0,1){x}{$\delta(t-\tau)$} % \psblock(2,1){a}{LTI} % %\psblock(4,1){b}{$h[n], H(z)$} % \pssignal(4,1){y}{$h_{\tau}(t)$} % %----------------- % \psset{arrows=->} % \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} %\end{pspicture} %\end{center} % %Then, by the superposition property, % %\begin{center} %\begin{pspicture}[](7,2) % \pssignal(0,1){x}{$\displaystyle x(t)=\int_{-\infty}^{\infty}x(\tau)\delta(t-\tau)d\tau$} % \psblock(4,1){a}{LTI} % %\psblock(4,1){b}{$h[n], H(z)$} % \pssignal(6,1){y}{$y(t)$} % %----------------- % \psset{arrows=->} % \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} %\end{pspicture} %\end{center} % %\begin{exampleblock}{} %\begin{align}\nn %y(t)=\int_{-\infty}^{\infty} x(\tau)h_{\tau}(t) d\tau %\end{align} %\end{exampleblock} % %} % %\frame{ % %If the system is time-invariant: % %\begin{center} %\begin{pspicture}[](7,2) % \pssignal(0,2){x}{$\delta(t)$} % \psblock(2,2){a}{LTI} % %\psblock(4,1){b}{$h[n], H(z)$} % \pssignal(5,2){y}{$h_{0}(t)=h(t)$} % % \pssignal(0,1){x1}{$\delta(t-\tau)$} % \psblock(2,1){a1}{LTI} % %\psblock(4,1){b}{$h[n], H(z)$} % \pssignal(5,1){y1}{$h_{\tau}(t)=h(t-\tau)$} % %----------------- % \psset{arrows=->} % \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} % \ncline{x1}{a1} \ncline{a1}{y1} %\end{pspicture} %\end{center} % %then, we finally obtain % %\begin{center} %\begin{pspicture}[](7,2) % \pssignal(0,1){x}{$\displaystyle x(t)=\int_{-\infty}^{\infty}x(\tau)\delta(t-\tau)d\tau$} % \psblock(4,1){a}{LTI} % 
%\psblock(4,1){b}{$h[n], H(z)$} % \pssignal(6,1){y}{$y(t)$} % %----------------- % \psset{arrows=->} % \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} %\end{pspicture} %\end{center} % %\begin{exampleblock}{} %\begin{align}\nn %y(t)=\int_{-\infty}^{\infty} x(\tau)h(t-\tau) d\tau %\end{align} %\end{exampleblock} % %} \frame{ \begin{itemize} \item $h(t)$ is the \textbf{system's response to the impulse $\delta(t)$}. \item Any LTI system is \textbf{completely defined} by $h(t)$! \begin{figure} \centering\includegraphics[scale=0.3]{P10.pdf} \end{figure} \end{itemize} \begin{block}{Continuous-time Convolution} Given two signals $x(t)$ and $h(t)$: \begin{align}\nn \int_{-\infty}^{\infty} x(\tau)h(t-\tau) d\tau=x(t)\ast h(t) \end{align} is the continuous-time convolution operation between them. \end{block} } \frame{ \frametitle{Problem 43} Consider the two signals $x(t)$ and $h(t)$ given by: \begin{align*} x(t)=\left\{ \begin{array}{cc} 1 & 0< t< T\\ 0 & \text{otherwise} \end{array} \right., \end{align*} \begin{align*} h(t)=\left\{ \begin{array}{cc} t & 0< t< 2T\\ 0 & \text{otherwise} \end{array} \right.. \end{align*} If $h(t)$ is the impulse response of an LTI system, compute the system's output when $x(t)$ is the input signal. } \frame{ \frametitle{Sol.} \begin{figure} \centering\includegraphics[scale=0.3]{P10.pdf} \end{figure} \begin{align*} y(t)=\left\{ \begin{array}{cc} 0 & t<0\\ \frac{t^2}{2} & 0\leq t< T\\ tT-\frac{T^2}{2} & T\leq t <2T\\ Tt+\frac{3}{2}T^2-\frac{t^2}{2} &2T\leq t< 3T\\ 0 & t\geq3T \end{array} \right., \end{align*} } \section{Properties of the Convolution operation} %\frame{ % %\begin{block}{Discrete-time Convolution} %Given two discrete-time signals $x[n]$ and $h[n]$: %\begin{align}\nn %\sum_{k=-\infty}^{\infty}x[k]h[n-k]=x[n]\ast h[n] %\end{align} %is the convolution operation between them. %\end{block} % %\begin{block}{Continuous-time Convolution} %Given two signals $x(t)$ and $h(t)$: %\begin{align}\nn %\int_{-\infty}^{\infty} x(\tau)h(t-\tau) d\tau=x(t)\ast h(t) %\end{align} %is the continuous-time convolution operation between them. 
%\end{block} % %} \frame{ \frametitle{Commutative property} \begin{exampleblock}{} \begin{align}\nn x[n]\ast h[n]=h[n]\ast x[n] %\Rightarrow \sum_{k=-\infty}^{\infty}x[k]h[n-k]=\sum_{k=-\infty}^{\infty}h[k]x[n-k] \end{align} \end{exampleblock} \textbf{Proof.:} Given \begin{align}\nn x[n]\ast h[n]=\sum_{k=-\infty}^{\infty}x[k]h[n-k] \end{align} define $v=n-k$, thus \begin{align}\nn x[n]\ast h[n]=\sum_{v=+\infty}^{-\infty}x[n-v]h[v]=\sum_{v=-\infty}^{+\infty}h[v]x[n-v]=h[n]\ast x[n] \end{align} } \frame{ \frametitle{Commutative property} Therefore, \begin{center} \begin{pspicture}[](5,2) \pssignal(0,1){x}{$x[n]$} \psblock(2,1){a}{$h[n]$} %\psblock(4,1){b}{$h[n], H(z)$} \pssignal(4,1){y}{$y[n]$} %----------------- \psset{arrows=->} \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} \end{pspicture} \end{center} is equivalent to \begin{center} \begin{pspicture}[](5,2) \pssignal(0,1){x}{$h[n]$} \psblock(2,1){a}{$x[n]$} %\psblock(4,1){b}{$h[n], H(z)$} \pssignal(4,1){y}{$y[n]$} %----------------- \psset{arrows=->} \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} \end{pspicture} \end{center} \begin{exampleblock}{Commutative property for the continuous-time convolution} \begin{align}\nn x(t)\ast h(t)=h(t)\ast x(t)\Rightarrow \int_{-\infty}^{\infty} x(\tau)h(t-\tau) d\tau=\int_{-\infty}^{\infty} h(\tau)x(t-\tau) d\tau \end{align} \end{exampleblock} } \frame{ \frametitle{Associative property} \begin{align}\nn x(t)\ast \Big(h(t) \ast z(t)\Big)=\Big(x(t)\ast h(t)\Big) \ast z(t)\\\nn\\\nn x[n]\ast \Big(h[n] \ast z[n]\Big)=\Big(x[n]\ast h[n]\Big) \ast z[n] \end{align} Therefore, the following configurations are equivalent: \begin{center} \begin{pspicture}[](8,2) \pssignal(0,1){x}{$x(t)$} \psblock(2,1){a}{$h_1(t)$} \psblock(5,1){b}{$h_2(t)$} \pssignal(8,1){y}{$y(t)$} %----------------- \psset{arrows=->} \ncline{x}{a} \ncline{a}{b} \ncline{b}{y} \end{pspicture} \vspace{0.5cm} \begin{pspicture}[](7,2) \pssignal(0,1){x}{$x(t)$} \psblock(3,1){a}{$h_1(t)\ast h_2(t)$} %\psblock(4,1){b}{$h[n], H(z)$} \pssignal(6,1){y}{$y(t)$} %----------------- \psset{arrows=->} \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} \end{pspicture} \vspace{0.5cm} \begin{pspicture}[](8,2) \pssignal(0,1){x}{$x(t)$} \psblock(2,1){a}{$h_2(t)$} \psblock(5,1){b}{$h_1(t)$} \pssignal(8,1){y}{$y(t)$} %----------------- \psset{arrows=->} \ncline{x}{a} \ncline{a}{b} \ncline{b}{y} \end{pspicture} \end{center} } \frame{ \frametitle{Distributive property with respect to the sum} \begin{align}\nn x[n]\ast\Big(y[n]+z[n]\Big)=x[n]\ast y[n]+x[n]\ast z[n]\\\nn\\\nn x(t)\ast\Big(y(t)+z(t)\Big)=x(t)\ast y(t)+x(t)\ast z(t) \end{align} The following configurations are equivalent: \begin{center} \begin{pspicture}[](6.5,2) \pssignal(0,1){x}{$x(t)$} \pnode(1,1){b} \pnode(1,2){c} \pnode(1,0){d} \psblock(3,2){e}{$h_1(t)$} \psblock(3,0){f}{$h_2(t)$} \pnode(5,2){g} \pnode(5,0){h} \pscircleop(5,1){oplus} \pssignal(6.25,1){i}{$y(t)$} \ncline{x}{b} \ncline{b}{c} \ncline{b}{d} \ncline{e}{g} \ncline{f}{h} \psset{style=RoundCorners ,style=Arrow} \ncline{c}{e} \ncline{d}{f} \ncline{g}{oplus} \ncline{h}{oplus} \ncline{oplus}{i} \end{pspicture} \vspace{1cm} \begin{pspicture}[](5,2) \pssignal(-1,1){x}{$x(t)$} \psblock(2,1){a}{$h_1(t)+ h_2(t)$} %\psblock(4,1){b}{$h[n], H(z)$} \pssignal(5,1){y}{$y(t)$} %----------------- \psset{arrows=->} \ncline{x}{a} \ncline{a}{y} %\ncline{b}{y} \end{pspicture} \end{center} } \frame{ \frametitle{Convolution with an impulse signal} Remember that any signal can be decomposed as a linear combination of an infinite number of unit impulses: \begin{align}\nn 
x[n]&=\sum_{k=-\infty}^{\infty}x[k]\delta[n-k],\\\nn x(t)&=\int_{-\infty}^{\infty}x(\tau)\delta(t-\tau)d\tau \end{align} Therefore \begin{alertblock}{} \begin{align} x[n]\ast \delta[n]=x[n],\nn\\\nn x(t)\ast \delta(t)=x(t). \end{align} \end{alertblock} } \frame{ \frametitle{Convolution with a delayed impulse (discrete-time)} \begin{align}\nn x[n]\ast \delta[n-n_0]=\sum_{k=-\infty}^{\infty}x[k]\delta[n-k-n_0] \end{align} Define $v=k+n_0$, thus \begin{align}\nn x[n]\ast \delta[n-n_0]&=\sum_{v=-\infty}^{\infty}x[v-n_0]\delta[n-v]\\\nn &=x[n-n_0]\ast \delta[n]=x[n-n_0]. \end{align} Therefore, \begin{block}{} \begin{align} x[n]\ast \delta[n-n_0]=x[n-n_0],\nn\\\nn %x(t)\ast \delta(t-t_0)=x(t-t_0). \end{align} \end{block} } \frame{ \frametitle{Convolution with a delayed impulse (continuous-time)} \begin{align}\nn x(t)\ast \delta(t-t_0)=\int_{\tau=-\infty}^{\infty}x(\tau)\delta(t-\tau-t_0)\text{d}\tau \end{align} Define $v=\tau+t_0$ and $x'(t)=x(t-t_0)$, thus \begin{align*} x(t)\ast \delta(t-t_0)&=\int_{v=-\infty}^{\infty}x(v-t_0)\delta(t-v)\text{d}v\\ &=\int_{v=-\infty}^{\infty}x'(v)\delta(t-v)\text{d}v=x'(t)\ast \delta(t)=x'(t) \end{align*} Therefore, \begin{block}{} \begin{align} x(t)\ast \delta(t-t_0)=x(t-t_0),\nn\\\nn %x(t)\ast \delta(t-t_0)=x(t-t_0). \end{align} \end{block} } \end{document}
function [U, S] = pca(X)
%PCA Run principal component analysis on the dataset X
%   [U, S] = pca(X) computes eigenvectors of the covariance matrix of X
%   Returns the eigenvectors U, the eigenvalues (on diagonal) in S
%

% Useful values
[m, n] = size(X);

% You need to return the following variables correctly.
U = zeros(n);
S = zeros(n);

% ====================== YOUR CODE HERE ======================
% Instructions: You should first compute the covariance matrix. Then, you
%               should use the "svd" function to compute the eigenvectors
%               and eigenvalues of the covariance matrix.
%
% Note: When computing the covariance matrix, remember to divide by m (the
%       number of examples).
%

% Covariance matrix of the (assumed zero-mean) data, then its SVD;
% for a symmetric positive semi-definite matrix the singular vectors
% coincide with the eigenvectors.
sigma = X' * X / m;
[U, S, ~] = svd(sigma);

% =========================================================================

end
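% Example usage (an illustrative sketch): project zero-mean 2-D data onto
% its first principal component. The covariance computation above assumes
% the input has already been mean-normalized.
%
%   X = randn(50, 2) * [2 1; 1 2];        % correlated toy data
%   X = bsxfun(@minus, X, mean(X));       % center the data
%   [U, S] = pca(X);
%   Z = X * U(:, 1);                      % 1-D projection onto top component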
GET_CATCH<-function(area="AI",species="'PLCK'",FYR=fyear,ADD_OLD=TRUE,OLD_FILE="OLD_CATCH.csv"){ ##Define area if(area=="AI"){ reg<-"'539' and '544'" } if(area=="GOA"){ reg<-"'600' and '699'" } if(area=="BS"){ reg<-"'500' and '539'" } YEARS<-c(1991:FYR) CATCH<-vector("list",length=length(YEARS)) blnd<-c(as.character(91:99),"00","01","02") for(i in 1:12){ test=paste("SELECT SUM(BLEND.BLEND",blnd[i],".TONS)AS TONS,\n ", "BLEND.BLEND",blnd[i],".ZONE,\n ", "BLEND.BLEND",blnd[i],".SPECN,\n ", "BLEND.BLEND",blnd[i],".SPEC,\n ", "BLEND.BLEND",blnd[i],".TYPE\n ", "FROM BLEND.BLEND",blnd[i],"\n ", "GROUP BY BLEND.BLEND",blnd[i],".ZONE,\n ", "BLEND.BLEND",blnd[i],".SPECN,\n ", "BLEND.BLEND",blnd[i],".SPEC,\n ", "BLEND.BLEND",blnd[i],".TYPE\n ", "HAVING BLEND.BLEND",blnd[i],".ZONE BETWEEN ",reg,"\n ", "AND BLEND.BLEND",blnd[i],".SPEC = ",species, sep="") CATCH[[i]]<-sqlQuery(AFSC,test) CATCH[[i]]$YEAR=YEARS[i] } for(i in 13:(FYR-1990)){ test=paste("SELECT SUM(BLEND.CAS",YEARS[i],".WEIGHT_POSTED) AS TONS,\n ", "BLEND.CAS",YEARS[i],".REPORTING_AREA_CODE AS ZONE,\n ", "BLEND.CAS",YEARS[i],".AGENCY_SPECIES_CODE AS SPECN,\n ", "BLEND.CAS",YEARS[i],".AGENCY_SPECIES_ID AS SPEC,\n ", "BLEND.CAS",YEARS[i],".SOURCE_TABLE AS TYPE,\n ", "BLEND.CAS",YEARS[i],".YEAR\n ", "FROM BLEND.CAS",YEARS[i],"\n ", "GROUP BY BLEND.CAS",YEARS[i],".REPORTING_AREA_CODE,\n ", "BLEND.CAS",YEARS[i],".AGENCY_SPECIES_CODE,\n ", "BLEND.CAS",YEARS[i],".AGENCY_SPECIES_ID,\n ", "BLEND.CAS",YEARS[i],".SPECIES_GROUP_CODE,\n ", "BLEND.CAS",YEARS[i],".SOURCE_TABLE,\n ", "BLEND.CAS",YEARS[i],".YEAR\n ", "HAVING BLEND.CAS",YEARS[i],".REPORTING_AREA_CODE BETWEEN ",reg,"\n ", "AND BLEND.CAS",YEARS[i],".SPECIES_GROUP_CODE = ",species,sep="") CATCH[[i]]<-sqlQuery(AFSC,test) CATCH[[i]]$YEAR=YEARS[i] } CATCH1<-CATCH[[1]] for(i in 2:length(YEARS)){ CATCH1<-rbind(CATCH1,CATCH[[i]]) } CATCH<-CATCH1 CATCH_TOTAL<-aggregate(list(TONS=CATCH$TONS),by=list(YEAR=CATCH$YEAR),FUN=sum) if(ADD_OLD){ OLD_CATCH<-read.csv(OLD_FILE,header=T) CATCH_TOTAL<-rbind(OLD_CATCH,CATCH_TOTAL) } CATCH_TOTAL }
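# Example usage (illustrative sketch): GET_CATCH assumes an open RODBC
# channel named AFSC and a variable `fyear` in the calling environment;
# the DSN and credentials below are placeholders.
#
# library(RODBC)
# AFSC  <- odbcConnect("afsc_dsn", uid = "user", pwd = "pass")
# fyear <- 2015
# catch <- GET_CATCH(area = "GOA", species = "'PLCK'", FYR = fyear,
#                    ADD_OLD = FALSE)
# close(AFSC)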
import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score

nmi = normalized_mutual_info_score
ari = adjusted_rand_score


def acc(y_true, y_pred):
    """
    Calculate clustering accuracy. Requires scikit-learn and scipy installed.

    # Arguments
        y_true: true labels, numpy.array with shape `(n_samples,)`
        y_pred: predicted labels, numpy.array with shape `(n_samples,)`

    # Return
        accuracy, in [0,1]
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    D = max(y_pred.max(), y_true.max()) + 1
    # Contingency matrix between predicted cluster ids and true class ids.
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    # Best one-to-one mapping of cluster ids to class ids via the Hungarian
    # algorithm (sklearn.utils.linear_assignment_ was removed; use scipy).
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / y_pred.size


def cluster_acc(Y_pred, Y):
    """Same as `acc`, but also returns the contingency matrix."""
    assert Y_pred.size == Y.size
    D = max(Y_pred.max(), Y.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(Y_pred.size):
        w[Y_pred[i], Y[i]] += 1
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / Y_pred.size, w
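if __name__ == '__main__':
    # Quick sanity check: the same clustering under a permuted labeling
    # should achieve accuracy 1.0 (toy data, purely illustrative).
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([1, 1, 0, 0, 2, 2])
    print(acc(y_true, y_pred))   # 1.0
    print(nmi(y_true, y_pred))   # 1.0
    print(ari(y_true, y_pred))   # 1.0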
# Tutorial 4

## Mean Squared Displacement (MSD)

Molecules in liquids, gases and solids do not stay in the same place and move constantly. Think about a drop of dye in a glass of water: as time passes, the dye distributes throughout the water. This process is called diffusion and is common throughout nature and an incredibly relevant property for materials scientists who work on things like batteries.

Using the dye as an example, the motion of a dye molecule is not simple. As it moves it is jostled by collisions with other molecules, preventing it from moving in a straight path. If the path is examined in close detail, it will be seen to be a good approximation to a random walk. In mathematics a random walk is a series of steps, each taken in a random direction. This was analysed by Albert Einstein in a study of Brownian motion, and he showed that the mean square of the distance travelled by a particle following a random walk is proportional to the time elapsed:

\begin{align}
\Big \langle r_{i}^{2} \Big \rangle & = 6 D t + C
\end{align}

where

\begin{align}
\Big \langle r_{i}^{2} \Big \rangle = \Big \langle | r_{i}(t) - r_{i}(0) |^2 \Big \rangle,
\end{align}

$\Big \langle r_{i}^{2} \Big \rangle$ is the mean squared displacement, $t$ is time, $D$ is the diffusion coefficient and $C$ is a constant. If $\Big \langle r_{i}^{2} \Big \rangle$ is plotted as a function of time, the gradient of the curve obtained is equal to 6 times the self-diffusion coefficient of particle $i$. The state of matter affects the shape of the MSD plot: solids, where little to no diffusion is occurring, have a flat MSD profile, whereas in a liquid the particles diffuse randomly and the gradient of the curve is proportional to the diffusion coefficient.

## What is the mean squared displacement?

Going back to the example of the dye in water, let's assume for the sake of simplicity that we are in one dimension. Each step can either be forwards or backwards, and we cannot predict which. From a given starting position, what distance is our dye molecule likely to travel after 1000 steps? This can be determined simply by adding together the steps, taking into account the fact that steps backwards subtract from the total, while steps forward add to the total. Since both forward and backward steps are equally probable, we come to the surprising conclusion that the probable distance travelled sums up to zero. By adding the square of the distance we will always be adding positive numbers to our total, which now increases linearly with time. Based upon equation 1 it should now be clear that a plot of $\Big \langle r_{i}^{2} \Big \rangle$ vs time will produce a line whose gradient is equal to $6D$, giving us direct access to the diffusion coefficient of the system.

```python
from polypy import read as rd
from polypy.msd import MSD
from polypy.msd import RegionalMSD
from polypy import analysis
from polypy import utils as ut
from polypy import plotting
import numpy as np
import matplotlib.pyplot as plt
```

This example will use a short (50,000 steps), pre-prepared trajectory of bulk $CaF_2$. In reality we probably want a considerably longer simulation (~10,000,000 steps). Such simulations generate huge files (5GB) and the analysis would take too long for this tutorial.

The first step is to read the history file to generate the data. The `HISTORY` class expects two things: the filename of the history file and a list of atoms to read.
It will return a `polypy.read.Trajectory` object, which stores the atom labels (`Trajectory.atom_labels`), datatype (`Trajectory.data_type`), cartesian coordinates (`Trajectory.cartesian_coordinates`), fractional coordinates (`Trajectory.fractional_coordinates`), reciprocal lattice vectors (`Trajectory.reciprocal_lv`), lattice vectors (`Trajectory.lv`), cell lengths (`Trajectory.cell_lengths`), total atoms in the file (`Trajectory.atoms_in_history`), timesteps (`Trajectory.timesteps`) and total atoms per timestep (`Trajectory.total_atoms`).

```python
history_caf2 = rd.History("../example_data/HISTORY_CaF2", ["F"])
```

Once the data has been read into the code, the MSD calculation can be performed using the `MSD` class. The code will return a `polypy.MSD.MSDContainer` object, which contains the MSD information.

```python
f_msd = MSD(history_caf2.trajectory, sweeps=2)

output = f_msd.msd()
```

```python
ax = plotting.msd_plot(output)

plt.show()
```

MSD calculations require a large number of statistics to be considered representative. A full MSD will use every single frame of the trajectory as a starting point and effectively do a separate MSD from each starting point; these are then averaged to give the final result. An MSD is technically an ensemble average over all sweeps and all particles. The sweeps parameter is used to control the number of frames that are used as starting points in the calculation. For simulations with lots of diffusion events a smaller number will be sufficient, whereas simulations with a small number of diffusion events will require a larger number.

```python
f_msd = MSD(history_caf2.trajectory, sweeps=10)

output = f_msd.msd()
ax = plotting.msd_plot(output)
plt.show()
```

```python
print("Three Dimensional Diffusion Coefficient", output.xyz_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in X", output.x_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in Y", output.y_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in Z", output.z_diffusion_coefficient())
```

    Three Dimensional Diffusion Coefficient 1.6077008453783734
    One Dimensional Diffusion Coefficient in X 1.6043984537894274
    One Dimensional Diffusion Coefficient in Y 1.6855680306977678
    One Dimensional Diffusion Coefficient in Z 1.5331360516479267

Note: An MSD is supposed to be linear only after a ballistic regime and it usually lacks statistics for longer times. Thus the linear fit to extract the slope, and hence the diffusion coefficient, should be done on a portion of the MSD only.
This can be accomplished using the `exclude_initial` and `exclude_final` parameters.

```python
print("Three Dimensional Diffusion Coefficient", output.xyz_diffusion_coefficient(exclude_initial=50, exclude_final=50))
print("One Dimensional Diffusion Coefficient in X", output.x_diffusion_coefficient(exclude_initial=50, exclude_final=50))
print("One Dimensional Diffusion Coefficient in Y", output.y_diffusion_coefficient(exclude_initial=50, exclude_final=50))
print("One Dimensional Diffusion Coefficient in Z", output.z_diffusion_coefficient(exclude_initial=50, exclude_final=50))
```

    Three Dimensional Diffusion Coefficient 1.5912662736409342
    One Dimensional Diffusion Coefficient in X 1.5862517497696607
    One Dimensional Diffusion Coefficient in Y 1.6753802400942055
    One Dimensional Diffusion Coefficient in Z 1.5121668310589353

## Arrhenius

It is then possible to take diffusion coefficients, calculated over a large temperature range, and use the Arrhenius equation to calculate the activation energy for diffusion. Common sense and chemical intuition suggest that the higher the temperature, the faster a given chemical reaction will proceed. Quantitatively, this relationship between the rate at which a reaction proceeds and its temperature is determined by the Arrhenius equation. At higher temperatures, the probability that two molecules will collide is higher. This higher collision rate results in a higher kinetic energy, which has an effect on the activation energy of the reaction. The activation energy is the amount of energy required to ensure that a reaction happens (a short fitting sketch is given at the end of this tutorial).

\begin{align}
k = A e^{-E_a / RT}
\end{align}

where $k$ is the rate coefficient, $A$ is a constant, $E_a$ is the activation energy, $R$ is the universal gas constant, and $T$ is the temperature (in kelvin).

## Ionic Conductivity

Usefully, as we have the diffusion coefficient, the number of particles (charge carriers) and the ability to calculate the volume, we can convert this data into the ionic conductivity and then the resistance.

\begin{align}
\sigma & = \frac{D C_F e^2}{k_B T}
\end{align}

where $\sigma$ is the ionic conductivity, $D$ is the diffusion coefficient, $C_F$ is the concentration of charge carriers (which in this case is F ions), $e$ is the charge of the diffusing species, $k_B$ is the Boltzmann constant and $T$ is the temperature. The resistance can then be calculated according to

\begin{align}
\Omega & = \frac{1}{\sigma}
\end{align}

So the first step is to calculate the volume; the system volume module will do this from the given data.

```python
volume, step = analysis.system_volume(history_caf2.trajectory)
average_volume = np.mean(volume[:50])
```

The number of charge carriers is just the total number of atoms.

```python
sigma = analysis.conductivity(history_caf2.trajectory.total_atoms,
                              average_volume,
                              output.xyz_diffusion_coefficient(),
                              1500,
                              1)
```

```python
print("Ionic Conductivity :", sigma)
```

    Ionic Conductivity : 0.0008752006872146488

```python
print("Resistivity :", (1 / sigma))
```

    Resistivity : 1142.5950808865662

## Simulation Length

It is important to consider the length of your simulation (number of steps). The above examples use a short trajectory, but it is at a sufficient temperature that there are enough diffusion events to get a good MSD plot. The following example is of a very short simulation; you will hopefully note that the MSD plot is clearly not converged.
```python
history_short = rd.History("../example_data/HISTORY_short", atom_list=["F"])
```

```python
f_msd_short = MSD(history_short.trajectory, sweeps=2)

output = f_msd_short.msd()
```

```python
ax = plotting.msd_plot(output)

plt.show()
```

```python
print("Three Dimensional Diffusion Coefficient", output.xyz_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in X", output.x_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in Y", output.y_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in Z", output.z_diffusion_coefficient())
```

    Three Dimensional Diffusion Coefficient 1.5862125967428773
    One Dimensional Diffusion Coefficient in X 1.572862308748596
    One Dimensional Diffusion Coefficient in Y 1.6321092976984717
    One Dimensional Diffusion Coefficient in Z 1.553666183781564

Amusingly, this actually does not seem to have a huge effect on the diffusion coefficient compared with the longer simulation. However, these trajectories are from a CaF$_2$ simulation at 1500 K, and there are thus a large number of diffusion events even in the short time frame.

## State of Matter

It is possible to identify the phase of matter from the MSD plot.

<center>
<i>Figure 1. The anticipated MSD form for each state of matter.</i>
</center>

The fluorine diffusion discussed already clearly shows that the fluorine sublattice has melted and the diffusion is liquid-like. Carrying out the same analysis on the calcium sublattice shows that, while the fluorine sublattice has melted, the calcium sublattice is still behaving like a solid.

```python
f_msd = MSD(history_caf2.trajectory, sweeps=2)

output = f_msd.msd()
ax = plotting.msd_plot(output)
plt.show()
```

## Regional MSD Calculations

Often in solid state chemistry, simulations involve defects, both structural (e.g. grain boundaries, dislocations and surfaces) and chemical (e.g. point defects). It is important to try to isolate the contributions of these defects to the overall properties. Regarding diffusion, it could be imagined that a certain region within a structure will have different properties compared with the stoichiometric bulk, e.g. a grain boundary vs the grains, or the surface vs the bulk. `polypy` has the capability to isolate trajectories that pass within certain regions of a structure and thus calculate a diffusion coefficient for those regions. In this example we will calculate the diffusion coefficient in a box between -5.0 and 5.0 in the dimension of the first lattice vector.

```python
f_msd = RegionalMSD(history_caf2.trajectory, -5, 5, dimension="x")
output = f_msd.analyse_trajectory()
```

```python
ax = plotting.msd_plot(output)
plt.show()
```

```python
print("Three Dimensional Diffusion Coefficient", output.xyz_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in X", output.x_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in Y", output.y_diffusion_coefficient())
print("One Dimensional Diffusion Coefficient in Z", output.z_diffusion_coefficient())
```

    Three Dimensional Diffusion Coefficient 1.5967417785595026
    One Dimensional Diffusion Coefficient in X 1.611906338211519
    One Dimensional Diffusion Coefficient in Y 1.670973618673711
    One Dimensional Diffusion Coefficient in Z 1.5073453787932771

#### DLMONTE

```python
archive = rd.Archive("../example_data/ARCHIVE_LLZO", atom_list=["O"])
```

```python
f_msd = MSD(archive.trajectory, sweeps=2)
```
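Returning to the Arrhenius relation introduced earlier, once diffusion coefficients have been collected over a range of temperatures, the activation energy can be extracted from a linear fit of $\ln D$ against $1/T$. A minimal sketch follows; the diffusion coefficients below are made up purely for illustration.

```python
import numpy as np

# Hypothetical diffusion coefficients (cm^2 s^-1) at several temperatures (K).
temperatures = np.array([1300.0, 1400.0, 1500.0, 1600.0])
diffusion = np.array([4.1e-6, 7.9e-6, 1.6e-5, 2.7e-5])

# ln D = ln A - Ea / (R T), so a line fitted to ln D vs 1/T has slope -Ea / R.
R = 8.314  # gas constant, J mol^-1 K^-1
slope, intercept = np.polyfit(1.0 / temperatures, np.log(diffusion), 1)
activation_energy = -slope * R  # J mol^-1
print("Activation energy: {:.1f} kJ/mol".format(activation_energy / 1000))
```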
#! /usr/bin/env python ############################################################################### # planar_crane.py # # Defines a planar crane environment for use with the openAI Gym. # This version has a continuous range of inputs for the trolley accel. input # We are treating the trolley in a way that assumes we can exactly control its # motion. We specify its acceleration as the input. # Cable length is constant. # # NOTE: Any plotting is set up for output, not viewing on screen. # So, it will likely be ugly on screen. The saved PDFs should look # better. # # Created: 07/07/17 # - Joshua Vaughan # - [email protected] # - http://www.ucs.louisiana.edu/~jev9637 # # Modified: # * # # TODO: # * ############################################################################### import gym from gym import spaces from gym.utils import seeding import logging import numpy as np import datetime # for unique filenames logger = logging.getLogger(__name__) class PlanarCraneEnv(gym.Env): metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second' : 50 } MAX_TROLLEY_ACCEL = 1.0 # (m/s^2) NUM_ACTIONS = 9 # Need to use an odd number to include 0 AVAIL_TROLLEY_ACCEL = np.linspace(-MAX_TROLLEY_ACCEL, MAX_TROLLEY_ACCEL, NUM_ACTIONS) def __init__(self): self.gravity = 9.8 # accel. due to gravity (m/s^2) self.masspend = 1.0 # mass of the pendulum point mass (kg) self.cable_length = 2.0 # cable length (m) self.tau = 0.02 # seconds between state updates self.counter = 0 # counter for number of steps self.desired_trolley = 0 # desired final position of payload self.MAX_STEPS = 500 # maximum number of steps to run self.SAVE_DATA = False # set True to save episode data # Define thesholds for failing episode self.theta_threshold = 60 * np.pi / 180 # +/- 45 degree limit (rad) self.x_max_threshold = 4.0 # max trolley position (m) self.v_max_threshold = 0.5 # max trolley velocity (m/s) # This action space is just hoist down, do nothing, hoist up # self.action_space = spaces.Box(low=-self.max_cable_accel, # high=self.max_cable_accel, # shape = (1,)) # This action space is just accel left, do nothing, accel right self.action_space = spaces.Discrete(self.NUM_ACTIONS) high_limit = np.array([2*self.theta_threshold, # max observable angle 10*2*self.theta_threshold, # max observable angular vel. self.x_max_threshold, # max observable position self.v_max_threshold]) # max observable cable vel low_limit = -high_limit # limits are symmetric about 0 self.observation_space = spaces.Box(high_limit, low_limit) self.seed() self.viewer = None self.state = None self.x_accel = 0.0 def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def step(self, action): theta, theta_dot, x, x_dot = self.state self.counter = self.counter + 1 # Update the trolley states self.x_accel = self.AVAIL_TROLLEY_ACCEL[action] x_dot = x_dot + self.tau * self.x_accel x = x + self.tau * x_dot # Update the pendulum states theta_ddot = -self.gravity / self.cable_length * theta + 1.0 / self.cable_length * self.x_accel theta_dot = theta_dot + self.tau * theta_ddot theta = theta + self.tau * theta_dot self.state = (theta, theta_dot, x, x_dot) # Define a boolean on whether we're exceeding limits or not. 
We'll just penalize # any of these conditions identically in the reward function limits = x > self.x_max_threshold \ or x < -self.x_max_threshold \ or theta < -self.theta_threshold \ or theta > self.theta_threshold \ or x_dot > self.v_max_threshold \ or x_dot < -self.v_max_threshold \ # TODO: 07/09/17 - This has *huge* effect on the outcome. Decide "optimal" reward scheme. distance_to_target = self.desired_trolley - (x - self.cable_length * np.sin(theta)) #reward = -1.0 + 0.001 / (distance_to_target)**2 - 0.0001*self.x_accel**2 - limits*10 if np.abs(distance_to_target) >= 0.01: # reward = -1.0 - 10*theta**2 - 0.1*self.x_accel**2 - limits*10 reward = -np.clip(self.counter*self.tau, 1, 50) - 10*theta**2 - 0.1*self.x_accel**2 - limits*10 else: reward = 1000.0 - 250*self.x_accel**2 #- 10*theta**2 - 0.1*self.x_accel**2 - limits*10 # - 0.01 * self.x_accel**2 # reward = -(1/0.01) * x**2 - 1/(0.5 * np.pi/180)*theta**2 - limits*100 if self.SAVE_DATA: current_data = np.array([self.counter * self.tau, theta, theta_dot, x, x_dot, self.x_accel, reward]) self.episode_data[self.counter, :] = current_data if self.counter >= self.MAX_STEPS: done = True if self.SAVE_DATA: header = header='Time (s), Angle (rad), Angle (rad/s), Trolley Pos (m), Trolly Vel (m/s), Trolley Accel (m/s^2), Reward' data_filename = 'example_data/EpisodeData_{}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')) np.savetxt(data_filename, self.episode_data, header=header, delimiter=',') else: done = False return np.array(self.state), reward, done, {} def reset(self): # TODO: 07/07/17 - Probably need more randomness in initial conditions self.state = np.array([0,#self.np_random.uniform(low=-5*np.pi/180, high=5*np.pi/180), 0, #self.np_random.uniform(low=-0.5*np.pi/6, high=0.5*np.pi/6), self.np_random.uniform(low=-3.0, high=3.0), 0])#self.np_random.uniform(low=-0.5, high=0.5)]) # Reset the counter and the data recorder array self.counter = 0 if self.SAVE_DATA: self.episode_data = np.zeros((501, 7)) self.episode_data[0,:] = np.array([0, self.state[0], self.state[1], self.state[2], self.state[3], 0, 0]) return np.array(self.state) def render(self, mode='human', close=False): if close: if self.viewer is not None: self.viewer.close() self.viewer = None return screen_width = 600 screen_height = 400 world_width = 1.5 * self.x_max_threshold scale = screen_width/world_width scale = screen_width/world_width # Scale according to width # scale = screen_height/world_height # Scale according to height # Define the payload diameter and cable width in pixels payload_size = 10.0 cable_width = 2.0 # Define the trolley size and its offset from the bottom of the screen (pixels) trolley_width = 50.0 trolley_height = 30.0 trolley_yOffset = screen_height-25 theta, theta_dot, x, x_dot = self.state if self.viewer is None: # Initial scene setup from gym.envs.classic_control import rendering self.viewer = rendering.Viewer(screen_width, screen_height) # the target is a series of circles, a bullseye self.target = rendering.make_circle(payload_size*2) self.targettrans = rendering.Transform(translation=(screen_width/2 + self.desired_trolley*scale, trolley_yOffset-self.cable_length*scale)) self.target.add_attr(self.targettrans) self.target.set_color(1,0,0) # red self.viewer.add_geom(self.target) self.target = rendering.make_circle(payload_size*1.25) self.targettrans = rendering.Transform(translation=(screen_width/2 + self.desired_trolley*scale, trolley_yOffset-self.cable_length*scale)) self.target.add_attr(self.targettrans) self.target.set_color(1,1,1) # 
white self.viewer.add_geom(self.target) self.target = rendering.make_circle(payload_size/2) self.targettrans = rendering.Transform(translation=(screen_width/2 + self.desired_trolley*scale, trolley_yOffset-self.cable_length*scale)) self.target.add_attr(self.targettrans) self.target.set_color(1,0,0) # red self.viewer.add_geom(self.target) # Define the trolley polygon l,r,t,b = -trolley_width/2, trolley_width/2, trolley_height/2, -trolley_height/2 self.trolley = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)]) self.trolleytrans = rendering.Transform(translation=(screen_width/2 + x*scale, trolley_yOffset)) self.trolley.add_attr(self.trolleytrans) self.trolley.set_color(0.85,0.85,0.85) # light gray self.viewer.add_geom(self.trolley) # define the cable as a polygon, so we can change its length later l,r,t,b = -cable_width/2, cable_width/2, cable_width/2, -self.cable_length*scale-cable_width/2 self.cable = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)]) self.cabletrans = rendering.Transform(translation=(screen_width/2 + x*scale, trolley_yOffset)) self.cable.add_attr(self.cabletrans) self.cable.set_color(0.25,0.25,0.25) # dark gray self.viewer.add_geom(self.cable) # the payload is a circle. self.payload = rendering.make_circle(payload_size) self.payloadtrans = rendering.Transform(translation=(screen_width/2 + x*scale, trolley_yOffset-self.cable_length)) self.payload.add_attr(self.payloadtrans) self.payload.set_color(0.5,0.5,0.5) # mid gray self.viewer.add_geom(self.payload) # This is a bar that shows the direction of the current accel. command l,r,t,b = -10.0, 10.0, cable_width/2, -cable_width/2 self.accel = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)]) self.acceltrans = rendering.Transform(translation=(screen_width/2 + x*scale-trolley_width/2, trolley_yOffset)) self.accel.add_attr(self.acceltrans) self.accel.set_color(0.1, 0.1, 0.5) self.viewer.add_geom(self.accel) # calculate the payload position in the window, then move it there payload_screen_x = (x - self.cable_length*np.sin(theta))*scale payload_screen_y = trolley_yOffset - self.cable_length*np.cos(theta)*scale self.payloadtrans.set_translation(screen_width/2 + payload_screen_x, payload_screen_y) # rotate the cable self.cabletrans.set_translation(screen_width/2 + x*scale, trolley_yOffset) self.cabletrans.set_rotation(-theta) # move the trolley self.trolleytrans.set_translation(screen_width/2 + x*scale, trolley_yOffset) # show the accel direction #self.acceltrans.set_translation(screen_width/2 + (x*scale + np.sign(self.x_accel)*(trolley_width/2+10)), trolley_yOffset) # show the accel direction accel_scaling = 0.025*self.x_accel*scale # self.acceltrans.set_translation(screen_width/2 + (x*scale + np.sign(self.x_accel)*(trolley_width/2+10)), trolley_yOffset) self.acceltrans.set_translation(screen_width/2 + (x*scale + (20*accel_scaling/2)), trolley_yOffset) self.acceltrans.set_scale(accel_scaling, 1) return self.viewer.render(return_rgb_array = mode=='rgb_array')
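# A minimal usage sketch (illustrative; run directly to sanity-check the
# dynamics without rendering):
#
# if __name__ == '__main__':
#     env = PlanarCraneEnv()
#     obs = env.reset()
#     for _ in range(200):
#         action = env.action_space.sample()   # random trolley accel. index
#         obs, reward, done, info = env.step(action)
#         if done:
#             break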
I am a Civil Engineering major. I might commonly be sighted at the Silo getting some fast food, or at Surge IV distracting people that are trying to study for real. I ride my fixie around town looking all cool, except that I got a break now :(. I have the coolest Users/mydiaz girlfriend anyone could ask for, with the coolest major in the world (Environmental Toxicology)... Im kinda jealous, actually! ...A girlfriend who, apparently, cannot spell.
lemma kuhn_counting_lemma: fixes bnd compo compo' face S F defines "nF s == card {f\<in>F. face f s \<and> compo' f}" assumes [simp, intro]: "finite F" \<comment> \<open>faces\<close> and [simp, intro]: "finite S" \<comment> \<open>simplices\<close> and "\<And>f. f \<in> F \<Longrightarrow> bnd f \<Longrightarrow> card {s\<in>S. face f s} = 1" and "\<And>f. f \<in> F \<Longrightarrow> \<not> bnd f \<Longrightarrow> card {s\<in>S. face f s} = 2" and "\<And>s. s \<in> S \<Longrightarrow> compo s \<Longrightarrow> nF s = 1" and "\<And>s. s \<in> S \<Longrightarrow> \<not> compo s \<Longrightarrow> nF s = 0 \<or> nF s = 2" and "odd (card {f\<in>F. compo' f \<and> bnd f})" shows "odd (card {s\<in>S. compo s})"
(* Title: HOL/Auth/n_g2kAbsAfter_lemma_inv__86_on_rules.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_g2kAbsAfter Protocol Case Study*} theory n_g2kAbsAfter_lemma_inv__86_on_rules imports n_g2kAbsAfter_lemma_on_inv__86 begin section{*All lemmas on causal relation between inv__86*} lemma lemma_inv__86_on_rules: assumes b1: "r \<in> rules N" and b2: "(f=inv__86 )" shows "invHoldForRule s f r (invariants N)" proof - have c1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)\<or> (\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)\<or> (r=n_n_SendReqS_j1 )\<or> (r=n_n_SendReqEI_i1 )\<or> (r=n_n_SendReqES_i1 )\<or> (r=n_n_RecvReq_i1 )\<or> (r=n_n_SendInvE_i1 )\<or> (r=n_n_SendInvS_i1 )\<or> (r=n_n_SendInvAck_i1 )\<or> (r=n_n_RecvInvAck_i1 )\<or> (r=n_n_SendGntS_i1 )\<or> (r=n_n_SendGntE_i1 )\<or> (r=n_n_RecvGntS_i1 )\<or> (r=n_n_RecvGntE_i1 )\<or> (r=n_n_ASendReqIS_j1 )\<or> (r=n_n_ASendReqSE_j1 )\<or> (r=n_n_ASendReqEI_i1 )\<or> (r=n_n_ASendReqES_i1 )\<or> (r=n_n_SendReqEE_i1 )\<or> (r=n_n_ARecvReq_i1 )\<or> (r=n_n_ASendInvE_i1 )\<or> (r=n_n_ASendInvS_i1 )\<or> (r=n_n_ASendInvAck_i1 )\<or> (r=n_n_ARecvInvAck_i1 )\<or> (r=n_n_ASendGntS_i1 )\<or> (r=n_n_ASendGntE_i1 )\<or> (r=n_n_ARecvGntS_i1 )\<or> (r=n_n_ARecvGntE_i1 )" apply (cut_tac b1, auto) done moreover { assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_Store_i1Vsinv__86) done } moreover { assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_AStore_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendReqS_j1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendReqS_j1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendReqEI_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendReqEI_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendReqES_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendReqES_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_RecvReq_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_RecvReq_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendInvE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendInvE_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendInvS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendInvS_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendInvAck_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendInvAck_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_RecvInvAck_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_RecvInvAck_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendGntS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendGntS_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendGntE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendGntE_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_RecvGntS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_RecvGntS_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_RecvGntE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 
d1, metis n_n_RecvGntE_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendReqIS_j1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendReqIS_j1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendReqSE_j1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendReqSE_j1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendReqEI_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendReqEI_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendReqES_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendReqES_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_SendReqEE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_SendReqEE_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ARecvReq_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ARecvReq_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendInvE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendInvE_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendInvS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendInvS_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendInvAck_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendInvAck_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ARecvInvAck_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ARecvInvAck_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendGntS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendGntS_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ASendGntE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ASendGntE_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ARecvGntS_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ARecvGntS_i1Vsinv__86) done } moreover { assume d1: "(r=n_n_ARecvGntE_i1 )" have "invHoldForRule s f r (invariants N)" apply (cut_tac b2 d1, metis n_n_ARecvGntE_i1Vsinv__86) done } ultimately show "invHoldForRule s f r (invariants N)" by satx qed end
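The proof above is an instance of a fixed, mechanically generated pattern: enumerate every rule of the protocol, close each case with the matching per-rule lemma, and combine all cases. A minimal hand-written sketch of the pattern follows; the rule names r1 and r2 and the per-rule lemmas r1Vsinv and r2Vsinv are hypothetical placeholders, and the exhaustive case split is left as sorry, so this is a schematic outline rather than part of the generated development (which closes the final step with its own method satx instead of blast).

lemma inv_on_rules_sketch:
  assumes b1: "r \<in> rules N" and b2: "f = inv"
  shows "invHoldForRule s f r (invariants N)"
proof -
  (* exhaustive case split over the rule set; the generated proofs discharge this by (cut_tac b1, auto) *)
  have c1: "r = r1 \<or> r = r2" sorry
  moreover {
    assume d1: "r = r1"
    (* each case is closed by the corresponding per-rule lemma *)
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis r1Vsinv) done }
  moreover {
    assume d1: "r = r2"
    have "invHoldForRule s f r (invariants N)"
      apply (cut_tac b2 d1, metis r2Vsinv) done }
  ultimately show "invHoldForRule s f r (invariants N)" by blast
qed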
In ice hockey, a combination of a player's goals and assists is collectively called points. Penalty minutes are the total number of minutes assigned to a player for infractions assessed during the season. Plus-minus is a statistic that tracks when a player was on the ice while goals were scored, both for and against their team, though some in-game situations will not affect the statistic. Below is a listing of all player statistics for the Blue Jackets during the season.
(* Authors: Jose Divasón Maximilian Haslbeck Sebastiaan Joosten René Thiemann Akihisa Yamada License: BSD *) section \<open>The LLL Algorithm\<close> text \<open>Soundness of the LLL algorithm is proven in four steps. In the basic version, we do recompute the Gram-Schmidt orthogonal (GSO) basis in every step. This basic version will have a full functional soundness proof, i.e., termination and the property that the returned basis is reduced. Then in LLL-Number-Bounds we will strengthen the invariant and prove that all intermediate numbers stay polynomial in size. Moreover, in LLL-Impl we will refine the basic version, so that the GSO does not need to be recomputed in every step. Finally, in LLL-Complexity, we develop a cost-annotated version of the refined algorithm and prove a polynomial upper bound on the number of arithmetic operations.\<close> text \<open>This theory provides a basic implementation and a soundness proof of the LLL algorithm to compute a "short" vector in a lattice.\<close> theory LLL imports Gram_Schmidt_2 Missing_Lemmas Jordan_Normal_Form.Determinant "Abstract-Rewriting.SN_Order_Carrier" begin subsection \<open>Core Definitions, Invariants, and Theorems for Basic Version\<close> (* Note/TODO by Max Haslbeck: Up to here I refactored the code in Gram_Schmidt_2 and Gram_Schmidt_Int which now makes heavy use of locales. In the future I would also like to do this here (instead of using LLL_invariant everywhere). *) locale LLL = fixes n :: nat (* n-dimensional vectors *) and m :: nat (* number of vectors *) and fs_init :: "int vec list" (* initial basis *) and \<alpha> :: rat (* approximation factor *) begin sublocale vec_module "TYPE(int)" n. abbreviation RAT where "RAT \<equiv> map (map_vec rat_of_int)" abbreviation SRAT where "SRAT xs \<equiv> set (RAT xs)" abbreviation Rn where "Rn \<equiv> carrier_vec n :: rat vec set" sublocale gs: gram_schmidt_fs n "RAT fs_init" . abbreviation lin_indep where "lin_indep fs \<equiv> gs.lin_indpt_list (RAT fs)" abbreviation gso where "gso fs \<equiv> gram_schmidt_fs.gso n (RAT fs)" abbreviation \<mu> where "\<mu> fs \<equiv> gram_schmidt_fs.\<mu> n (RAT fs)" abbreviation reduced where "reduced fs \<equiv> gram_schmidt_fs.reduced n (RAT fs) \<alpha>" abbreviation weakly_reduced where "weakly_reduced fs \<equiv> gram_schmidt_fs.weakly_reduced n (RAT fs) \<alpha>" text \<open>lattice of initial basis\<close> definition "L = lattice_of fs_init" text \<open>maximum squared norm of initial basis\<close> definition "N = max_list (map (nat \<circ> sq_norm) fs_init)" text \<open>maximum absolute value in initial basis\<close> definition "M = Max ({abs (fs_init ! i $ j) | i j. i < m \<and> j < n} \<union> {0})" text \<open>This is the core invariant which enables us to prove functional correctness.\<close> definition "\<mu>_small fs i = (\<forall> j < i. abs (\<mu> fs i j) \<le> 1/2)" definition LLL_invariant :: "bool \<Rightarrow> nat \<Rightarrow> int vec list \<Rightarrow> bool" where "LLL_invariant upw i fs = ( gs.lin_indpt_list (RAT fs) \<and> lattice_of fs = L \<and> reduced fs i \<and> i \<le> m \<and> length fs = m \<and> (upw \<or> \<mu>_small fs i) )" lemma LLL_invD: assumes "LLL_invariant upw i fs" shows "lin_indep fs" "length (RAT fs) = m" "set fs \<subseteq> carrier_vec n" "\<And> i. i < m \<Longrightarrow> fs ! i \<in> carrier_vec n" "\<And> i.
i < m \<Longrightarrow> gso fs i \<in> carrier_vec n" "length fs = m" "lattice_of fs = L" "weakly_reduced fs i" "i \<le> m" "reduced fs i" "upw \<or> \<mu>_small fs i" proof (atomize (full), goal_cases) case 1 interpret gs': gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use assms LLL_invariant_def gs.lin_indpt_list_def in auto) show ?case using assms gs'.fs_carrier gs'.f_carrier gs'.gso_carrier by (auto simp add: LLL_invariant_def gram_schmidt_fs.reduced_def) qed lemma LLL_invI: assumes "set fs \<subseteq> carrier_vec n" "length fs = m" "lattice_of fs = L" "i \<le> m" "lin_indep fs" "reduced fs i" "upw \<or> \<mu>_small fs i" shows "LLL_invariant upw i fs" unfolding LLL_invariant_def Let_def split using assms by auto end locale fs_int' = fixes n m fs_init \<alpha> upw i fs assumes LLL_inv: "LLL.LLL_invariant n m fs_init \<alpha> upw i fs" sublocale fs_int' \<subseteq> fs_int_indpt using LLL_inv unfolding LLL.LLL_invariant_def by (unfold_locales) blast context LLL begin lemma gso_cong: assumes "\<And> i. i \<le> x \<Longrightarrow> f1 ! i = f2 ! i" "x < length f1" "x < length f2" shows "gso f1 x = gso f2 x" by (rule gs.gso_cong, insert assms, auto) lemma \<mu>_cong: assumes "\<And> k. j < i \<Longrightarrow> k \<le> j \<Longrightarrow> f1 ! k = f2 ! k" and i: "i < length f1" "i < length f2" and "j < i \<Longrightarrow> f1 ! i = f2 ! i" shows "\<mu> f1 i j = \<mu> f2 i j" by (rule gs.\<mu>_cong, insert assms, auto) definition reduction where "reduction = (4+\<alpha>)/(4*\<alpha>)" definition d :: "int vec list \<Rightarrow> nat \<Rightarrow> int" where "d fs k = gs.Gramian_determinant fs k" definition D :: "int vec list \<Rightarrow> nat" where "D fs = nat (\<Prod> i < m. d fs i)" definition "d\<mu> gs i j = int_of_rat (of_int (d gs (Suc j)) * \<mu> gs i j)" definition logD :: "int vec list \<Rightarrow> nat" where "logD fs = (if \<alpha> = 4/3 then (D fs) else nat (floor (log (1 / of_rat reduction) (D fs))))" definition LLL_measure :: "nat \<Rightarrow> int vec list \<Rightarrow> nat" where "LLL_measure i fs = (2 * logD fs + m - i)" context fixes upw i fs assumes Linv: "LLL_invariant upw i fs" begin interpretation fs: fs_int' n m fs_init \<alpha> upw i fs by (standard) (use Linv in auto) lemma Gramian_determinant: assumes k: "k \<le> m" shows "of_int (gs.Gramian_determinant fs k) = (\<Prod> j<k. 
sq_norm (gso fs j))" (is ?g1) "gs.Gramian_determinant fs k > 0" (is ?g2) using assms fs.Gramian_determinant LLL_invD[OF Linv] by auto lemma LLL_d_pos [intro]: assumes k: "k \<le> m" shows "d fs k > 0" unfolding d_def using fs.Gramian_determinant k LLL_invD[OF Linv] by auto lemma LLL_d_Suc: assumes k: "k < m" shows "of_int (d fs (Suc k)) = sq_norm (gso fs k) * of_int (d fs k)" using assms fs.fs_int_d_Suc LLL_invD[OF Linv] unfolding fs.d_def d_def by auto lemma LLL_D_pos: shows "D fs > 0" using fs.fs_int_D_pos LLL_invD[OF Linv] unfolding D_def fs.D_def fs.d_def d_def by auto text \<open>Condition when we can increase the value of $i$\<close> lemma increase_i: assumes i: "i < m" and upw: "upw \<Longrightarrow> i = 0" and red_i: "i \<noteq> 0 \<Longrightarrow> sq_norm (gso fs (i - 1)) \<le> \<alpha> * sq_norm (gso fs i)" shows "LLL_invariant True (Suc i) fs" "LLL_measure i fs > LLL_measure (Suc i) fs" proof - note inv = LLL_invD[OF Linv] from inv(8,10) have red: "weakly_reduced fs i" and sred: "reduced fs i" by (auto) from red red_i i have red: "weakly_reduced fs (Suc i)" unfolding gram_schmidt_fs.weakly_reduced_def by (intro allI impI, rename_tac ii, case_tac "Suc ii = i", auto) from inv(11) upw have sred_i: "\<And> j. j < i \<Longrightarrow> \<bar>\<mu> fs i j\<bar> \<le> 1 / 2" unfolding \<mu>_small_def by auto from sred sred_i have sred: "reduced fs (Suc i)" unfolding gram_schmidt_fs.reduced_def by (intro conjI[OF red] allI impI, rename_tac ii j, case_tac "ii = i", auto) show "LLL_invariant True (Suc i) fs" by (intro LLL_invI, insert inv red sred i, auto) show "LLL_measure i fs > LLL_measure (Suc i) fs" unfolding LLL_measure_def using i by auto qed end text \<open>Standard addition step which makes $\mu_{i,j}$ small\<close> definition "\<mu>_small_row i fs j = (\<forall> j'. j \<le> j' \<longrightarrow> j' < i \<longrightarrow> abs (\<mu> fs i j') \<le> inverse 2)" lemma basis_reduction_add_row_main: assumes Linv: "LLL_invariant True i fs" and i: "i < m" and j: "j < i" and fs': "fs' = fs[ i := fs ! i - c \<cdot>\<^sub>v fs ! j]" shows "LLL_invariant True i fs'" "c = round (\<mu> fs i j) \<Longrightarrow> \<mu>_small_row i fs (Suc j) \<Longrightarrow> \<mu>_small_row i fs' j" (* mu-value at position i j gets small *) "LLL_measure i fs' = LLL_measure i fs" (* new values of gso: no change *) "\<And> i. i < m \<Longrightarrow> gso fs' i = gso fs i" (* new values of mu *) "\<And> i' j'. i' < m \<Longrightarrow> j' < m \<Longrightarrow> \<mu> fs' i' j' = (if i' = i \<and> j' \<le> j then \<mu> fs i j' - of_int c * \<mu> fs j j' else \<mu> fs i' j')" (* new values of d *) "\<And> ii. ii \<le> m \<Longrightarrow> d fs' ii = d fs ii" proof - define bnd :: rat where bnd: "bnd = 4 ^ (m - 1 - Suc j) * of_nat (N ^ (m - 1) * m)" define M where "M = map (\<lambda>i. map (\<mu> fs i) [0..<m]) [0..<m]" note inv = LLL_invD[OF Linv] note Gr = inv(1) have ji: "j \<le> i" "j < m" and jstrict: "j < i" and add: "set fs \<subseteq> carrier_vec n" "i < length fs" "j < length fs" "i \<noteq> j" and len: "length fs = m" and red: "weakly_reduced fs i" and indep: "lin_indep fs" using inv j i by auto let ?R = rat_of_int let ?RV = "map_vec ?R" from inv i j have Fij: "fs ! i \<in> carrier_vec n" "fs ! j \<in> carrier_vec n" by auto let ?x = "fs ! i - c \<cdot>\<^sub>v fs ! j" let ?g = "gso fs" let ?g' = "gso fs'" let ?mu = "\<mu> fs" let ?mu' = "\<mu> fs'" from inv j i have Fi:"\<And> i. i < length (RAT fs) \<Longrightarrow> (RAT fs) ! 
i \<in> carrier_vec n" and gs_carr: "?g j \<in> carrier_vec n" "?g i \<in> carrier_vec n" "\<And> i. i < j \<Longrightarrow> ?g i \<in> carrier_vec n" "\<And> j. j < i \<Longrightarrow> ?g j \<in> carrier_vec n" and len': "length (RAT fs) = m" and add':"set (map ?RV fs) \<subseteq> carrier_vec n" by auto have RAT_F1: "RAT fs' = (RAT fs)[i := (RAT fs) ! i - ?R c \<cdot>\<^sub>v (RAT fs) ! j]" unfolding fs' proof (rule nth_equalityI[rule_format], goal_cases) case (2 k) show ?case proof (cases "k = i") case False thus ?thesis using 2 by auto next case True hence "?thesis = (?RV (fs ! i - c \<cdot>\<^sub>v fs ! j) = ?RV (fs ! i) - ?R c \<cdot>\<^sub>v ?RV (fs ! j))" using 2 add by auto also have "\<dots>" by (rule eq_vecI, insert Fij, auto) finally show ?thesis by simp qed qed auto hence RAT_F1_i:"RAT fs' ! i = (RAT fs) ! i - ?R c \<cdot>\<^sub>v (RAT fs) ! j" (is "_ = _ - ?mui") using i len by auto have uminus: "fs ! i - c \<cdot>\<^sub>v fs ! j = fs ! i + -c \<cdot>\<^sub>v fs ! j" by (subst minus_add_uminus_vec, insert Fij, auto) have "lattice_of fs' = lattice_of fs" unfolding fs' uminus by (rule lattice_of_add[OF add, of _ "- c"], auto) with inv have lattice: "lattice_of fs' = L" by auto from add len have "k < length fs \<Longrightarrow> \<not> k \<noteq> i \<Longrightarrow> fs' ! k \<in> carrier_vec n" for k unfolding fs' by (metis (no_types, lifting) nth_list_update nth_mem subset_eq carrier_dim_vec index_minus_vec(2) index_smult_vec(2)) hence "k < length fs \<Longrightarrow> fs' ! k \<in> carrier_vec n" for k unfolding fs' using add len by (cases "k \<noteq> i",auto) with len have F1: "set fs' \<subseteq> carrier_vec n" "length fs' = m" unfolding fs' by (auto simp: set_conv_nth) hence F1': "length (RAT fs') = m" "SRAT fs' \<subseteq> Rn" by auto from indep have dist: "distinct (RAT fs)" by (auto simp: gs.lin_indpt_list_def) have Fij': "(RAT fs) ! i \<in> Rn" "(RAT fs) ! j \<in> Rn" using add'[unfolded set_conv_nth] i \<open>j < m\<close> len by auto have uminus': "(RAT fs) ! i - ?R c \<cdot>\<^sub>v (RAT fs) ! j = (RAT fs) ! i + - ?R c \<cdot>\<^sub>v (RAT fs) ! j" by (subst minus_add_uminus_vec[where n = n], insert Fij', auto) have span_F_F1: "gs.span (SRAT fs) = gs.span (SRAT fs')" unfolding RAT_F1 uminus' by (rule gs.add_vec_span, insert len add, auto) have **: "?RV (fs ! i) + - ?R c \<cdot>\<^sub>v (RAT fs) ! j = ?RV (fs ! i - c \<cdot>\<^sub>v fs ! j)" by (rule eq_vecI, insert Fij len i j, auto) from i j len have "j < length (RAT fs)" "i < length (RAT fs)" "i \<noteq> j" by auto from gs.lin_indpt_list_add_vec[OF this indep, of "- of_int c"] have "gs.lin_indpt_list ((RAT fs) [i := (RAT fs) ! i + - ?R c \<cdot>\<^sub>v (RAT fs) ! j])" (is "gs.lin_indpt_list ?F1") . also have "?F1 = RAT fs'" unfolding fs' using i len Fij' ** by (auto simp: map_update) finally have indep_F1: "lin_indep fs'" . 
have conn1: "set (RAT fs) \<subseteq> carrier_vec n" "length (RAT fs) = m" "distinct (RAT fs)" "gs.lin_indpt (set (RAT fs))" using inv unfolding gs.lin_indpt_list_def by auto have conn2: "set (RAT fs') \<subseteq> carrier_vec n" "length (RAT fs') = m" "distinct (RAT fs')" "gs.lin_indpt (set (RAT fs'))" using indep_F1 F1' unfolding gs.lin_indpt_list_def by auto interpret gs1: gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use LLL_invD[OF assms(1)] gs.lin_indpt_list_def in auto) interpret gs2: gram_schmidt_fs_lin_indpt n "RAT fs'" by (standard) (use indep_F1 F1' gs.lin_indpt_list_def in auto) let ?G = "map ?g [0 ..< m]" let ?G' = "map ?g' [0 ..< m]" from gs1.span_gso gs2.span_gso gs1.gso_carrier gs2.gso_carrier conn1 conn2 span_F_F1 len have span_G_G1: "gs.span (set ?G) = gs.span (set ?G')" and lenG: "length ?G = m" and Gi: "i < length ?G \<Longrightarrow> ?G ! i \<in> Rn" and G1i: "i < length ?G' \<Longrightarrow> ?G' ! i \<in> Rn" for i by auto have eq: "x \<noteq> i \<Longrightarrow> RAT fs' ! x = (RAT fs) ! x" for x unfolding RAT_F1 by auto hence eq_part: "x < i \<Longrightarrow> ?g' x = ?g x" for x by (intro gs.gso_cong, insert len, auto) have G: "i < m \<Longrightarrow> (RAT fs) ! i \<in> Rn" "i < m \<Longrightarrow> fs ! i \<in> carrier_vec n" for i by(insert add len', auto) note carr1[intro] = this[OF i] this[OF ji(2)] have "x < m \<Longrightarrow> ?g x \<in> Rn" "x < m \<Longrightarrow> ?g' x \<in> Rn" "x < m \<Longrightarrow> dim_vec (gso fs x) = n" "x < m \<Longrightarrow> dim_vec (gso fs' x) = n" for x using inv G1i by (auto simp:o_def Gi G1i) hence carr2[intro!]:"?g i \<in> Rn" "?g' i \<in> Rn" "?g ` {0..<i} \<subseteq> Rn" "?g ` {0..<Suc i} \<subseteq> Rn" using i by auto have F1_RV: "?RV (fs' ! i) = RAT fs' ! i" using i F1 by auto have F_RV: "?RV (fs ! i) = (RAT fs) ! i" using i len by auto from eq_part have span_G1_G: "gs.span (?g' ` {0..<i}) = gs.span (?g ` {0..<i})" (is "?ls = ?rs") apply(intro cong[OF refl[of "gs.span"]],rule image_cong[OF refl]) using eq by auto have "(RAT fs') ! i - ?g' i = ((RAT fs) ! i - ?g' i) - ?mui" unfolding RAT_F1_i using carr1 carr2 by (intro eq_vecI, auto) hence in1:"((RAT fs) ! i - ?g' i) - ?mui \<in> ?rs" using gs2.oc_projection_exist[of i] conn2 i unfolding span_G1_G by auto from \<open>j < i\<close> have Gj_mem: "(RAT fs) ! j \<in> (\<lambda> x. ((RAT fs) ! x)) ` {0 ..< i}" by auto have id1: "set (take i (RAT fs)) = (\<lambda>x. ?RV (fs ! x)) ` {0..<i}" using \<open>i \<le> m\<close> len by (subst nth_image[symmetric], force+) have "(RAT fs) ! j \<in> ?rs \<longleftrightarrow> (RAT fs) ! j \<in> gs.span ((\<lambda>x. ?RV (fs ! x)) ` {0..<i})" using gs1.partial_span \<open>i \<le> m\<close> id1 inv by auto also have "(\<lambda>x. ?RV (fs ! x)) ` {0..<i} = (\<lambda>x. ((RAT fs) ! x)) ` {0..<i}" using \<open>i < m\<close> len by force also have "(RAT fs) ! j \<in> gs.span \<dots>" by (rule gs.span_mem[OF _ Gj_mem], insert \<open>i < m\<close> G, auto) finally have "(RAT fs) ! j \<in> ?rs" . hence in2:"?mui \<in> ?rs" apply(intro gs.prod_in_span) by force+ have ineq:"((RAT fs) ! i - ?g' i) + ?mui - ?mui = ((RAT fs) ! i - ?g' i)" using carr1 carr2 by (intro eq_vecI, auto) have cong': "A = B \<Longrightarrow> A \<in> C \<Longrightarrow> B \<in> C" for A B :: "'a vec" and C by auto have *: "?g ` {0..<i} \<subseteq> Rn" by auto have in_span: "(RAT fs) ! 
i - ?g' i \<in> ?rs" by (rule cong'[OF eq_vecI gs.span_add1[OF * in1 in2,unfolded ineq]], insert carr1 carr2, auto) { fix x assume x:"x < i" hence "x < m" "i \<noteq> x" using i by auto from gs2.orthogonal this inv assms have "?g' i \<bullet> ?g' x = 0" by auto } hence G1_G: "?g' i = ?g i" by (intro gs1.oc_projection_unique) (use inv i eq_part in_span in auto) show eq_fs:"x < m \<Longrightarrow> ?g' x = ?g x" for x proof(induct x rule:nat_less_induct[rule_format]) case (1 x) hence ind: "m < x \<Longrightarrow> ?g' m = ?g m" for m by auto { assume "x > i" hence ?case unfolding gs2.gso.simps[of x] gs1.gso.simps[of x] unfolding gs1.\<mu>.simps gs2.\<mu>.simps using ind eq by (auto intro: cong[OF _ cong[OF refl[of "gs.sumlist"]]]) } note eq_rest = this show ?case by (rule linorder_class.linorder_cases[of x i],insert G1_G eq_part eq_rest,auto) qed hence Hs:"?G' = ?G" by (auto simp:o_def) have red: "weakly_reduced fs' i" using red using eq_fs \<open>i < m\<close> unfolding gram_schmidt_fs.weakly_reduced_def by simp let ?Mi = "M ! i ! j" have Gjn: "dim_vec (fs ! j) = n" using Fij(2) carrier_vecD by blast define E where "E = addrow_mat m (- ?R c) i j" define M' where "M' = gs1.M m" define N' where "N' = gs2.M m" have E: "E \<in> carrier_mat m m" unfolding E_def by simp have M: "M' \<in> carrier_mat m m" unfolding gs1.M_def M'_def by auto have N: "N' \<in> carrier_mat m m" unfolding gs2.M_def N'_def by auto let ?mat = "mat_of_rows n" let ?GsM = "?mat ?G" have Gs: "?GsM \<in> carrier_mat m n" by auto hence GsT: "?GsM\<^sup>T \<in> carrier_mat n m" by auto have Gnn: "?mat (RAT fs) \<in> carrier_mat m n" unfolding mat_of_rows_def using len by auto have "?mat (RAT fs') = addrow (- ?R c) i j (?mat (RAT fs))" unfolding RAT_F1 by (rule eq_matI, insert Gjn ji(2), auto simp: len mat_of_rows_def) also have "\<dots> = E * ?mat (RAT fs)" unfolding E_def by (rule addrow_mat, insert j i, auto simp: mat_of_rows_def len) finally have HEG: "?mat (RAT fs') = E * ?mat (RAT fs)" . (* lemma 16.12(i), part 1 *) have "(E * M') * ?mat ?G = E * (M' * ?mat ?G)" by (rule assoc_mult_mat[OF E M Gs]) also have "M' * ?GsM = ?mat (RAT fs)" using gs1.matrix_equality conn1 M'_def by simp also have "E * \<dots> = ?mat (RAT fs')" unfolding HEG .. also have "\<dots> = N' * ?mat ?G'" using gs2.matrix_equality conn2 unfolding N'_def by simp also have "?mat ?G' = ?GsM" unfolding Hs .. finally have "(E * M') * ?GsM = N' * ?GsM" . from arg_cong[OF this, of "\<lambda> x. 
x * ?GsM\<^sup>T"] E M N have EMN: "(E * M') * (?GsM * ?GsM\<^sup>T) = N' * (?GsM * ?GsM\<^sup>T)" by (subst (1 2) assoc_mult_mat[OF _ Gs GsT, of _ m, symmetric], auto) have "det (?GsM * ?GsM\<^sup>T) = gs.Gramian_determinant ?G m" unfolding gs.Gramian_determinant_def by (subst gs.Gramian_matrix_alt_def, auto simp: Let_def) also have "\<dots> > 0" proof - have 1: "gs.lin_indpt_list ?G" using conn1 gs1.orthogonal_gso gs1.gso_carrier by (intro gs.orthogonal_imp_lin_indpt_list) (auto) interpret G: gram_schmidt_fs_lin_indpt n ?G by (standard) (use 1 gs.lin_indpt_list_def in auto) show ?thesis by (intro G.Gramian_determinant) auto qed finally have "det (?GsM * ?GsM\<^sup>T) \<noteq> 0" by simp from vec_space.det_nonzero_congruence[OF EMN this _ _ N] Gs E M have EMN: "E * M' = N'" by auto (* lemma 16.12(i), part 2 *) from inv have sred: "reduced fs i" by auto { fix i' j' assume ij: "i' < m" "j' < m" and choice: "i' \<noteq> i \<or> j < j'" have "?mu' i' j' = N' $$ (i',j')" using ij F1 unfolding N'_def gs2.M_def by auto also have "\<dots> = addrow (- ?R c) i j M' $$ (i',j')" unfolding EMN[symmetric] E_def by (subst addrow_mat[OF M], insert ji, auto) also have "\<dots> = (if i = i' then - ?R c * M' $$ (j, j') + M' $$ (i', j') else M' $$ (i', j'))" by (rule index_mat_addrow, insert ij M, auto) also have "\<dots> = M' $$ (i', j')" proof (cases "i = i'") case True with choice have jj: "j < j'" by auto have "M' $$ (j, j') = ?mu j j'" using ij ji len unfolding M'_def gs1.M_def by auto also have "\<dots> = 0" unfolding gs1.\<mu>.simps using jj by auto finally show ?thesis using True by auto qed auto also have "\<dots> = ?mu i' j'" using ij len unfolding M'_def gs1.M_def by auto also note calculation } note mu_no_change = this { fix j' assume jj': "j' \<le> j" with j i have j': "j' < m" by auto have "?mu' i j' = N' $$ (i,j')" using jj' j i F1 unfolding N'_def gs2.M_def by auto also have "\<dots> = addrow (- ?R c) i j M' $$ (i,j')" unfolding EMN[symmetric] E_def by (subst addrow_mat[OF M], insert ji, auto) also have "\<dots> = - ?R c * M' $$ (j, j') + M' $$ (i, j')" by (rule index_mat_addrow, insert j' i M, auto) also have "\<dots> = M' $$ (i, j') - ?R c * M' $$ (j, j')" by simp also have "M' $$ (i, j') = ?mu i j'" using i j' len unfolding M'_def gs1.M_def by auto also have "M' $$ (j, j') = ?mu j j'" using i j j' len unfolding M'_def gs1.M_def by auto finally have "?mu' i j' = ?mu i j' - ?R c * ?mu j j'" by auto } note mu_change = this show mu_update: "i' < m \<Longrightarrow> j' < m \<Longrightarrow> ?mu' i' j' = (if i' = i \<and> j' \<le> j then ?mu i j' - ?R c * ?mu j j' else ?mu i' j')" for i' j' using mu_change[of j'] mu_no_change[of i' j'] by auto have sred: "reduced fs' i" unfolding gram_schmidt_fs.reduced_def proof (intro conjI[OF red] impI allI, goal_cases) case (1 i' j) with mu_no_change[of i' j] sred[unfolded gram_schmidt_fs.reduced_def, THEN conjunct2, rule_format, of i' j] i show ?case by auto qed have mudiff:"?mu i j - of_int c = ?mu' i j" by (subst mu_change, auto simp: gs1.\<mu>.simps) have lin_indpt_list_fs: "gs.lin_indpt_list (RAT fs')" unfolding gs.lin_indpt_list_def using conn2 by auto { assume c: "c = round (\<mu> fs i j)" assume mu_small: "\<mu>_small_row i fs (Suc j)" have small: "abs (?mu i j - of_int c) \<le> inverse 2" unfolding j c using of_int_round_abs_le by (auto simp add: abs_minus_commute) from this[unfolded mudiff] have mu'_2: "abs (?mu' i j) \<le> inverse 2" . 
show "\<mu>_small_row i fs' j" unfolding \<mu>_small_row_def proof (intro allI, goal_cases) case (1 j') show ?case using mu'_2 mu_small[unfolded \<mu>_small_row_def, rule_format, of j'] by (cases "j' > j", insert mu_update[of i j'] i, auto) qed } show Linv': "LLL_invariant True i fs'" by (intro LLL_invI[OF F1 lattice \<open>i \<le> m\<close> lin_indpt_list_fs sred], auto) { fix i assume i: "i \<le> m" have "rat_of_int (d fs' i) = of_int (d fs i)" unfolding d_def Gramian_determinant(1)[OF Linv i] Gramian_determinant(1)[OF Linv' i] by (rule prod.cong[OF refl], subst eq_fs, insert i, auto) thus "d fs' i = d fs i" by simp } note d = this have D: "D fs' = D fs" unfolding D_def by (rule arg_cong[of _ _ nat], rule prod.cong[OF refl], auto simp: d) show "LLL_measure i fs' = LLL_measure i fs" unfolding LLL_measure_def logD_def D .. qed text \<open>Addition step which can be skipped since $\mu$-value is already small\<close> lemma basis_reduction_add_row_main_0: assumes Linv: "LLL_invariant True i fs" and i: "i < m" and j: "j < i" and 0: "round (\<mu> fs i j) = 0" and mu_small: "\<mu>_small_row i fs (Suc j)" shows "\<mu>_small_row i fs j" (is ?g1) proof - note inv = LLL_invD[OF Linv] from inv(5)[OF i] inv(5)[of j] i j have id: "fs[i := fs ! i - 0 \<cdot>\<^sub>v fs ! j] = fs" by (intro nth_equalityI, insert inv i, auto) show ?g1 using basis_reduction_add_row_main[OF Linv i j _, of fs] 0 id mu_small by auto qed lemma \<mu>_small_row_refl: "\<mu>_small_row i fs i" unfolding \<mu>_small_row_def by auto lemma basis_reduction_add_row_done: assumes Linv: "LLL_invariant True i fs" and i: "i < m" and mu_small: "\<mu>_small_row i fs 0" shows "LLL_invariant False i fs" proof - note inv = LLL_invD[OF Linv] from mu_small have mu_small: "\<mu>_small fs i" unfolding \<mu>_small_row_def \<mu>_small_def by auto show ?thesis using i mu_small by (intro LLL_invI[OF inv(3,6,7,9,1,10)], auto) qed (* lemma 16.16 (ii), one case *) lemma d_swap_unchanged: assumes len: "length F1 = m" and i0: "i \<noteq> 0" and i: "i < m" and ki: "k \<noteq> i" and km: "k \<le> m" and swap: "F2 = F1[i := F1 ! (i - 1), i - 1 := F1 ! i]" shows "d F1 k = d F2 k" proof - let ?F1_M = "mat k n (\<lambda>(i, y). F1 ! i $ y)" let ?F2_M = "mat k n (\<lambda>(i, y). F2 ! i $ y)" have "\<exists> P. 
P \<in> carrier_mat k k \<and> det P \<in> {-1, 1} \<and> ?F2_M = P * ?F1_M" proof cases assume ki: "k < i" hence H: "?F2_M = ?F1_M" unfolding swap by (intro eq_matI, auto) let ?P = "1\<^sub>m k" have "?P \<in> carrier_mat k k" "det ?P \<in> {-1, 1}" "?F2_M = ?P * ?F1_M" unfolding H by auto thus ?thesis by blast next assume "\<not> k < i" with ki have ki: "k > i" by auto let ?P = "swaprows_mat k i (i - 1)" from i0 ki have neq: "i \<noteq> i - 1" and kmi: "i - 1 < k" by auto have *: "?P \<in> carrier_mat k k" "det ?P \<in> {-1, 1}" using det_swaprows_mat[OF ki kmi neq] ki by auto from i len have iH: "i < length F1" "i - 1 < length F1" by auto have "?P * ?F1_M = swaprows i (i - 1) ?F1_M" by (subst swaprows_mat[OF _ ki kmi], auto) also have "\<dots> = ?F2_M" unfolding swap by (intro eq_matI, rename_tac ii jj, case_tac "ii = i", (insert iH, simp add: nth_list_update)[1], case_tac "ii = i - 1", insert iH neq ki, auto simp: nth_list_update) finally show ?thesis using * by metis qed then obtain P where P: "P \<in> carrier_mat k k" and detP: "det P \<in> {-1, 1}" and H': "?F2_M = P * ?F1_M" by auto have "d F2 k = det (gs.Gramian_matrix F2 k)" unfolding d_def gs.Gramian_determinant_def by simp also have "\<dots> = det (?F2_M * ?F2_M\<^sup>T)" unfolding gs.Gramian_matrix_def Let_def by simp also have "?F2_M * ?F2_M\<^sup>T = ?F2_M * (?F1_M\<^sup>T * P\<^sup>T)" unfolding H' by (subst transpose_mult[OF P], auto) also have "\<dots> = P * (?F1_M * (?F1_M\<^sup>T * P\<^sup>T))" unfolding H' by (subst assoc_mult_mat[OF P], auto) also have "det \<dots> = det P * det (?F1_M * (?F1_M\<^sup>T * P\<^sup>T))" by (rule det_mult[OF P], insert P, auto) also have "?F1_M * (?F1_M\<^sup>T * P\<^sup>T) = (?F1_M * ?F1_M\<^sup>T) * P\<^sup>T" by (subst assoc_mult_mat, insert P, auto) also have "det \<dots> = det (?F1_M * ?F1_M\<^sup>T) * det P" by (subst det_mult, insert P, auto simp: det_transpose) also have "det (?F1_M * ?F1_M\<^sup>T) = det (gs.Gramian_matrix F1 k)" unfolding gs.Gramian_matrix_def Let_def by simp also have "\<dots> = d F1 k" unfolding d_def gs.Gramian_determinant_def by simp finally have "d F2 k = (det P * det P) * d F1 k" by simp also have "det P * det P = 1" using detP by auto finally show "d F1 k = d F2 k" by simp qed definition base where "base = real_of_rat ((4 * \<alpha>) / (4 + \<alpha>))" definition g_bound :: "int vec list \<Rightarrow> bool" where "g_bound fs = (\<forall> i < m. sq_norm (gso fs i) \<le> of_nat N)" end locale LLL_with_assms = LLL + assumes \<alpha>: "\<alpha> \<ge> 4/3" and lin_dep: "lin_indep fs_init" and len: "length fs_init = m" begin lemma \<alpha>0: "\<alpha> > 0" "\<alpha> \<noteq> 0" using \<alpha> by auto lemma fs_init: "set fs_init \<subseteq> carrier_vec n" using lin_dep[unfolded gs.lin_indpt_list_def] by auto lemma reduction: "0 < reduction" "reduction \<le> 1" "\<alpha> > 4/3 \<Longrightarrow> reduction < 1" "\<alpha> = 4/3 \<Longrightarrow> reduction = 1" using \<alpha> unfolding reduction_def by auto lemma base: "\<alpha> > 4/3 \<Longrightarrow> base > 1" using reduction(1,3) unfolding reduction_def base_def by auto lemma basis_reduction_swap_main: assumes Linv: "LLL_invariant False i fs" and i: "i < m" and i0: "i \<noteq> 0" and norm_ineq: "sq_norm (gso fs (i - 1)) > \<alpha> * sq_norm (gso fs i)" and fs'_def: "fs' = fs[i := fs ! (i - 1), i - 1 := fs ! i]" shows "LLL_invariant False (i - 1) fs'" and "LLL_measure i fs > LLL_measure (i - 1) fs'" (* new values of gso *) and "\<And> k. 
k < m \<Longrightarrow> gso fs' k = (if k = i - 1 then gso fs i + \<mu> fs i (i - 1) \<cdot>\<^sub>v gso fs (i - 1) else if k = i then gso fs (i - 1) - (RAT fs ! (i - 1) \<bullet> gso fs' (i - 1) / sq_norm (gso fs' (i - 1))) \<cdot>\<^sub>v gso fs' (i - 1) else gso fs k)" (is "\<And> k. _ \<Longrightarrow> _ = ?newg k") (* new values of norms of gso *) and "\<And> k. k < m \<Longrightarrow> sq_norm (gso fs' k) = (if k = i - 1 then sq_norm (gso fs i) + (\<mu> fs i (i - 1) * \<mu> fs i (i - 1)) * sq_norm (gso fs (i - 1)) else if k = i then sq_norm (gso fs i) * sq_norm (gso fs (i - 1)) / sq_norm (gso fs' (i - 1)) else sq_norm (gso fs k))" (is "\<And> k. _ \<Longrightarrow> _ = ?new_norm k") (* new values of \<mu>-values *) and "\<And> ii j. ii < m \<Longrightarrow> j < ii \<Longrightarrow> \<mu> fs' ii j = ( if ii = i - 1 then \<mu> fs i j else if ii = i then if j = i - 1 then \<mu> fs i (i - 1) * sq_norm (gso fs (i - 1)) / sq_norm (gso fs' (i - 1)) else \<mu> fs (i - 1) j else if ii > i \<and> j = i then \<mu> fs ii (i - 1) - \<mu> fs i (i - 1) * \<mu> fs ii i else if ii > i \<and> j = i - 1 then \<mu> fs ii (i - 1) * \<mu> fs' i (i - 1) + \<mu> fs ii i * sq_norm (gso fs i) / sq_norm (gso fs' (i - 1)) else \<mu> fs ii j)" (is "\<And> ii j. _ \<Longrightarrow> _ \<Longrightarrow> _ = ?new_mu ii j") (* new d-values *) and "\<And> ii. ii \<le> m \<Longrightarrow> of_int (d fs' ii) = (if ii = i then sq_norm (gso fs' (i - 1)) / sq_norm (gso fs (i - 1)) * of_int (d fs i) else of_int (d fs ii))" proof - note inv = LLL_invD[OF Linv] interpret fs: fs_int' n m fs_init \<alpha> False i fs by (standard) (use Linv in auto) let ?mu1 = "\<mu> fs" let ?mu2 = "\<mu> fs'" let ?g1 = "gso fs" let ?g2 = "gso fs'" from inv(11)[unfolded \<mu>_small_def] have mu_F1_i: "\<And> j. j<i \<Longrightarrow> \<bar>?mu1 i j\<bar> \<le> 1 / 2" by auto from mu_F1_i[of "i-1"] have m12: "\<bar>?mu1 i (i - 1)\<bar> \<le> inverse 2" using i0 by auto note d = d_def note Gd = Gramian_determinant(1) note Gd12 = Gd[OF Linv] let ?x = "?g1 (i - 1)" let ?y = "?g1 i" let ?cond = "\<alpha> * sq_norm ?y < sq_norm ?x" from inv have red: "weakly_reduced fs i" and len: "length fs = m" and HC: "set fs \<subseteq> carrier_vec n" and L: "lattice_of fs = L" using i by auto from i0 inv i have swap: "set fs \<subseteq> carrier_vec n" "i < length fs" "i - 1 < length fs" "i \<noteq> i - 1" unfolding Let_def by auto have RAT_fs': "RAT fs' = (RAT fs)[i := (RAT fs) ! (i - 1), i - 1 := (RAT fs) ! i]" unfolding fs'_def using swap by (intro nth_equalityI, auto simp: nth_list_update) have span': "gs.span (SRAT fs) = gs.span (SRAT fs')" unfolding fs'_def by (rule arg_cong[of _ _ gs.span], insert swap, auto) have lfs': "lattice_of fs' = lattice_of fs" unfolding fs'_def by (rule lattice_of_swap[OF swap refl]) with inv have lattice: "lattice_of fs' = L" by auto have len': "length fs' = m" using inv unfolding fs'_def by auto have fs': "set fs' \<subseteq> carrier_vec n" using swap unfolding fs'_def set_conv_nth by (auto, rename_tac k, case_tac "k = i", force, case_tac "k = i - 1", auto) let ?rv = "map_vec rat_of_int" from inv(1) have indepH: "lin_indep fs" . 
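(* The swap fs' = fs[i := fs ! (i - 1), i - 1 := fs ! i] merely permutes two basis vectors, so the set of basis vectors is unchanged; distinctness and linear independence of RAT fs' are derived from this next, completing the structural part of the invariant for fs'. *)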
from i i0 len have "i < length (RAT fs)" "i - 1 < length (RAT fs)" by auto with distinct_swap[OF this] len have "distinct (RAT fs') = distinct (RAT fs)" unfolding RAT_fs' by (auto simp: map_update) with len' fs' span' indepH have indepH': "lin_indep fs'" unfolding fs'_def using i i0 by (auto simp: gs.lin_indpt_list_def) have lenR': "length (RAT fs') = m" using len' by auto have conn1: "set (RAT fs) \<subseteq> carrier_vec n" "length (RAT fs) = m" "distinct (RAT fs)" "gs.lin_indpt (set (RAT fs))" using inv unfolding gs.lin_indpt_list_def by auto have conn2: "set (RAT fs') \<subseteq> carrier_vec n" "length (RAT fs') = m" "distinct (RAT fs')" "gs.lin_indpt (set (RAT fs'))" using indepH' lenR' unfolding gs.lin_indpt_list_def by auto interpret gs2: gram_schmidt_fs_lin_indpt n "RAT fs'" by (standard) (use indepH' lenR' gs.lin_indpt_list_def in auto) have fs'_fs: "k < i - 1 \<Longrightarrow> fs' ! k = fs ! k" for k unfolding fs'_def by auto { fix k assume ki: "k < i - 1" with i have kn: "k < m" by simp have "?g2 k = ?g1 k" by (rule gs.gso_cong, insert ki kn len, auto simp: fs'_def) } note G2_G = this have take_eq: "take (Suc i - 1 - 1) fs' = take (Suc i - 1 - 1) fs" by (intro nth_equalityI, insert len len' i swap(2-), auto intro!: fs'_fs) from inv have "weakly_reduced fs i" by auto hence "weakly_reduced fs (i - 1)" unfolding gram_schmidt_fs.weakly_reduced_def by auto hence red: "weakly_reduced fs' (i - 1)" unfolding gram_schmidt_fs.weakly_reduced_def using i G2_G by simp have i1n: "i - 1 < m" using i by auto let ?R = rat_of_int let ?RV = "map_vec ?R" let ?f1 = "\<lambda> i. RAT fs ! i" let ?f2 = "\<lambda> i. RAT fs' ! i" let ?n1 = "\<lambda> i. sq_norm (?g1 i)" let ?n2 = "\<lambda> i. sq_norm (?g2 i)" have heq:"fs ! (i - 1) = fs' ! i" "take (i-1) fs = take (i-1) fs'" "?f2 (i - 1) = ?f1 i" "?f2 i = ?f1 (i - 1)" unfolding fs'_def using i len i0 by auto have norm_pos2: "j < m \<Longrightarrow> ?n2 j > 0" for j using gs2.sq_norm_pos len' by simp have norm_pos1: "j < m \<Longrightarrow> ?n1 j > 0" for j using fs.gs.sq_norm_pos inv by simp have norm_zero2: "j < m \<Longrightarrow> ?n2 j \<noteq> 0" for j using norm_pos2[of j] by linarith have norm_zero1: "j < m \<Longrightarrow> ?n1 j \<noteq> 0" for j using norm_pos1[of j] by linarith have gs: "\<And> j. j < m \<Longrightarrow> ?g1 j \<in> Rn" using inv by blast have gs2: "\<And> j. j < m \<Longrightarrow> ?g2 j \<in> Rn" using fs.gs.gso_carrier conn2 by auto have g: "\<And> j. j < m \<Longrightarrow> ?f1 j \<in> Rn" using inv by auto have g2: "\<And> j. j < m \<Longrightarrow> ?f2 j \<in> Rn" using gs2.f_carrier conn2 by blast let ?fs1 = "?f1 ` {0..< (i - 1)}" have G: "?fs1 \<subseteq> Rn" using g i by auto let ?gs1 = "?g1 ` {0..< (i - 1)}" have G': "?gs1 \<subseteq> Rn" using gs i by auto let ?S = "gs.span ?fs1" let ?S' = "gs.span ?gs1" have S'S: "?S' = ?S" by (rule fs.gs.partial_span', insert conn1 i, auto) have "gs.is_oc_projection (?g2 (i - 1)) (gs.span (?g2 ` {0..< (i - 1)})) (?f2 (i - 1))" using i len' by (intro gs2.gso_oc_projection_span(2)) auto also have "?f2 (i - 1) = ?f1 i" unfolding fs'_def using len i by auto also have "gs.span (?g2 ` {0 ..< (i - 1)}) = gs.span (?f2 ` {0 ..< (i - 1)})" using i len' by (intro gs2.partial_span') auto also have "?f2 ` {0 ..< (i - 1)} = ?fs1" by (rule image_cong[OF refl], insert len i, auto simp: fs'_def) finally have claim1: "gs.is_oc_projection (?g2 (i - 1)) ?S (?f1 i)" . 
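(* claim1 states that after the swap the new GSO vector at position i - 1 is the orthogonal-complement projection of the old basis vector f1 i onto the span of the first i - 1 basis vectors, which the swap leaves untouched; the explicit formula g2 (i - 1) = g1 i + \<mu>1 i (i - 1) \<cdot>\<^sub>v g1 (i - 1) is derived from it below as fact g2_im1. *)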
have list_id: "[0..<Suc (i - 1)] = [0..< i - 1] @ [i - 1]" "[0..< Suc i] = [0..< i] @ [i]" "map f [x] = [f x]" for f x using i by auto (* f1i_sum is claim 2 *) have f1i_sum: "?f1 i = gs.sumlist (map (\<lambda>j. ?mu1 i j \<cdot>\<^sub>v ?g1 j) [0 ..< i]) + ?g1 i" (is "_ = ?sum + _") apply(subst fs.gs.fi_is_sum_of_mu_gso, insert len i, force) unfolding map_append list_id by (subst gs.M.sumlist_snoc, insert i gs conn1, auto simp: fs.gs.\<mu>.simps) have f1im1_sum: "?f1 (i - 1) = gs.sumlist (map (\<lambda>j. ?mu1 (i - 1) j \<cdot>\<^sub>v ?g1 j) [0..<i - 1]) + ?g1 (i - 1)" (is "_ = ?sum1 + _") apply(subst fs.gs.fi_is_sum_of_mu_gso, insert len i, force) unfolding map_append list_id by (subst gs.M.sumlist_snoc, insert i gs, auto simp: fs.gs.\<mu>.simps) have sum: "?sum \<in> Rn" by (rule gs.sumlist_carrier, insert gs i, auto) have sum1: "?sum1 \<in> Rn" by (rule gs.sumlist_carrier, insert gs i, auto) from gs.span_closed[OF G] have S: "?S \<subseteq> Rn" by auto from gs i have gs': "\<And> j. j < i - 1 \<Longrightarrow> ?g1 j \<in> Rn" and gsi: "?g1 (i - 1) \<in> Rn" by auto have "[0 ..< i] = [0 ..< Suc (i - 1)]" using i0 by simp also have "\<dots> = [0 ..< i - 1] @ [i - 1]" by simp finally have list: "[0 ..< i] = [0 ..< i - 1] @ [i - 1]" . { (* d does not change for k \<noteq> i *) fix k assume kn: "k \<le> m" and ki: "k \<noteq> i" from d_swap_unchanged[OF len i0 i ki kn fs'_def] have "d fs k = d fs' k" by simp } note d = this (* new value of g (i-1) *) have g2_im1: "?g2 (i - 1) = ?g1 i + ?mu1 i (i - 1) \<cdot>\<^sub>v ?g1 (i - 1)" (is "_ = _ + ?mu_f1") proof (rule gs.is_oc_projection_eq[OF claim1 _ S g[OF i]]) show "gs.is_oc_projection (?g1 i + ?mu_f1) ?S (?f1 i)" unfolding gs.is_oc_projection_def proof (intro conjI allI impI) let ?sum' = "gs.sumlist (map (\<lambda>j. ?mu1 i j \<cdot>\<^sub>v ?g1 j) [0 ..< i - 1])" have sum': "?sum' \<in> Rn" by (rule gs.sumlist_carrier, insert gs i, auto) show inRn: "(?g1 i + ?mu_f1) \<in> Rn" using gs[OF i] gsi i by auto have carr: "?sum \<in> Rn" "?g1 i \<in> Rn" "?mu_f1 \<in> Rn" "?sum' \<in> Rn" using sum' sum gs[OF i] gsi i by auto have "?f1 i - (?g1 i + ?mu_f1) = (?sum + ?g1 i) - (?g1 i + ?mu_f1)" unfolding f1i_sum by simp also have "\<dots> = ?sum - ?mu_f1" using carr by auto also have "?sum = gs.sumlist (map (\<lambda>j. ?mu1 i j \<cdot>\<^sub>v ?g1 j) [0 ..< i - 1] @ [?mu_f1])" unfolding list by simp also have "\<dots> = ?sum' + ?mu_f1" by (subst gs.sumlist_append, insert gs' gsi, auto) also have "\<dots> - ?mu_f1 = ?sum'" using sum' gsi by auto finally have id: "?f1 i - (?g1 i + ?mu_f1) = ?sum'" . show "?f1 i - (?g1 i + ?mu_f1) \<in> gs.span ?S" unfolding id gs.span_span[OF G] proof (rule gs.sumlist_in_span[OF G]) fix v assume "v \<in> set (map (\<lambda>j. 
?mu1 i j \<cdot>\<^sub>v ?g1 j) [0 ..< i - 1])" then obtain j where j: "j < i - 1" and v: "v = ?mu1 i j \<cdot>\<^sub>v ?g1 j" by auto show "v \<in> ?S" unfolding v by (rule gs.smult_in_span[OF G], unfold S'S[symmetric], rule gs.span_mem, insert gs i j, auto) qed fix x assume "x \<in> ?S" hence x: "x \<in> ?S'" using S'S by simp show "(?g1 i + ?mu_f1) \<bullet> x = 0" proof (rule gs.orthocompl_span[OF _ G' inRn x]) fix x assume "x \<in> ?gs1" then obtain j where j: "j < i - 1" and x_id: "x = ?g1 j" by auto from j i x_id gs[of j] have x: "x \<in> Rn" by auto { fix k assume k: "k > j" "k < m" have "?g1 k \<bullet> x = 0" unfolding x_id by (rule fs.gs.orthogonal, insert conn1 k, auto) } from this[of i] this[of "i - 1"] j i have main: "?g1 i \<bullet> x = 0" "?g1 (i - 1) \<bullet> x = 0" by auto have "(?g1 i + ?mu_f1) \<bullet> x = ?g1 i \<bullet> x + ?mu_f1 \<bullet> x" by (rule add_scalar_prod_distrib[OF gs[OF i] _ x], insert gsi, auto) also have "\<dots> = 0" using main by (subst smult_scalar_prod_distrib[OF gsi x], auto) finally show "(?g1 i + ?mu_f1) \<bullet> x = 0" . qed qed qed { (* 16.13 (i): for g, only g_i and g_{i-1} can change *) fix k assume kn: "k < m" and ki: "k \<noteq> i" "k \<noteq> i - 1" have "?g2 k = gs.oc_projection (gs.span (?g2 ` {0..<k})) (?f2 k)" by (rule gs2.gso_oc_projection_span, insert kn conn2, auto) also have "gs.span (?g2 ` {0..<k}) = gs.span (?f2 ` {0..<k})" by (rule gs2.partial_span', insert conn2 kn, auto) also have "?f2 ` {0..<k} = ?f1 ` {0..<k}" proof(cases "k\<le>i") case True hence "k < i - 1" using ki by auto then show ?thesis apply(intro image_cong) unfolding fs'_def using len i by auto next case False have "?f2 ` {0..<k} = Fun.swap i (i - 1) ?f1 ` {0..<k}" unfolding Fun.swap_def fs'_def o_def using len i by (intro image_cong, insert len kn, force+) also have "\<dots> = ?f1 ` {0..<k}" apply(rule swap_image_eq) using False by auto finally show ?thesis. qed also have "gs.span \<dots> = gs.span (?g1 ` {0..<k})" by (rule sym, rule fs.gs.partial_span', insert conn1 kn, auto) also have "?f2 k = ?f1 k" using ki kn len unfolding fs'_def by auto also have "gs.oc_projection (gs.span (?g1 ` {0..<k})) \<dots> = ?g1 k" by (subst fs.gs.gso_oc_projection_span, insert kn conn1, auto) finally have "?g2 k = ?g1 k" . } note g2_g1_identical = this (* calculation of new mu-values *) { (* no change of mu for lines before line i - 1 *) fix jj ii assume ii: "ii < i - 1" have "?mu2 ii jj = ?mu1 ii jj" using ii i len by (subst gs.\<mu>_cong[of _ _ "RAT fs" "RAT fs'"], auto simp: fs'_def) } note mu'_mu_small_i = this { (* swap of mu-values in lines i - 1 and i for j < i - 1 *) fix jj assume jj: "jj < i - 1" hence id1: "jj < i - 1 \<longleftrightarrow> True" "jj < i \<longleftrightarrow> True" by auto have id2: "?g2 jj = ?g1 jj" by (subst g2_g1_identical, insert jj i, auto) have "?mu2 i jj = ?mu1 (i - 1) jj" "?mu2 (i - 1) jj = ?mu1 i jj" unfolding gs2.\<mu>.simps fs.gs.\<mu>.simps id1 id2 if_True using len i i0 by (auto simp: fs'_def) } note mu'_mu_i_im1_j = this have im1: "i - 1 < m" using i by auto (* calculation of new value of g_i *) let ?g2_im1 = "?g2 (i - 1)" have g2_im1_Rn: "?g2_im1 \<in> Rn" using i conn2 by (auto intro!: fs.gs.gso_carrier) { let ?mu2_f2 = "\<lambda> j. - ?mu2 i j \<cdot>\<^sub>v ?g2 j" let ?sum = "gs.sumlist (map (\<lambda>j. 
- ?mu1 (i - 1) j \<cdot>\<^sub>v ?g1 j) [0 ..< i - 1])" have mhs: "?mu2_f2 (i - 1) \<in> Rn" using i conn2 by (auto intro!: fs.gs.gso_carrier) have sum': "?sum \<in> Rn" by (rule gs.sumlist_carrier, insert gs i, auto) have gim1: "?f1 (i - 1) \<in> Rn" using g i by auto have "?g2 i = ?f2 i + gs.sumlist (map ?mu2_f2 [0 ..< i-1] @ [?mu2_f2 (i-1)])" unfolding gs2.gso.simps[of i] list by simp also have "?f2 i = ?f1 (i - 1)" unfolding fs'_def using len i i0 by auto also have "map ?mu2_f2 [0 ..< i-1] = map (\<lambda>j. - ?mu1 (i - 1) j \<cdot>\<^sub>v ?g1 j) [0 ..< i - 1]" by (rule map_cong[OF refl], subst g2_g1_identical, insert i, auto simp: mu'_mu_i_im1_j) also have "gs.sumlist (\<dots> @ [?mu2_f2 (i - 1)]) = ?sum + ?mu2_f2 (i - 1)" by (subst gs.sumlist_append, insert gs i mhs, auto) also have "?f1 (i - 1) + \<dots> = (?f1 (i - 1) + ?sum) + ?mu2_f2 (i - 1)" using gim1 sum' mhs by auto also have "?f1 (i - 1) + ?sum = ?g1 (i - 1)" unfolding fs.gs.gso.simps[of "i - 1"] by simp also have "?mu2_f2 (i - 1) = - (?f2 i \<bullet> ?g2_im1 / sq_norm ?g2_im1) \<cdot>\<^sub>v ?g2_im1" unfolding gs2.\<mu>.simps using i0 by simp also have "\<dots> = - ((?f2 i \<bullet> ?g2_im1 / sq_norm ?g2_im1) \<cdot>\<^sub>v ?g2_im1)" by auto also have "?g1 (i - 1) + \<dots> = ?g1 (i - 1) - ((?f2 i \<bullet> ?g2_im1 / sq_norm ?g2_im1) \<cdot>\<^sub>v ?g2_im1)" by (rule sym, rule minus_add_uminus_vec[of _ n], insert gsi g2_im1_Rn, auto) also have "?f2 i = ?f1 (i - 1)" by fact finally have "?g2 i = ?g1 (i - 1) - (?f1 (i - 1) \<bullet> ?g2 (i - 1) / sq_norm (?g2 (i - 1))) \<cdot>\<^sub>v ?g2 (i - 1)" . } note g2_i = this let ?n1 = "\<lambda> i. sq_norm (?g1 i)" let ?n2 = "\<lambda> i. sq_norm (?g2 i)" (* calculation of new norms *) { (* norm of g (i - 1) *) have "?n2 (i - 1) = sq_norm (?g1 i + ?mu_f1)" unfolding g2_im1 by simp also have "\<dots> = (?g1 i + ?mu_f1) \<bullet> (?g1 i + ?mu_f1)" by (simp add: sq_norm_vec_as_cscalar_prod) also have "\<dots> = (?g1 i + ?mu_f1) \<bullet> ?g1 i + (?g1 i + ?mu_f1) \<bullet> ?mu_f1" by (rule scalar_prod_add_distrib, insert gs i, auto) also have "(?g1 i + ?mu_f1) \<bullet> ?g1 i = ?g1 i \<bullet> ?g1 i + ?mu_f1 \<bullet> ?g1 i" by (rule add_scalar_prod_distrib, insert gs i, auto) also have "(?g1 i + ?mu_f1) \<bullet> ?mu_f1 = ?g1 i \<bullet> ?mu_f1 + ?mu_f1 \<bullet> ?mu_f1" by (rule add_scalar_prod_distrib, insert gs i, auto) also have "?mu_f1 \<bullet> ?g1 i = ?g1 i \<bullet> ?mu_f1" by (rule comm_scalar_prod, insert gs i, auto) also have "?g1 i \<bullet> ?g1 i = sq_norm (?g1 i)" by (simp add: sq_norm_vec_as_cscalar_prod) also have "?g1 i \<bullet> ?mu_f1 = ?mu1 i (i - 1) * (?g1 i \<bullet> ?g1 (i - 1))" by (rule scalar_prod_smult_right, insert gs[OF i] gs[OF \<open>i - 1 < m\<close>], auto) also have "?g1 i \<bullet> ?g1 (i - 1) = 0" using orthogonalD[OF fs.gs.orthogonal_gso, of i "i - 1"] i len i0 by (auto simp: o_def) also have "?mu_f1 \<bullet> ?mu_f1 = ?mu1 i (i - 1) * (?mu_f1 \<bullet> ?g1 (i - 1))" by (rule scalar_prod_smult_right, insert gs[OF i] gs[OF \<open>i - 1 < m\<close>], auto) also have "?mu_f1 \<bullet> ?g1 (i - 1) = ?mu1 i (i - 1) * (?g1 (i - 1) \<bullet> ?g1 (i - 1))" by (rule scalar_prod_smult_left, insert gs[OF i] gs[OF \<open>i - 1 < m\<close>], auto) also have "?g1 (i - 1) \<bullet> ?g1 (i - 1) = sq_norm (?g1 (i - 1))" by (simp add: sq_norm_vec_as_cscalar_prod) finally have "?n2 (i - 1) = ?n1 i + (?mu1 i (i - 1) * ?mu1 i (i - 1)) * ?n1 (i - 1)" by (simp add: ac_simps o_def) } note sq_norm_g2_im1 = this from norm_pos1[OF i] norm_pos1[OF im1] norm_pos2[OF i] 
norm_pos2[OF im1] have norm0: "?n1 i \<noteq> 0" "?n1 (i - 1) \<noteq> 0" "?n2 i \<noteq> 0" "?n2 (i - 1) \<noteq> 0" by auto hence norm0': "?n2 (i - 1) \<noteq> 0" using i by auto { (* new norm of g i *) have si: "Suc i \<le> m" and im1: "i - 1 \<le> m" using i by auto have det1: "gs.Gramian_determinant (RAT fs) (Suc i) = (\<Prod>j<Suc i. \<parallel>fs.gs.gso j\<parallel>\<^sup>2)" using fs.gs.Gramian_determinant si len by auto have det2: "gs.Gramian_determinant (RAT fs') (Suc i) = (\<Prod>j<Suc i. \<parallel>gs2.gso j\<parallel>\<^sup>2)" using gs2.Gramian_determinant si len' by auto from norm_zero1[OF less_le_trans[OF _ im1]] have 0: "(\<Prod>j < i-1. ?n1 j) \<noteq> 0" by (subst prod_zero_iff, auto) have "rat_of_int (d fs' (Suc i)) = rat_of_int (d fs (Suc i))" using d_swap_unchanged[OF len i0 i _ si fs'_def] by auto also have "rat_of_int (d fs' (Suc i)) = gs.Gramian_determinant (RAT fs') (Suc i)" unfolding d_def by (subst fs.of_int_Gramian_determinant[symmetric], insert conn2 i g fs', auto simp: set_conv_nth) also have "\<dots> = (\<Prod>j<Suc i. ?n2 j)" unfolding det2 by (rule prod.cong, insert i, auto) also have "rat_of_int (d fs (Suc i)) = gs.Gramian_determinant (RAT fs) (Suc i)" unfolding d_def by (subst fs.of_int_Gramian_determinant[symmetric], insert conn1 i g, auto) also have "\<dots> = (\<Prod>j<Suc i. ?n1 j)" unfolding det1 by (rule prod.cong, insert i, auto) also have "{..<Suc i} = insert i (insert (i-1) {..<i-1})" (is "_ = ?set") by auto also have "(\<Prod>j\<in> ?set. ?n2 j) = ?n2 i * ?n2 (i - 1) * (\<Prod>j < i-1. ?n2 j)" using i0 by (subst prod.insert; (subst prod.insert)?; auto) also have "(\<Prod>j\<in> ?set. ?n1 j) = ?n1 i * ?n1 (i - 1) * (\<Prod>j < i-1. ?n1 j)" using i0 by (subst prod.insert; (subst prod.insert)?; auto) also have "(\<Prod>j < i-1. ?n2 j) = (\<Prod>j < i-1. 
?n1 j)" by (rule prod.cong, insert G2_G, auto) finally have "?n2 i = ?n1 i * ?n1 (i - 1) / ?n2 (i - 1)" using 0 norm0' by (auto simp: field_simps) } note sq_norm_g2_i = this (* mu values in rows > i do not change with j \<notin> {i, i - 1} *) { fix ii j assume ii: "ii > i" "ii < m" and ji: "j \<noteq> i" "j \<noteq> i - 1" { assume j: "j < ii" have "?mu2 ii j = (?f2 ii \<bullet> ?g2 j) / sq_norm (?g2 j)" unfolding gs2.\<mu>.simps using j by auto also have "?f2 ii = ?f1 ii" using ii len unfolding fs'_def by auto also have "?g2 j = ?g1 j" using g2_g1_identical[of j] j ii ji by auto finally have "?mu2 ii j = ?mu1 ii j" unfolding fs.gs.\<mu>.simps using j by auto } hence "?mu2 ii j = ?mu1 ii j" by (cases "j < ii", auto simp: gs2.\<mu>.simps fs.gs.\<mu>.simps) } note mu_no_change_large_row = this { (* the new value of mu i (i - 1) *) have "?mu2 i (i - 1) = (?f2 i \<bullet> ?g2 (i - 1)) / ?n2 (i - 1)" unfolding gs2.\<mu>.simps using i0 by auto also have "?f2 i \<bullet> ?g2 (i - 1) = ?f1 (i - 1) \<bullet> ?g2 (i - 1)" using len i i0 unfolding fs'_def by auto also have "\<dots> = ?f1 (i - 1) \<bullet> (?g1 i + ?mu1 i (i - 1) \<cdot>\<^sub>v ?g1 (i - 1))" unfolding g2_im1 by simp also have "\<dots> = ?f1 (i - 1) \<bullet> ?g1 i + ?f1 (i - 1) \<bullet> (?mu1 i (i - 1) \<cdot>\<^sub>v ?g1 (i - 1))" by (rule scalar_prod_add_distrib[of _ n], insert i gs g, auto) also have "?f1 (i - 1) \<bullet> ?g1 i = 0" by (subst fs.gs.fi_scalar_prod_gso, insert conn1 im1 i i0, auto simp: fs.gs.\<mu>.simps fs.gs.\<mu>.simps) also have "?f1 (i - 1) \<bullet> (?mu1 i (i - 1) \<cdot>\<^sub>v ?g1 (i - 1)) = ?mu1 i (i - 1) * (?f1 (i - 1) \<bullet> ?g1 (i - 1))" by (rule scalar_prod_smult_distrib, insert gs g i, auto) also have "?f1 (i - 1) \<bullet> ?g1 (i - 1) = ?n1 (i - 1)" by (subst fs.gs.fi_scalar_prod_gso, insert conn1 im1, auto simp: fs.gs.\<mu>.simps) finally have "?mu2 i (i - 1) = ?mu1 i (i - 1) * ?n1 (i - 1) / ?n2 (i - 1)" by (simp add: sq_norm_vec_as_cscalar_prod) } note mu'_mu_i_im1 = this { (* the new values of mu ii (i - 1) for ii > i *) fix ii assume iii: "ii > i" and ii: "ii < m" hence iii1: "i - 1 < ii" by auto have "?mu2 ii (i - 1) = (?f2 ii \<bullet> ?g2 (i - 1)) / ?n2 (i - 1)" unfolding gs2.\<mu>.simps using i0 iii1 by auto also have "?f2 ii \<bullet> ?g2 (i-1) = ?f1 ii \<bullet> ?g2 (i - 1)" using len i i0 iii ii unfolding fs'_def by auto also have "\<dots> = ?f1 ii \<bullet> (?g1 i + ?mu1 i (i - 1) \<cdot>\<^sub>v ?g1 (i - 1))" unfolding g2_im1 by simp also have "\<dots> = ?f1 ii \<bullet> ?g1 i + ?f1 ii \<bullet> (?mu1 i (i - 1) \<cdot>\<^sub>v ?g1 (i - 1))" by (rule scalar_prod_add_distrib[of _ n], insert i ii gs g, auto) also have "?f1 ii \<bullet> ?g1 i = ?mu1 ii i * ?n1 i" by (rule fs.gs.fi_scalar_prod_gso, insert conn1 ii i, auto) also have "?f1 ii \<bullet> (?mu1 i (i - 1) \<cdot>\<^sub>v ?g1 (i - 1)) = ?mu1 i (i - 1) * (?f1 ii \<bullet> ?g1 (i - 1))" by (rule scalar_prod_smult_distrib, insert gs g i ii, auto) also have "?f1 ii \<bullet> ?g1 (i - 1) = ?mu1 ii (i - 1) * ?n1 (i - 1)" by (rule fs.gs.fi_scalar_prod_gso, insert conn1 ii im1, auto) finally have "?mu2 ii (i - 1) = ?mu1 ii (i - 1) * ?mu2 i (i - 1) + ?mu1 ii i * ?n1 i / ?n2 (i - 1)" unfolding mu'_mu_i_im1 using norm0 by (auto simp: field_simps) } note mu'_mu_large_row_im1 = this { (* the new values of mu ii i for ii > i *) fix ii assume iii: "ii > i" and ii: "ii < m" have "?mu2 ii i = (?f2 ii \<bullet> ?g2 i) / ?n2 i" unfolding gs2.\<mu>.simps using i0 iii by auto also have "?f2 ii \<bullet> ?g2 i = ?f1 ii \<bullet> ?g2 i" using len i 
i0 iii ii unfolding fs'_def by auto also have "\<dots> = ?f1 ii \<bullet> (?g1 (i - 1) - (?f1 (i - 1) \<bullet> ?g2 (i - 1) / ?n2 (i - 1)) \<cdot>\<^sub>v ?g2 (i - 1))" unfolding g2_i by simp also have "?f1 (i - 1) = ?f2 i" using i i0 len unfolding fs'_def by auto also have "?f2 i \<bullet> ?g2 (i - 1) / ?n2 (i - 1) = ?mu2 i (i - 1)" unfolding gs2.\<mu>.simps using i i0 by auto also have "?f1 ii \<bullet> (?g1 (i - 1) - ?mu2 i (i - 1) \<cdot>\<^sub>v ?g2 (i - 1)) = ?f1 ii \<bullet> ?g1 (i - 1) - ?f1 ii \<bullet> (?mu2 i (i - 1) \<cdot>\<^sub>v ?g2 (i - 1))" by (rule scalar_prod_minus_distrib[OF g gs], insert gs2 ii i, auto) also have "?f1 ii \<bullet> ?g1 (i - 1) = ?mu1 ii (i - 1) * ?n1 (i - 1)" by (rule fs.gs.fi_scalar_prod_gso, insert conn1 ii im1, auto) also have "?f1 ii \<bullet> (?mu2 i (i - 1) \<cdot>\<^sub>v ?g2 (i - 1)) = ?mu2 i (i - 1) * (?f1 ii \<bullet> ?g2 (i - 1))" by (rule scalar_prod_smult_distrib, insert gs gs2 g i ii, auto) also have "?f1 ii \<bullet> ?g2 (i - 1) = (?f1 ii \<bullet> ?g2 (i - 1) / ?n2 (i - 1)) * ?n2 (i - 1)" using norm0 by (auto simp: field_simps) also have "?f1 ii \<bullet> ?g2 (i - 1) = ?f2 ii \<bullet> ?g2 (i - 1)" using len ii iii unfolding fs'_def by auto also have "\<dots> / ?n2 (i - 1) = ?mu2 ii (i - 1)" unfolding gs2.\<mu>.simps using iii by auto finally have "?mu2 ii i = (?mu1 ii (i - 1) * ?n1 (i - 1) - ?mu2 i (i - 1) * ?mu2 ii (i - 1) * ?n2 (i - 1)) / ?n2 i" by simp also have "\<dots> = (?mu1 ii (i - 1) - ?mu1 i (i - 1) * ?mu2 ii (i - 1)) * ?n2 (i - 1) / ?n1 i" unfolding sq_norm_g2_i mu'_mu_i_im1 using norm0 by (auto simp: field_simps) also have "\<dots> = (?mu1 ii (i - 1) * ?n2 (i - 1) - ?mu1 i (i - 1) * ((?mu1 ii i * ?n1 i + ?mu1 i (i - 1) * ?mu1 ii (i - 1) * ?n1 (i - 1)))) / ?n1 i" unfolding mu'_mu_large_row_im1[OF iii ii] mu'_mu_i_im1 using norm0 by (auto simp: field_simps) also have "\<dots> = ?mu1 ii (i - 1) - ?mu1 i (i - 1) * ?mu1 ii i" unfolding sq_norm_g2_im1 using norm0 by (auto simp: field_simps) finally have "?mu2 ii i = ?mu1 ii (i - 1) - ?mu1 i (i - 1) * ?mu1 ii i" . 
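(* Together with mu'_mu_i_im1_j, mu'_mu_i_im1 and mu'_mu_large_row_im1 this completes the standard swap update of the \<mu>-matrix: rows i - 1 and i exchange their entries below column i - 1, row i additionally gets the recomputed value \<mu>2 i (i - 1), and every row ii > i changes only in columns i - 1 and i. *)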
} note mu'_mu_large_row_i = this { fix k assume k: "k < m" show "?g2 k = ?newg k" unfolding g2_i[symmetric] unfolding g2_im1[symmetric] using g2_g1_identical[OF k] by auto show "?n2 k = ?new_norm k" unfolding sq_norm_g2_i[symmetric] unfolding sq_norm_g2_im1[symmetric] using g2_g1_identical[OF k] by auto fix j assume jk: "j < k" hence j: "j < m" using k by auto have "k < i - 1 \<or> k = i - 1 \<or> k = i \<or> k > i" by linarith thus "?mu2 k j = ?new_mu k j" unfolding mu'_mu_i_im1[symmetric] using mu'_mu_large_row_i[OF _ k] mu'_mu_large_row_im1 [OF _ k] mu_no_change_large_row[OF _ k, of j] mu'_mu_small_i mu'_mu_i_im1_j jk j k by auto } note new_g = this (* stay reduced *) from inv have sred: "reduced fs i" by auto have sred: "reduced fs' (i - 1)" unfolding gram_schmidt_fs.reduced_def proof (intro conjI[OF red] allI impI, goal_cases) case (1 i' j) with sred have "\<bar>?mu1 i' j\<bar> \<le> 1 / 2" unfolding gram_schmidt_fs.reduced_def by auto thus ?case using mu'_mu_small_i[OF 1(1)] by simp qed { (* 16.13 (ii) : norm of g (i - 1) decreases by reduction factor *) note sq_norm_g2_im1 also have "?n1 i + (?mu1 i (i - 1) * ?mu1 i (i - 1)) * ?n1 (i - 1) < 1/\<alpha> * (?n1 (i - 1)) + (1/2 * 1/2) * (?n1 (i - 1))" proof (rule add_less_le_mono[OF _ mult_mono]) from norm_ineq[unfolded mult.commute[of \<alpha>], THEN linordered_field_class.mult_imp_less_div_pos[OF \<alpha>0(1)]] show "?n1 i < 1/\<alpha> * ?n1 (i - 1)" using len i by auto from m12 have abs: "abs (?mu1 i (i - 1)) \<le> 1/2" by auto have "?mu1 i (i - 1) * ?mu1 i (i - 1) \<le> abs (?mu1 i (i - 1)) * abs (?mu1 i (i - 1))" by auto also have "\<dots> \<le> 1/2 * 1/2" using mult_mono[OF abs abs] by auto finally show "?mu1 i (i - 1) * ?mu1 i (i - 1) \<le> 1/2 * 1/2" by auto qed auto also have "\<dots> = reduction * sq_norm (?g1 (i - 1))" unfolding reduction_def using \<alpha>0 by (simp add: ring_distribs add_divide_distrib) finally have "?n2 (i - 1) < reduction * ?n1 (i - 1)" . } note g_reduction = this (* Lemma 16.13 (ii) *) have lin_indpt_list_fs': "gs.lin_indpt_list (RAT fs')" unfolding gs.lin_indpt_list_def using conn2 by auto have mu_small: "\<mu>_small fs' (i - 1)" unfolding \<mu>_small_def proof (intro allI impI, goal_cases) case (1 j) thus ?case using inv(11) unfolding mu'_mu_i_im1_j[OF 1] \<mu>_small_def by auto qed (* invariant is established *) show newInv: "LLL_invariant False (i - 1) fs'" by (rule LLL_invI, insert lin_indpt_list_fs' conn2 mu_small span' lattice fs' sred i, auto) (* show decrease in measure *) { (* 16.16 (ii), the decreasing case *) have ile: "i \<le> m" using i by auto from Gd[OF newInv, folded d_def, OF ile] have "?R (d fs' i) = (\<Prod>j<i. ?n2 j )" by auto also have "\<dots> = prod ?n2 ({0 ..< i-1} \<union> {i - 1})" by (rule sym, rule prod.cong, (insert i0, auto)[1], insert i, auto) also have "\<dots> = ?n2 (i - 1) * prod ?n2 ({0 ..< i-1})" by simp also have "prod ?n2 ({0 ..< i-1}) = prod ?n1 ({0 ..< i-1})" by (rule prod.cong[OF refl], subst g2_g1_identical, insert i, auto) also have "\<dots> = (prod ?n1 ({0 ..< i-1} \<union> {i - 1})) / ?n1 (i - 1)" by (subst prod.union_disjoint, insert norm_pos1[OF im1], auto) also have "prod ?n1 ({0 ..< i-1} \<union> {i - 1}) = prod ?n1 {0..<i}" by (rule arg_cong[of _ _ "prod ?n1"], insert i0, auto) also have "\<dots> = (\<Prod>j<i. 
?n1 j)" by (rule prod.cong, insert i0, auto) also have "\<dots> = ?R (d fs i)" unfolding d_def Gd[OF Linv ile] by (rule prod.cong[OF refl], insert i, auto) finally have new_di: "?R (d fs' i) = ?n2 (i - 1) / ?n1 (i - 1) * ?R (d fs i)" by simp also have "\<dots> < (reduction * ?n1 (i - 1)) / ?n1 (i - 1) * ?R (d fs i)" by (rule mult_strict_right_mono[OF divide_strict_right_mono[OF g_reduction norm_pos1[OF im1]]], insert LLL_d_pos[OF Linv] i, auto) also have "\<dots> = reduction * ?R (d fs i)" using norm_pos1[OF im1] by auto finally have "d fs' i < real_of_rat reduction * d fs i" using of_rat_less of_rat_mult of_rat_of_int_eq by metis note this new_di } note d_i = this show "ii \<le> m \<Longrightarrow> ?R (d fs' ii) = (if ii = i then ?n2 (i - 1) / ?n1 (i - 1) * ?R (d fs i) else ?R (d fs ii))" for ii using d_i d by auto have pos: "k < m \<Longrightarrow> 0 < d fs' k" "k < m \<Longrightarrow> 0 \<le> d fs' k" for k using LLL_d_pos[OF newInv, of k] by auto have prodpos:"0< (\<Prod>i<m. d fs' i)" apply (rule prod_pos) using LLL_d_pos[OF newInv] by auto have prod_pos':"0 < (\<Prod>x\<in>{0..<m} - {i}. real_of_int (d fs' x))" apply (rule prod_pos) using LLL_d_pos[OF newInv] pos by auto have prod_nonneg:"0 \<le> (\<Prod>x\<in>{0..<m} - {i}. real_of_int (d fs' x))" apply (rule prod_nonneg) using LLL_d_pos[OF newInv] pos by auto have prodpos2:"0<(\<Prod>ia<m. d fs ia)" apply (rule prod_pos) using LLL_d_pos[OF assms(1)] by auto have "D fs' = real_of_int (\<Prod>i<m. d fs' i)" unfolding D_def using prodpos by simp also have "(\<Prod>i<m. d fs' i) = (\<Prod> j \<in> {0 ..< m} - {i} \<union> {i}. d fs' j)" by (rule prod.cong, insert i, auto) also have "real_of_int \<dots> = real_of_int (\<Prod> j \<in> {0 ..< m} - {i}. d fs' j) * real_of_int (d fs' i)" by (subst prod.union_disjoint, auto) also have "\<dots> < (\<Prod> j \<in> {0 ..< m} - {i}. d fs' j) * (of_rat reduction * d fs i)" by(rule mult_strict_left_mono[OF d_i(1)],insert prod_pos',auto) also have "(\<Prod> j \<in> {0 ..< m} - {i}. d fs' j) = (\<Prod> j \<in> {0 ..< m} - {i}. d fs j)" by (rule prod.cong, insert d, auto) also have "\<dots> * (of_rat reduction * d fs i) = of_rat reduction * (\<Prod> j \<in> {0 ..< m} - {i} \<union> {i}. d fs j)" by (subst prod.union_disjoint, auto) also have "(\<Prod> j \<in> {0 ..< m} - {i} \<union> {i}. d fs j) = (\<Prod> j<m. 
d fs j)" by (subst prod.cong, insert i, auto) finally have D: "D fs' < real_of_rat reduction * D fs" unfolding D_def using prodpos2 by auto have logD: "logD fs' < logD fs" proof (cases "\<alpha> = 4/3") case True show ?thesis using D unfolding reduction(4)[OF True] logD_def unfolding True by simp next case False hence False': "\<alpha> = 4/3 \<longleftrightarrow> False" by simp from False \<alpha> have "\<alpha> > 4/3" by simp with reduction have reduction1: "reduction < 1" by simp let ?new = "real (D fs')" let ?old = "real (D fs)" let ?log = "log (1/of_rat reduction)" note pos = LLL_D_pos[OF newInv] LLL_D_pos[OF assms(1)] from reduction have "real_of_rat reduction > 0" by auto hence gediv:"1/real_of_rat reduction > 0" by auto have "(1/of_rat reduction) * ?new \<le> ((1/of_rat reduction) * of_rat reduction) * ?old" unfolding mult.assoc real_mult_le_cancel_iff2[OF gediv] using D by simp also have "(1/of_rat reduction) * of_rat reduction = 1" using reduction by auto finally have "(1/of_rat reduction) * ?new \<le> ?old" by auto hence "?log ((1/of_rat reduction) * ?new) \<le> ?log ?old" by (subst log_le_cancel_iff, auto simp: pos reduction1 reduction) hence "floor (?log ((1/of_rat reduction) * ?new)) \<le> floor (?log ?old)" by (rule floor_mono) hence "nat (floor (?log ((1/of_rat reduction) * ?new))) \<le> nat (floor (?log ?old))" by simp also have "\<dots> = logD fs" unfolding logD_def False' by simp also have "?log ((1/of_rat reduction) * ?new) = 1 + ?log ?new" by (subst log_mult, insert reduction reduction1, auto simp: pos ) also have "floor (1 + ?log ?new) = 1 + floor (?log ?new)" by simp also have "nat (1 + floor (?log ?new)) = 1 + nat (floor (?log ?new))" by (subst nat_add_distrib, insert pos reduction reduction1, auto) also have "nat (floor (?log ?new)) = logD fs'" unfolding logD_def False' by simp finally show "logD fs' < logD fs" by simp qed show "LLL_measure i fs > LLL_measure (i - 1) fs'" unfolding LLL_measure_def using i logD by simp qed lemma LLL_inv_initial_state: "LLL_invariant True 0 fs_init" proof - from lin_dep[unfolded gs.lin_indpt_list_def] have "set (RAT fs_init) \<subseteq> Rn" by auto hence fs_init: "set fs_init \<subseteq> carrier_vec n" by auto show ?thesis by (rule LLL_invI[OF fs_init len _ _ lin_dep], auto simp: L_def gs.reduced_def gs.weakly_reduced_def) qed lemma LLL_inv_m_imp_reduced: assumes "LLL_invariant True m fs" shows "reduced fs m" using LLL_invD[OF assms] by blast lemma basis_reduction_short_vector: assumes LLL_inv: "LLL_invariant True m fs" and v: "v = hd fs" and m0: "m \<noteq> 0" shows "v \<in> carrier_vec n" "v \<in> L - {0\<^sub>v n}" "h \<in> L - {0\<^sub>v n} \<Longrightarrow> rat_of_int (sq_norm v) \<le> \<alpha> ^ (m - 1) * rat_of_int (sq_norm h)" "v \<noteq> 0\<^sub>v j" proof - let ?L = "lattice_of fs_init" have a1: "\<alpha> \<ge> 1" using \<alpha> by auto from LLL_invD[OF LLL_inv] have L: "lattice_of fs = L" and red: "gram_schmidt_fs.weakly_reduced n (RAT fs) \<alpha> (length (RAT fs))" and basis: "lin_indep fs" and lenH: "length fs = m" and H: "set fs \<subseteq> carrier_vec n" by (auto simp: gs.lin_indpt_list_def gs.reduced_def) from lin_dep have G: "set fs_init \<subseteq> carrier_vec n" unfolding gs.lin_indpt_list_def by auto with m0 len have "dim_vec (hd fs_init) = n" by (cases fs_init, auto) from v m0 lenH v have v: "v = fs ! 
0" by (cases fs, auto) interpret gs1: gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use assms LLL_invariant_def gs.lin_indpt_list_def in auto) let ?r = "rat_of_int" let ?rv = "map_vec ?r" let ?F = "RAT fs" let ?h = "?rv h" { assume h:"h \<in> L - {0\<^sub>v n}" (is ?h_req) from h[folded L] have h: "h \<in> lattice_of fs" "h \<noteq> 0\<^sub>v n" by auto { assume f: "?h = 0\<^sub>v n" have "?h = ?rv (0\<^sub>v n)" unfolding f by (intro eq_vecI, auto) hence "h = 0\<^sub>v n" using of_int_hom.vec_hom_zero_iff[of h] of_int_hom.vec_hom_inj by auto with h have False by simp } hence h0: "?h \<noteq> 0\<^sub>v n" by auto with lattice_of_of_int[OF H h(1)] have "?h \<in> gs.lattice_of ?F - {0\<^sub>v n}" by auto } from gs1.weakly_reduced_imp_short_vector[OF red this a1] lenH show "h \<in> L - {0\<^sub>v n} \<Longrightarrow> ?r (sq_norm v) \<le> \<alpha> ^ (m - 1) * ?r (sq_norm h)" using basis unfolding L v gs.lin_indpt_list_def by (auto simp: sq_norm_of_int) from m0 H lenH show vn: "v \<in> carrier_vec n" unfolding v by (cases fs, auto) have vL: "v \<in> L" unfolding L[symmetric] v using m0 H lenH by (intro basis_in_latticeI, cases fs, auto) { assume "v = 0\<^sub>v n" hence "hd ?F = 0\<^sub>v n" unfolding v using m0 lenH by (cases fs, auto) with gs.lin_indpt_list_nonzero[OF basis] have False using m0 lenH by (cases fs, auto) } with vL show v: "v \<in> L - {0\<^sub>v n}" by auto have jn:"0\<^sub>v j \<in> carrier_vec n \<Longrightarrow> j = n" unfolding zero_vec_def carrier_vec_def by auto with v vn show "v \<noteq> 0\<^sub>v j" by auto qed lemma LLL_mu_d_Z: assumes inv: "LLL_invariant upw i fs" and j: "j \<le> ii" and ii: "ii < m" shows "of_int (d fs (Suc j)) * \<mu> fs ii j \<in> \<int>" proof - interpret fs: fs_int' n m fs_init \<alpha> upw i fs by standard (use inv in auto) show ?thesis using assms fs.fs_int_mu_d_Z LLL_invD[OF inv] unfolding d_def fs.d_def by auto qed context fixes upw i fs assumes Linv: "LLL_invariant upw i fs" and gbnd: "g_bound fs" begin interpretation gs1: gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use Linv LLL_invariant_def gs.lin_indpt_list_def in auto) lemma LLL_inv_N_pos: assumes m: "m \<noteq> 0" shows "N > 0" proof - let ?r = rat_of_int note inv = LLL_invD[OF Linv] from inv have F: "RAT fs ! 0 \<in> Rn" "fs ! 0 \<in> carrier_vec n" using m by auto from m have upt: "[0..< m] = 0 # [1 ..< m]" using upt_add_eq_append[of 0 1 "m - 1"] by auto from inv(6) m have "map_vec ?r (fs ! 0) \<noteq> 0\<^sub>v n" using gs.lin_indpt_list_nonzero[OF inv(1)] unfolding set_conv_nth by force hence F0: "fs ! 0 \<noteq> 0\<^sub>v n" by auto hence "sq_norm (fs ! 0) \<noteq> 0" using F by simp hence 1: "sq_norm (fs ! 0) \<ge> 1" using sq_norm_vec_ge_0[of "fs ! 0"] by auto from gbnd m have "sq_norm (gso fs 0) \<le> of_nat N" unfolding g_bound_def by auto also have "gso fs 0 = RAT fs ! 0" unfolding upt using F by (simp add: gs1.gso.simps[of 0]) also have "RAT fs ! 0 = map_vec ?r (fs ! 0)" using inv(6) m by auto also have "sq_norm \<dots> = ?r (sq_norm (fs ! 0))" by (simp add: sq_norm_of_int) finally show ?thesis using 1 by (cases N, auto) qed (* equation (3) in front of Lemma 16.18 *) lemma d_approx_main: assumes i: "ii \<le> m" "m \<noteq> 0" shows "rat_of_int (d fs ii) \<le> rat_of_nat (N^ii)" proof - note inv = LLL_invD[OF Linv] from LLL_inv_N_pos i have A: "0 < N" by auto note main = inv(2)[unfolded gram_schmidt_int_def gram_schmidt_wit_def] have "rat_of_int (d fs ii) = (\<Prod>j<ii. 
\<parallel>gso fs j\<parallel>\<^sup>2)" unfolding d_def using i by (auto simp: Gramian_determinant [OF Linv]) also have "\<dots> \<le> (\<Prod>j<ii. of_nat N)" using i by (intro prod_mono ballI conjI prod_nonneg, insert gbnd[unfolded g_bound_def], auto) also have "\<dots> = (of_nat N)^ii" unfolding prod_constant by simp also have "\<dots> = of_nat (N^ii)" by simp finally show ?thesis by simp qed lemma d_approx: assumes i: "ii < m" shows "rat_of_int (d fs ii) \<le> rat_of_nat (N^ii)" using d_approx_main[of ii] assms by auto lemma d_bound: assumes i: "ii < m" shows "d fs ii \<le> N^ii" using d_approx[OF assms] unfolding d_def by linarith lemma D_approx: "D fs \<le> N ^ (m * m)" proof - note inv = LLL_invD[OF Linv] from LLL_inv_N_pos have N: "m \<noteq> 0 \<Longrightarrow> 0 < N" by auto note main = inv(2)[unfolded gram_schmidt_int_def gram_schmidt_wit_def] have "rat_of_int (\<Prod>i<m. d fs i) = (\<Prod>i<m. rat_of_int (d fs i))" by simp also have "\<dots> \<le> (\<Prod>i<m. (of_nat N) ^ i)" by (rule prod_mono, insert d_approx LLL_d_pos[OF Linv], auto simp: less_le) also have "\<dots> \<le> (\<Prod>i<m. (of_nat N ^ m))" by (rule prod_mono, insert N, auto intro: pow_mono_exp) also have "\<dots> = (of_nat N)^(m * m)" unfolding prod_constant power_mult by simp also have "\<dots> = of_nat (N ^ (m * m))" by simp finally have "(\<Prod>i<m. d fs i) \<le> N ^ (m * m)" by linarith also have "(\<Prod>i<m. d fs i) = D fs" unfolding D_def by (subst nat_0_le, rule prod_nonneg, insert LLL_d_pos[OF Linv], auto simp: le_less) finally show "D fs \<le> N ^ (m * m)" by linarith qed lemma LLL_measure_approx: assumes "\<alpha> > 4/3" "m \<noteq> 0" shows "LLL_measure i fs \<le> m + 2 * m * m * log base N" proof - have b1: "base > 1" using base assms by auto have id: "base = 1 / real_of_rat reduction" unfolding base_def reduction_def using \<alpha>0 by (auto simp: field_simps of_rat_divide) from LLL_D_pos[OF Linv] have D1: "real (D fs) \<ge> 1" by auto note invD = LLL_invD[OF Linv] from invD have F: "set fs \<subseteq> carrier_vec n" and len: "length fs = m" by auto have N0: "N > 0" using LLL_inv_N_pos[OF assms(2)] . from D_approx have D: "D fs \<le> N ^ (m * m)" . hence "real (D fs) \<le> real (N ^ (m * m))" by linarith also have "\<dots> = real N ^ (m * m)" by simp finally have log: "log base (real (D fs)) \<le> log base (real N ^ (m * m))" by (subst log_le_cancel_iff[OF b1], insert D1 N0, auto) have "real (logD fs) = real (nat \<lfloor>log base (real (D fs))\<rfloor>)" unfolding logD_def id using assms by auto also have "\<dots> \<le> log base (real (D fs))" using b1 D1 by auto also have "\<dots> \<le> log base (real N ^ (m * m))" by fact also have "\<dots> = (m * m) * log base (real N)" by (rule log_nat_power, insert N0, auto) finally have main: "logD fs \<le> m * m * log base N" by simp have "real (LLL_measure i fs) = real (2 * logD fs + m - i)" unfolding LLL_measure_def split invD(1) by simp also have "\<dots> \<le> 2 * real (logD fs) + m" using invD by simp also have "\<dots> \<le> 2 * (m * m * log base N) + m" using main by auto finally show ?thesis by simp qed end lemma g_bound_fs_init: "g_bound fs_init" proof - { fix i assume i: "i < m" let ?N = "map (nat o sq_norm) fs_init" let ?r = rat_of_int from i have mem: "nat (sq_norm (fs_init ! 
i)) \<in> set ?N" using fs_init len unfolding set_conv_nth by force interpret gs: gram_schmidt_fs_lin_indpt n "RAT fs_init" by (standard) (use len lin_dep LLL_invariant_def gs.lin_indpt_list_def in auto) from mem_set_imp_le_max_list[OF _ mem] have FN: "nat (sq_norm (fs_init ! i)) \<le> N" unfolding N_def by force hence "\<parallel>fs_init ! i\<parallel>\<^sup>2 \<le> int N" using i by auto also have "\<dots> \<le> int (N * m)" using i by fastforce finally have f_bnd: "\<parallel>fs_init ! i\<parallel>\<^sup>2 \<le> int (N * m)" . from FN have "rat_of_nat (nat (sq_norm (fs_init ! i))) \<le> rat_of_nat N" by simp also have "rat_of_nat (nat (sq_norm (fs_init ! i))) = ?r (sq_norm (fs_init ! i))" using sq_norm_vec_ge_0[of "fs_init ! i"] by auto also have "\<dots> = sq_norm (RAT fs_init ! i)" unfolding sq_norm_of_int[symmetric] using fs_init len i by auto finally have "sq_norm (RAT fs_init ! i) \<le> rat_of_nat N" . with gs.sq_norm_gso_le_f i len lin_dep have g_bnd: "\<parallel>gs.gso i\<parallel>\<^sup>2 \<le> rat_of_nat N" unfolding gs.lin_indpt_list_def by fastforce note f_bnd g_bnd } thus "g_bound fs_init" unfolding g_bound_def by auto qed lemma LLL_measure_approx_fs_init: "LLL_invariant upw i fs_init \<Longrightarrow> 4 / 3 < \<alpha> \<Longrightarrow> m \<noteq> 0 \<Longrightarrow> real (LLL_measure i fs_init) \<le> real m + real (2 * m * m) * log base (real N)" using LLL_measure_approx[OF _ g_bound_fs_init] . lemma N_le_MMn: assumes m0: "m \<noteq> 0" shows "N \<le> nat M * nat M * n" unfolding N_def proof (rule max_list_le, unfold set_map o_def) fix ni assume "ni \<in> (\<lambda>x. nat \<parallel>x\<parallel>\<^sup>2) ` set fs_init" then obtain fi where ni: "ni = nat (\<parallel>fi\<parallel>\<^sup>2)" and fi: "fi \<in> set fs_init" by auto from fi len obtain i where fii: "fi = fs_init ! i" and i: "i < m" unfolding set_conv_nth by auto from fi fs_init have fi: "fi \<in> carrier_vec n" by auto let ?set = "{\<bar>fs_init ! i $ j\<bar> |i j. i < m \<and> j < n} \<union> {0}" have id: "?set = (\<lambda> (i,j). abs (fs_init ! i $ j)) ` ({0..<m} \<times> {0..<n}) \<union> {0}" by force have fin: "finite ?set" unfolding id by auto { fix j assume "j < n" hence "M \<ge> \<bar>fs_init ! i $ j\<bar>" unfolding M_def using i by (intro Max_ge[of _ "abs (fs_init ! 
i $ j)"], intro fin, auto) } note M = this from Max_ge[OF fin, of 0] have M0: "M \<ge> 0" unfolding M_def by auto have "ni = nat (\<parallel>fi\<parallel>\<^sup>2)" unfolding ni by auto also have "\<dots> \<le> nat (int n * \<parallel>fi\<parallel>\<^sub>\<infinity>\<^sup>2)" using sq_norm_vec_le_linf_norm[OF fi] by (intro nat_mono, auto) also have "\<dots> = n * nat (\<parallel>fi\<parallel>\<^sub>\<infinity>\<^sup>2)" by (simp add: nat_mult_distrib) also have "\<dots> \<le> n * nat (M^2)" proof (rule mult_left_mono[OF nat_mono]) have fi: "\<parallel>fi\<parallel>\<^sub>\<infinity> \<le> M" unfolding linf_norm_vec_def proof (rule max_list_le, unfold set_append set_map, rule ccontr) fix x assume "x \<in> abs ` set (list_of_vec fi) \<union> set [0]" and xM: "\<not> x \<le> M" with M0 obtain fij where fij: "fij \<in> set (list_of_vec fi)" and x: "x = abs fij" by auto from fij fi obtain j where j: "j < n" and fij: "fij = fi $ j" unfolding set_list_of_vec vec_set_def by auto from M[OF j] xM[unfolded x fij fii] show False by auto qed auto show "\<parallel>fi\<parallel>\<^sub>\<infinity>\<^sup>2 \<le> M^2" unfolding abs_le_square_iff[symmetric] using fi using linf_norm_vec_ge_0[of fi] by auto qed auto finally show "ni \<le> nat M * nat M * n" using M0 by (subst nat_mult_distrib[symmetric], auto simp: power2_eq_square ac_simps) qed (insert m0 len, auto) subsection \<open>Basic LLL implementation based on previous results\<close> text \<open>We now assemble a basic implementation of the LLL algorithm, where only the lattice basis is updated, and where the GSO and the $\mu$-values are always computed from scratch. This enables a simple soundness proof and permits to separate an efficient implementation from the soundness reasoning.\<close> fun basis_reduction_add_rows_loop where "basis_reduction_add_rows_loop i fs 0 = fs" | "basis_reduction_add_rows_loop i fs (Suc j) = ( let c = round (\<mu> fs i j); fs' = (if c = 0 then fs else fs[ i := fs ! i - c \<cdot>\<^sub>v fs ! j]) in basis_reduction_add_rows_loop i fs' j)" definition basis_reduction_add_rows where "basis_reduction_add_rows upw i fs = (if upw then basis_reduction_add_rows_loop i fs i else fs)" definition basis_reduction_swap where "basis_reduction_swap i fs = (False, i - 1, fs[i := fs ! (i - 1), i - 1 := fs ! 
i])" definition basis_reduction_step where "basis_reduction_step upw i fs = (if i = 0 then (True, Suc i, fs) else let fs' = basis_reduction_add_rows upw i fs in if sq_norm (gso fs' (i - 1)) \<le> \<alpha> * sq_norm (gso fs' i) then (True, Suc i, fs') else basis_reduction_swap i fs')" function basis_reduction_main where "basis_reduction_main (upw,i,fs) = (if i < m \<and> LLL_invariant upw i fs then basis_reduction_main (basis_reduction_step upw i fs) else fs)" by pat_completeness auto definition "reduce_basis = basis_reduction_main (True, 0, fs_init)" definition "short_vector = hd reduce_basis" text \<open>Soundness of this implementation is easily proven\<close> lemma basis_reduction_add_rows_loop: assumes inv: "LLL_invariant True i fs" and mu_small: "\<mu>_small_row i fs j" and res: "basis_reduction_add_rows_loop i fs j = fs'" and i: "i < m" and j: "j \<le> i" shows "LLL_invariant False i fs'" "LLL_measure i fs' = LLL_measure i fs" proof (atomize(full), insert assms, induct j arbitrary: fs) case (0 fs) thus ?case using basis_reduction_add_row_done[of i fs] by auto next case (Suc j fs) hence j: "j < i" by auto let ?c = "round (\<mu> fs i j)" show ?case proof (cases "?c = 0") case True thus ?thesis using Suc(1)[OF Suc(2) basis_reduction_add_row_main_0[OF Suc(2) i j True Suc(3)]] Suc(2-) by auto next case False note step = basis_reduction_add_row_main[OF Suc(2) i j refl] show ?thesis using Suc(1)[OF step(1-2)] False Suc(2-) step(3) by auto qed qed lemma basis_reduction_add_rows: assumes inv: "LLL_invariant upw i fs" and res: "basis_reduction_add_rows upw i fs = fs'" and i: "i < m" shows "LLL_invariant False i fs'" "LLL_measure i fs' = LLL_measure i fs" proof (atomize(full), goal_cases) case 1 note def = basis_reduction_add_rows_def show ?case proof (cases upw) case False with res inv show ?thesis by (simp add: def) next case True with inv have "LLL_invariant True i fs" by auto note start = this \<mu>_small_row_refl[of i fs] from res[unfolded def] True have "basis_reduction_add_rows_loop i fs i = fs'" by auto from basis_reduction_add_rows_loop[OF start this i] show ?thesis by auto qed qed lemma basis_reduction_swap: assumes inv: "LLL_invariant False i fs" and res: "basis_reduction_swap i fs = (upw',i',fs')" and cond: "sq_norm (gso fs (i - 1)) > \<alpha> * sq_norm (gso fs i)" and i: "i < m" "i \<noteq> 0" shows "LLL_invariant upw' i' fs'" (is ?g1) "LLL_measure i' fs' < LLL_measure i fs" (is ?g2) proof - note def = basis_reduction_swap_def from res[unfolded basis_reduction_swap_def] have id: "upw' = False" "i' = i - 1" "fs' = fs[i := fs ! (i - 1), i - 1 := fs ! 
i]" by auto from basis_reduction_swap_main(1-2)[OF inv i cond id(3)] show ?g1 ?g2 unfolding id by auto qed lemma basis_reduction_step: assumes inv: "LLL_invariant upw i fs" and res: "basis_reduction_step upw i fs = (upw',i',fs')" and i: "i < m" shows "LLL_invariant upw' i' fs'" "LLL_measure i' fs' < LLL_measure i fs" proof (atomize(full), goal_cases) case 1 note def = basis_reduction_step_def obtain fs'' where fs'': "basis_reduction_add_rows upw i fs = fs''" by auto show ?case proof (cases "i = 0") case True from increase_i[OF inv i True] True res show ?thesis by (auto simp: def) next case False hence id: "(i = 0) = False" by auto note res = res[unfolded def id if_False fs'' Let_def] let ?x = "sq_norm (gso fs'' (i - 1))" let ?y = "\<alpha> * sq_norm (gso fs'' i)" from basis_reduction_add_rows[OF inv fs'' i] have inv: "LLL_invariant False i fs''" and meas: "LLL_measure i fs'' = LLL_measure i fs" by auto show ?thesis proof (cases "?x \<le> ?y") case True from increase_i[OF inv i _ True] True res meas show ?thesis by auto next case gt: False hence "?x > ?y" by auto from basis_reduction_swap[OF inv _ this i False] gt res meas show ?thesis by auto qed qed qed termination by (relation "measure (\<lambda> (upw,i,fs). LLL_measure i fs)", insert basis_reduction_step, auto split: prod.splits) declare basis_reduction_main.simps[simp del] lemma basis_reduction_main: assumes "LLL_invariant upw i fs" and res: "basis_reduction_main (upw,i,fs) = fs'" shows "LLL_invariant True m fs'" using assms proof (induct "LLL_measure i fs" arbitrary: i fs upw rule: less_induct) case (less i fs upw) have id: "LLL_invariant upw i fs = True" using less by auto note res = less(3)[unfolded basis_reduction_main.simps[of upw i fs] id] note inv = less(2) note IH = less(1) show ?case proof (cases "i < m") case i: True obtain i' fs' upw' where step: "basis_reduction_step upw i fs = (upw',i',fs')" (is "?step = _") by (cases ?step, auto) from IH[OF basis_reduction_step(2,1)[OF inv step i]] res[unfolded step] i show ?thesis by auto next case False with LLL_invD[OF inv] have i: "i = m" by auto with False res inv have "LLL_invariant upw m fs'" by auto thus "LLL_invariant True m fs'" unfolding LLL_invariant_def by auto qed qed lemma reduce_basis_inv: assumes res: "reduce_basis = fs" shows "LLL_invariant True m fs" using basis_reduction_main[OF LLL_inv_initial_state res[unfolded reduce_basis_def]] . lemma reduce_basis: assumes res: "reduce_basis = fs" shows "lattice_of fs = L" "reduced fs m" "lin_indep fs" "length fs = m" using LLL_invD[OF reduce_basis_inv[OF res]] by blast+ lemma short_vector: assumes res: "short_vector = v" and m0: "m \<noteq> 0" shows "v \<in> carrier_vec n" "v \<in> L - {0\<^sub>v n}" "h \<in> L - {0\<^sub>v n} \<Longrightarrow> rat_of_int (sq_norm v) \<le> \<alpha> ^ (m - 1) * rat_of_int (sq_norm h)" "v \<noteq> 0\<^sub>v j" using basis_reduction_short_vector[OF reduce_basis_inv[OF refl] res[symmetric, unfolded short_vector_def] m0] by blast+ end end
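Taken together, the swap lemmas above give the quantitative heart of the termination argument. Informally, with \(\lVert g_k\rVert^2\) denoting sq_norm of the k-th GSO vector and with reduction unfolding (as in the proof of g_reduction via reduction_def) to \(1/\alpha + 1/4\), the two strict decreases established above read

\[
\lVert g'_{i-1}\rVert^2 \;<\; \Big(\tfrac{1}{\alpha}+\tfrac{1}{4}\Big)\,\lVert g_{i-1}\rVert^2,
\qquad
D(fs') \;<\; \Big(\tfrac{1}{\alpha}+\tfrac{1}{4}\Big)\,D(fs),
\]

so for \(\alpha > 4/3\) the factor is strictly below 1. Since LLL_measure i fs = 2 * logD fs + (m - i), with logD fs = \(\lfloor \log_{1/\mathrm{reduction}} D(fs) \rfloor\) in the case \(\alpha \neq 4/3\), every swap strictly decreases the measure, which is exactly what the termination relation of basis_reduction_main consumes.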
[GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ ⊢ Monotone fun s => posTangentConeAt s a [PROOFSTEP] rintro s t hst y ⟨c, d, hd, hc, hcd⟩ [GOAL] case intro.intro.intro.intro E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s t : Set E hst : s ≤ t y : E c : ℕ → ℝ d : ℕ → E hd : ∀ᶠ (n : ℕ) in atTop, a + d n ∈ s hc : Tendsto c atTop atTop hcd : Tendsto (fun n => c n • d n) atTop (𝓝 y) ⊢ y ∈ (fun s => posTangentConeAt s a) t [PROOFSTEP] exact ⟨c, d, mem_of_superset hd fun h hn => hst hn, hc, hcd⟩ [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s ⊢ y - x ∈ posTangentConeAt s x [PROOFSTEP] let c := fun n : ℕ => (2 : ℝ) ^ n [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n ⊢ y - x ∈ posTangentConeAt s x [PROOFSTEP] let d := fun n : ℕ => (c n)⁻¹ • (y - x) [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) ⊢ y - x ∈ posTangentConeAt s x [PROOFSTEP] refine' ⟨c, d, Filter.univ_mem' fun n => h _, tendsto_pow_atTop_atTop_of_one_lt one_lt_two, _⟩ [GOAL] case refine'_1 E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) n : ℕ ⊢ x + d n ∈ segment ℝ x y case refine'_2 E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) ⊢ Tendsto (fun n => c n • d n) atTop (𝓝 (y - x)) [PROOFSTEP] show x + d n ∈ segment ℝ x y [GOAL] case refine'_1 E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) n : ℕ ⊢ x + d n ∈ segment ℝ x y [PROOFSTEP] rw [segment_eq_image'] [GOAL] case refine'_1 E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) n : ℕ ⊢ x + d n ∈ (fun θ => x + θ • (y - x)) '' Icc 0 1 [PROOFSTEP] refine' ⟨(c n)⁻¹, ⟨_, _⟩, rfl⟩ [GOAL] case refine'_1.refine'_1 E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) n : ℕ ⊢ 0 ≤ (c n)⁻¹ case refine'_1.refine'_2 E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) n : ℕ ⊢ (c n)⁻¹ ≤ 1 [PROOFSTEP] exacts [inv_nonneg.2 (pow_nonneg zero_le_two _), inv_le_one (one_le_pow_of_one_le one_le_two _)] [GOAL] case refine'_2 E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) ⊢ Tendsto (fun n => c n • d n) atTop (𝓝 (y - x)) [PROOFSTEP] show Tendsto (fun n => c n • d n) atTop (𝓝 (y 
- x)) [GOAL] case refine'_2 E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x y ⊆ s c : ℕ → ℝ := fun n => 2 ^ n d : ℕ → E := fun n => (c n)⁻¹ • (y - x) ⊢ Tendsto (fun n => c n • d n) atTop (𝓝 (y - x)) [PROOFSTEP] exact tendsto_const_nhds.congr fun n ↦ (smul_inv_smul₀ (pow_ne_zero _ two_ne_zero) _).symm [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E x y : E h : segment ℝ x (x + y) ⊆ s ⊢ y ∈ posTangentConeAt s x [PROOFSTEP] simpa only [add_sub_cancel'] using mem_posTangentConeAt_of_segment_subset h [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E hy : y ∈ posTangentConeAt s a ⊢ ↑f' y ≤ 0 [PROOFSTEP] rcases hy with ⟨c, d, hd, hc, hcd⟩ [GOAL] case intro.intro.intro.intro E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E c : ℕ → ℝ d : ℕ → E hd : ∀ᶠ (n : ℕ) in atTop, a + d n ∈ s hc : Tendsto c atTop atTop hcd : Tendsto (fun n => c n • d n) atTop (𝓝 y) ⊢ ↑f' y ≤ 0 [PROOFSTEP] have hc' : Tendsto (‖c ·‖) atTop atTop := tendsto_abs_atTop_atTop.comp hc [GOAL] case intro.intro.intro.intro E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E c : ℕ → ℝ d : ℕ → E hd : ∀ᶠ (n : ℕ) in atTop, a + d n ∈ s hc : Tendsto c atTop atTop hcd : Tendsto (fun n => c n • d n) atTop (𝓝 y) hc' : Tendsto (fun x => ‖c x‖) atTop atTop ⊢ ↑f' y ≤ 0 [PROOFSTEP] suffices : ∀ᶠ n in atTop, c n • (f (a + d n) - f a) ≤ 0 [GOAL] case intro.intro.intro.intro E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E c : ℕ → ℝ d : ℕ → E hd : ∀ᶠ (n : ℕ) in atTop, a + d n ∈ s hc : Tendsto c atTop atTop hcd : Tendsto (fun n => c n • d n) atTop (𝓝 y) hc' : Tendsto (fun x => ‖c x‖) atTop atTop this : ∀ᶠ (n : ℕ) in atTop, c n • (f (a + d n) - f a) ≤ 0 ⊢ ↑f' y ≤ 0 [PROOFSTEP] exact le_of_tendsto (hf.lim atTop hd hc' hcd) this [GOAL] case this E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E c : ℕ → ℝ d : ℕ → E hd : ∀ᶠ (n : ℕ) in atTop, a + d n ∈ s hc : Tendsto c atTop atTop hcd : Tendsto (fun n => c n • d n) atTop (𝓝 y) hc' : Tendsto (fun x => ‖c x‖) atTop atTop ⊢ ∀ᶠ (n : ℕ) in atTop, c n • (f (a + d n) - f a) ≤ 0 [PROOFSTEP] replace hd : Tendsto (fun n => a + d n) atTop (𝓝[s] (a + 0)) [GOAL] case hd E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E c : ℕ → ℝ d : ℕ → E hd : ∀ᶠ (n : ℕ) in atTop, a + d n ∈ s hc : Tendsto c atTop atTop hcd : Tendsto (fun n => c n • d n) atTop (𝓝 y) hc' : Tendsto (fun x => ‖c x‖) atTop atTop ⊢ Tendsto (fun n => a + d n) atTop (𝓝[s] (a + 0)) [PROOFSTEP] exact tendsto_nhdsWithin_iff.2 ⟨tendsto_const_nhds.add (tangentConeAt.lim_zero _ hc' hcd), hd⟩ [GOAL] case this E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E c : ℕ → ℝ d : ℕ → E hc : Tendsto c atTop atTop hcd : 
Tendsto (fun n => c n • d n) atTop (𝓝 y) hc' : Tendsto (fun x => ‖c x‖) atTop atTop hd : Tendsto (fun n => a + d n) atTop (𝓝[s] (a + 0)) ⊢ ∀ᶠ (n : ℕ) in atTop, c n • (f (a + d n) - f a) ≤ 0 [PROOFSTEP] rw [add_zero] at hd [GOAL] case this E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E c : ℕ → ℝ d : ℕ → E hc : Tendsto c atTop atTop hcd : Tendsto (fun n => c n • d n) atTop (𝓝 y) hc' : Tendsto (fun x => ‖c x‖) atTop atTop hd : Tendsto (fun n => a + d n) atTop (𝓝[s] a) ⊢ ∀ᶠ (n : ℕ) in atTop, c n • (f (a + d n) - f a) ≤ 0 [PROOFSTEP] filter_upwards [hd.eventually h, hc.eventually_ge_atTop 0] with n hfn hcn [GOAL] case h E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E c : ℕ → ℝ d : ℕ → E hc : Tendsto c atTop atTop hcd : Tendsto (fun n => c n • d n) atTop (𝓝 y) hc' : Tendsto (fun x => ‖c x‖) atTop atTop hd : Tendsto (fun n => a + d n) atTop (𝓝[s] a) n : ℕ hfn : f (a + d n) ≤ f a hcn : 0 ≤ c n ⊢ c n • (f (a + d n) - f a) ≤ 0 [PROOFSTEP] exact mul_nonpos_of_nonneg_of_nonpos hcn (sub_nonpos.2 hfn) [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a y : E hy : y ∈ posTangentConeAt s a hf : ¬DifferentiableWithinAt ℝ f s a ⊢ ↑(fderivWithin ℝ f s a) y ≤ 0 [PROOFSTEP] rw [fderivWithin_zero_of_not_differentiableWithinAt hf] [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a y : E hy : y ∈ posTangentConeAt s a hf : ¬DifferentiableWithinAt ℝ f s a ⊢ ↑0 y ≤ 0 [PROOFSTEP] rfl [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a hf : HasFDerivWithinAt f f' s a y : E hy : y ∈ posTangentConeAt s a hy' : -y ∈ posTangentConeAt s a ⊢ 0 ≤ ↑f' y [PROOFSTEP] simpa using h.hasFDerivWithinAt_nonpos hf hy' [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a y : E hy : y ∈ posTangentConeAt s a hy' : -y ∈ posTangentConeAt s a hf : ¬DifferentiableWithinAt ℝ f s a ⊢ ↑(fderivWithin ℝ f s a) y = 0 [PROOFSTEP] rw [fderivWithin_zero_of_not_differentiableWithinAt hf] [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMaxOn f s a y : E hy : y ∈ posTangentConeAt s a hy' : -y ∈ posTangentConeAt s a hf : ¬DifferentiableWithinAt ℝ f s a ⊢ ↑0 y = 0 [PROOFSTEP] rfl [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMinOn f s a hf : HasFDerivWithinAt f f' s a y : E hy : y ∈ posTangentConeAt s a ⊢ 0 ≤ ↑f' y [PROOFSTEP] simpa using h.neg.hasFDerivWithinAt_nonpos hf.neg hy [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMinOn f s a y : E hy : y ∈ posTangentConeAt s a hf : ¬DifferentiableWithinAt ℝ f s a ⊢ 0 ≤ ↑(fderivWithin ℝ f s a) y [PROOFSTEP] rw [fderivWithin_zero_of_not_differentiableWithinAt hf] [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMinOn f s a y : E hy : y ∈ posTangentConeAt s a hf : ¬DifferentiableWithinAt ℝ f s a ⊢ 0 ≤ ↑0 y [PROOFSTEP] rfl [GOAL] E : Type u inst✝¹ : 
NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMinOn f s a hf : HasFDerivWithinAt f f' s a y : E hy : y ∈ posTangentConeAt s a hy' : -y ∈ posTangentConeAt s a ⊢ ↑f' y = 0 [PROOFSTEP] simpa using h.neg.hasFDerivWithinAt_eq_zero hf.neg hy hy' [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMinOn f s a y : E hy : y ∈ posTangentConeAt s a hy' : -y ∈ posTangentConeAt s a hf : ¬DifferentiableWithinAt ℝ f s a ⊢ ↑(fderivWithin ℝ f s a) y = 0 [PROOFSTEP] rw [fderivWithin_zero_of_not_differentiableWithinAt hf] [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ s : Set E h : IsLocalMinOn f s a y : E hy : y ∈ posTangentConeAt s a hy' : -y ∈ posTangentConeAt s a hf : ¬DifferentiableWithinAt ℝ f s a ⊢ ↑0 y = 0 [PROOFSTEP] rfl [GOAL] E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ h : IsLocalMin f a hf : HasFDerivAt f f' a ⊢ f' = 0 [PROOFSTEP] ext y [GOAL] case h E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ h : IsLocalMin f a hf : HasFDerivAt f f' a y : E ⊢ ↑f' y = ↑0 y [PROOFSTEP] apply (h.on univ).hasFDerivWithinAt_eq_zero hf.hasFDerivWithinAt [GOAL] case h.hy E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ h : IsLocalMin f a hf : HasFDerivAt f f' a y : E ⊢ y ∈ posTangentConeAt univ a [PROOFSTEP] rw [posTangentConeAt_univ] [GOAL] case h.hy' E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ h : IsLocalMin f a hf : HasFDerivAt f f' a y : E ⊢ -y ∈ posTangentConeAt univ a [PROOFSTEP] rw [posTangentConeAt_univ] [GOAL] case h.hy E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ h : IsLocalMin f a hf : HasFDerivAt f f' a y : E ⊢ y ∈ univ [PROOFSTEP] apply mem_univ [GOAL] case h.hy' E : Type u inst✝¹ : NormedAddCommGroup E inst✝ : NormedSpace ℝ E f : E → ℝ a : E f' : E →L[ℝ] ℝ h : IsLocalMin f a hf : HasFDerivAt f f' a y : E ⊢ -y ∈ univ [PROOFSTEP] apply mem_univ [GOAL] f : ℝ → ℝ f' a b : ℝ h : IsLocalMin f a hf : HasDerivAt f f' a ⊢ f' = 0 [PROOFSTEP] simpa using FunLike.congr_fun (h.hasFDerivAt_eq_zero (hasDerivAt_iff_hasFDerivAt.1 hf)) 1
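The proof states above assemble the first-derivative test. Informally, for \(f : E \to \mathbb{R}\) with derivative \(f'\) within \(s\) at \(a\):

\[
f \text{ locally maximal on } s \text{ at } a,\quad y \in \mathrm{posTangentCone}(s, a) \;\Longrightarrow\; f'(y) \le 0,
\]

and dually \(f'(y) \ge 0\) at a local minimum; if both \(y\) and \(-y\) lie in the cone, then \(f'(y) = 0\). Since posTangentConeAt univ a = univ (every direction and its negative qualify), an unconstrained local extremum forces \(f' = 0\), and the final one-dimensional corollary evaluates this linear map at 1 to conclude that the scalar derivative vanishes.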
/- Copyright (c) 2015 Leonardo de Moura. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Leonardo de Moura Naive sort for lists -/ import data.list.comb data.list.set data.list.perm data.list.sorted logic.connectives algebra.order namespace list open decidable nat variables {B A : Type} variable (R : A → A → Prop) variable [decR : decidable_rel R] include decR definition min_core : list A → A → A | [] a := a | (b::l) a := if R b a then min_core l b else min_core l a definition min : Π (l : list A), l ≠ nil → A | [] h := absurd rfl h | (a::l) h := min_core R l a variable [decA : decidable_eq A] include decA variable {R} variables (to : total R) (tr : transitive R) (rf : reflexive R) lemma min_core_lemma : ∀ {b l} a, b ∈ l ∨ b = a → R (min_core R l a) b | b [] a h := or.elim h (suppose b ∈ [], absurd this !not_mem_nil) (suppose b = a, have R a a, from rf a, begin subst b, unfold min_core, assumption end) | b (c::l) a h := or.elim h (suppose b ∈ c :: l, or.elim (eq_or_mem_of_mem_cons this) (suppose b = c, or.elim (em (R c a)) (suppose R c a, have R (min_core R l b) b, from min_core_lemma _ (or.inr rfl), begin unfold min_core, rewrite [if_pos `R c a`], subst c, assumption end) (suppose ¬ R c a, have R a c, from or_resolve_right (to c a) this, have R (min_core R l a) a, from min_core_lemma _ (or.inr rfl), have R (min_core R l a) c, from tr this `R a c`, begin unfold min_core, rewrite [if_neg `¬ R c a`], subst b, exact `R (min_core R l a) c` end)) (suppose b ∈ l, or.elim (em (R c a)) (suppose R c a, have R (min_core R l c) b, from min_core_lemma _ (or.inl `b ∈ l`), begin unfold min_core, rewrite [if_pos `R c a`], assumption end) (suppose ¬ R c a, have R (min_core R l a) b, from min_core_lemma _ (or.inl `b ∈ l`), begin unfold min_core, rewrite [if_neg `¬ R c a`], assumption end))) (suppose b = a, have R (min_core R l a) b, from min_core_lemma _ (or.inr this), or.elim (em (R c a)) (suppose R c a, have R (min_core R l c) c, from min_core_lemma _ (or.inr rfl), have R (min_core R l c) a, from tr this `R c a`, begin unfold min_core, rewrite [if_pos `R c a`], subst b, exact `R (min_core R l c) a` end) (suppose ¬ R c a, begin unfold min_core, rewrite [if_neg `¬ R c a`], assumption end)) lemma min_core_le_of_mem {b : A} {l : list A} (a : A) : b ∈ l → R (min_core R l a) b := assume h : b ∈ l, min_core_lemma to tr rf a (or.inl h) lemma min_core_le {l : list A} (a : A) : R (min_core R l a) a := min_core_lemma to tr rf a (or.inr rfl) lemma min_lemma : ∀ {l} (h : l ≠ nil), all l (R (min R l h)) | [] h := absurd rfl h | (b::l) h := all_of_forall (take x, suppose x ∈ b::l, or.elim (eq_or_mem_of_mem_cons this) (suppose x = b, have R (min_core R l b) b, from min_core_le to tr rf b, begin subst x, unfold min, assumption end) (suppose x ∈ l, have R (min_core R l b) x, from min_core_le_of_mem to tr rf _ this, begin unfold min, assumption end)) variable (R) lemma min_core_mem : ∀ l a, min_core R l a ∈ l ∨ min_core R l a = a | [] a := or.inr rfl | (b::l) a := or.elim (em (R b a)) (suppose R b a, begin unfold min_core, rewrite [if_pos `R b a`], apply or.elim (min_core_mem l b), suppose min_core R l b ∈ l, or.inl (mem_cons_of_mem _ this), suppose min_core R l b = b, by rewrite this; exact or.inl !mem_cons end) (suppose ¬ R b a, begin unfold min_core, rewrite [if_neg `¬ R b a`], apply or.elim (min_core_mem l a), suppose min_core R l a ∈ l, or.inl (mem_cons_of_mem _ this), suppose min_core R l a = a, or.inr this end) lemma min_mem : ∀ (l : list A) (h : l ≠ nil), min R l h 
∈ l | [] h := absurd rfl h | (a::l) h := begin unfold min, apply or.elim (min_core_mem R l a), suppose min_core R l a ∈ l, mem_cons_of_mem _ this, suppose min_core R l a = a, by rewrite this; apply mem_cons end lemma min_map (f : B → A) {l : list B} (h : l ≠ nil) : all l (λ b, (R (min R (map f l) (map_ne_nil_of_ne_nil _ h))) (f b)):= using to tr rf, begin apply all_of_forall, intro b Hb, have Hfa : all (map f l) (R (min R (map f l) (map_ne_nil_of_ne_nil _ h))), from min_lemma to tr rf _, have Hfb : f b ∈ map f l, from mem_map _ Hb, exact of_mem_of_all Hfb Hfa end lemma min_map_all (f : B → A) {l : list B} (h : l ≠ nil) {b : B} (Hb : b ∈ l) : R (min R (map f l) ((map_ne_nil_of_ne_nil _ h))) (f b) := of_mem_of_all Hb (min_map _ to tr rf f h) omit decR private lemma ne_nil {l : list A} {n : nat} : length l = succ n → l ≠ nil := assume h₁ h₂, by rewrite h₂ at h₁; contradiction include decR lemma sort_aux_lemma {l n} (h : length l = succ n) : length (erase (min R l (ne_nil h)) l) = n := have min R l _ ∈ l, from min_mem R l (ne_nil h), have length (erase (min R l _) l) = pred (length l), from length_erase_of_mem this, by rewrite h at this; exact this definition sort_aux : Π (n : nat) (l : list A), length l = n → list A | 0 l h := [] | (succ n) l h := let m := min R l (ne_nil h) in let l₁ := erase m l in m :: sort_aux n l₁ (sort_aux_lemma R h) definition sort (l : list A) : list A := sort_aux R (length l) l rfl open perm lemma sort_aux_perm : ∀ {n : nat} {l : list A} (h : length l = n), sort_aux R n l h ~ l | 0 l h := by rewrite [↑sort_aux, eq_nil_of_length_eq_zero h] | (succ n) l h := let m := min R l (ne_nil h) in have leq : length (erase m l) = n, from sort_aux_lemma R h, calc m :: sort_aux R n (erase m l) leq ~ m :: erase m l : perm.skip m (sort_aux_perm leq) ... ~ l : perm_erase (min_mem _ _ _) lemma sort_perm (l : list A) : sort R l ~ l := sort_aux_perm R rfl lemma strongly_sorted_sort_aux : ∀ {n : nat} {l : list A} (h : length l = n), strongly_sorted R (sort_aux R n l h) | 0 l h := !strongly_sorted.base | (succ n) l h := let m := min R l (ne_nil h) in have leq : length (erase m l) = n, from sort_aux_lemma R h, have ss : strongly_sorted R (sort_aux R n (erase m l) leq), from strongly_sorted_sort_aux leq, have all l (R m), from min_lemma to tr rf (ne_nil h), have hall : all (sort_aux R n (erase m l) leq) (R m), from all_of_forall (take x, suppose x ∈ sort_aux R n (erase m l) leq, have x ∈ erase m l, from mem_perm (sort_aux_perm R leq) this, have x ∈ l, from mem_of_mem_erase this, show R m x, from of_mem_of_all this `all l (R m)`), strongly_sorted.step hall ss variable {R} lemma strongly_sorted_sort_core (to : total R) (tr : transitive R) (rf : reflexive R) (l : list A) : strongly_sorted R (sort R l) := @strongly_sorted_sort_aux _ _ _ _ to tr rf (length l) l rfl lemma sort_eq_of_perm_core {l₁ l₂ : list A} (to : total R) (tr : transitive R) (rf : reflexive R) (asy : anti_symmetric R) (h : l₁ ~ l₂) : sort R l₁ = sort R l₂ := have s₁ : sorted R (sort R l₁), from sorted_of_strongly_sorted (strongly_sorted_sort_core to tr rf l₁), have s₂ : sorted R (sort R l₂), from sorted_of_strongly_sorted (strongly_sorted_sort_core to tr rf l₂), have p : sort R l₁ ~ sort R l₂, from calc sort R l₁ ~ l₁ : sort_perm ... ~ l₂ : h ... 
~ sort R l₂ : sort_perm, eq_of_sorted_of_perm tr asy p s₁ s₂ section omit decR lemma strongly_sorted_sort [decidable_linear_order A] (l : list A) : strongly_sorted le (sort le l) := strongly_sorted_sort_core le.total (@le.trans A _) le.refl l lemma sort_eq_of_perm {l₁ l₂ : list A} [decidable_linear_order A] (h : l₁ ~ l₂) : sort le l₁ = sort le l₂ := sort_eq_of_perm_core le.total (@le.trans A _) le.refl (@le.antisymm A _) h end end list
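The sort just verified is selection sort: min_core folds R over the list to pick a minimal element, sort_aux conses it onto the recursive sort of the list with one occurrence erased, and sort_perm / strongly_sorted_sort record permutation and sortedness. Below is a minimal Python sketch of the same recursion, assuming R is a total, transitive, reflexive relation passed as a two-argument predicate; the names are illustrative, not taken from the Lean source.

def min_core(R, l, a):
    # Lean min_core: fold over l, replacing the accumulator a by b whenever R b a.
    for b in l:
        if R(b, a):
            a = b
    return a

def sort(R, l):
    # Lean sort_aux: take min R l of a nonempty list, erase its first
    # occurrence, and cons it onto the sort of the rest; [] sorts to [].
    l = list(l)
    out = []
    while l:
        m = min_core(R, l[1:], l[0])  # Lean min on (a :: rest) is min_core R rest a
        l.remove(m)                   # list.erase removes the first occurrence
        out.append(m)
    return out

print(sort(lambda x, y: x <= y, [3, 1, 2, 1]))  # [1, 1, 2, 3]

The while loop replaces the structural recursion on length used by sort_aux; the result is a permutation of the input and sorted whenever R is total, transitive, and reflexive, matching the hypotheses threaded through the lemmas above.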
[STATEMENT] lemma wset_final_okI: "(\<And>t w. ws t = \<lfloor>w\<rfloor> \<Longrightarrow> \<exists>x ln. ts t = \<lfloor>(x, ln)\<rfloor> \<and> \<not> final x) \<Longrightarrow> wset_final_ok ws ts" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>t w. ws t = \<lfloor>w\<rfloor> \<Longrightarrow> \<exists>x ln. ts t = \<lfloor>(x, ln)\<rfloor> \<and> \<not> final x) \<Longrightarrow> wset_final_ok ws ts [PROOF STEP] unfolding wset_final_ok_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<And>t w. ws t = \<lfloor>w\<rfloor> \<Longrightarrow> \<exists>x ln. ts t = \<lfloor>(x, ln)\<rfloor> \<and> \<not> final x) \<Longrightarrow> \<forall>t\<in>dom ws. \<exists>x ln. ts t = \<lfloor>(x, ln)\<rfloor> \<and> \<not> final x [PROOF STEP] by(blast)
@kernel function polar_kernel(n::Int, nlines::Int, line_start::Int, scale::Float64, u_curr, v_curr, l_curr, rho, shift_lines::Int, param, _YffR, _YffI, _YftR, _YftI, _YttR, _YttI, _YtfR, _YtfI, frBound, toBound) I = @index(Group, Linear) J = @index(Local, Linear) id_line = I + shift_lines x = @localmem Float64 (4,) xl = @localmem Float64 (4,) xu = @localmem Float64 (4,) @inbounds begin YffR = _YffR[id_line]; YffI = _YffI[id_line] YftR = _YftR[id_line]; YftI = _YftI[id_line] YttR = _YttR[id_line]; YttI = _YttI[id_line] YtfR = _YtfR[id_line]; YtfI = _YtfI[id_line] pij_idx = line_start + 8*(I-1) xl[1] = sqrt(frBound[2*(id_line-1)+1]) xu[1] = sqrt(frBound[2*id_line]) xl[2] = sqrt(toBound[2*(id_line-1)+1]) xu[2] = sqrt(toBound[2*id_line]) xl[3] = -2*pi xu[3] = 2*pi xl[4] = -2*pi xu[4] = 2*pi x[1] = min(xu[1], max(xl[1], sqrt(u_curr[pij_idx+4]))) x[2] = min(xu[2], max(xl[2], sqrt(u_curr[pij_idx+5]))) x[3] = min(xu[3], max(xl[3], u_curr[pij_idx+6])) x[4] = min(xu[4], max(xl[4], u_curr[pij_idx+7])) param[1,id_line] = l_curr[pij_idx] param[2,id_line] = l_curr[pij_idx+1] param[3,id_line] = l_curr[pij_idx+2] param[4,id_line] = l_curr[pij_idx+3] param[5,id_line] = l_curr[pij_idx+4] param[6,id_line] = l_curr[pij_idx+5] param[7,id_line] = l_curr[pij_idx+6] param[8,id_line] = l_curr[pij_idx+7] param[9,id_line] = rho[pij_idx] param[10,id_line] = rho[pij_idx+1] param[11,id_line] = rho[pij_idx+2] param[12,id_line] = rho[pij_idx+3] param[13,id_line] = rho[pij_idx+4] param[14,id_line] = rho[pij_idx+5] param[15,id_line] = rho[pij_idx+6] param[16,id_line] = rho[pij_idx+7] param[17,id_line] = v_curr[pij_idx] param[18,id_line] = v_curr[pij_idx+1] param[19,id_line] = v_curr[pij_idx+2] param[20,id_line] = v_curr[pij_idx+3] param[21,id_line] = v_curr[pij_idx+4] param[22,id_line] = v_curr[pij_idx+5] param[23,id_line] = v_curr[pij_idx+6] param[24,id_line] = v_curr[pij_idx+7] @synchronize status, minor_iter = tron_kernel(n, shift_lines, 500, 200, 1e-6, scale, true, x, xl, xu, param, YffR, YffI, YftR, YftI, YttR, YttI, YtfR, YtfI, I, J) vi_vj_cos = x[1]*x[2]*cos(x[3] - x[4]) vi_vj_sin = x[1]*x[2]*sin(x[3] - x[4]) u_curr[pij_idx] = YffR*x[1]^2 + YftR*vi_vj_cos + YftI*vi_vj_sin u_curr[pij_idx+1] = -YffI*x[1]^2 - YftI*vi_vj_cos + YftR*vi_vj_sin u_curr[pij_idx+2] = YttR*x[2]^2 + YtfR*vi_vj_cos - YtfI*vi_vj_sin u_curr[pij_idx+3] = -YttI*x[2]^2 - YtfI*vi_vj_cos - YtfR*vi_vj_sin u_curr[pij_idx+4] = x[1]^2 u_curr[pij_idx+5] = x[2]^2 u_curr[pij_idx+6] = x[3] u_curr[pij_idx+7] = x[4] end end function polar_kernel_cpu(n::Int, nline::Int, line_start::Int, scale::Float64, u_curr::AbstractVector{Float64}, v_curr::AbstractVector{Float64}, l_curr::AbstractVector{Float64}, rho::AbstractVector{Float64}, shift::Int, param::Array{Float64}, YffR::Array{Float64}, YffI::Array{Float64}, YftR::Array{Float64}, YftI::Array{Float64}, YttR::Array{Float64}, YttI::Array{Float64}, YtfR::Array{Float64}, YtfI::Array{Float64}, frBound::Array{Float64}, toBound::Array{Float64}) avg_minor_it = 0 x = zeros(n) xl = zeros(n) xu = zeros(n) xl[3] = -2*pi xu[3] = 2*pi xl[4] = -2*pi xu[4] = 2*pi @inbounds for I=1:nline pij_idx = line_start + 8*(I-1) id_line = shift + I xl[1] = sqrt(frBound[2*(id_line-1)+1]) xu[1] = sqrt(frBound[2*id_line]) xl[2] = sqrt(toBound[2*(id_line-1)+1]) xu[2] = sqrt(toBound[2*id_line]) x[1] = min(xu[1], max(xl[1], sqrt(u_curr[pij_idx+4]))) x[2] = min(xu[2], max(xl[2], sqrt(u_curr[pij_idx+5]))) x[3] = min(xu[3], max(xl[3], u_curr[pij_idx+6])) x[4] = min(xu[4], max(xl[4], u_curr[pij_idx+7])) param[1,id_line] = l_curr[pij_idx] 
param[2,id_line] = l_curr[pij_idx+1] param[3,id_line] = l_curr[pij_idx+2] param[4,id_line] = l_curr[pij_idx+3] param[5,id_line] = l_curr[pij_idx+4] param[6,id_line] = l_curr[pij_idx+5] param[7,id_line] = l_curr[pij_idx+6] param[8,id_line] = l_curr[pij_idx+7] param[9,id_line] = rho[pij_idx] param[10,id_line] = rho[pij_idx+1] param[11,id_line] = rho[pij_idx+2] param[12,id_line] = rho[pij_idx+3] param[13,id_line] = rho[pij_idx+4] param[14,id_line] = rho[pij_idx+5] param[15,id_line] = rho[pij_idx+6] param[16,id_line] = rho[pij_idx+7] param[17,id_line] = v_curr[pij_idx] param[18,id_line] = v_curr[pij_idx+1] param[19,id_line] = v_curr[pij_idx+2] param[20,id_line] = v_curr[pij_idx+3] param[21,id_line] = v_curr[pij_idx+4] param[22,id_line] = v_curr[pij_idx+5] param[23,id_line] = v_curr[pij_idx+6] param[24,id_line] = v_curr[pij_idx+7] function eval_f_cb(x) f = eval_f_polar_kernel_cpu(id_line, scale, x, param, YffR, YffI, YftR, YftI, YttR, YttI, YtfR, YtfI) return f end function eval_g_cb(x, g) eval_grad_f_polar_kernel_cpu(id_line, scale, x, g, param, YffR, YffI, YftR, YftI, YttR, YttI, YtfR, YtfI) return end function eval_h_cb(x, mode, rows, cols, _scale, lambda, values) eval_h_polar_kernel_cpu(id_line, x, mode, scale, rows, cols, lambda, values, param, YffR, YffI, YftR, YftI, YttR, YttI, YtfR, YtfI) return end nele_hess = 10 tron = ExaTronKernels.createProblem(4, xl, xu, nele_hess, eval_f_cb, eval_g_cb, eval_h_cb; :tol => 1e-6, :matrix_type => :Dense, :max_minor => 200, :frtol => 1e-12) tron.x .= x status = ExaTronKernels.solveProblem(tron) x .= tron.x avg_minor_it += tron.minor_iter cos_ij = cos(x[3] - x[4]) sin_ij = sin(x[3] - x[4]) vi_vj_cos = x[1]*x[2]*cos_ij vi_vj_sin = x[1]*x[2]*sin_ij u_curr[pij_idx] = YffR[id_line]*x[1]^2 + YftR[id_line]*vi_vj_cos + YftI[id_line]*vi_vj_sin u_curr[pij_idx+1] = -YffI[id_line]*x[1]^2 - YftI[id_line]*vi_vj_cos + YftR[id_line]*vi_vj_sin u_curr[pij_idx+2] = YttR[id_line]*x[2]^2 + YtfR[id_line]*vi_vj_cos - YtfI[id_line]*vi_vj_sin u_curr[pij_idx+3] = -YttI[id_line]*x[2]^2 - YtfI[id_line]*vi_vj_cos - YtfR[id_line]*vi_vj_sin u_curr[pij_idx+4] = x[1]^2 u_curr[pij_idx+5] = x[2]^2 u_curr[pij_idx+6] = x[3] u_curr[pij_idx+7] = x[4] end return 0, avg_minor_it / nline end
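Both the GPU kernel and the CPU fallback use the same data layout: each line owns eight consecutive u_curr slots (the four branch flows followed by |v_i|^2, |v_j|^2, theta_i, theta_j), and a 24-slot param column per line with l_curr in slots 1..8, rho in 9..16, and v_curr in 17..24 (reading these as ADMM multipliers, penalty weights, and consensus values is an interpretation; the slot layout itself is verbatim from the code). The final flow recovery is the same four polar expressions in both kernels; here is a standalone Python transcription of just that step, a sketch mirroring the update rather than part of the ExaTronKernels API.

import math

def branch_flows(vi, vj, ti, tj,
                 YffR, YffI, YftR, YftI,
                 YttR, YttI, YtfR, YtfI):
    # Mirrors the u_curr updates at the end of polar_kernel / polar_kernel_cpu.
    c = vi * vj * math.cos(ti - tj)   # vi_vj_cos
    s = vi * vj * math.sin(ti - tj)   # vi_vj_sin
    p_ij = YffR * vi**2 + YftR * c + YftI * s
    q_ij = -YffI * vi**2 - YftI * c + YftR * s
    p_ji = YttR * vj**2 + YtfR * c - YtfI * s
    q_ji = -YttI * vj**2 - YtfI * c - YtfR * s
    return p_ij, q_ij, p_ji, q_ji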
import category_theory.follow_your_nose universes u₁ v₁ open category_theory open opposite namespace terse variables (C : Type u₁) [𝒞 : category.{v₁+1} C] include 𝒞 def yoneda : C ⥤ ((Cᵒᵖ) ⥤ Type v₁) := ƛ X, ƛ Y, (unop Y) ⟶ X. def yoneda_evaluation : ((Cᵒᵖ) × ((Cᵒᵖ) ⥤ (Type v₁))) ⥤ (Type (max u₁ v₁)) := (evaluation_uncurried (Cᵒᵖ) (Type v₁)) ⋙ ulift_functor.{u₁} @[simp] lemma yoneda_evaluation_map_down (P Q : (Cᵒᵖ) × (Cᵒᵖ ⥤ Type v₁)) (α : P ⟶ Q) (x : (yoneda_evaluation C).obj P) : ((yoneda_evaluation C).map α x).down = (α.2).app (Q.1) ((P.2).map (α.1) (x.down)) := rfl def yoneda_pairing : ((Cᵒᵖ) × ((Cᵒᵖ) ⥤ (Type v₁))) ⥤ (Type (max u₁ v₁)) := (functor.prod ((yoneda C).op) (functor.id ((Cᵒᵖ) ⥤ (Type v₁)))) ⋙ (functor.hom ((Cᵒᵖ) ⥤ (Type v₁))) @[simp] lemma yoneda_pairing_map (P Q : (Cᵒᵖ) × (Cᵒᵖ ⥤ Type v₁)) (α : P ⟶ Q) (β : (yoneda_pairing C).obj P) : (yoneda_pairing C).map α β = (yoneda C).map (α.1.unop) ≫ β ≫ α.2 := rfl def yoneda_lemma : (yoneda_pairing C) ≅ (yoneda_evaluation C) := { hom := { app := λ F x, ulift.up ((x.app F.1) (𝟙 (unop F.1))) }, inv := { app := λ F x, { app := λ X a, (F.2.map a.op) x.down } } }. end terse
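yoneda_lemma above is the Yoneda lemma in uncurried form: yoneda_pairing sends a pair (X, F) to the type of natural transformations out of the representable functor, yoneda_evaluation sends it to F(X) lifted one universe up, and the isomorphism between the two functors states, naturally in both arguments,

\[
\mathrm{Nat}\big(\mathrm{Hom}_C(-, X),\, F\big) \;\cong\; F(X),
\]

with the forward map evaluating a transformation at the identity (exactly the hom component, x maps to x.app X applied to 𝟙 X) and the inverse sending u : F(X) to the transformation a maps to F(a)(u), as in the inv component.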
State Before: M : Type ?u.23867 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_1 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun i => getD (xs ++ [x]) i 0 ≠ 0 inst✝ : DecidablePred fun i => getD xs i 0 ≠ 0 ⊢ toFinsupp (xs ++ [x]) = toFinsupp xs + Finsupp.single (length xs) x State After: no goals Tactic: classical rw [toFinsupp_append, toFinsupp_singleton, Finsupp.embDomain_single, addLeftEmbedding_apply, add_zero] State Before: M : Type ?u.23867 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_1 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun i => getD (xs ++ [x]) i 0 ≠ 0 inst✝ : DecidablePred fun i => getD xs i 0 ≠ 0 ⊢ toFinsupp (xs ++ [x]) = toFinsupp xs + Finsupp.single (length xs) x State After: no goals Tactic: rw [toFinsupp_append, toFinsupp_singleton, Finsupp.embDomain_single, addLeftEmbedding_apply, add_zero]
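In symbols, the goal closed here says that appending one element contributes a single finitely supported function at the old length:

\[
\mathrm{toFinsupp}(xs \mathbin{+\!\!+} [x]) \;=\; \mathrm{toFinsupp}(xs) + \mathrm{single}(|xs|,\, x),
\]

and the rewrite chain is the whole proof: toFinsupp_append splits the concatenation, toFinsupp_singleton turns [x] into single 0 x, and embDomain_single with addLeftEmbedding_apply re-indexes that single from 0 to length xs + 0, which add_zero normalizes.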
From Test Require Import tactic. Section FOFProblem. Variable Universe : Set. Variable UniverseElement : Universe. Variable wd_ : Universe -> Universe -> Prop. Variable col_ : Universe -> Universe -> Universe -> Prop. Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)). Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)). Variable col_triv_3 : (forall A B : Universe, col_ A B B). Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)). Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)). Theorem pipo_6 : (forall A B C Aprime Bprime Cprime O : Universe, ((wd_ B O /\ (wd_ C O /\ (wd_ Bprime O /\ (wd_ Cprime O /\ (wd_ A O /\ (wd_ Aprime O /\ (wd_ A B /\ (wd_ A Aprime /\ (col_ O A B /\ (col_ O B C /\ (col_ O Aprime Bprime /\ (col_ O Bprime Cprime /\ col_ O A C)))))))))))) -> col_ O Aprime Cprime)). Proof. time tac. Qed. End FOFProblem.
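One derivation the tactic can find, reconstructed by hand (the proof script itself only calls the tactic): from wd Bprime O and wd_swap we get wd O Bprime; col_triv and col_swap1 give col O Bprime O; from the hypothesis col O Aprime Bprime, two applications of col_swap2 followed by col_swap1 yield col O Bprime Aprime; together with the hypothesis col O Bprime Cprime, col_trans with P := O, Q := Bprime applied to the triple (O, Aprime, Cprime) gives col O Aprime Cprime.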
As the seasons change, some people face more financial hardship than others. Families struggle to pay for higher bills, like heating, on top of the cost of groceries, warm clothing, and big meals or gifts to celebrate holiday traditions. Because of this, some families are more stressed than they are happy this time of year. There are ways to give back so that everyone can experience the joy that this season brings.

n:p beautiful is a philanthropic company that donates 10 percent of its net proceeds to organizations that support pediatric cancer research and animal abuse prevention. When you purchase our products, you're helping give back to those struggling or those who don't have a voice to stand up for themselves. This holiday season, we encourage you to give back in other ways as well.

As we said, many families struggle to buy groceries and prepare large meals during the holidays. One way to help is to participate in food drives. You can donate non-perishable food items like canned soups and vegetables or boxed foods. As the holidays approach, you can donate foods like a turkey or ham so families can celebrate traditions together. Food drives also need volunteers to help organize packages that go to those who need them. If you can't afford to buy extra groceries, consider volunteering your time instead.

We love kids and their families. As a philanthropic company, we give back to families struggling the most with sick children. But what about the healthy kids who are just trying to stay that way? Kids grow quickly, and a lot of the time the clothes that fit them one year don't fit them the next. This makes it difficult for parents trying to dress their children for the cold in the wintertime. If you have children or younger family members who have grown out of their clothing, consider donating it. Lightly worn long-sleeve shirts, pants, coats, hats, gloves, and boots are great items to donate. You can always purchase new clothing as well; items like socks and underwear are necessities for children, but they should be donated new. This type of organization could also use volunteers, whether you donated or not. You can help sort clothing items or make deliveries.

Parents want to give their children the world, but sometimes they aren't able to provide financially for any more than their basic needs. It can be difficult for a child to go to school and see all of their friends with new clothes and fun toys during the holidays, even if their parents are doing the best that they can. No, toys aren't exactly a necessity; however, they do facilitate the development of children's social, problem-solving, and creative skills. A lot of organizations hold toy drives during the holiday season to help give toys and games to children in need. Many accept any toys or games, but many also have a system set up where you can choose the age and gender of a specific child in need in order to buy an appropriate gift just for them. This makes the donation feel more personal and a lot like a Secret Santa! Organizations that hold toy drives for the holidays need volunteers too. If you're a pro at wrapping gifts, this is a great opportunity to lend your talents.

These options are great for the holiday season, but they could also be ways to give back to your community any time of year. Some of these require getting involved with charities and organizations, while others just require you to take the extra step as a neighbor, friend, or family member.
The holiday season is about being thankful for your loved ones and giving to the people and causes you care about. n:p beautiful is a philanthropic company that supports causes that help sick children and their families, as well as causes that protect animals. What causes do you care about? Get involved today.
[STATEMENT] lemma (in PolynRg) low_deg_terms_zeroTr:" pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = (X^\<^bsup>R j\<^esup>) \<cdot>\<^sub>r (polyn_expr R X n (n, f))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f) [PROOF STEP] apply (cut_tac ring_is_ag, cut_tac X_mem_R, frule npClose[of "X" "j"]) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R\<rbrakk> \<Longrightarrow> pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f) [PROOF STEP] apply (induct_tac n) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R\<rbrakk> \<Longrightarrow> pol_coeff S (0, f) \<longrightarrow> polyn_expr R X (0 + j) (ext_cf S j (0, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X 0 (0, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (rule impI, simp) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f)\<rbrakk> \<Longrightarrow> polyn_expr R X j (ext_cf S j (0, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X 0 (0, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (case_tac "j = 0", simp add:ext_cf_def sliden_def polyn_expr_def) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>aGroup R; X \<in> carrier R; 1\<^sub>r \<in> carrier R; pol_coeff S (0, f); j = 0\<rbrakk> \<Longrightarrow> f 0 \<cdot>\<^sub>r 1\<^sub>r = 1\<^sub>r \<cdot>\<^sub>r (f 0 \<cdot>\<^sub>r 1\<^sub>r) 2. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); j \<noteq> 0\<rbrakk> \<Longrightarrow> polyn_expr R X j (ext_cf S j (0, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X 0 (0, f) 3. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (frule_tac c = "(0, f)" and j = 0 in pol_coeff_mem_R, simp, simp) [PROOF STATE] proof (prove) goal (3 subgoals): 1. 
\<lbrakk>aGroup R; X \<in> carrier R; 1\<^sub>r \<in> carrier R; pol_coeff S (0, f); j = 0; f 0 \<in> carrier R\<rbrakk> \<Longrightarrow> f 0 \<cdot>\<^sub>r 1\<^sub>r = 1\<^sub>r \<cdot>\<^sub>r (f 0 \<cdot>\<^sub>r 1\<^sub>r) 2. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); j \<noteq> 0\<rbrakk> \<Longrightarrow> polyn_expr R X j (ext_cf S j (0, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X 0 (0, f) 3. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (simp add:ring_r_one ring_l_one) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); j \<noteq> 0\<rbrakk> \<Longrightarrow> polyn_expr R X j (ext_cf S j (0, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X 0 (0, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (cut_tac polyn_Suc[of "j - Suc 0" "ext_cf S j (0, f)"], simp del:npow_suc) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); 0 < j; polyn_expr R X j (j, snd (ext_cf S j (0, f))) = polyn_expr R X (j - Suc 0) (ext_cf S j (0, f)) \<plusminus> snd (ext_cf S j (0, f)) j \<cdot>\<^sub>r X^\<^bsup>R j\<^esup>\<rbrakk> \<Longrightarrow> polyn_expr R X j (ext_cf S j (0, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X 0 (0, f) 2. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); j \<noteq> 0\<rbrakk> \<Longrightarrow> Suc (j - Suc 0) \<le> fst (ext_cf S j (0, f)) 3. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (frule ext_cf_len[of "(0, f)" j], cut_tac polyn_expr_split[of j "ext_cf S j (0, f)"], simp, thin_tac "polyn_expr R X j (ext_cf S j (0, f)) = polyn_expr R X (j - Suc 0) (ext_cf S j (0, f)) \<plusminus> snd (ext_cf S j (0, f)) j \<cdot>\<^sub>r X^\<^bsup>R j\<^esup>") [PROOF STATE] proof (prove) goal (3 subgoals): 1. 
\<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); 0 < j; polyn_expr R X j (j, snd (ext_cf S j (0, f))) = polyn_expr R X (j - Suc 0) (ext_cf S j (0, f)) \<plusminus> snd (ext_cf S j (0, f)) j \<cdot>\<^sub>r X^\<^bsup>R j\<^esup>; fst (ext_cf S j (0, f)) = j\<rbrakk> \<Longrightarrow> polyn_expr R X (j - Suc 0) (ext_cf S j (0, f)) \<plusminus> snd (ext_cf S j (0, f)) j \<cdot>\<^sub>r X^\<^bsup>R j\<^esup> = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X 0 (0, f) 2. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); j \<noteq> 0\<rbrakk> \<Longrightarrow> Suc (j - Suc 0) \<le> fst (ext_cf S j (0, f)) 3. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (simp add:polyn_ext_cf_lo_zero[of "(0, f)" j], thin_tac "polyn_expr R X j (j, snd (ext_cf S j (0, f))) = \<zero> \<plusminus> snd (ext_cf S j (0, f)) j \<cdot>\<^sub>r X^\<^bsup>R j\<^esup>", frule ext_cf_hi[THEN sym, of "(0, f)" j], simp add:polyn_expr_def) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); 0 < j; fst (ext_cf S j (0, f)) = j; snd (ext_cf S j (0, f)) j = f 0\<rbrakk> \<Longrightarrow> \<zero> \<plusminus> f 0 \<cdot>\<^sub>r X^\<^bsup>R j\<^esup> = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r (f 0 \<cdot>\<^sub>r 1\<^sub>r) 2. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); j \<noteq> 0\<rbrakk> \<Longrightarrow> Suc (j - Suc 0) \<le> fst (ext_cf S j (0, f)) 3. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (frule_tac c = "(0, f)" and j = 0 in pol_coeff_mem_R, simp, simp) [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); 0 < j; fst (ext_cf S j (0, f)) = j; snd (ext_cf S j (0, f)) j = f 0; f 0 \<in> carrier R\<rbrakk> \<Longrightarrow> \<zero> \<plusminus> f 0 \<cdot>\<^sub>r X^\<^bsup>R j\<^esup> = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r (f 0 \<cdot>\<^sub>r 1\<^sub>r) 2. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); j \<noteq> 0\<rbrakk> \<Longrightarrow> Suc (j - Suc 0) \<le> fst (ext_cf S j (0, f)) 3. \<And>n. 
\<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (subst aGroup.ag_l_zero, assumption, simp add:ring_tOp_closed, simp add:ring_r_one, subst ring_tOp_commute, assumption+, simp) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (0, f); j \<noteq> 0\<rbrakk> \<Longrightarrow> Suc (j - Suc 0) \<le> fst (ext_cf S j (0, f)) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (simp add:ext_cf_len) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)\<rbrakk> \<Longrightarrow> pol_coeff S (Suc n, f) \<longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) [PROOF STEP] apply (rule impI, cut_tac subring, cut_tac subring_Ring[of S], frule_tac n = n in pol_coeff_pre[of _ "f"]) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f)\<rbrakk> \<Longrightarrow> polyn_expr R X (Suc n + j) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply simp [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f)\<rbrakk> \<Longrightarrow> polyn_expr R X (Suc (n + j)) (ext_cf S j (Suc n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply (subst polyn_expr_split) [PROOF STATE] proof (prove) goal (2 subgoals): 1. 
\<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f)\<rbrakk> \<Longrightarrow> polyn_expr R X (Suc (n + j)) (fst (ext_cf S j (Suc n, f)), snd (ext_cf S j (Suc n, f))) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply (cut_tac n = "n + j" and c = "ext_cf S j (Suc n, f)" in polyn_Suc, simp add:ext_cf_len) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f); polyn_expr R X (Suc (n + j)) (Suc (n + j), snd (ext_cf S j (Suc n, f))) = polyn_expr R X (n + j) (ext_cf S j (Suc n, f)) \<plusminus> snd (ext_cf S j (Suc n, f)) (Suc (n + j)) \<cdot>\<^sub>r X^\<^bsup>R Suc (n + j)\<^esup>\<rbrakk> \<Longrightarrow> polyn_expr R X (Suc (n + j)) (fst (ext_cf S j (Suc n, f)), snd (ext_cf S j (Suc n, f))) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply (subst ext_cf_len, assumption+, simp del:npow_suc add:add.commute[of j], thin_tac "polyn_expr R X (Suc (n + j)) (Suc (n + j), snd (ext_cf S j (Suc n, f))) = polyn_expr R X (n + j) (ext_cf S j (Suc n, f)) \<plusminus> snd (ext_cf S j (Suc n, f)) (Suc (n + j)) \<cdot>\<^sub>r X^\<^bsup>R (Suc (n + j))\<^esup>", subst ext_cf_inductTl, assumption+, simp del:npow_suc, thin_tac "polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)") [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f)\<rbrakk> \<Longrightarrow> X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f) \<plusminus> snd (ext_cf S j (Suc n, f)) (Suc (n + j)) \<cdot>\<^sub>r X^\<^bsup>R Suc (n + j)\<^esup> = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply (cut_tac c1 = "(Suc n, f)" and n1 = j in ext_cf_hi[THEN sym], assumption+, simp del:npow_suc add:add.commute[of j]) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>n. 
\<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f); snd (ext_cf S j (Suc n, f)) (Suc (n + j)) = f (Suc n)\<rbrakk> \<Longrightarrow> X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f) \<plusminus> f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc (n + j)\<^esup> = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X (Suc n) (Suc n, f) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply (cut_tac n = n and c = "(Suc n, f)" in polyn_Suc, simp, simp del:npow_suc) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f); snd (ext_cf S j (Suc n, f)) (Suc (n + j)) = f (Suc n); polyn_expr R X (Suc n) (Suc n, f) = polyn_expr R X n (Suc n, f) \<plusminus> f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc n\<^esup>\<rbrakk> \<Longrightarrow> X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f) \<plusminus> f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc (n + j)\<^esup> = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r (polyn_expr R X n (Suc n, f) \<plusminus> f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc n\<^esup>) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply (frule_tac c = "(Suc n, f)" and k = n in polyn_mem, simp, frule_tac c = "(Suc n, f)" and j = "Suc n" in pol_coeff_mem_R, simp, simp del:npow_suc, frule_tac x = "f (Suc n)" and y = "X^\<^bsup>R (Suc n)\<^esup>" in ring_tOp_closed, rule npClose, assumption, subst ring_distrib1, assumption+) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f); snd (ext_cf S j (Suc n, f)) (Suc (n + j)) = f (Suc n); polyn_expr R X (Suc n) (Suc n, f) = polyn_expr R X n (Suc n, f) \<plusminus> f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc n\<^esup>; polyn_expr R X n (Suc n, f) \<in> carrier R; f (Suc n) \<in> carrier R; f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc n\<^esup> \<in> carrier R\<rbrakk> \<Longrightarrow> X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f) \<plusminus> f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc (n + j)\<^esup> = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (Suc n, f) \<plusminus> X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r (f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc n\<^esup>) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply (subst polyn_expr_restrict, assumption+) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>n. 
\<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (Suc n, f); Subring R S; Ring S; pol_coeff S (n, f); snd (ext_cf S j (Suc n, f)) (Suc (n + j)) = f (Suc n); polyn_expr R X (Suc n) (Suc n, f) = polyn_expr R X n (Suc n, f) \<plusminus> f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc n\<^esup>; polyn_expr R X n (Suc n, f) \<in> carrier R; f (Suc n) \<in> carrier R; f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc n\<^esup> \<in> carrier R\<rbrakk> \<Longrightarrow> X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f) \<plusminus> f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc (n + j)\<^esup> = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f) \<plusminus> X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r (f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R Suc n\<^esup>) 2. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply (rule_tac a = "f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R (Suc (n + j))\<^esup> " and b = "X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r (f (Suc n) \<cdot>\<^sub>r X^\<^bsup>R (Suc n)\<^esup>)" and c = "X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f)" in aGroup.ag_pOp_add_l, assumption+, rule ring_tOp_closed, assumption+, rule npClose, assumption, (rule ring_tOp_closed, assumption+)+, simp add:polyn_mem, frule_tac n = "Suc n" in npClose[of X], subst ring_tOp_assoc[THEN sym], assumption+, subst ring_tOp_commute[of "X^\<^bsup>R j\<^esup>"], assumption, simp add:pol_coeff_mem, subst ring_tOp_assoc, assumption+, subst npMulDistr[of X], assumption, simp add:add.commute[of j]) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>n. \<lbrakk>aGroup R; X \<in> carrier R; X^\<^bsup>R j\<^esup> \<in> carrier R; pol_coeff S (n, f) \<longrightarrow> polyn_expr R X (n + j) (ext_cf S j (n, f)) = X^\<^bsup>R j\<^esup> \<cdot>\<^sub>r polyn_expr R X n (n, f); pol_coeff S (Suc n, f); Subring R S\<rbrakk> \<Longrightarrow> Subring R S [PROOF STEP] apply simp [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
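Informally (our paraphrase of the goal displayed above, not text from the proof script), `low_deg_terms_zeroTr` states that prepending $j$ zero coefficients to a coefficient sequence multiplies the represented polynomial by $X^j$: if $(n, f)$ is a coefficient sequence over the subring $S$, then
$$\mathrm{polyn\_expr}\;R\;X\;(n+j)\;(\mathrm{ext\_cf}\;S\;j\;(n,f)) \;=\; X^{j} \cdot_R \sum_{k=0}^{n} f_k \cdot_R X^{k},$$
where $\mathrm{ext\_cf}\;S\;j\;(n,f)$ is the length-$(n+j)$ coefficient sequence whose $k$-th entry is $0$ for $k < j$ and $f_{k-j}$ otherwise. As the transcript shows, the proof is by induction on $n$, using `polyn_ext_cf_lo_zero` to discharge the vanishing low-degree terms in the base case and `ext_cf_hi` to identify the top coefficient in the induction step.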
# Quadratic Programming

- Mathematical optimization problems with quadratic functions
- Developed in the 1950s
- Widely used in
    - Optimization of financial portfolios,
    - Image and signal processing,
    - Regression,
    - Scheduling in chemical plants, etc.
- Solution methods
    - Interior point,
    - Augmented Lagrange,
    - Gradient-based,
    - Extensions of the simplex algorithm.

## Problem Formulation

Our objective is to find $\mathbf{x}\in\mathbb{R}^n$ in the following problem:

\begin{align}
\text{minimize}\ & \frac{1}{2}\mathbf{x}^T Q \mathbf{x} + \mathbf{c}^T\mathbf{x}, \\
\text{subject to } & \\
& A\mathbf{x} \leq \mathbf{b},
\end{align}

where

- $\mathbf{c} \in \mathbb{R}^n$,
- $Q \in \mathbb{R}^{n \times n}$,
- $A \in \mathbb{R}^{m \times n}$,
- $\mathbf{b} \in \mathbb{R}^{m}$.

## Coding in Python

The model:

\begin{align}
\text{minimize}\ & x^2 + 2y^2 + \frac{1}{2}z^2, \\
\text{subject to } & \\
& x + 3y + 2z \geq 5, \\
& y + z \geq 2.5, \\
& x, y \geq 0, \\
& y \in \mathbb{Z}, \\
& z \in \{0, 1\}
\end{align}

### Step 1: Import Package

```python
from gurobipy import *
```

### Step 2: Create a model

```python
quadratic_model = Model('quadratic')
```

### Step 3: Define decision variables

```python
x = quadratic_model.addVar(vtype=GRB.CONTINUOUS, lb=0, name="x")
y = quadratic_model.addVar(vtype=GRB.INTEGER, lb=0, name="y")
z = quadratic_model.addVar(vtype=GRB.BINARY, name="z")
```

### Step 4: Define the objective function

```python
obj_fn = x**2 + 2*y**2 + 0.5*z**2
quadratic_model.setObjective(obj_fn, GRB.MINIMIZE)
```

### Step 5: Add constraints

```python
# x + 3y + 2z >= 5
quadratic_model.addConstr(x + 3*y + 2*z >= 5)

# y + z >= 2.5
quadratic_model.addConstr(y + z >= 2.5)
```

### Step 6: Solve model and output the result

```python
quadratic_model.setParam('OutputFlag', False)
quadratic_model.optimize()

print('Optimization is done. Objective Function Value: %.2f' % quadratic_model.objVal)
# Get values of the decision variables
for v in quadratic_model.getVars():
    print('%s: %g' % (v.varName, v.x))
```

### Extras: Update the type of a decision variable

Let us change the requirement of integrality on the decision variable $y$:

```python
y.vType = GRB.CONTINUOUS

quadratic_model.optimize()
print('Optimization is done. Objective Function Value: %.2f' % quadratic_model.objVal)
# Get values of the decision variables
for v in quadratic_model.getVars():
    print('%s: %g' % (v.varName, v.x))
```

### Extras: Add a quadratic constraint

Let us add a quadratic constraint: $x^2 \geq y^2 + z^2$

```python
quadratic_model.addConstr(z**2 + y**2 <= x**2)

quadratic_model.optimize()
print('Optimization is done. Objective Function Value: %.2f' % quadratic_model.objVal)
# Get values of the decision variables
for v in quadratic_model.getVars():
    print('%s: %g' % (v.varName, v.x))
```
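As a complement to the step-by-step model above, the generic formulation from the Problem Formulation section, minimize $\frac{1}{2}\mathbf{x}^T Q \mathbf{x} + \mathbf{c}^T\mathbf{x}$ subject to $A\mathbf{x} \leq \mathbf{b}$, can also be written with Gurobi's matrix API. The following is an illustrative sketch: the `Q`, `c`, `A`, `b` values are made-up placeholders, and it assumes gurobipy's NumPy interface (`Model.addMVar` and `@` products, available in gurobipy 9+):

```python
import numpy as np
from gurobipy import Model, GRB

# Hypothetical problem data (placeholders, not from the model above)
Q = np.array([[2.0, 0.0], [0.0, 4.0]])  # positive semidefinite => convex QP
c = np.array([-1.0, -2.0])
A = np.array([[1.0, 3.0], [0.0, 1.0]])
b = np.array([5.0, 2.5])

m = Model('matrix_qp')
x = m.addMVar(shape=2, lb=0.0, name="x")  # vector variable x >= 0

# 0.5 * x'Qx + c'x written with NumPy-style matrix products
m.setObjective(0.5 * (x @ Q @ x) + c @ x, GRB.MINIMIZE)
m.addConstr(A @ x <= b, name="ineq")  # Ax <= b, row-wise

m.optimize()
if m.status == GRB.OPTIMAL:
    print(m.objVal, x.X)
```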
If $S$ is a closed subset of a complete metric space $t$, then $S$ is complete.
MAY 3RD - 5TH WELCOME BACK CAMPERS!! This year our first weekend of camping once again coincides with CINCO DE MAYO!! Join us Friday Night for a "meet and greet" in the Lodge. We'll have some Chips and Salsa on hand and a Pinata for the kids. Take a fun family photo with some of our Cinco de Mayo props!! We will be making some maracas on Saturday morning and having a Special "South of the Border" bingo around noon. Looking for ideas for Bingo? How about a cactus, taco or fajita kits, some "Hot Tamales", chips and salsa, some avocados, a pinata, or a sombrero, "mustachios", etc. If all else fails....a candy bar will do! Every Saturday afternoon during our "Spring Fling" weekends, we invite all our kiddos and adults to join Mr. Marcondes at the basketball courts for some "Street Hockey"! There will be two sessions: 3:00 - 4:00 will be an ALL age session and 4:00 to 5:00 will be for ages 12 thru adults. How much fun does that sound?? Our "Welcome Back Campers' Pot Luck Supper" (Mexican food would be wonderful but not necessary) will be at 6:00 on Saturday night followed by a spectacular band, WELCOME BACK "Loose Screws", beginning at 7:30. LOOKING FORWARD TO ANOTHER EPIC SEASON!!

Saturday morning, we will have a special craft available for the kiddies to make for Mom in the Lodge! Saturday afternoon we will be hosting a "Paint and Pamper Party" for Mom in the Lodge with "Life Expressions Decor" (http://lifeexpressionsdecor.com). Additional vendors will also be on hand such as LuLaroe Apparel, Paparazzi Jewelry, Essential Oils, Heavenly Goddess Spa Products and more. Snacks provided but BYOB. While Mom is having her fun in the Lodge, Dad and the kids are invited to participate in a Fishing Derby or Kick Ball Game at the Ball Field. Moms also get a free cup of coffee and pancakes from the Coffee Shop Sunday morning! How much fun does that sound?? Happy Mother's Day Moms!

Pets camp FREE this weekend! We are planning a Pet Parade (COSTUMES OPTIONAL...but how cute would that be?) immediately following our Wake Up Charlie Flag Raising, Family Pet Photos (if you don't have a pet, Charlie will be more than happy to be in your Family Photo), a Pet Talent Show - start practicing now, and FRIDAY NIGHT..."PET SUPPLIES BINGO".... Purchase a card with any item relevant to a pet and we will create a "BARK/MEOW Box" which will be donated to a local Pet Shelter. Last year our "Bark/Meow Box" was donated to Swansea Animal Shelter and it was VERY MUCH APPRECIATED!! The campground will supply the prizes for the Lucky Winners. (Suggestions are available on the web site and on the Lodge Bulletin Board.) Every Saturday afternoon during our "Spring Fling" weekends, we invite all our kiddos and adults to join Mr. Marcondes at the basketball courts for some "Street Hockey"! There will be two sessions: 3:00-4:00 will be an ALL age session and 4:00 to 5:00 will be for ages 12 thru adults. Saturday night we WELCOME BACK A FANTASTIC BAND in the Lodge.... "FUNHOUSE"! Sounds like a "Paws-itively PURR-fect" Weekend!!

MAY 17TH - 19TH NATIONAL ARMED FORCES APPRECIATION WEEKEND! National Armed Forces Day is May 19TH and we would like to show our appreciation to all Military Personnel, past and present!! Special 20% discount on amenities and activities throughout the weekend and complimentary coffee in our coffee shop Saturday and Sunday morning for all military personnel past and present. Please be prepared to show your military card for discounts.
We have a special Bingo planned with all card purchases ($2.00 each) being donated to the "WOUNDED WARRIOR PROJECT". Maple Park will supply the prizes for the lucky Bingo winners. We also will be having our SECOND ANNUAL WALK/RUN OR RIDE FOR THE "WARRIORS". We are asking for a $5.00 donation for adult participants and $2.00 for kids. (Sorry folks...walkers, runners and bikes only. No golf carts.) The "track" will be approximately 1.5-2 miles throughout the campground. All donations from this event will also be donated to the "Wounded Warrior Project". 2018's donation was $300.00 to this GREAT organization!! For further information, please visit https://www.woundedwarriorproject.org/. Saturday night we will honor our Veterans, past and present, with a tribute in the Lodge followed by Mike Young's Band, "PRIDE AND JOY", a rhythm and blues revue! Also, every Saturday afternoon during our "Spring Fling" weekends, we invite all our kiddos and adults to join Mr. Marcondes at the basketball courts for some "Street Hockey"! There will be two sessions: 3:00-4:00 will be an ALL age session and 4:00 to 5:00 will be for ages 12 thru adults. How much fun does that sound??

Our first (unofficial) weekend of summer!! Join us Friday night at the outdoor theater for a "BLAST FROM THE PAST MOVIE". Saturday we'll be playing candy bingo, having a sand castle contest, ceramics, a scavenger hunt, and Saturday night $$ BINGO. Also, every Saturday afternoon during our "Spring Fling" weekends, we invite all our kiddos and adults to join Mr. Marcondes at the basketball courts for some "Street Hockey"! On "Sunday/Funday", join us for a fishing derby, a cook-out on the beach and our first of the season OUTDOOR DANCE with a live band....WELCOME BACK "UNPLUGGED"!!

MAY 31st - JUNE 2nd KARAOKE WEEKEND and OUR FOURTH ANNUAL SUPER SOUP CONTEST! Join us for our first of two Karaoke Weekends this summer! Friday night $$ Bingo. Saturday Night "Kiddie Karaoke" starts at 7:30, Adult Karaoke begins at 9:00. This event is so popular, and it always amazes us how much talent our Maple Park Families have! If singing isn't your passion but cooking is, we will be having our Fourth Annual "Super Soup" Contest at 3:00 on Saturday! The competition field gets bigger every year! Just make a pot of your very best soup and our judges will be on hand to pick the "Super Soup" winner of 2019. This year we will be awarding Maple Park Gift Certificates to the 1st Place ($50.00), 2nd Place ($30.00) and 3rd Place ($20.00) winners. After judging is over, we will invite our guests to sample the left-overs to see if they agree with the judges' decisions! Also, every Saturday afternoon during our "Spring Fling" weekends, we invite all our kiddos and adults to join Mr. Marcondes at the basketball courts for some "Street Hockey"!

We have lots of family fun competitions and activities planned for this weekend such as a family scavenger hunt, "Human Hungry Hippo" (so much fun), "Family Feud" on the beach, a family water balloon toss, "FUN IN THE SUN" Bingo and a dance at the Outdoor Theater with LOOSE SCREWS!! Also, every Saturday afternoon during our "Spring Fling" weekends, we invite all our kiddos and adults to join Mr. Marcondes at the basketball courts for some "Street Hockey"!

Mom had her Special Day in May.... Now it's Dad's turn.
Special kids' craft for Dad on Saturday morning, PAINT PARTY FOR DAD IN THE LODGE...Just Kidding...actually we will be having a Tailgating Party at the Ball Field for Dad with a Cook-out, Horseshoe and Bocce ball tournaments beginning at 1:00, Mom and the Kids' Gift Bingo in the Lodge and Saturday night, "FUNHOUSE" will be back to entertain us!! FREE COFFEE AND PANCAKES AT THE COFFEE SHOP FOR ALL DADS ON SUNDAY MORNING!!

JUNE 21st - 23RD PIRATE WEEKEND - SECOND KARAOKE NIGHT OF THE SEASON! Ahoy there, Mateys...This weekend is all about PIRATES and "SINGIN' UP A STORM"!! Friday night we will have a special Pirate Pizza Party with a visit from a REAL, LIVE PIRATE!! (Advance reservations required). Saturday, we invite ALL our guests to "walk the plank" for some "booty", possibly while "sword fighting"....and did I mention in the water?? An afternoon "Pirate Bingo"...bring a wrapped gift ($2-$5 in value) but.....winners beware, the Pirates can steal your "booty"! Adults and teens are invited to participate in our "Pirate Ship" (kayak) races!! We have a special Pirate Treasure Hunt planned with a "boat-load" of treasure for the lucky winners. Saturday night we will be entertained with another fun-filled night of KARAOKE beginning at 7:30 with "KIDDIE KARAOKE" until 9:00, then moving on to "ADULT TIME" from 9:00-10:30. Hopefully, this gives all our "STARS" a chance to entertain us!

(This date is subject to change depending on local cities' and towns' final day of classes due to snow days.) Some of the great daily activities we have planned for this summer are ceramics, beach games, lunches and Friday Pizza Parties with our mascot Charlie, OUTDOOR MOVIES at our outdoor theater, special event nights, volleyball, and crafts for all ages. We will have morning cartoons, face painting, treasure and scavenger hunts, Candy Bar and Gift Bingo, Tie Dye and so much more! Rain or shine, we ALWAYS have a GREAT time here at Maple Park.

We begin our weekend with a noontime Pizza Party with Charlie, ceramics and Friday night $$ Bingo. Saturday, join us for some tie-dye, ceramics, candy bingo and a Beach Dance with an AWESOME band, FUNHOUSE! Sunday's highlights are an afternoon candy bingo and an Outdoor Movie at the Outdoor Theater.

"MAGIC MONDAY" - An early evening Magic Show starring one of the very talented magicians from The Magic Company. "EXTRATERRESTRIAL TUESDAY" is all about Alien Adventures....believe it or not, it is actually WORLD UFO DAY today!! "WAY BACK WEDNESDAY" - We are rolling back the prices on Kayak and Paddle Boats today, creating some COOL Tie Dye shirts and making some Pet Rocks. Root Beer and Creamsicle Floats are only $1.00 today!! Tonight, we will be showing a "Blast from the Past" movie at the Outdoor Theater. Bell bottoms and Leisure Suits are optional!! On Thursday, July 4th, our HAPPY BIRTHDAY AMERICA CELEBRATION begins with our Annual "Star-Spangled Parade", an "All American" Cook-out on the beach, our Annual Children's Presentation on the beach followed by our Floating Bonfire and Beach Dance. FABULOUS FRIDAY brings a family scavenger hunt, a noontime Pizza Party with Charlie and Charlotte, Ceramics, and $$ Bingo. SATURDAY (7/6) we begin the day with our ANNUAL CRAFT/VENDOR FAIR and end the day with our 23RD ANNUAL TALENT SHOW - What would a summer at Maple Park be without our Annual Talent Show! Imagine, it has been 23 years since we started this spectacular event!!
If anyone has some old photos from Talent Shows in the past, please forward them to me either by text (5084930913) or email ([email protected]) or drop off pictures at the office and I can scan them. I will put a slide show together that everyone can enjoy before and after the show. Always a big hit for all our guests, whether your talent is watching or participating!! Also on tap: a horseshoe tournament, a sunflower seed spitting contest (believe it or not, they practice all year long for this event), and a "Battle of the Beans" contest on 2nd beach. Who will have the honor of making the best baked beans this year?? $50.00 MP Gift Card to the 1st place winner!! A Country Western dance with A LIVE BAND, "Loose Screws" (and YES, they really do play Country music), at the Outdoor Theater! COME ON DOWN and join the fun!!

Yes!! A whole week of Christmas festivities. Every day we will make an ornament to hang on our tree. On Friday (July 19th) night we have a VERY SPECIAL tree lighting ceremony planned at the Outdoor Theater. Santa visits on Saturday night (July 20th) followed by a dance with MC Productions. (Parents must provide gifts for Santa's bag.) A site decorating contest, Polar Bear swim, Breakfast with a MAGIC SHOW, "Snowman" contest, Gift Bingo, "SNOWBALL" toss, and Red and Green tie-dye are just some of the fantastic events of this week and the list goes on and on and on...OH, I FORGOT TO MENTION OUR "HILARIOUS HOLIDAY T-Shirt CONTEST"! If you don't have one, don't despair...bring a T-shirt to the Lodge Saturday Morning and we'll have supplies on hand to help you "Merry It Up"!

This was such a spectacular week for all involved in years past that we have made it an annual event. This is a week of crafts, fun, camaraderie, and spirituality for all children ages 5 to 18. Non-Denominational, Non-Discriminatory, just an atmosphere of acceptance and Love of God.

Bienvenido campista!! Welcome Campers!! A "Jumping Bean" Contest, Pinatas, a "Taco Bar" and Cook-Out on the Beach, and a Beach Dance with "UNPLUGGED" are just a few of the highlights of this weekend. Sign up for our Annual Chili Cook-off Contest and be crowned the Chili King or Queen of 2019!! Competition is tough - start "Googling" your recipes now!! If Nachos are your passion, we'll be having a "Supreme Nacho Contest" too!! YUM!!

Join us for our 13TH Annual Mardi Gras!! Decorate your floats...golf carts, wagons, bikes, strollers, etc..... or help us decorate our Mardi Gras float and join us in our evening Mardi Gras parade. Prizes for the best Mardi Gras "Floats". Kids and adults will be decorating their masks for our Saturday Night Mardi Gras Ball featuring our FAVORITE Mardi Gras Band on the beach...."TIMELINE" (formerly Mojo Mambo).

Welcome Back Guys!! This weekend is all about "FUN IN THE SUN"!! We have lots of beach activities planned such as a water balloon toss, a Hula Hoop contest (kids and adults), tug o' war in the water, Kayak races, WATER volleyball, Beach Bingo, etc. Just plan on having lots of fun and getting a "little" wet during the day AND dancing the night away with one of the spectacular DJs from MC Productions!! You can also "LOCK UP A LOVED ONE ON THE ISLAND" this weekend. All "BAIL" $$ (yes, you must eventually post bail) will be donated to our 2019 Maple Park Cares for Kids Charity (to be announced). Last year we raised $2,000 for the Tomorrow Fund.

A full weekend of every kind of BINGO imaginable! (AND WE REALLY STRETCHED OUR IMAGINATIONS FOR THIS ONE!)
What you will need: school supplies, groceries, a $3.00+ gift, gardening items, a $3.00+ toy, $$, and candy. This year, since we will be having Family $$ Bingo on Friday night, on Saturday night we will have a special early evening (6:30) Toy/Gift Bingo for the FAMILY, and $$ Bingo will be for the Adults (18+) beginning at 8:30. FOR BINGO LOVERS.....this is the BEST weekend of the summer!!

AUGUST 23rd - 25th 5th ANNUAL CLASSIC CAR SHOW!! This was such a huge success last year!! We have lots of vendors, food trucks and some spectacular cars, trucks and tractors on display. LOOSE SCREWS will be entertaining us at the show and at the outdoor theater in the evening. Check back for more information on this event!! It is something you will not want to miss!!

This weekend is highlighted by our annual Craft Fair/Flea Market (8/31). New crafters/vendors are always welcome. $$ Bingo is on Saturday night. We also have kids' crafts, a horseshoe tournament, movies, softball, volleyball, FIRE TRUCK rides, etc. and our END OF SUMMER FAMILY FUN DAY with Games, a Bounce House and Cotton Candy, followed by our ANNUAL GOLF CART SHOW AND PARADE and Farewell Summer Dance with "FUNHOUSE" (9/1). Also, last year we had our FOURTH Annual "Maple Park Cares" Event beginning in August and ending on Columbus Day Weekend. We raised $2,000 for THE TOMORROW FUND!! Proceeds from this weekend's FAMILY FUN DAY will be going toward this year's charity in hopes of matching or beating our 2018 total.

Limited Activities will be offered during these weekends but TAKE ADVANTAGE OF THE WARM FALL WEATHER TO WALK THE CAPE COD CANAL OR VISIT THE CHRISTMAS TREE SHOPS ON THE CAPE. See the office for directions and a list of LOCAL EVENTS which will be available for the month of September.

SEPTEMBER 6TH - 8TH GRANDPARENTS WEEKEND - We have some truly "GRAND" activities planned for this weekend including an "OLDIES" dance with MC Productions!!

Unfortunately, after almost 50 years in operation, the Bourne Scallop Festival does not take place anymore...However, let's have our own "SEAFOOD FESTIVAL" at Maple Park. On September 21st, we will start our day with a beach-inspired craft, followed by a "Chowder Contest" ($50 Gift Card for 1st Place, $30 for 2nd and $20 for 3rd) at noon, an ocean-themed paint party for kids and adults, and a "Seafood Pot Luck Supper" at 6:00. I'll be serving up some spaghetti with white clam sauce and garlic bread. What is everyone else bringing?? Chowder, cod fish cakes, Shrimp Mozambique, fish sticks, clam cakes, even tuna salad sandwiches would work! To top off this spectacular day, how about a fun night of KARAOKE?? OH YAH!!!

SEPTEMBER 28th DANCE WITH "UNPLUGGED"

Join us on October 5th for a SPECIAL HALLOWEEN EVENT FOR OUR FABULOUS FALL GUESTS WHO WILL BE LEAVING US ON SUNDAY! On Saturday afternoon, beginning at 3:30, we invite all Ghosts, Goblins, Princesses, Pirates, Zombies, etc. to join us in a costume parade followed by Site to Site Trick or Treating throughout the campground. Saturday Night, "LOOSE SCREWS" will be entertaining us in the Lodge. Costumes optional but WELCOME!!
Site to Site Trick or Treating, a pumpkin decorating contest, Halloween crafts, a Scary movie, our Annual Adult Only Night with Harry French on Friday night, the Columbus Day Weekend Craft Fair on Saturday (new crafters are always welcome), our 6th Annual Harvest Carnival, which is always a spectacular event, and our 52nd Annual Chicken BBQ catered by TAZZ BBQ (10/13), followed by our FINAL DANCE (can't believe it's over) OF THE 2019 SEASON with MC Productions!! Advance reservations required for the Chicken BBQ!!

THIS IS ALSO THE FINAL WEEKEND OF OUR "MAPLE PARK CARES" EVENT. SPECIAL RAFFLES AND OUR SPECTACULAR "PATH OF PUMPKINS FOR A CURE" WILL BE HIGHLIGHTS OF THIS WEEKEND. REMEMBER, LAST YEAR WE RAISED $2,000 FOR THE TOMORROW FUND!! A GREAT, BIG THANK YOU TO OUR GENEROUS CAMPERS! Once again, we will choose a local charity near and dear to our hearts. Our hope is to match or beat that donation in 2019!
! { dg-do compile }
! { dg-options "-std=f2003" }
! Check whether empty CONTAINS sections are allowed
! PR fortran/29806

module x
contains
end module x ! { dg-error "CONTAINS statement without FUNCTION or SUBROUTINE statement" }

program y
contains
end program y ! { dg-error "CONTAINS statement without FUNCTION or SUBROUTINE statement" }
(** Ghost state for a monotonically increasing nat, wrapping the [mono_natR]
RA. Provides an authoritative proposition [mono_nat_auth_own γ q n] for the
underlying number [n] and a persistent proposition [mono_nat_lb_own γ m]
witnessing that the authoritative nat is at least m.

The key rules are [mono_nat_lb_own_valid], which asserts that an auth at [n]
and a lower-bound at [m] imply that [m ≤ n], and [mono_nat_update], which
allows to increase the auth element. At any time the auth nat can be
"snapshotted" with [mono_nat_get_lb] to produce a persistent lower-bound
proposition. *)
From iris.proofmode Require Import tactics.
From iris.algebra.lib Require Import mono_nat.
From iris.bi.lib Require Import fractional.
From iris.base_logic.lib Require Export own.
From iris.prelude Require Import options.

Class mono_natG Σ :=
  MonoNatG { mono_natG_inG :> inG Σ mono_natR; }.
Definition mono_natΣ : gFunctors := #[ GFunctor mono_natR ].
Global Instance subG_mono_natΣ Σ : subG mono_natΣ Σ → mono_natG Σ.
Proof. solve_inG. Qed.

Definition mono_nat_auth_own_def `{!mono_natG Σ}
    (γ : gname) (q : Qp) (n : nat) : iProp Σ :=
  own γ (mono_nat_auth q n).
Definition mono_nat_auth_own_aux : seal (@mono_nat_auth_own_def).
Proof. by eexists. Qed.
Definition mono_nat_auth_own := mono_nat_auth_own_aux.(unseal).
Definition mono_nat_auth_own_eq :
  @mono_nat_auth_own = @mono_nat_auth_own_def := mono_nat_auth_own_aux.(seal_eq).
Global Arguments mono_nat_auth_own {Σ _} γ q n.

Definition mono_nat_lb_own_def `{!mono_natG Σ} (γ : gname) (n : nat): iProp Σ :=
  own γ (mono_nat_lb n).
Definition mono_nat_lb_own_aux : seal (@mono_nat_lb_own_def).
Proof. by eexists. Qed.
Definition mono_nat_lb_own := mono_nat_lb_own_aux.(unseal).
Definition mono_nat_lb_own_eq :
  @mono_nat_lb_own = @mono_nat_lb_own_def := mono_nat_lb_own_aux.(seal_eq).
Global Arguments mono_nat_lb_own {Σ _} γ n.

Local Ltac unseal := rewrite
  ?mono_nat_auth_own_eq /mono_nat_auth_own_def
  ?mono_nat_lb_own_eq /mono_nat_lb_own_def.

Section mono_nat.
  Context `{!mono_natG Σ}.
  Implicit Types (n m : nat).

  Global Instance mono_nat_auth_own_timeless γ q n : Timeless (mono_nat_auth_own γ q n).
  Proof. unseal. apply _. Qed.
  Global Instance mono_nat_lb_own_timeless γ n : Timeless (mono_nat_lb_own γ n).
  Proof. unseal. apply _. Qed.
  Global Instance mono_nat_lb_own_persistent γ n : Persistent (mono_nat_lb_own γ n).
  Proof. unseal. apply _. Qed.

  Global Instance mono_nat_auth_own_fractional γ n :
    Fractional (λ q, mono_nat_auth_own γ q n).
  Proof. unseal. intros p q. rewrite -own_op mono_nat_auth_frac_op //. Qed.
  Global Instance mono_nat_auth_own_as_fractional γ q n :
    AsFractional (mono_nat_auth_own γ q n) (λ q, mono_nat_auth_own γ q n) q.
  Proof. split; [auto|apply _]. Qed.

  Lemma mono_nat_auth_own_agree γ q1 q2 n1 n2 :
    mono_nat_auth_own γ q1 n1 -∗
    mono_nat_auth_own γ q2 n2 -∗
    ⌜(q1 + q2 ≤ 1)%Qp ∧ n1 = n2⌝.
  Proof.
    unseal. iIntros "H1 H2".
    iDestruct (own_valid_2 with "H1 H2") as %?%mono_nat_auth_frac_op_valid; done.
  Qed.
  Lemma mono_nat_auth_own_exclusive γ n1 n2 :
    mono_nat_auth_own γ 1 n1 -∗ mono_nat_auth_own γ 1 n2 -∗ False.
  Proof.
    iIntros "H1 H2".
    by iDestruct (mono_nat_auth_own_agree with "H1 H2") as %[[] _].
  Qed.

  Lemma mono_nat_lb_own_valid γ q n m :
    mono_nat_auth_own γ q n -∗ mono_nat_lb_own γ m -∗ ⌜(q ≤ 1)%Qp ∧ m ≤ n⌝.
  Proof.
    unseal. iIntros "Hauth Hlb".
    iDestruct (own_valid_2 with "Hauth Hlb") as %Hvalid%mono_nat_both_frac_valid.
    auto.
  Qed.

  (** The conclusion of this lemma is persistent; the proofmode will preserve
  the [mono_nat_auth_own] in the premise as long as the conclusion is introduced
  to the persistent context, for example with
  [iDestruct (mono_nat_lb_own_get with "Hauth") as "#Hfrag"]. *)
  Lemma mono_nat_lb_own_get γ q n :
    mono_nat_auth_own γ q n -∗ mono_nat_lb_own γ n.
  Proof. unseal. apply own_mono, mono_nat_included. Qed.
  Lemma mono_nat_lb_own_le {γ n} n' :
    n' ≤ n →
    mono_nat_lb_own γ n -∗ mono_nat_lb_own γ n'.
  Proof. unseal. intros. by apply own_mono, mono_nat_lb_mono. Qed.

  Lemma mono_nat_own_alloc n :
    ⊢ |==> ∃ γ, mono_nat_auth_own γ 1 n ∗ mono_nat_lb_own γ n.
  Proof.
    unseal.
    iMod (own_alloc (mono_nat_auth 1 n ⋅ mono_nat_lb n)) as (γ) "[??]".
    { apply mono_nat_both_valid; auto. }
    auto with iFrame.
  Qed.

  Lemma mono_nat_own_update {γ n} n' :
    n ≤ n' →
    mono_nat_auth_own γ 1 n ==∗ mono_nat_auth_own γ 1 n' ∗ mono_nat_lb_own γ n'.
  Proof.
    iIntros (?) "Hauth".
    iAssert (mono_nat_auth_own γ 1 n') with "[> Hauth]" as "Hauth".
    { unseal. iApply (own_update with "Hauth"). by apply mono_nat_update. }
    iModIntro. iSplit; [done|]. by iApply mono_nat_lb_own_get.
  Qed.
End mono_nat.
(* Copyright (C) 2017 M.A.L. Marques

 This Source Code Form is subject to the terms of the Mozilla Public
 License, v. 2.0. If a copy of the MPL was not distributed with this
 file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)

(* type: gga_exc *)
(* prefix:
  gga_x_hjs_params *params;

  assert(p->params != NULL);
  params = (gga_x_hjs_params * )(p->params);
*)

hjs_AA := 0.757211:
hjs_BB := -0.106364:
hjs_CC := -0.118649:
hjs_DD := 0.609650:

hjs_fH := s -> add(params_a_a[i]*s^(1+i), i=1..6)/(1 + add(params_a_b[i]*s^i, i=1..9)):

(* The m_max functions are necessary as in some cases the arguments
   of the sqrt become negative *)
hjs_zeta := s -> m_max(s^2*hjs_fH(s), 1e-10):
hjs_eta := s -> m_max(hjs_AA + hjs_zeta(s), 1e-10):
hjs_lambda := s -> hjs_DD + hjs_zeta(s):
hjs_chi := (rs, z, s) -> nu(rs, z)/sqrt(hjs_lambda(s) + nu(rs, z)^2):

hjs_fF := (rs, z, s) -> 1 - s^2/(27*hjs_CC*(1 + s^2/4)) - hjs_zeta(s)/(2*hjs_CC):

hjs_fG := (rs, z, s) ->
  - 2/5 * hjs_CC*hjs_fF(rs, z, s)*hjs_lambda(s)
  - 4/15 * hjs_BB*hjs_lambda(s)^2
  - 6/5 * hjs_AA*hjs_lambda(s)^3
  - hjs_lambda(s)^(7/2)*(4/5*sqrt(Pi) + 12/5*(sqrt(hjs_zeta(s)) - sqrt(hjs_eta(s)))):

hjs_f1 := (rs, z, s) ->
  + hjs_AA
  - 4/9 * hjs_BB*(1 - hjs_chi(rs, z, s))/hjs_lambda(s)
  - 2/9 * hjs_CC*hjs_fF(rs, z, s)*(2 - 3*hjs_chi(rs, z, s) + hjs_chi(rs, z, s)^3)/hjs_lambda(s)^2
  - 1/9 * hjs_fG(rs, z, s)*(8 - 15*hjs_chi(rs, z, s) + 10*hjs_chi(rs, z, s)^3 - 3*hjs_chi(rs, z, s)^5)/hjs_lambda(s)^3
  + 2*nu(rs, z)*(sqrt(hjs_zeta(s) + nu(rs, z)^2) - sqrt(hjs_eta(s) + nu(rs, z)^2))
  + 2*hjs_zeta(s)*log((nu(rs, z) + sqrt(hjs_zeta(s) + nu(rs, z)^2))/(nu(rs, z) + sqrt(hjs_lambda(s) + nu(rs, z)^2)))
  - 2*hjs_eta(s)*log((nu(rs, z) + sqrt(hjs_eta(s) + nu(rs, z)^2))/(nu(rs, z) + sqrt(hjs_lambda(s) + nu(rs, z)^2))):

hjs_fx := (rs, z, x) -> hjs_f1(rs, z, X2S*x):

f := (rs, z, xt, xs0, xs1) -> gga_exchange_nsp(hjs_fx, rs, z, xs0, xs1):
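In conventional notation (our transcription of the Maple definitions above, not part of the original file), the helper functions implement

$$H(s) = \frac{\sum_{i=1}^{6} a_i\, s^{i+1}}{1 + \sum_{i=1}^{9} b_i\, s^{i}}, \qquad \zeta(s) = \max\bigl(s^2 H(s),\, 10^{-10}\bigr), \qquad \eta(s) = \max\bigl(A + \zeta(s),\, 10^{-10}\bigr), \qquad \lambda(s) = D + \zeta(s),$$

where $a_i$, $b_i$ are the entries of `params_a_a` and `params_a_b`, and $A = 0.757211$, $D = 0.609650$ are `hjs_AA` and `hjs_DD`. The $10^{-10}$ cutoff guards the square roots against negative arguments, as the comment in the code notes.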
Formal statement is:
lemma subset_box:
  fixes a :: "'a::euclidean_space"
  shows "cbox c d \<subseteq> cbox a b \<longleftrightarrow> (\<forall>i\<in>Basis. c\<bullet>i \<le> d\<bullet>i) \<longrightarrow> (\<forall>i\<in>Basis. a\<bullet>i \<le> c\<bullet>i \<and> d\<bullet>i \<le> b\<bullet>i)" (is ?th1)
    and "cbox c d \<subseteq> box a b \<longleftrightarrow> (\<forall>i\<in>Basis. c\<bullet>i \<le> d\<bullet>i) \<longrightarrow> (\<forall>i\<in>Basis. a\<bullet>i < c\<bullet>i \<and> d\<bullet>i < b\<bullet>i)" (is ?th2)
    and "box c d \<subseteq> cbox a b \<longleftrightarrow> (\<forall>i\<in>Basis. c\<bullet>i < d\<bullet>i) \<longrightarrow> (\<forall>i\<in>Basis. a\<bullet>i \<le> c\<bullet>i \<and> d\<bullet>i \<le> b\<bullet>i)" (is ?th3)
    and "box c d \<subseteq> box a b \<longleftrightarrow> (\<forall>i\<in>Basis. c\<bullet>i < d\<bullet>i) \<longrightarrow> (\<forall>i\<in>Basis. a\<bullet>i \<le> c\<bullet>i \<and> d\<bullet>i \<le> b\<bullet>i)" (is ?th4)
Informal statement is: Each of the four inclusions between boxes is characterized componentwise, provided the smaller box is nonempty (i.e. $c_i \le d_i$, resp. $c_i < d_i$, for all basis directions $i$): $[c,d] \subseteq [a,b]$ iff $a_i \le c_i$ and $d_i \le b_i$ for all $i$; $[c,d] \subseteq (a,b)$ iff $a_i < c_i$ and $d_i < b_i$ for all $i$; and both $(c,d) \subseteq [a,b]$ and $(c,d) \subseteq (a,b)$ iff $a_i \le c_i$ and $d_i \le b_i$ for all $i$.
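As a concrete one-dimensional illustration (ours, not part of the original statement): $(0,1) \subseteq [0,1]$ holds even though $0 \not< 0$, which is why the last two characterizations only require the non-strict inequalities $a_i \le c_i$ and $d_i \le b_i$; conversely, $[0,1] \subseteq (0,2)$ fails because $0 \notin (0,2)$, matching the strict inequalities required in the second characterization.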
\chapter{Introduction}
\label{sec:intro}

The accurate prediction of survival times is essential for clinicians and researchers to decide treatment and identify which variables drive survival. The Cox Proportional-Hazards (Cox PH) model \citep{cox1972regression, breslow1975analysis} is still the \emph{de facto} standard model for survival analysis today, despite proposals for various other methods such as random survival forests \citep{ishwaran2008random}, boosting \citep{hothorn2010model} and neural networks \citep{ching2018cox}.

Survival analysis of cancer patients can be particularly challenging due to the heterogeneous nature of the disease, even for patients suffering from the same type of cancer \citep{polyak2011heterogeneity, dagogo2018tumour}. With the advent of high-throughput sequencing technologies, researchers hoped to leverage the information inherent in biological data such as gene expression, DNA methylation, and others (jointly referred to as multi-omics) to help explain and mitigate this heterogeneity. Improved models may, in turn, also help clinicians better understand the underlying diseases by shedding light on which variables drive survival for a specific cancer \citep{hasin2017multi, chakraborty2018onco}.

However, even using the wealth of newly available biological data in large-scale projects such as The Cancer Genome Atlas Program (TCGA) \citep{tomczak2015cancer}, significant improvements in cancer survival analysis, as measured by performance metrics such as the concordance index \citep{harrell1982evaluating} or the Brier score \citep{brier1950verification}, have been elusive. \citet{herrmann2021large} showed that the Cox PH model using only clinical data could outperform most other methods, even when these were designed to integrate multi-omics data. Nevertheless, there is still much active research on multi-modal integration (of which multi-omics integration is a subset), both in bioinformatics and the machine learning community more broadly. The neural networks trained within the broader context of multi-modal integration are often employed to perform tasks such as image labeling or matching \citep{mao2014explain, ma2015multimodal}, and similar tasks which can help make sense of the vast amount of unlabeled data available online (\emph{e.g.,} unlabeled images on \emph{Flickr}, as represented by the \emph{Flickr30K} dataset \citep{plummer2015flickr30k}).

There are two main differences between multi-modal data in the (cancer) survival context and the broader machine learning literature: First, while machine learning benchmark datasets tend to have a large sample size (usually \(n \gg 10^4\)), datasets in (cancer) survival analysis tend to be much smaller, especially those which provide access to multi-omics data. The relatively small sample size of cancer multi-omics datasets constrains the architectures employed in their analysis, generally making them quite simple. Second, while a task such as image captioning does not necessarily require an interpretable model, (cancer) survival analysis is held to a much higher standard in terms of interpretability, both because of the need to convince clinicians of a model's usefulness and to be able to learn more about underlying disease biology.
Our work explored a neural approach to multi-omics integration, which leverages hierarchical supervised autoencoders \citep{le2018supervised} combined with \gls{sgl} regularization \citep{simon2013sparse} to achieve competitive performance on \gls{tcga} \citep{tomczak2015cancer} as measured by Harrell's concordance \citep{harrell1982evaluating}. In particular, we showed that neural models could perform as well as or better than \gls{bf} and its variants, even without transfer learning. Furthermore, we investigated the biological knowledge learned by our models by inspecting their latent spaces, a feature which non-neural models such as \gls{bf} do not offer. We also studied global surrogate models for our black-box neural multi-omics integration methods.\footnote{An alternative way to frame surrogate models in the context of survival analysis is that we used the predicted log-partial hazards from our multi-omics methods as \emph{pseudo observations} for a linear regression model.} We showed that surrogate models could provide both group-level sparsity and a high degree of overall sparsity while maintaining high performance. In addition, we partially reframed multi-omics integration as a feature selection problem. In particular, we showed that if trained on a particular subset of predictive input variables, non-integrative methods performed decidedly better than when trained on all input variables.

In brief, our main contributions were thus the following:
\begin{enumerate}
    \item We proposed several neural architectures for multi-omics integration in cancer survival models, the best of which performed on par with the state-of-the-art.
    \item We showed that surrogate models enable close to state-of-the-art performance at very high sparsity levels.
    \item We \emph{partially} reframed multi-omics integration as feature selection but, in the end, showed that well-performing methods are not exclusively differentiated by which features they rely on.
\end{enumerate}

Following this introduction, we give background on the key topics relevant to this work, notably survival analysis, cancer, multi-omics data, autoencoders, and interpretable machine learning. Afterward, we present related work, especially general-purpose survival methods, survival methods for multi-omics integration, and a few miscellaneous topics. In Chapter \ref{sec:methods}, we present the methods and study design. In particular, we introduce our main contribution, the novel neural architectures we developed. In addition, we give an overview of the benchmark setup, mainly which datasets and reference models were used and the validation strategy that we employed. Afterward, in Chapter \ref{sec:results} we present our main results, followed by a discussion of their implications in Chapter \ref{sec:discussion}. To finish up, we summarize the entirety of our thesis in Chapter \ref{sec:conclusion} before elaborating on the potential for future work and some of the limitations of our work.
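To make the surrogate-model idea in the footnote concrete, the following is a minimal sketch (an illustration of ours, not code from the implementation described in this thesis): it assumes a fitted black-box survival network \texttt{net} with a hypothetical \texttt{predict\_log\_hazard} method, and it fits a sparse linear surrogate to the predicted log-partial hazards with scikit-learn's Lasso, which stands in here for the sparse-group penalties discussed later.

\begin{verbatim}
import numpy as np
from sklearn.linear_model import Lasso

# X: (n_samples, n_features) multi-omics design matrix (placeholder data)
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 50))

# Black-box predictions used as pseudo observations; in practice:
#   log_hazard = net.predict_log_hazard(X)   # hypothetical API
log_hazard = X[:, :3] @ np.array([0.8, -0.5, 0.3])

# Global surrogate: sparse linear fit to the black-box outputs
surrogate = Lasso(alpha=0.05).fit(X, log_hazard)
print(np.flatnonzero(surrogate.coef_))  # indices of retained features
\end{verbatim}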
The filter of right neighborhoods of $0$ is equal to the filter of neighborhoods of $\infty$ under the map $x \mapsto 1/x$.
function bytes = get_file_size(fname)
% gets file size in bytes, ensuring that symlinks are dereferenced
% MP: not sure who wrote this code, but they were careful to do it right on Linux
bytes = NaN;
if isunix
    cmd = sprintf('stat -Lc %%s %s', fname);
    [status, r] = system(cmd);
    if status == 0
        bytes = str2double(r);
    end
end
if isnan(bytes)
    o = dir(fname);
    bytes = o.bytes;
end
end
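For comparison, a sketch of ours (not part of the original MATLAB code) showing the same symlink-dereferencing behavior in Python: `os.stat` follows symlinks by default (the analogue of `stat -L`), while `os.lstat` would report the link entry itself.

```python
import os

def get_file_size(fname: str) -> int:
    """Size in bytes of the file fname points at, dereferencing symlinks."""
    # os.stat follows symlinks (like `stat -L`); use os.lstat instead to
    # get the size of the symlink entry itself.
    return os.stat(fname).st_size
```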
ZamZam 40 comes with the legendary Jamaican roots singer Johnny Clarke. The people responsible for this version are also an institution – Henry & Louis (Jack Lundie and Andy Scholes) are icons of the ‘Bristol sound’. This crew started their musical journey in 1988, forming the 2Kings label in 1990. A heavily edited version of “Love and Understanding” was released on their “Time Will Tell” LP; this release is the first time the full vocal and full dub versions have been heard in all their glory.

The Bababoom Hi Fi label presents a new 7” with roots reggae legend Johnny Clarke, who meets the Roots Defender Band in Satta Studio. This Italian crew has played with, among others, Michael Prophet and Earl 16. “Come let we gather” is a nice roots & culture tune with the lovely voice of Johnny Clarke and a brilliant, rootical riddim played by Satta (drums), Basque Dub Foundation (bass), Nobke (keys) and Draxt (guitar). The track was mixed and mastered by Jamtone and Pilah. Of course, the flip side contains a dub version, where the musicians prove that they know what dubwise is.
5 have trigonal bipyramidal molecular geometry in the gas phase, but in the liquid phase, SbF
{- Copyright 2018, Mokshasoft AB (mokshasoft.com) This software may be distributed and modified according to the terms of the BSD 2-Clause license. Note that NO WARRANTY is provided. See "LICENSE_BSD2.txt" for details. -} {- ported from file kernel/libsel4/include/sel4/bootinfo_types.h -} module seL4.BootinfoTypes import seL4.Types import seL4.SharedTypes %access public export %default total -- caps with fixed slot positions in the root CNode data SeL4Cap = SeL4_CapNull -- null cap | SeL4_CapInitThreadTCB -- initial thread's TCB cap | SeL4_CapInitThreadCNode -- initial thread's root CNode cap | SeL4_CapInitThreadVSpace -- initial thread's VSpace cap | SeL4_CapIRQControl -- global IRQ controller cap | SeL4_CapASIDControl -- global ASID controller cap | SeL4_CapInitThreadASIDPool -- initial thread's ASID pool cap | SeL4_CapIOPort -- global IO port cap (null cap if not supported) | SeL4_CapIOSpace -- global IO space cap (null cap if no IOMMU support) | SeL4_CapBootInfoFrame -- bootinfo frame cap | SeL4_CapInitThreadIPCBuffer -- initial thread's IPC buffer frame cap | SeL4_CapDomain -- global domain controller cap | SeL4_NumInitialCaps Cast SeL4Cap Int where cast SeL4_CapNull = 0 cast SeL4_CapInitThreadTCB = 1 cast SeL4_CapInitThreadCNode = 2 cast SeL4_CapInitThreadVSpace = 3 cast SeL4_CapIRQControl = 4 cast SeL4_CapASIDControl = 5 cast SeL4_CapInitThreadASIDPool = 6 cast SeL4_CapIOPort = 7 cast SeL4_CapIOSpace = 8 cast SeL4_CapBootInfoFrame = 9 cast SeL4_CapInitThreadIPCBuffer = 10 cast SeL4_CapDomain = 11 cast SeL4_NumInitialCaps = 12 SeL4_SlotPos : Type SeL4_SlotPos = SeL4_Word record SeL4_SlotRegion where constructor MKSeL4_SlotRegion start : SeL4_SlotPos end : SeL4_SlotPos record SeL4_UntypedDesc where constructor MKSeL4_UntypedDesc paddr : SeL4_Word -- physical address of untyped cap padding1 : SeL4_Uint8 padding2 : SeL4_Uint8 sizeBits : SeL4_Uint8 -- size (2^n) bytes of each untyped isDevice : SeL4_Uint8 -- whether the untyped is a device record SeL4_BootInfo where constructor MKSeL4_BootInfo extraLen : SeL4_Word -- length of any additional bootinfo information nodeID : SeL4_NodeId -- ID [0..numNodes-1] of the seL4 node (0 if uniprocessor) numNodes : SeL4_Word -- number of seL4 nodes (1 if uniprocessor) numIOPTLevels : SeL4_Word -- number of IOMMU PT levels (0 if no IOMMU support) ipcBuffer : SeL4_IPCBufferP -- pointer to initial thread's IPC buffer empty : SeL4_SlotRegion -- empty slots (null caps) sharedFrames : SeL4_SlotRegion -- shared-frame caps (shared between seL4 nodes) userImageFrames : SeL4_SlotRegion -- userland-image frame caps userImagePaging : SeL4_SlotRegion -- userland-image paging structure caps ioSpaceCaps : SeL4_SlotRegion -- IOSpace caps for ARM SMMU extraBIPages : SeL4_SlotRegion -- caps for any pages used to back the additional bootinfo information initThreadCNodeSizeBits : SeL4_Uint8 -- initial thread's root CNode size (2^n slots) initThreadDomain : SeL4_Domain -- Initial thread's domain ID archInfo : SeL4_Word -- tsc freq on x86, unused on arm untyped : SeL4_SlotRegion -- untyped-object caps (untyped caps) -- seL4_UntypedDesc untypedList[CONFIG_MAX_NUM_BOOTINFO_UNTYPED_CAPS]; -- information about each untyped record SeL4_BootInfoHeader where constructor MKSeL4_BootInfoHeader -- identifier of the following chunk. 
IDs are arch/platform specific id : SeL4_Word -- length of the chunk, including this header len : SeL4_Word Show SeL4_SlotRegion where show sl = "[" ++ show (start sl) ++ " --> " ++ show (end sl) ++ ")" Show SeL4_BootInfo where show bi = "Node " ++ show (nodeID bi) ++ " of " ++ show (numNodes bi) ++ "\n" ++ "IOPT levels: " ++ show (numIOPTLevels bi) ++ "\n" ++ "IPC buffer: " ++ show (ipcBuffer bi) ++ "\n" ++ "Empty slots: " ++ show (empty bi) ++ "\n" ++ "sharedFrames: " ++ show (sharedFrames bi) ++ "\n" ++ "userImageFrames: " ++ show (userImageFrames bi) ++ "\n" ++ "userImagePaging: " ++ show (userImagePaging bi) ++ "\n" ++ "untyped: " ++ show (untyped bi) ++ "\n" ++ "Initial thread domain: " ++ show (initThreadDomain bi) ++ "\n" ++ "Initial thread cnode size: " ++ show (initThreadCNodeSizeBits bi) ++ "\n" ++ "List of untypeds\n" ++ "------------------\n" ++ "Paddr | Size | Device\n" ++ "unimplemented...\n" -- A function to create a dummy BootInfo before reading the proper one from the system. createDummyBootInfo : SeL4_BootInfo createDummyBootInfo = MKSeL4_BootInfo 0 1 2 3 4 (MKSeL4_SlotRegion 5 6) (MKSeL4_SlotRegion 6 7) (MKSeL4_SlotRegion 8 9) (MKSeL4_SlotRegion 9 10) (MKSeL4_SlotRegion 10 11) (MKSeL4_SlotRegion 11 12) 13 14 15 (MKSeL4_SlotRegion 16 17)
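A short usage sketch may help orient readers; the snippet below is hypothetical (the names `capSlot` and `emptyRange` are illustrative, not part of the module) and assumes this module is in scope. It exercises the `Cast` instance, which maps each cap to its fixed slot index, and the `Show` instance for slot regions via `createDummyBootInfo`.

-- Hypothetical usage sketch; not part of the ported header.
capSlot : Int
capSlot = cast SeL4_CapInitThreadTCB
-- evaluates to 1: the initial thread's TCB cap occupies slot 1

emptyRange : String
emptyRange = show (empty createDummyBootInfo)
-- evaluates to "[5 --> 6)": the dummy boot info's empty-slot region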
module Attenuations using AxisArrays using HTTP using Unitful import Unitful: g, cm, eV, keV, MeV export eV, keV, MeV export μ, μᵨ, Element, Compound, Mixture, Elements, Materials, PhotoelectricAbsorption, Coherent, Incoherent, InNuclearField, InElectronField, WithCoherent, WithoutCoherent, val abstract type Attenuation end """ PhotoelectricAbsorption is one of the principal forms of interaction of x-ray and gamma photons with matter. A low energy photon interacts with an electron in the atom and removes it from its shell. https://en.wikipedia.org/wiki/Photoelectric_effect https://radiopaedia.org/articles/photoelectric-effect """ struct PhotoelectricAbsorption <: Attenuation end abstract type Scattering <: Attenuation end """ Coherent scattering (also known as unmodified, Rayleigh, classical or elastic scattering) is one of three forms of photon interaction which occurs when the energy of the x-ray or gamma photon is small in relation to the ionization energy of the atom. It therefore occurs with low energy radiation. Upon interacting with the attenuating medium, the photon does not have enough energy to liberate the electron from its bound state (i.e. the photon energy is well below the binding energy of the electron) so no energy transfer occurs. There is no energy deposition and thus no dose resulting from coherent scattering. The only change is a change of direction (scatter) of the photon, hence 'unmodified' scatter. Coherent scattering is not a major interaction process encountered in radiography at the energies normally used. Coherent scattering varies with the atomic number of the absorber (Z) and incident photon energy (E) by Z/E². https://radiopaedia.org/articles/coherent-scattering """ struct Coherent <: Scattering end """ Incoherent scatter (Compton effect or Compton scatter) is one of the principal forms of photon interaction. It is the main cause of scattered radiation in a material. It occurs due to the interaction of the photon (x-ray or gamma) with free electrons (unattached to atoms) or loosely bound valence shell (outer shell) electrons. The incident photon is scattered (changes direction) and imparts energy to the electron (recoil electron). The scattered photon will have a different wavelength (observed phenomenon) and thus a different energy (E=hc/λ). Energy and momentum are conserved in this process. The Compton effect is a partial absorption process: the original photon loses energy, a loss known as the Compton shift (i.e. a shift of wavelength/frequency). The wavelength change of the scattered photon is given by Δλ = 0.024(1 − cos θ) Å, where θ is the scattered photon angle. Thus, the energy of the scattered photon decreases with increasing scattered photon angle.
https://en.wikipedia.org/wiki/Compton_scattering https://radiopaedia.org/articles/compton-effect?lang=us """ struct Incoherent <: Scattering end abstract type PairProduction <: Attenuation end struct InNuclearField <: PairProduction end struct InElectronField <: PairProduction end abstract type Total <: Attenuation end struct WithCoherent <: Total end struct WithoutCoherent <: Total end const DefaultAttenuation = WithCoherent abstract type Matter end μᵨ( m::Matter, energy::T, a::Type{A} = DefaultAttenuation, ) where {T<:Unitful.Energy,A<:Attenuation} = μᵨ(m, [energy], a)[1] μ( m::Matter, energy::T, a::Type{A} = DefaultAttenuation, ) where {T<:Unitful.Energy,A<:Attenuation} = μ(m, [energy], a)[1] val(a::AbstractArray{T}) where {T<:Unitful.AbstractQuantity} = [i.val for i in a.data] val(a::T) where {T<:Unitful.AbstractQuantity} = a.val """ Element An element of the periodic table. """ struct Element{T,S} <: Matter where {T<:Unitful.Energy,S<:Unitful.Density} Z::Int symbol::String name::String ZAratio::Float64 I::T ρ::S end Base.show(io::IO, e::Element) = print( io, "$(e.Z) $(e.symbol) $(e.name) Z/A=$(e.ZAratio) I=$(e.I.val)eV ρ=$(e.ρ.val)g/cm³", ) function μᵨ( e::Element, energies::AbstractArray{<:Unitful.Energy}, a::Type{<:Attenuation} = DefaultAttenuation, ) body = Dict{String,String}( "Method" => "1", "ZNum" => "$(e.Z)", bodykey(a) => "on", "Energies" => formatenergies(energies), ) μᵨ = XCOM(body) * cm^2 ./ g AxisArray(μᵨ, Axis{:energy}(energies)) end μ( e::Element, energies::AbstractArray{<:Unitful.Energy}, a::Type{<:Attenuation} = DefaultAttenuation, ) = AxisArray(e.ρ * μᵨ(e, energies, a), Axis{:energy}(energies)) """ Compound """ struct Compound <: Matter formula::String end function μᵨ( c::Compound, energies::AbstractArray{<:Unitful.Energy}, a::Type{<:Attenuation} = DefaultAttenuation, ) body = Dict{String,String}( "Method" => "2", "Formula" => c.formula, bodykey(a) => "on", "Energies" => formatenergies(energies), ) μᵨ = XCOM(body) * cm^2 ./ g AxisArray(μᵨ, Axis{:energy}(energies)) end """ Mixture """ struct Mixture{T} <: Matter where {T<:AbstractFloat} formulae::Dict{String,T} end function μᵨ( m::Mixture, energies::AbstractArray{<:Unitful.Energy}, a::Type{<:Attenuation}, ) body = Dict{String,String}( "Method" => "3", "Formulae" => join(["$k $v" for (k, v) in m.formulae], '\n'), bodykey(a) => "on", "Energies" => formatenergies(energies), ) μᵨ = XCOM(body) * cm^2 ./ g AxisArray(μᵨ, Axis{:energy}(energies)) end struct Material{T,S} <: Matter where {T<:Unitful.Energy,S<:Unitful.Density} name::String ZAratio::Float64 I::T ρ::S composition::Dict{Int,Float64} end μᵨ( m::Material, energies::AbstractArray{<:Unitful.Energy}, a::Type{<:Attenuation} = DefaultAttenuation, ) = μᵨ( Mixture(Dict([(Elements[k].symbol, v) for (k, v) in m.composition])), energies, a, ) μ( m::Material, energies::AbstractArray{<:Unitful.Energy}, a::Type{<:Attenuation} = DefaultAttenuation, ) = AxisArray(m.ρ * μᵨ(m, energies, a), Axis{:energy}(energies)) Base.show(io::IO, m::Material) = print( io, "$(m.name) Z/A=$(m.ZAratio) I=$(m.I) ρ=$(m.ρ)\r\n", join(["$k: $v" for (k, v) in m.composition], "\r\n"), ) include("xcom.jl") include("elements.jl") include("materials.jl") end # module
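A brief usage sketch follows. It is hypothetical (the formula "H2O" and the energies are only illustrative), and the lookups require network access, since `μᵨ` queries the NIST XCOM service over HTTP:

# Hypothetical usage sketch for the Attenuations module.
using Attenuations

water = Compound("H2O")                    # any chemical formula XCOM understands
μᵨ(water, [80keV, 140keV])                 # mass attenuation coefficients in cm²/g (default: total with coherent)
μᵨ(water, 80keV, PhotoelectricAbsorption)  # single energy, photoelectric component only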
{-# OPTIONS --safe #-} module Cubical.Algebra.Group.Instances.Unit where open import Cubical.Foundations.Prelude open import Cubical.Foundations.Isomorphism open import Cubical.Foundations.Structure open import Cubical.Foundations.HLevels open import Cubical.Foundations.Equiv open import Cubical.Data.Unit open import Cubical.Algebra.Group.Base open import Cubical.Algebra.Group.DirProd open import Cubical.Algebra.Group.Morphisms open import Cubical.Algebra.Group.MorphismProperties open import Cubical.Algebra.Group.GroupPath open GroupStr open IsGroupHom private variable ℓ : Level UnitGroup₀ : Group₀ fst UnitGroup₀ = Unit 1g (snd UnitGroup₀) = tt _·_ (snd UnitGroup₀) = λ _ _ → tt inv (snd UnitGroup₀) = λ _ → tt isGroup (snd UnitGroup₀) = makeIsGroup isSetUnit (λ _ _ _ → refl) (λ _ → refl) (λ _ → refl) (λ _ → refl) (λ _ → refl) UnitGroup : Group ℓ fst UnitGroup = Unit* 1g (snd UnitGroup) = tt* _·_ (snd UnitGroup) = λ _ _ → tt* inv (snd UnitGroup) = λ _ → tt* isGroup (snd UnitGroup) = makeIsGroup (isOfHLevelUnit* 2) (λ _ _ _ → refl) (λ _ → refl) (λ _ → refl) (λ _ → refl) (λ _ → refl) open Iso -- The trivial group is a unit. lUnitGroupIso : {G : Group ℓ} → GroupIso (DirProd UnitGroup₀ G) G fun (fst lUnitGroupIso) = snd inv (fst lUnitGroupIso) g = tt , g rightInv (fst lUnitGroupIso) _ = refl leftInv (fst lUnitGroupIso) _ = refl snd lUnitGroupIso = makeIsGroupHom λ _ _ → refl rUnitGroupIso : {G : Group ℓ} → GroupIso (DirProd G UnitGroup₀) G fun (fst rUnitGroupIso) = fst inv (fst rUnitGroupIso) g = g , tt rightInv (fst rUnitGroupIso) _ = refl leftInv (fst rUnitGroupIso) _ = refl snd rUnitGroupIso = makeIsGroupHom λ _ _ → refl lUnitGroupEquiv : {G : Group ℓ} → GroupEquiv (DirProd UnitGroup₀ G) G lUnitGroupEquiv = GroupIso→GroupEquiv lUnitGroupIso rUnitGroupEquiv : ∀ {ℓ} {G : Group ℓ} → GroupEquiv (DirProd G UnitGroup₀) G rUnitGroupEquiv = GroupIso→GroupEquiv rUnitGroupIso contrGroupIsoUnit : {G : Group ℓ} → isContr ⟨ G ⟩ → GroupIso G UnitGroup₀ fun (fst (contrGroupIsoUnit contr)) _ = tt inv (fst (contrGroupIsoUnit contr)) _ = fst contr rightInv (fst (contrGroupIsoUnit contr)) _ = refl leftInv (fst (contrGroupIsoUnit contr)) x = snd contr x snd (contrGroupIsoUnit contr) = makeIsGroupHom λ _ _ → refl contrGroupEquivUnit : {G : Group ℓ} → isContr ⟨ G ⟩ → GroupEquiv G UnitGroup₀ contrGroupEquivUnit contr = GroupIso→GroupEquiv (contrGroupIsoUnit contr) isContr→≡UnitGroup : {G : Group ℓ-zero} → isContr (fst G) → UnitGroup₀ ≡ G isContr→≡UnitGroup c = fst (GroupPath _ _) (invGroupEquiv ((isContr→≃Unit c) , (makeIsGroupHom (λ _ _ → refl)))) GroupIsoUnitGroup→isContr : {G : Group ℓ-zero} → GroupIso UnitGroup₀ G → isContr (fst G) GroupIsoUnitGroup→isContr is = isOfHLevelRetractFromIso 0 (invIso (fst is)) isContrUnit →UnitHom : ∀ {ℓ} (G : Group ℓ) → GroupHom G UnitGroup₀ fst (→UnitHom G) _ = tt snd (→UnitHom G) = makeIsGroupHom λ _ _ → refl
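As a quick, hypothetical sanity check (not part of the module), the left unit law above can be instantiated with the trivial group itself:

-- Hypothetical example: the direct product of two trivial groups is trivial.
_ : GroupIso (DirProd UnitGroup₀ UnitGroup₀) UnitGroup₀
_ = lUnitGroupIso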
proposition homotopic_with_trans: assumes "homotopic_with P X Y f g" "homotopic_with P X Y g h" shows "homotopic_with P X Y f h"
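Only the statement is recorded here. As a hedged illustration (a hypothetical corollary, not taken from the source), the rule chains across three homotopies as expected:

(* Hypothetical corollary illustrating how homotopic_with_trans chains. *)
lemma homotopic_with_trans3:
  assumes "homotopic_with P X Y f g" "homotopic_with P X Y g h" "homotopic_with P X Y h k"
  shows "homotopic_with P X Y f k"
  using assms by (blast intro: homotopic_with_trans)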
/- Copyright (c) 2018 Mario Carneiro. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Mario Carneiro, Johannes Hölzl ! This file was ported from Lean 3 source module data.list.forall2 ! leanprover-community/mathlib commit 10708587e81b68c763fcdb7505f279d52e569768 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Data.List.Infix /-! # Double universal quantification on a list > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file provides an API for `list.forall₂` (definition in `data.list.defs`). `forall₂ R l₁ l₂` means that `l₁` and `l₂` have the same length, and whenever `a` is the nth element of `l₁`, and `b` is the nth element of `l₂`, then `R a b` is satisfied. -/ open Nat Function namespace List variable {α β γ δ : Type _} {R S : α → β → Prop} {P : γ → δ → Prop} {Rₐ : α → α → Prop} open Relator mk_iff_of_inductive_prop List.Forall₂ List.forall₂_iff /- warning: list.forall₂_cons -> List.forall₂_cons is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {a : α} {b : β} {l₁ : List.{u1} α} {l₂ : List.{u2} β}, Iff (List.Forall₂.{u1, u2} α β R (List.cons.{u1} α a l₁) (List.cons.{u2} β b l₂)) (And (R a b) (List.Forall₂.{u1, u2} α β R l₁ l₂)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {a : α} {b : β} {l₁ : List.{u2} α} {l₂ : List.{u1} β}, Iff (List.Forall₂.{u2, u1} α β R (List.cons.{u2} α a l₁) (List.cons.{u1} β b l₂)) (And (R a b) (List.Forall₂.{u2, u1} α β R l₁ l₂)) Case conversion may be inaccurate. Consider using '#align list.forall₂_cons List.forall₂_consₓ'. -/ @[simp] theorem forall₂_cons {a b l₁ l₂} : Forall₂ R (a :: l₁) (b :: l₂) ↔ R a b ∧ Forall₂ R l₁ l₂ := ⟨fun h => by cases' h with h₁ h₂ <;> constructor <;> assumption, fun ⟨h₁, h₂⟩ => Forall₂.cons h₁ h₂⟩ #align list.forall₂_cons List.forall₂_cons /- warning: list.forall₂.imp -> List.Forall₂.imp is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {S : α -> β -> Prop}, (forall (a : α) (b : β), (R a b) -> (S a b)) -> (forall {l₁ : List.{u1} α} {l₂ : List.{u2} β}, (List.Forall₂.{u1, u2} α β R l₁ l₂) -> (List.Forall₂.{u1, u2} α β S l₁ l₂)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {S : α -> β -> Prop}, (forall (a : α) (b : β), (R a b) -> (S a b)) -> (forall {l₁ : List.{u2} α} {l₂ : List.{u1} β}, (List.Forall₂.{u2, u1} α β R l₁ l₂) -> (List.Forall₂.{u2, u1} α β S l₁ l₂)) Case conversion may be inaccurate. Consider using '#align list.forall₂.imp List.Forall₂.impₓ'. 
-/ theorem Forall₂.imp (H : ∀ a b, R a b → S a b) {l₁ l₂} (h : Forall₂ R l₁ l₂) : Forall₂ S l₁ l₂ := by induction h <;> constructor <;> solve_by_elim #align list.forall₂.imp List.Forall₂.imp /- warning: list.forall₂.mp -> List.Forall₂.mp is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {S : α -> β -> Prop} {Q : α -> β -> Prop}, (forall (a : α) (b : β), (Q a b) -> (R a b) -> (S a b)) -> (forall {l₁ : List.{u1} α} {l₂ : List.{u2} β}, (List.Forall₂.{u1, u2} α β Q l₁ l₂) -> (List.Forall₂.{u1, u2} α β R l₁ l₂) -> (List.Forall₂.{u1, u2} α β S l₁ l₂)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {S : α -> β -> Prop} {Q : α -> β -> Prop}, (forall (a : α) (b : β), (Q a b) -> (R a b) -> (S a b)) -> (forall {l₁ : List.{u2} α} {l₂ : List.{u1} β}, (List.Forall₂.{u2, u1} α β Q l₁ l₂) -> (List.Forall₂.{u2, u1} α β R l₁ l₂) -> (List.Forall₂.{u2, u1} α β S l₁ l₂)) Case conversion may be inaccurate. Consider using '#align list.forall₂.mp List.Forall₂.mpₓ'. -/ theorem Forall₂.mp {Q : α → β → Prop} (h : ∀ a b, Q a b → R a b → S a b) : ∀ {l₁ l₂}, Forall₂ Q l₁ l₂ → Forall₂ R l₁ l₂ → Forall₂ S l₁ l₂ | [], [], forall₂.nil, forall₂.nil => Forall₂.nil | a :: l₁, b :: l₂, forall₂.cons hr hrs, forall₂.cons hq hqs => Forall₂.cons (h a b hr hq) (forall₂.mp hrs hqs) #align list.forall₂.mp List.Forall₂.mp /- warning: list.forall₂.flip -> List.Forall₂.flip is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {a : List.{u1} α} {b : List.{u2} β}, (List.Forall₂.{u2, u1} β α (flip.{succ u1, succ u2, 1} α β Prop R) b a) -> (List.Forall₂.{u1, u2} α β R a b) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {a : List.{u2} α} {b : List.{u1} β}, (List.Forall₂.{u1, u2} β α (flip.{succ u2, succ u1, 1} α β Prop R) b a) -> (List.Forall₂.{u2, u1} α β R a b) Case conversion may be inaccurate. Consider using '#align list.forall₂.flip List.Forall₂.flipₓ'. 
-/ theorem Forall₂.flip : ∀ {a b}, Forall₂ (flip R) b a → Forall₂ R a b | _, _, forall₂.nil => Forall₂.nil | a :: as, b :: bs, forall₂.cons h₁ h₂ => Forall₂.cons h₁ h₂.flip #align list.forall₂.flip List.Forall₂.flip #print List.forall₂_same /- @[simp] theorem forall₂_same : ∀ {l : List α}, Forall₂ Rₐ l l ↔ ∀ x ∈ l, Rₐ x x | [] => by simp | a :: l => by simp [@forall₂_same l] #align list.forall₂_same List.forall₂_same -/ #print List.forall₂_refl /- theorem forall₂_refl [IsRefl α Rₐ] (l : List α) : Forall₂ Rₐ l l := forall₂_same.2 fun a h => refl _ #align list.forall₂_refl List.forall₂_refl -/ #print List.forall₂_eq_eq_eq /- @[simp] theorem forall₂_eq_eq_eq : Forall₂ ((· = ·) : α → α → Prop) = (· = ·) := by funext a b; apply propext constructor · intro h induction h · rfl simp only [*] <;> constructor <;> rfl · rintro rfl exact forall₂_refl _ #align list.forall₂_eq_eq_eq List.forall₂_eq_eq_eq -/ #print List.forall₂_nil_left_iff /- @[simp] theorem forall₂_nil_left_iff {l} : Forall₂ R nil l ↔ l = nil := ⟨fun H => by cases H <;> rfl, by rintro rfl <;> exact forall₂.nil⟩ #align list.forall₂_nil_left_iff List.forall₂_nil_left_iff -/ /- warning: list.forall₂_nil_right_iff -> List.forall₂_nil_right_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {l : List.{u1} α}, Iff (List.Forall₂.{u1, u2} α β R l (List.nil.{u2} β)) (Eq.{succ u1} (List.{u1} α) l (List.nil.{u1} α)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {l : List.{u2} α}, Iff (List.Forall₂.{u2, u1} α β R l (List.nil.{u1} β)) (Eq.{succ u2} (List.{u2} α) l (List.nil.{u2} α)) Case conversion may be inaccurate. Consider using '#align list.forall₂_nil_right_iff List.forall₂_nil_right_iffₓ'. -/ @[simp] theorem forall₂_nil_right_iff {l} : Forall₂ R l nil ↔ l = nil := ⟨fun H => by cases H <;> rfl, by rintro rfl <;> exact forall₂.nil⟩ #align list.forall₂_nil_right_iff List.forall₂_nil_right_iff /- warning: list.forall₂_cons_left_iff -> List.forall₂_cons_left_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {a : α} {l : List.{u1} α} {u : List.{u2} β}, Iff (List.Forall₂.{u1, u2} α β R (List.cons.{u1} α a l) u) (Exists.{succ u2} β (fun (b : β) => Exists.{succ u2} (List.{u2} β) (fun (u' : List.{u2} β) => And (R a b) (And (List.Forall₂.{u1, u2} α β R l u') (Eq.{succ u2} (List.{u2} β) u (List.cons.{u2} β b u')))))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {a : α} {l : List.{u2} α} {u : List.{u1} β}, Iff (List.Forall₂.{u2, u1} α β R (List.cons.{u2} α a l) u) (Exists.{succ u1} β (fun (b : β) => Exists.{succ u1} (List.{u1} β) (fun (u' : List.{u1} β) => And (R a b) (And (List.Forall₂.{u2, u1} α β R l u') (Eq.{succ u1} (List.{u1} β) u (List.cons.{u1} β b u')))))) Case conversion may be inaccurate. Consider using '#align list.forall₂_cons_left_iff List.forall₂_cons_left_iffₓ'. 
-/ theorem forall₂_cons_left_iff {a l u} : Forall₂ R (a :: l) u ↔ ∃ b u', R a b ∧ Forall₂ R l u' ∧ u = b :: u' := Iff.intro (fun h => match u, h with | b :: u', forall₂.cons h₁ h₂ => ⟨b, u', h₁, h₂, rfl⟩) fun h => match u, h with | _, ⟨b, u', h₁, h₂, rfl⟩ => Forall₂.cons h₁ h₂ #align list.forall₂_cons_left_iff List.forall₂_cons_left_iff #print List.forall₂_cons_right_iff /- theorem forall₂_cons_right_iff {b l u} : Forall₂ R u (b :: l) ↔ ∃ a u', R a b ∧ Forall₂ R u' l ∧ u = a :: u' := Iff.intro (fun h => match u, h with | b :: u', forall₂.cons h₁ h₂ => ⟨b, u', h₁, h₂, rfl⟩) fun h => match u, h with | _, ⟨b, u', h₁, h₂, rfl⟩ => Forall₂.cons h₁ h₂ #align list.forall₂_cons_right_iff List.forall₂_cons_right_iff -/ /- warning: list.forall₂_and_left -> List.forall₂_and_left is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {p : α -> Prop} (l : List.{u1} α) (u : List.{u2} β), Iff (List.Forall₂.{u1, u2} α β (fun (a : α) (b : β) => And (p a) (R a b)) l u) (And (forall (a : α), (Membership.Mem.{u1, u1} α (List.{u1} α) (List.hasMem.{u1} α) a l) -> (p a)) (List.Forall₂.{u1, u2} α β R l u)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {p : α -> Prop} (l : List.{u2} α) (u : List.{u1} β), Iff (List.Forall₂.{u2, u1} α β (fun (a : α) (b : β) => And (p a) (R a b)) l u) (And (forall (a : α), (Membership.mem.{u2, u2} α (List.{u2} α) (List.instMembershipList.{u2} α) a l) -> (p a)) (List.Forall₂.{u2, u1} α β R l u)) Case conversion may be inaccurate. Consider using '#align list.forall₂_and_left List.forall₂_and_leftₓ'. -/ theorem forall₂_and_left {p : α → Prop} : ∀ l u, Forall₂ (fun a b => p a ∧ R a b) l u ↔ (∀ a ∈ l, p a) ∧ Forall₂ R l u | [], u => by simp only [forall₂_nil_left_iff, forall_prop_of_false (not_mem_nil _), imp_true_iff, true_and_iff] | a :: l, u => by simp only [forall₂_and_left l, forall₂_cons_left_iff, forall_mem_cons, and_assoc', and_comm', and_left_comm, exists_and_distrib_left.symm] #align list.forall₂_and_left List.forall₂_and_left #print List.forall₂_map_left_iff /- @[simp] theorem forall₂_map_left_iff {f : γ → α} : ∀ {l u}, Forall₂ R (map f l) u ↔ Forall₂ (fun c b => R (f c) b) l u | [], _ => by simp only [map, forall₂_nil_left_iff] | a :: l, _ => by simp only [map, forall₂_cons_left_iff, forall₂_map_left_iff] #align list.forall₂_map_left_iff List.forall₂_map_left_iff -/ /- warning: list.forall₂_map_right_iff -> List.forall₂_map_right_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {R : α -> β -> Prop} {f : γ -> β} {l : List.{u1} α} {u : List.{u3} γ}, Iff (List.Forall₂.{u1, u2} α β R l (List.map.{u3, u2} γ β f u)) (List.Forall₂.{u1, u3} α γ (fun (a : α) (c : γ) => R a (f c)) l u) but is expected to have type forall {α : Type.{u3}} {β : Type.{u1}} {γ : Type.{u2}} {R : α -> β -> Prop} {f : γ -> β} {l : List.{u3} α} {u : List.{u2} γ}, Iff (List.Forall₂.{u3, u1} α β R l (List.map.{u2, u1} γ β f u)) (List.Forall₂.{u3, u2} α γ (fun (a : α) (c : γ) => R a (f c)) l u) Case conversion may be inaccurate. Consider using '#align list.forall₂_map_right_iff List.forall₂_map_right_iffₓ'. 
-/ @[simp] theorem forall₂_map_right_iff {f : γ → β} : ∀ {l u}, Forall₂ R l (map f u) ↔ Forall₂ (fun a c => R a (f c)) l u | _, [] => by simp only [map, forall₂_nil_right_iff] | _, b :: u => by simp only [map, forall₂_cons_right_iff, forall₂_map_right_iff] #align list.forall₂_map_right_iff List.forall₂_map_right_iff /- warning: list.left_unique_forall₂' -> List.left_unique_forall₂' is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, (Relator.LeftUnique.{u1, u2} α β R) -> (forall {a : List.{u1} α} {b : List.{u1} α} {c : List.{u2} β}, (List.Forall₂.{u1, u2} α β R a c) -> (List.Forall₂.{u1, u2} α β R b c) -> (Eq.{succ u1} (List.{u1} α) a b)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, (Relator.LeftUnique.{u2, u1} α β R) -> (forall {a : List.{u2} α} {b : List.{u2} α} {c : List.{u1} β}, (List.Forall₂.{u2, u1} α β R a c) -> (List.Forall₂.{u2, u1} α β R b c) -> (Eq.{succ u2} (List.{u2} α) a b)) Case conversion may be inaccurate. Consider using '#align list.left_unique_forall₂' List.left_unique_forall₂'ₓ'. -/ theorem left_unique_forall₂' (hr : LeftUnique R) : ∀ {a b c}, Forall₂ R a c → Forall₂ R b c → a = b | a₀, nil, a₁, forall₂.nil, forall₂.nil => rfl | a₀ :: l₀, b :: l, a₁ :: l₁, forall₂.cons ha₀ h₀, forall₂.cons ha₁ h₁ => hr ha₀ ha₁ ▸ left_unique_forall₂' h₀ h₁ ▸ rfl #align list.left_unique_forall₂' List.left_unique_forall₂' /- warning: relator.left_unique.forall₂ -> Relator.LeftUnique.forall₂ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, (Relator.LeftUnique.{u1, u2} α β R) -> (Relator.LeftUnique.{u1, u2} (List.{u1} α) (List.{u2} β) (List.Forall₂.{u1, u2} α β R)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, (Relator.LeftUnique.{u2, u1} α β R) -> (Relator.LeftUnique.{u2, u1} (List.{u2} α) (List.{u1} β) (List.Forall₂.{u2, u1} α β R)) Case conversion may be inaccurate. Consider using '#align relator.left_unique.forall₂ Relator.LeftUnique.forall₂ₓ'. -/ theorem Relator.LeftUnique.forall₂ (hr : LeftUnique R) : LeftUnique (Forall₂ R) := @left_unique_forall₂' _ _ _ hr #align relator.left_unique.forall₂ Relator.LeftUnique.forall₂ /- warning: list.right_unique_forall₂' -> List.right_unique_forall₂' is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, (Relator.RightUnique.{u1, u2} α β R) -> (forall {a : List.{u1} α} {b : List.{u2} β} {c : List.{u2} β}, (List.Forall₂.{u1, u2} α β R a b) -> (List.Forall₂.{u1, u2} α β R a c) -> (Eq.{succ u2} (List.{u2} β) b c)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, (Relator.RightUnique.{u2, u1} α β R) -> (forall {a : List.{u2} α} {b : List.{u1} β} {c : List.{u1} β}, (List.Forall₂.{u2, u1} α β R a b) -> (List.Forall₂.{u2, u1} α β R a c) -> (Eq.{succ u1} (List.{u1} β) b c)) Case conversion may be inaccurate. Consider using '#align list.right_unique_forall₂' List.right_unique_forall₂'ₓ'. 
-/ theorem right_unique_forall₂' (hr : RightUnique R) : ∀ {a b c}, Forall₂ R a b → Forall₂ R a c → b = c | nil, a₀, a₁, forall₂.nil, forall₂.nil => rfl | b :: l, a₀ :: l₀, a₁ :: l₁, forall₂.cons ha₀ h₀, forall₂.cons ha₁ h₁ => hr ha₀ ha₁ ▸ right_unique_forall₂' h₀ h₁ ▸ rfl #align list.right_unique_forall₂' List.right_unique_forall₂' /- warning: relator.right_unique.forall₂ -> Relator.RightUnique.forall₂ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, (Relator.RightUnique.{u1, u2} α β R) -> (Relator.RightUnique.{u1, u2} (List.{u1} α) (List.{u2} β) (List.Forall₂.{u1, u2} α β R)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, (Relator.RightUnique.{u2, u1} α β R) -> (Relator.RightUnique.{u2, u1} (List.{u2} α) (List.{u1} β) (List.Forall₂.{u2, u1} α β R)) Case conversion may be inaccurate. Consider using '#align relator.right_unique.forall₂ Relator.RightUnique.forall₂ₓ'. -/ theorem Relator.RightUnique.forall₂ (hr : RightUnique R) : RightUnique (Forall₂ R) := @right_unique_forall₂' _ _ _ hr #align relator.right_unique.forall₂ Relator.RightUnique.forall₂ /- warning: relator.bi_unique.forall₂ -> Relator.BiUnique.forall₂ is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, (Relator.BiUnique.{u1, u2} α β R) -> (Relator.BiUnique.{u1, u2} (List.{u1} α) (List.{u2} β) (List.Forall₂.{u1, u2} α β R)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, (Relator.BiUnique.{u2, u1} α β R) -> (Relator.BiUnique.{u2, u1} (List.{u2} α) (List.{u1} β) (List.Forall₂.{u2, u1} α β R)) Case conversion may be inaccurate. Consider using '#align relator.bi_unique.forall₂ Relator.BiUnique.forall₂ₓ'. -/ theorem Relator.BiUnique.forall₂ (hr : BiUnique R) : BiUnique (Forall₂ R) := ⟨hr.left.forall₂, hr.right.forall₂⟩ #align relator.bi_unique.forall₂ Relator.BiUnique.forall₂ /- warning: list.forall₂.length_eq -> List.Forall₂.length_eq is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {l₁ : List.{u1} α} {l₂ : List.{u2} β}, (List.Forall₂.{u1, u2} α β R l₁ l₂) -> (Eq.{1} Nat (List.length.{u1} α l₁) (List.length.{u2} β l₂)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {l₁ : List.{u2} α} {l₂ : List.{u1} β}, (List.Forall₂.{u2, u1} α β R l₁ l₂) -> (Eq.{1} Nat (List.length.{u2} α l₁) (List.length.{u1} β l₂)) Case conversion may be inaccurate. Consider using '#align list.forall₂.length_eq List.Forall₂.length_eqₓ'. 
-/ theorem Forall₂.length_eq : ∀ {l₁ l₂}, Forall₂ R l₁ l₂ → length l₁ = length l₂ | _, _, forall₂.nil => rfl | _, _, forall₂.cons h₁ h₂ => congr_arg succ (forall₂.length_eq h₂) #align list.forall₂.length_eq List.Forall₂.length_eq /- warning: list.forall₂.nth_le -> List.Forall₂.nthLe is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {x : List.{u1} α} {y : List.{u2} β}, (List.Forall₂.{u1, u2} α β R x y) -> (forall {{i : Nat}} (hx : LT.lt.{0} Nat Nat.hasLt i (List.length.{u1} α x)) (hy : LT.lt.{0} Nat Nat.hasLt i (List.length.{u2} β y)), R (List.nthLe.{u1} α x i hx) (List.nthLe.{u2} β y i hy)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {x : List.{u2} α} {y : List.{u1} β}, (List.Forall₂.{u2, u1} α β R x y) -> (forall {{i : Nat}} (hx : LT.lt.{0} Nat instLTNat i (List.length.{u2} α x)) (hy : LT.lt.{0} Nat instLTNat i (List.length.{u1} β y)), R (List.nthLe.{u2} α x i hx) (List.nthLe.{u1} β y i hy)) Case conversion may be inaccurate. Consider using '#align list.forall₂.nth_le List.Forall₂.nthLeₓ'. -/ theorem Forall₂.nthLe : ∀ {x : List α} {y : List β} (h : Forall₂ R x y) ⦃i : ℕ⦄ (hx : i < x.length) (hy : i < y.length), R (x.nthLe i hx) (y.nthLe i hy) | a₁ :: l₁, a₂ :: l₂, forall₂.cons ha hl, 0, hx, hy => ha | a₁ :: l₁, a₂ :: l₂, forall₂.cons ha hl, succ i, hx, hy => hl.nthLe _ _ #align list.forall₂.nth_le List.Forall₂.nthLe /- warning: list.forall₂_of_length_eq_of_nth_le -> List.forall₂_of_length_eq_of_nthLe is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {x : List.{u1} α} {y : List.{u2} β}, (Eq.{1} Nat (List.length.{u1} α x) (List.length.{u2} β y)) -> (forall (i : Nat) (h₁ : LT.lt.{0} Nat Nat.hasLt i (List.length.{u1} α x)) (h₂ : LT.lt.{0} Nat Nat.hasLt i (List.length.{u2} β y)), R (List.nthLe.{u1} α x i h₁) (List.nthLe.{u2} β y i h₂)) -> (List.Forall₂.{u1, u2} α β R x y) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {x : List.{u2} α} {y : List.{u1} β}, (Eq.{1} Nat (List.length.{u2} α x) (List.length.{u1} β y)) -> (forall (i : Nat) (h₁ : LT.lt.{0} Nat instLTNat i (List.length.{u2} α x)) (h₂ : LT.lt.{0} Nat instLTNat i (List.length.{u1} β y)), R (List.nthLe.{u2} α x i h₁) (List.nthLe.{u1} β y i h₂)) -> (List.Forall₂.{u2, u1} α β R x y) Case conversion may be inaccurate. Consider using '#align list.forall₂_of_length_eq_of_nth_le List.forall₂_of_length_eq_of_nthLeₓ'. 
-/ theorem forall₂_of_length_eq_of_nthLe : ∀ {x : List α} {y : List β}, x.length = y.length → (∀ i h₁ h₂, R (x.nthLe i h₁) (y.nthLe i h₂)) → Forall₂ R x y | [], [], hl, h => Forall₂.nil | a₁ :: l₁, a₂ :: l₂, hl, h => Forall₂.cons (h 0 (Nat.zero_lt_succ _) (Nat.zero_lt_succ _)) (forall₂_of_length_eq_of_nth_le (succ.inj hl) fun i h₁ h₂ => h i.succ (succ_lt_succ h₁) (succ_lt_succ h₂)) #align list.forall₂_of_length_eq_of_nth_le List.forall₂_of_length_eq_of_nthLe /- warning: list.forall₂_iff_nth_le -> List.forall₂_iff_nthLe is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {l₁ : List.{u1} α} {l₂ : List.{u2} β}, Iff (List.Forall₂.{u1, u2} α β R l₁ l₂) (And (Eq.{1} Nat (List.length.{u1} α l₁) (List.length.{u2} β l₂)) (forall (i : Nat) (h₁ : LT.lt.{0} Nat Nat.hasLt i (List.length.{u1} α l₁)) (h₂ : LT.lt.{0} Nat Nat.hasLt i (List.length.{u2} β l₂)), R (List.nthLe.{u1} α l₁ i h₁) (List.nthLe.{u2} β l₂ i h₂))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {l₁ : List.{u2} α} {l₂ : List.{u1} β}, Iff (List.Forall₂.{u2, u1} α β R l₁ l₂) (And (Eq.{1} Nat (List.length.{u2} α l₁) (List.length.{u1} β l₂)) (forall (i : Nat) (h₁ : LT.lt.{0} Nat instLTNat i (List.length.{u2} α l₁)) (h₂ : LT.lt.{0} Nat instLTNat i (List.length.{u1} β l₂)), R (List.nthLe.{u2} α l₁ i h₁) (List.nthLe.{u1} β l₂ i h₂))) Case conversion may be inaccurate. Consider using '#align list.forall₂_iff_nth_le List.forall₂_iff_nthLeₓ'. -/ theorem forall₂_iff_nthLe {l₁ : List α} {l₂ : List β} : Forall₂ R l₁ l₂ ↔ l₁.length = l₂.length ∧ ∀ i h₁ h₂, R (l₁.nthLe i h₁) (l₂.nthLe i h₂) := ⟨fun h => ⟨h.length_eq, h.nthLe⟩, And.ndrec forall₂_of_length_eq_of_nthLe⟩ #align list.forall₂_iff_nth_le List.forall₂_iff_nthLe /- warning: list.forall₂_zip -> List.forall₂_zip is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {l₁ : List.{u1} α} {l₂ : List.{u2} β}, (List.Forall₂.{u1, u2} α β R l₁ l₂) -> (forall {a : α} {b : β}, (Membership.Mem.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (List.{max u1 u2} (Prod.{u1, u2} α β)) (List.hasMem.{max u1 u2} (Prod.{u1, u2} α β)) (Prod.mk.{u1, u2} α β a b) (List.zip.{u1, u2} α β l₁ l₂)) -> (R a b)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {l₁ : List.{u2} α} {l₂ : List.{u1} β}, (List.Forall₂.{u2, u1} α β R l₁ l₂) -> (forall {a : α} {b : β}, (Membership.mem.{max u1 u2, max u1 u2} (Prod.{u2, u1} α β) (List.{max u1 u2} (Prod.{u2, u1} α β)) (List.instMembershipList.{max u1 u2} (Prod.{u2, u1} α β)) (Prod.mk.{u2, u1} α β a b) (List.zip.{u2, u1} α β l₁ l₂)) -> (R a b)) Case conversion may be inaccurate. Consider using '#align list.forall₂_zip List.forall₂_zipₓ'. 
-/ theorem forall₂_zip : ∀ {l₁ l₂}, Forall₂ R l₁ l₂ → ∀ {a b}, (a, b) ∈ zip l₁ l₂ → R a b | _, _, forall₂.cons h₁ h₂, x, y, Or.inl rfl => h₁ | _, _, forall₂.cons h₁ h₂, x, y, Or.inr h₃ => forall₂_zip h₂ h₃ #align list.forall₂_zip List.forall₂_zip /- warning: list.forall₂_iff_zip -> List.forall₂_iff_zip is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {l₁ : List.{u1} α} {l₂ : List.{u2} β}, Iff (List.Forall₂.{u1, u2} α β R l₁ l₂) (And (Eq.{1} Nat (List.length.{u1} α l₁) (List.length.{u2} β l₂)) (forall {a : α} {b : β}, (Membership.Mem.{max u1 u2, max u1 u2} (Prod.{u1, u2} α β) (List.{max u1 u2} (Prod.{u1, u2} α β)) (List.hasMem.{max u1 u2} (Prod.{u1, u2} α β)) (Prod.mk.{u1, u2} α β a b) (List.zip.{u1, u2} α β l₁ l₂)) -> (R a b))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {l₁ : List.{u2} α} {l₂ : List.{u1} β}, Iff (List.Forall₂.{u2, u1} α β R l₁ l₂) (And (Eq.{1} Nat (List.length.{u2} α l₁) (List.length.{u1} β l₂)) (forall {a : α} {b : β}, (Membership.mem.{max u1 u2, max u1 u2} (Prod.{u2, u1} α β) (List.{max u1 u2} (Prod.{u2, u1} α β)) (List.instMembershipList.{max u1 u2} (Prod.{u2, u1} α β)) (Prod.mk.{u2, u1} α β a b) (List.zip.{u2, u1} α β l₁ l₂)) -> (R a b))) Case conversion may be inaccurate. Consider using '#align list.forall₂_iff_zip List.forall₂_iff_zipₓ'. -/ theorem forall₂_iff_zip {l₁ l₂} : Forall₂ R l₁ l₂ ↔ length l₁ = length l₂ ∧ ∀ {a b}, (a, b) ∈ zip l₁ l₂ → R a b := ⟨fun h => ⟨Forall₂.length_eq h, @forall₂_zip _ _ _ _ _ h⟩, fun h => by cases' h with h₁ h₂ induction' l₁ with a l₁ IH generalizing l₂ · cases length_eq_zero.1 h₁.symm constructor · cases' l₂ with b l₂ <;> injection h₁ with h₁ exact forall₂.cons (h₂ <| Or.inl rfl) (IH h₁ fun a b h => h₂ <| Or.inr h)⟩ #align list.forall₂_iff_zip List.forall₂_iff_zip /- warning: list.forall₂_take -> List.forall₂_take is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} (n : Nat) {l₁ : List.{u1} α} {l₂ : List.{u2} β}, (List.Forall₂.{u1, u2} α β R l₁ l₂) -> (List.Forall₂.{u1, u2} α β R (List.take.{u1} α n l₁) (List.take.{u2} β n l₂)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} (n : Nat) {l₁ : List.{u2} α} {l₂ : List.{u1} β}, (List.Forall₂.{u2, u1} α β R l₁ l₂) -> (List.Forall₂.{u2, u1} α β R (List.take.{u2} α n l₁) (List.take.{u1} β n l₂)) Case conversion may be inaccurate. Consider using '#align list.forall₂_take List.forall₂_takeₓ'. -/ theorem forall₂_take : ∀ (n) {l₁ l₂}, Forall₂ R l₁ l₂ → Forall₂ R (take n l₁) (take n l₂) | 0, _, _, _ => by simp only [forall₂.nil, take] | n + 1, _, _, forall₂.nil => by simp only [forall₂.nil, take] | n + 1, _, _, forall₂.cons h₁ h₂ => by simp [And.intro h₁ h₂, forall₂_take n] #align list.forall₂_take List.forall₂_take /- warning: list.forall₂_drop -> List.forall₂_drop is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} (n : Nat) {l₁ : List.{u1} α} {l₂ : List.{u2} β}, (List.Forall₂.{u1, u2} α β R l₁ l₂) -> (List.Forall₂.{u1, u2} α β R (List.drop.{u1} α n l₁) (List.drop.{u2} β n l₂)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} (n : Nat) {l₁ : List.{u2} α} {l₂ : List.{u1} β}, (List.Forall₂.{u2, u1} α β R l₁ l₂) -> (List.Forall₂.{u2, u1} α β R (List.drop.{u2} α n l₁) (List.drop.{u1} β n l₂)) Case conversion may be inaccurate. Consider using '#align list.forall₂_drop List.forall₂_dropₓ'. 
-/ theorem forall₂_drop : ∀ (n) {l₁ l₂}, Forall₂ R l₁ l₂ → Forall₂ R (drop n l₁) (drop n l₂) | 0, _, _, h => by simp only [drop, h] | n + 1, _, _, forall₂.nil => by simp only [forall₂.nil, drop] | n + 1, _, _, forall₂.cons h₁ h₂ => by simp [And.intro h₁ h₂, forall₂_drop n] #align list.forall₂_drop List.forall₂_drop /- warning: list.forall₂_take_append -> List.forall₂_take_append is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} (l : List.{u1} α) (l₁ : List.{u2} β) (l₂ : List.{u2} β), (List.Forall₂.{u1, u2} α β R l (Append.append.{u2} (List.{u2} β) (List.hasAppend.{u2} β) l₁ l₂)) -> (List.Forall₂.{u1, u2} α β R (List.take.{u1} α (List.length.{u2} β l₁) l) l₁) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} (l : List.{u2} α) (l₁ : List.{u1} β) (l₂ : List.{u1} β), (List.Forall₂.{u2, u1} α β R l (HAppend.hAppend.{u1, u1, u1} (List.{u1} β) (List.{u1} β) (List.{u1} β) (instHAppend.{u1} (List.{u1} β) (List.instAppendList.{u1} β)) l₁ l₂)) -> (List.Forall₂.{u2, u1} α β R (List.take.{u2} α (List.length.{u1} β l₁) l) l₁) Case conversion may be inaccurate. Consider using '#align list.forall₂_take_append List.forall₂_take_appendₓ'. -/ theorem forall₂_take_append (l : List α) (l₁ : List β) (l₂ : List β) (h : Forall₂ R l (l₁ ++ l₂)) : Forall₂ R (List.take (length l₁) l) l₁ := by have h' : Forall₂ R (take (length l₁) l) (take (length l₁) (l₁ ++ l₂)) := forall₂_take (length l₁) h rwa [take_left] at h' #align list.forall₂_take_append List.forall₂_take_append /- warning: list.forall₂_drop_append -> List.forall₂_drop_append is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} (l : List.{u1} α) (l₁ : List.{u2} β) (l₂ : List.{u2} β), (List.Forall₂.{u1, u2} α β R l (Append.append.{u2} (List.{u2} β) (List.hasAppend.{u2} β) l₁ l₂)) -> (List.Forall₂.{u1, u2} α β R (List.drop.{u1} α (List.length.{u2} β l₁) l) l₂) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} (l : List.{u2} α) (l₁ : List.{u1} β) (l₂ : List.{u1} β), (List.Forall₂.{u2, u1} α β R l (HAppend.hAppend.{u1, u1, u1} (List.{u1} β) (List.{u1} β) (List.{u1} β) (instHAppend.{u1} (List.{u1} β) (List.instAppendList.{u1} β)) l₁ l₂)) -> (List.Forall₂.{u2, u1} α β R (List.drop.{u2} α (List.length.{u1} β l₁) l) l₂) Case conversion may be inaccurate. Consider using '#align list.forall₂_drop_append List.forall₂_drop_appendₓ'. 
-/ theorem forall₂_drop_append (l : List α) (l₁ : List β) (l₂ : List β) (h : Forall₂ R l (l₁ ++ l₂)) : Forall₂ R (List.drop (length l₁) l) l₂ := by have h' : Forall₂ R (drop (length l₁) l) (drop (length l₁) (l₁ ++ l₂)) := forall₂_drop (length l₁) h rwa [drop_left] at h' #align list.forall₂_drop_append List.forall₂_drop_append /- warning: list.rel_mem -> List.rel_mem is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, (Relator.BiUnique.{u1, u2} α β R) -> (Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} α β ((List.{u1} α) -> Prop) ((List.{u2} β) -> Prop) R (Relator.LiftFun.{succ u1, succ u2, 1, 1} (List.{u1} α) (List.{u2} β) Prop Prop (List.Forall₂.{u1, u2} α β R) Iff) (Membership.Mem.{u1, u1} α (List.{u1} α) (List.hasMem.{u1} α)) (Membership.Mem.{u2, u2} β (List.{u2} β) (List.hasMem.{u2} β))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, (Relator.BiUnique.{u2, u1} α β R) -> (Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} α β ((List.{u2} α) -> Prop) ((List.{u1} β) -> Prop) R (Relator.LiftFun.{succ u2, succ u1, 1, 1} (List.{u2} α) (List.{u1} β) Prop Prop (List.Forall₂.{u2, u1} α β R) Iff) (fun ([email protected]._hyg.5023 : α) ([email protected]._hyg.5025 : List.{u2} α) => Membership.mem.{u2, u2} α (List.{u2} α) (List.instMembershipList.{u2} α) [email protected]._hyg.5023 [email protected]._hyg.5025) (fun ([email protected]._hyg.5038 : β) ([email protected]._hyg.5040 : List.{u1} β) => Membership.mem.{u1, u1} β (List.{u1} β) (List.instMembershipList.{u1} β) [email protected]._hyg.5038 [email protected]._hyg.5040)) Case conversion may be inaccurate. Consider using '#align list.rel_mem List.rel_memₓ'. -/ theorem rel_mem (hr : BiUnique R) : (R ⇒ Forall₂ R ⇒ Iff) (· ∈ ·) (· ∈ ·) | a, b, h, [], [], forall₂.nil => by simp only [not_mem_nil] | a, b, h, a' :: as, b' :: bs, forall₂.cons h₁ h₂ => rel_or (rel_eq hr h h₁) (rel_mem h h₂) #align list.rel_mem List.rel_mem /- warning: list.rel_map -> List.rel_map is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{max (succ u1) (succ u3), max (succ u2) (succ u4), max (succ u1) (succ u3), max (succ u2) (succ u4)} (α -> γ) (β -> δ) ((List.{u1} α) -> (List.{u3} γ)) ((List.{u2} β) -> (List.{u4} δ)) (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} α β γ δ R P) (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} (List.{u1} α) (List.{u2} β) (List.{u3} γ) (List.{u4} δ) (List.Forall₂.{u1, u2} α β R) (List.Forall₂.{u3, u4} γ δ P)) (List.map.{u1, u3} α γ) (List.map.{u2, u4} β δ) but is expected to have type forall {α : Type.{u4}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u1}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{max (succ u4) (succ u3), max (succ u2) (succ u1), max (succ u4) (succ u3), max (succ u2) (succ u1)} (α -> γ) (β -> δ) ((List.{u4} α) -> (List.{u3} γ)) ((List.{u2} β) -> (List.{u1} δ)) (Relator.LiftFun.{succ u4, succ u2, succ u3, succ u1} α β γ δ R P) (Relator.LiftFun.{succ u4, succ u2, succ u3, succ u1} (List.{u4} α) (List.{u2} β) (List.{u3} γ) (List.{u1} δ) (List.Forall₂.{u4, u2} α β R) (List.Forall₂.{u3, u1} γ δ P)) (List.map.{u4, u3} α γ) (List.map.{u2, u1} β δ) Case conversion may be inaccurate. Consider using '#align list.rel_map List.rel_mapₓ'. 
-/ theorem rel_map : ((R ⇒ P) ⇒ Forall₂ R ⇒ Forall₂ P) map map | f, g, h, [], [], forall₂.nil => Forall₂.nil | f, g, h, a :: as, b :: bs, forall₂.cons h₁ h₂ => Forall₂.cons (h h₁) (rel_map (@h) h₂) #align list.rel_map List.rel_map /- warning: list.rel_append -> List.rel_append is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} (List.{u1} α) (List.{u2} β) ((List.{u1} α) -> (List.{u1} α)) ((List.{u2} β) -> (List.{u2} β)) (List.Forall₂.{u1, u2} α β R) (Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} (List.{u1} α) (List.{u2} β) (List.{u1} α) (List.{u2} β) (List.Forall₂.{u1, u2} α β R) (List.Forall₂.{u1, u2} α β R)) (Append.append.{u1} (List.{u1} α) (List.hasAppend.{u1} α)) (Append.append.{u2} (List.{u2} β) (List.hasAppend.{u2} β)) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} (List.{u2} α) (List.{u1} β) ((List.{u2} α) -> (List.{u2} α)) ((List.{u1} β) -> (List.{u1} β)) (List.Forall₂.{u2, u1} α β R) (Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} (List.{u2} α) (List.{u1} β) (List.{u2} α) (List.{u1} β) (List.Forall₂.{u2, u1} α β R) (List.Forall₂.{u2, u1} α β R)) (fun ([email protected]._hyg.5724 : List.{u2} α) ([email protected]._hyg.5726 : List.{u2} α) => HAppend.hAppend.{u2, u2, u2} (List.{u2} α) (List.{u2} α) (List.{u2} α) (instHAppend.{u2} (List.{u2} α) (List.instAppendList.{u2} α)) [email protected]._hyg.5724 [email protected]._hyg.5726) (fun ([email protected]._hyg.5739 : List.{u1} β) ([email protected]._hyg.5741 : List.{u1} β) => HAppend.hAppend.{u1, u1, u1} (List.{u1} β) (List.{u1} β) (List.{u1} β) (instHAppend.{u1} (List.{u1} β) (List.instAppendList.{u1} β)) [email protected]._hyg.5739 [email protected]._hyg.5741) Case conversion may be inaccurate. Consider using '#align list.rel_append List.rel_appendₓ'. -/ theorem rel_append : (Forall₂ R ⇒ Forall₂ R ⇒ Forall₂ R) append append | [], [], h, l₁, l₂, hl => hl | a :: as, b :: bs, forall₂.cons h₁ h₂, l₁, l₂, hl => Forall₂.cons h₁ (rel_append h₂ hl) #align list.rel_append List.rel_append /- warning: list.rel_reverse -> List.rel_reverse is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} (List.{u1} α) (List.{u2} β) (List.{u1} α) (List.{u2} β) (List.Forall₂.{u1, u2} α β R) (List.Forall₂.{u1, u2} α β R) (List.reverse.{u1} α) (List.reverse.{u2} β) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} (List.{u2} α) (List.{u1} β) (List.{u2} α) (List.{u1} β) (List.Forall₂.{u2, u1} α β R) (List.Forall₂.{u2, u1} α β R) (List.reverse.{u2} α) (List.reverse.{u1} β) Case conversion may be inaccurate. Consider using '#align list.rel_reverse List.rel_reverseₓ'. 
-/ theorem rel_reverse : (Forall₂ R ⇒ Forall₂ R) reverse reverse | [], [], forall₂.nil => Forall₂.nil | a :: as, b :: bs, forall₂.cons h₁ h₂ => by simp only [reverse_cons] exact rel_append (rel_reverse h₂) (forall₂.cons h₁ forall₂.nil) #align list.rel_reverse List.rel_reverse /- warning: list.forall₂_reverse_iff -> List.forall₂_reverse_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {l₁ : List.{u1} α} {l₂ : List.{u2} β}, Iff (List.Forall₂.{u1, u2} α β R (List.reverse.{u1} α l₁) (List.reverse.{u2} β l₂)) (List.Forall₂.{u1, u2} α β R l₁ l₂) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {l₁ : List.{u2} α} {l₂ : List.{u1} β}, Iff (List.Forall₂.{u2, u1} α β R (List.reverse.{u2} α l₁) (List.reverse.{u1} β l₂)) (List.Forall₂.{u2, u1} α β R l₁ l₂) Case conversion may be inaccurate. Consider using '#align list.forall₂_reverse_iff List.forall₂_reverse_iffₓ'. -/ @[simp] theorem forall₂_reverse_iff {l₁ l₂} : Forall₂ R (reverse l₁) (reverse l₂) ↔ Forall₂ R l₁ l₂ := Iff.intro (fun h => by rw [← reverse_reverse l₁, ← reverse_reverse l₂] exact rel_reverse h) fun h => rel_reverse h #align list.forall₂_reverse_iff List.forall₂_reverse_iff /- warning: list.rel_join -> List.rel_join is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop}, Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} (List.{u1} (List.{u1} α)) (List.{u2} (List.{u2} β)) (List.{u1} α) (List.{u2} β) (List.Forall₂.{u1, u2} (List.{u1} α) (List.{u2} β) (List.Forall₂.{u1, u2} α β R)) (List.Forall₂.{u1, u2} α β R) (List.join.{u1} α) (List.join.{u2} β) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop}, Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} (List.{u2} (List.{u2} α)) (List.{u1} (List.{u1} β)) (List.{u2} α) (List.{u1} β) (List.Forall₂.{u2, u1} (List.{u2} α) (List.{u1} β) (List.Forall₂.{u2, u1} α β R)) (List.Forall₂.{u2, u1} α β R) (List.join.{u2} α) (List.join.{u1} β) Case conversion may be inaccurate. Consider using '#align list.rel_join List.rel_joinₓ'. 
-/ theorem rel_join : (Forall₂ (Forall₂ R) ⇒ Forall₂ R) join join | [], [], forall₂.nil => Forall₂.nil | a :: as, b :: bs, forall₂.cons h₁ h₂ => rel_append h₁ (rel_join h₂) #align list.rel_join List.rel_join /- warning: list.rel_bind -> List.rel_bind is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{succ u1, succ u2, max (succ u1) (succ u3), max (succ u2) (succ u4)} (List.{u1} α) (List.{u2} β) ((α -> (List.{u3} γ)) -> (List.{u3} γ)) ((β -> (List.{u4} δ)) -> (List.{u4} δ)) (List.Forall₂.{u1, u2} α β R) (Relator.LiftFun.{max (succ u1) (succ u3), max (succ u2) (succ u4), succ u3, succ u4} (α -> (List.{u3} γ)) (β -> (List.{u4} δ)) (List.{u3} γ) (List.{u4} δ) (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} α β (List.{u3} γ) (List.{u4} δ) R (List.Forall₂.{u3, u4} γ δ P)) (List.Forall₂.{u3, u4} γ δ P)) (List.bind.{u1, u3} α γ) (List.bind.{u2, u4} β δ) but is expected to have type forall {α : Type.{u4}} {β : Type.{u3}} {γ : Type.{u2}} {δ : Type.{u1}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{succ u4, succ u3, max (succ u4) (succ u2), max (succ u3) (succ u1)} (List.{u4} α) (List.{u3} β) ((α -> (List.{u2} γ)) -> (List.{u2} γ)) ((β -> (List.{u1} δ)) -> (List.{u1} δ)) (List.Forall₂.{u4, u3} α β R) (Relator.LiftFun.{max (succ u4) (succ u2), max (succ u3) (succ u1), succ u2, succ u1} (α -> (List.{u2} γ)) (β -> (List.{u1} δ)) (List.{u2} γ) (List.{u1} δ) (Relator.LiftFun.{succ u4, succ u3, succ u2, succ u1} α β (List.{u2} γ) (List.{u1} δ) R (List.Forall₂.{u2, u1} γ δ P)) (List.Forall₂.{u2, u1} γ δ P)) (List.bind.{u4, u2} α γ) (List.bind.{u3, u1} β δ) Case conversion may be inaccurate. Consider using '#align list.rel_bind List.rel_bindₓ'. 
-/ theorem rel_bind : (Forall₂ R ⇒ (R ⇒ Forall₂ P) ⇒ Forall₂ P) List.bind List.bind := fun a b h₁ f g h₂ => rel_join (rel_map (@h₂) h₁) #align list.rel_bind List.rel_bind /- warning: list.rel_foldl -> List.rel_foldl is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{max (succ u1) (succ u3), max (succ u2) (succ u4), max (succ u1) (succ u3), max (succ u2) (succ u4)} (γ -> α -> γ) (δ -> β -> δ) (γ -> (List.{u1} α) -> γ) (δ -> (List.{u2} β) -> δ) (Relator.LiftFun.{succ u3, succ u4, max (succ u1) (succ u3), max (succ u2) (succ u4)} γ δ (α -> γ) (β -> δ) P (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} α β γ δ R P)) (Relator.LiftFun.{succ u3, succ u4, max (succ u1) (succ u3), max (succ u2) (succ u4)} γ δ ((List.{u1} α) -> γ) ((List.{u2} β) -> δ) P (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} (List.{u1} α) (List.{u2} β) γ δ (List.Forall₂.{u1, u2} α β R) P)) (List.foldl.{u3, u1} γ α) (List.foldl.{u4, u2} δ β) but is expected to have type forall {α : Type.{u4}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u1}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{max (succ u4) (succ u3), max (succ u2) (succ u1), max (succ u4) (succ u3), max (succ u2) (succ u1)} (γ -> α -> γ) (δ -> β -> δ) (γ -> (List.{u4} α) -> γ) (δ -> (List.{u2} β) -> δ) (Relator.LiftFun.{succ u3, succ u1, max (succ u4) (succ u3), max (succ u2) (succ u1)} γ δ (α -> γ) (β -> δ) P (Relator.LiftFun.{succ u4, succ u2, succ u3, succ u1} α β γ δ R P)) (Relator.LiftFun.{succ u3, succ u1, max (succ u4) (succ u3), max (succ u2) (succ u1)} γ δ ((List.{u4} α) -> γ) ((List.{u2} β) -> δ) P (Relator.LiftFun.{succ u4, succ u2, succ u3, succ u1} (List.{u4} α) (List.{u2} β) γ δ (List.Forall₂.{u4, u2} α β R) P)) (List.foldl.{u3, u4} γ α) (List.foldl.{u1, u2} δ β) Case conversion may be inaccurate. Consider using '#align list.rel_foldl List.rel_foldlₓ'. 
-/ theorem rel_foldl : ((P ⇒ R ⇒ P) ⇒ P ⇒ Forall₂ R ⇒ P) foldl foldl | f, g, hfg, _, _, h, _, _, forall₂.nil => h | f, g, hfg, x, y, hxy, _, _, forall₂.cons hab hs => rel_foldl (@hfg) (hfg hxy hab) hs #align list.rel_foldl List.rel_foldl /- warning: list.rel_foldr -> List.rel_foldr is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{max (succ u1) (succ u3), max (succ u2) (succ u4), max (succ u1) (succ u3), max (succ u2) (succ u4)} (α -> γ -> γ) (β -> δ -> δ) (γ -> (List.{u1} α) -> γ) (δ -> (List.{u2} β) -> δ) (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} α β (γ -> γ) (δ -> δ) R (Relator.LiftFun.{succ u3, succ u4, succ u3, succ u4} γ δ γ δ P P)) (Relator.LiftFun.{succ u3, succ u4, max (succ u1) (succ u3), max (succ u2) (succ u4)} γ δ ((List.{u1} α) -> γ) ((List.{u2} β) -> δ) P (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} (List.{u1} α) (List.{u2} β) γ δ (List.Forall₂.{u1, u2} α β R) P)) (List.foldr.{u1, u3} α γ) (List.foldr.{u2, u4} β δ) but is expected to have type forall {α : Type.{u4}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u1}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{max (succ u4) (succ u3), max (succ u2) (succ u1), max (succ u4) (succ u3), max (succ u2) (succ u1)} (α -> γ -> γ) (β -> δ -> δ) (γ -> (List.{u4} α) -> γ) (δ -> (List.{u2} β) -> δ) (Relator.LiftFun.{succ u4, succ u2, succ u3, succ u1} α β (γ -> γ) (δ -> δ) R (Relator.LiftFun.{succ u3, succ u1, succ u3, succ u1} γ δ γ δ P P)) (Relator.LiftFun.{succ u3, succ u1, max (succ u4) (succ u3), max (succ u2) (succ u1)} γ δ ((List.{u4} α) -> γ) ((List.{u2} β) -> δ) P (Relator.LiftFun.{succ u4, succ u2, succ u3, succ u1} (List.{u4} α) (List.{u2} β) γ δ (List.Forall₂.{u4, u2} α β R) P)) (List.foldr.{u4, u3} α γ) (List.foldr.{u2, u1} β δ) Case conversion may be inaccurate. Consider using '#align list.rel_foldr List.rel_foldrₓ'. -/ theorem rel_foldr : ((R ⇒ P ⇒ P) ⇒ P ⇒ Forall₂ R ⇒ P) foldr foldr | f, g, hfg, _, _, h, _, _, forall₂.nil => h | f, g, hfg, x, y, hxy, _, _, forall₂.cons hab hs => hfg hab (rel_foldr (@hfg) hxy hs) #align list.rel_foldr List.rel_foldr /- warning: list.rel_filter -> List.rel_filter is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {p : α -> Prop} {q : β -> Prop} [_inst_1 : DecidablePred.{succ u1} α p] [_inst_2 : DecidablePred.{succ u2} β q], (Relator.LiftFun.{succ u1, succ u2, 1, 1} α β Prop Prop R Iff p q) -> (Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} (List.{u1} α) (List.{u2} β) (List.{u1} α) (List.{u2} β) (List.Forall₂.{u1, u2} α β R) (List.Forall₂.{u1, u2} α β R) (List.filterₓ.{u1} α p (fun (a : α) => _inst_1 a)) (List.filterₓ.{u2} β q (fun (a : β) => _inst_2 a))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {p : α -> Bool} {q : β -> Bool}, (Relator.LiftFun.{succ u2, succ u1, 1, 1} α β Prop Prop R (fun ([email protected]._hyg.7252 : Prop) ([email protected]._hyg.7254 : Prop) => Iff [email protected]._hyg.7252 [email protected]._hyg.7254) (fun (x : α) => Eq.{1} Bool (p x) Bool.true) (fun (x : β) => Eq.{1} Bool (q x) Bool.true)) -> (Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} (List.{u2} α) (List.{u1} β) (List.{u2} α) (List.{u1} β) (List.Forall₂.{u2, u1} α β R) (List.Forall₂.{u2, u1} α β R) (List.filter.{u2} α p) (List.filter.{u1} β q)) Case conversion may be inaccurate. Consider using '#align list.rel_filter List.rel_filterₓ'. 
-/ theorem rel_filter {p : α → Prop} {q : β → Prop} [DecidablePred p] [DecidablePred q] (hpq : (R ⇒ (· ↔ ·)) p q) : (Forall₂ R ⇒ Forall₂ R) (filter p) (filter q) | _, _, forall₂.nil => Forall₂.nil | a :: as, b :: bs, forall₂.cons h₁ h₂ => by by_cases p a · have : q b := by rwa [← hpq h₁] simp only [filter_cons_of_pos _ h, filter_cons_of_pos _ this, forall₂_cons, h₁, rel_filter h₂, and_true_iff] · have : ¬q b := by rwa [← hpq h₁] simp only [filter_cons_of_neg _ h, filter_cons_of_neg _ this, rel_filter h₂] #align list.rel_filter List.rel_filter /- warning: list.rel_filter_map -> List.rel_filterMap is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{max (succ u1) (succ u3), max (succ u2) (succ u4), max (succ u1) (succ u3), max (succ u2) (succ u4)} (α -> (Option.{u3} γ)) (β -> (Option.{u4} δ)) ((List.{u1} α) -> (List.{u3} γ)) ((List.{u2} β) -> (List.{u4} δ)) (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} α β (Option.{u3} γ) (Option.{u4} δ) R (Option.Rel.{u3, u4} γ δ P)) (Relator.LiftFun.{succ u1, succ u2, succ u3, succ u4} (List.{u1} α) (List.{u2} β) (List.{u3} γ) (List.{u4} δ) (List.Forall₂.{u1, u2} α β R) (List.Forall₂.{u3, u4} γ δ P)) (List.filterMap.{u1, u3} α γ) (List.filterMap.{u2, u4} β δ) but is expected to have type forall {α : Type.{u4}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u1}} {R : α -> β -> Prop} {P : γ -> δ -> Prop}, Relator.LiftFun.{max (succ u4) (succ u3), max (succ u2) (succ u1), max (succ u4) (succ u3), max (succ u2) (succ u1)} (α -> (Option.{u3} γ)) (β -> (Option.{u1} δ)) ((List.{u4} α) -> (List.{u3} γ)) ((List.{u2} β) -> (List.{u1} δ)) (Relator.LiftFun.{succ u4, succ u2, succ u3, succ u1} α β (Option.{u3} γ) (Option.{u1} δ) R (Option.Rel.{u3, u1} γ δ P)) (Relator.LiftFun.{succ u4, succ u2, succ u3, succ u1} (List.{u4} α) (List.{u2} β) (List.{u3} γ) (List.{u1} δ) (List.Forall₂.{u4, u2} α β R) (List.Forall₂.{u3, u1} γ δ P)) (List.filterMap.{u4, u3} α γ) (List.filterMap.{u2, u1} β δ) Case conversion may be inaccurate. Consider using '#align list.rel_filter_map List.rel_filterMapₓ'. 
-/ theorem rel_filterMap : ((R ⇒ Option.Rel P) ⇒ Forall₂ R ⇒ Forall₂ P) filterMap filterMap | f, g, hfg, _, _, forall₂.nil => Forall₂.nil | f, g, hfg, a :: as, b :: bs, forall₂.cons h₁ h₂ => by rw [filter_map_cons, filter_map_cons] <;> exact match f a, g b, hfg h₁ with | _, _, Option.Rel.none => rel_filter_map (@hfg) h₂ | _, _, Option.Rel.some h => forall₂.cons h (rel_filter_map (@hfg) h₂) #align list.rel_filter_map List.rel_filterMap /- warning: list.rel_prod -> List.rel_prod is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} [_inst_1 : Monoid.{u1} α] [_inst_2 : Monoid.{u2} β], (R (OfNat.ofNat.{u1} α 1 (OfNat.mk.{u1} α 1 (One.one.{u1} α (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α _inst_1))))) (OfNat.ofNat.{u2} β 1 (OfNat.mk.{u2} β 1 (One.one.{u2} β (MulOneClass.toHasOne.{u2} β (Monoid.toMulOneClass.{u2} β _inst_2)))))) -> (Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} α β (α -> α) (β -> β) R (Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} α β α β R R) (HMul.hMul.{u1, u1, u1} α α α (instHMul.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α _inst_1)))) (HMul.hMul.{u2, u2, u2} β β β (instHMul.{u2} β (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β _inst_2))))) -> (Relator.LiftFun.{succ u1, succ u2, succ u1, succ u2} (List.{u1} α) (List.{u2} β) α β (List.Forall₂.{u1, u2} α β R) R (List.prod.{u1} α (MulOneClass.toHasMul.{u1} α (Monoid.toMulOneClass.{u1} α _inst_1)) (MulOneClass.toHasOne.{u1} α (Monoid.toMulOneClass.{u1} α _inst_1))) (List.prod.{u2} β (MulOneClass.toHasMul.{u2} β (Monoid.toMulOneClass.{u2} β _inst_2)) (MulOneClass.toHasOne.{u2} β (Monoid.toMulOneClass.{u2} β _inst_2)))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} [_inst_1 : Monoid.{u2} α] [_inst_2 : Monoid.{u1} β], (R (OfNat.ofNat.{u2} α 1 (One.toOfNat1.{u2} α (Monoid.toOne.{u2} α _inst_1))) (OfNat.ofNat.{u1} β 1 (One.toOfNat1.{u1} β (Monoid.toOne.{u1} β _inst_2)))) -> (Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} α β (α -> α) (β -> β) R (Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} α β α β R R) (fun ([email protected]._hyg.8113 : α) ([email protected]._hyg.8115 : α) => HMul.hMul.{u2, u2, u2} α α α (instHMul.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α _inst_1))) [email protected]._hyg.8113 [email protected]._hyg.8115) (fun ([email protected]._hyg.8128 : β) ([email protected]._hyg.8130 : β) => HMul.hMul.{u1, u1, u1} β β β (instHMul.{u1} β (MulOneClass.toMul.{u1} β (Monoid.toMulOneClass.{u1} β _inst_2))) [email protected]._hyg.8128 [email protected]._hyg.8130)) -> (Relator.LiftFun.{succ u2, succ u1, succ u2, succ u1} (List.{u2} α) (List.{u1} β) α β (List.Forall₂.{u2, u1} α β R) R (List.prod.{u2} α (MulOneClass.toMul.{u2} α (Monoid.toMulOneClass.{u2} α _inst_1)) (Monoid.toOne.{u2} α _inst_1)) (List.prod.{u1} β (MulOneClass.toMul.{u1} β (Monoid.toMulOneClass.{u1} β _inst_2)) (Monoid.toOne.{u1} β _inst_2))) Case conversion may be inaccurate. Consider using '#align list.rel_prod List.rel_prodₓ'. -/ @[to_additive] theorem rel_prod [Monoid α] [Monoid β] (h : R 1 1) (hf : (R ⇒ R ⇒ R) (· * ·) (· * ·)) : (Forall₂ R ⇒ R) prod prod := rel_foldl hf h #align list.rel_prod List.rel_prod #align list.rel_sum List.rel_sum #print List.SublistForall₂ /- /-- Given a relation `R`, `sublist_forall₂ r l₁ l₂` indicates that there is a sublist `l` of `l₂` such that `forall₂ r l₁ l`.
-/ inductive SublistForall₂ (R : α → β → Prop) : List α → List β → Prop | nil {l} : sublist_forall₂ [] l | cons {a₁ a₂ l₁ l₂} : R a₁ a₂ → sublist_forall₂ l₁ l₂ → sublist_forall₂ (a₁ :: l₁) (a₂ :: l₂) | cons_right {a l₁ l₂} : sublist_forall₂ l₁ l₂ → sublist_forall₂ l₁ (a :: l₂) #align list.sublist_forall₂ List.SublistForall₂ -/ /- warning: list.sublist_forall₂_iff -> List.sublistForall₂_iff is a dubious translation: lean 3 declaration is forall {α : Type.{u1}} {β : Type.{u2}} {R : α -> β -> Prop} {l₁ : List.{u1} α} {l₂ : List.{u2} β}, Iff (List.SublistForall₂.{u1, u2} α β R l₁ l₂) (Exists.{succ u2} (List.{u2} β) (fun (l : List.{u2} β) => And (List.Forall₂.{u1, u2} α β R l₁ l) (List.Sublist.{u2} β l l₂))) but is expected to have type forall {α : Type.{u2}} {β : Type.{u1}} {R : α -> β -> Prop} {l₁ : List.{u2} α} {l₂ : List.{u1} β}, Iff (List.SublistForall₂.{u2, u1} α β R l₁ l₂) (Exists.{succ u1} (List.{u1} β) (fun (l : List.{u1} β) => And (List.Forall₂.{u2, u1} α β R l₁ l) (List.Sublist.{u1} β l l₂))) Case conversion may be inaccurate. Consider using '#align list.sublist_forall₂_iff List.sublistForall₂_iffₓ'. -/ theorem sublistForall₂_iff {l₁ : List α} {l₂ : List β} : SublistForall₂ R l₁ l₂ ↔ ∃ l, Forall₂ R l₁ l ∧ l <+ l₂ := by constructor <;> intro h · induction' h with _ a b l1 l2 rab rll ih b l1 l2 hl ih · exact ⟨nil, forall₂.nil, nil_sublist _⟩ · obtain ⟨l, hl1, hl2⟩ := ih refine' ⟨b :: l, forall₂.cons rab hl1, hl2.cons_cons b⟩ · obtain ⟨l, hl1, hl2⟩ := ih exact ⟨l, hl1, hl2.trans (sublist.cons _ _ _ (sublist.refl _))⟩ · obtain ⟨l, hl1, hl2⟩ := h revert l₁ induction' hl2 with _ _ _ _ ih _ _ _ _ ih <;> intro l₁ hl1 · rw [forall₂_nil_right_iff.1 hl1] exact sublist_forall₂.nil · exact sublist_forall₂.cons_right (ih hl1) · cases' hl1 with _ _ _ _ hr hl _ exact sublist_forall₂.cons hr (ih hl) #align list.sublist_forall₂_iff List.sublistForall₂_iff #print List.SublistForall₂.is_refl /- instance SublistForall₂.is_refl [IsRefl α Rₐ] : IsRefl (List α) (SublistForall₂ Rₐ) := ⟨fun l => sublistForall₂_iff.2 ⟨l, forall₂_refl l, Sublist.refl l⟩⟩ #align list.sublist_forall₂.is_refl List.SublistForall₂.is_refl -/ #print List.SublistForall₂.is_trans /- instance SublistForall₂.is_trans [IsTrans α Rₐ] : IsTrans (List α) (SublistForall₂ Rₐ) := ⟨fun a b c => by revert a b induction' c with _ _ ih · rintro _ _ h1 (_ | _ | _) exact h1 · rintro a b h1 h2 cases' h2 with _ _ _ _ _ hbc tbc _ _ y1 btc · cases h1 exact sublist_forall₂.nil · cases' h1 with _ _ _ _ _ hab tab _ _ _ atb · exact sublist_forall₂.nil · exact sublist_forall₂.cons (trans hab hbc) (ih _ _ tab tbc) · exact sublist_forall₂.cons_right (ih _ _ atb tbc) · exact sublist_forall₂.cons_right (ih _ _ h1 btc)⟩ #align list.sublist_forall₂.is_trans List.SublistForall₂.is_trans -/ #print List.Sublist.sublistForall₂ /- theorem Sublist.sublistForall₂ {l₁ l₂ : List α} (h : l₁ <+ l₂) [IsRefl α Rₐ] : SublistForall₂ Rₐ l₁ l₂ := sublistForall₂_iff.2 ⟨l₁, forall₂_refl l₁, h⟩ #align list.sublist.sublist_forall₂ List.Sublist.sublistForall₂ -/ #print List.tail_sublistForall₂_self /- theorem tail_sublistForall₂_self [IsRefl α Rₐ] (l : List α) : SublistForall₂ Rₐ l.tail l := l.tail_sublist.SublistForall₂ #align list.tail_sublist_forall₂_self List.tail_sublistForall₂_self -/ end List
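/- A hedged illustrative example (not from the ported file): with `R := (· ≤ ·)`, `SublistForall₂` relates `[1, 2]` to `[0, 1, 5, 2, 3]` by skipping `0` and `5`:

  example : List.SublistForall₂ (· ≤ ·) [1, 2] [0, 1, 5, 2, 3] :=
    .cons_right (.cons le_rfl (.cons_right (.cons le_rfl .nil)))

Equivalently, by `sublistForall₂_iff`, it suffices to exhibit the sublist `[1, 2]` of `[0, 1, 5, 2, 3]` together with `Forall₂ (· ≤ ·) [1, 2] [1, 2]`. -/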
Set Implicit Arguments. Require Import AutoSep Arith. (* useful and safe hints *) Hint Rewrite Npow2_nat : N. Hint Resolve bound_N_nat : N. Hint Rewrite natToWord_wordToNat : N. (* ============================================================================ * word to nat * ========================================================================= *) Theorem wordToNat_inj : forall sz (x y:word sz), wordToNat x = wordToNat y -> x = y. intros. apply (f_equal (natToWord sz)) in H. autorewrite with N in *. assumption. Qed. Theorem wordToNat_inj' : forall sz (x y:word sz), x <> y -> wordToNat x <> wordToNat y. intros. contradict H. apply wordToNat_inj; assumption. Qed. (* ============================================================================ * nat to word * ========================================================================= *) Lemma natToWord_pow2' : forall(sz k:nat)(w:word sz), natToWord sz (k * pow2 sz) ^+ w = w. induction k; intros; simpl. apply wplus_unit. rewrite natToWord_plus. rewrite <- wplus_assoc. rewrite natToWord_pow2. rewrite wplus_unit. apply IHk. Qed. Lemma natToWord_pow2_zero: forall sz n, $ (n * pow2 sz) = natToWord sz 0. intros. rewrite <- (wplus_unit $(n * pow2 sz)). rewrite wplus_comm. apply natToWord_pow2'. Qed. Lemma natToWord_pow2_factor : forall (sz:nat)(w:word sz), exists n, forall k, (n < pow2 sz)%nat /\ w = natToWord sz (k * pow2 sz + n). intros. exists (wordToNat w). intro. split. apply (wordToNat_bound w). rewrite natToWord_plus. rewrite natToWord_pow2'. rewrite natToWord_wordToNat. reflexivity. Qed. Corollary word_eq_natToWord : forall (sz:nat)(w:word sz), exists n, (n < pow2 sz)%nat /\ w = natToWord sz n. intros. generalize natToWord_pow2_factor; intro. specialize (H sz w). destruct H. specialize (H 0). destruct H. simpl in H0. exists x; auto. Qed. Lemma natToWord_inj' : forall sz a b, goodSize a -> goodSize b -> natToWord sz a <> $ b -> a <> b. intros; intro; subst; congruence. Qed. (* ============================================================================ * nat to W * ========================================================================= *) Transparent goodSize. Lemma goodSize_natToW_wlt_lt : forall n m:nat, goodSize n -> goodSize m -> natToW n < natToW m -> (n < m)%nat. unfold goodSize, natToW. generalize dependent 32; intros; nomega. Qed. Corollary W_eq_natToW : forall(w:W), exists n, goodSize n /\ w = natToW n. intros. generalize word_eq_natToWord; intro. specialize (H 32 w). destruct H. destruct H. exists x. unfold goodSize. split; auto. Qed. Opaque goodSize. Lemma wneg_natToW_pow2_minus : forall n:nat, goodSize n -> ^~ (natToW n) = natToW (pow2 32 - n). unfold wneg; intros. rewrite NToWord_nat. autorewrite with N; reflexivity. Qed. Lemma natToW_plus_pow2 : forall n : nat, natToW (pow2 32 + n) = $ n. unfold natToW; intros. rewrite natToWord_plus. rewrite natToWord_pow2. words. Qed. (* ============================================================================ * destruct_word Turn word arithmetic into nat arithmetic. * ========================================================================= *) Ltac destruct_word sz w n := let H := fresh "W" in let Hub := fresh "Wub" in let Heq := fresh "Weq" in assert (H:exists w', (w' < pow2 sz)%nat /\ w = natToWord sz w') by apply word_eq_natToWord; elim H; clear H; intros n H; elim H; intros Hub Heq; rewrite Heq in *; clear H Heq w.
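(* Illustrative note (not from the original file): on a goal mentioning [w : word sz], [destruct_word sz w n] replaces [w] everywhere by [natToWord sz n] for a fresh [n], together with the bound [Wub : (n < pow2 sz)%nat], so word arithmetic can then be discharged as nat arithmetic; [destruct_W] below is the 32-bit specialization stated with [goodSize]. *)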
Ltac destruct_W w n := let H := fresh "W" in let Hub := fresh "Wub" in let Heq := fresh "Weq" in assert (H:exists w', goodSize w' /\ w = natToW w') by apply W_eq_natToW; elim H; clear H; intros n H; elim H; intros Hub Heq; rewrite Heq in *; clear H Heq w. Ltac destruct_words := repeat match goal with | w : W |- context[?w] => is_var w; let w' := fresh "w" in destruct_W w w' | w : word 32 |- context[?w] => is_var w; let w' := fresh "w" in destruct_W w w' | w : word ?sz |- context[?w] => is_var w; let w' := fresh "w" in destruct_word sz w w' end. (* ============================================================================ * goodsize tactic * ========================================================================= *) Ltac goodsize := match goal with | |- (_ < pow2 32)%nat => apply goodSize_danger | _ => idtac end; match goal with | [ H: goodSize _ |- goodSize _ ] => solve [apply (goodSize_weaken _ _ H); auto; omega] | |- goodSize _ => solve [auto] | _ => omega end. (* ============================================================================ * roundTrip-related lemmas and roundtrip tactic * ========================================================================= *) Corollary wordToNat_natToWord_idempotent_W : forall n, goodSize n -> wordToNat (natToW n) = n. intros; apply wordToNat_natToWord_idempotent; auto. Qed. Hint Rewrite wordToNat_natToWord_idempotent_W using solve [goodsize] : N. Corollary roundTrip : forall sz n : nat, (n < pow2 sz)%nat -> wordToNat (natToWord sz n) = n. intros; apply wordToNat_natToWord_idempotent; nomega. Qed. Hint Rewrite roundTrip using solve [eauto] : N. Lemma natToW_wordToNat : forall w:W, natToW (wordToNat w) = w. intros; rewrite <- natToWord_wordToNat; auto. Qed. Hint Rewrite natToW_wordToNat : N. Lemma wordToNat_wplus' : forall sz (x y: word sz), (wordToNat x + wordToNat y < pow2 sz)%nat -> wordToNat (x ^+ y) = wordToNat x + wordToNat y. intros. destruct_words. rewrite <- natToWord_plus. rewrite roundTrip; auto. pre_nomega; auto. rewrite wordToNat_natToWord_idempotent in * by nomega. rewrite wordToNat_natToWord_idempotent in * by nomega. auto. Qed. Corollary wordToNat_wplus'' : forall sz (x y: nat), (x + y < pow2 sz)%nat -> wordToNat ($ x ^+ natToWord sz y) = x + y. intros. rewrite wordToNat_wplus' by nomega. rewrite ! roundTrip; auto. Qed. Lemma wordToNat_wminus : forall sz (w u : word sz), u <= w -> wordToNat (w ^- u) = wordToNat w - wordToNat u. intros. eapply natToWord_inj; try eapply wordToNat_bound. 2: generalize (wordToNat_bound w); omega. rewrite natToWord_wordToNat. unfold wminus. rewrite wneg_alt. unfold wnegN. pattern w at 1. rewrite <- (natToWord_wordToNat w). rewrite <- natToWord_plus. specialize (wordToNat_bound u); intro. destruct (le_lt_dec (wordToNat u) (wordToNat w)). replace (wordToNat w + (pow2 sz - wordToNat u)) with (pow2 sz + (wordToNat w - wordToNat u)) by omega. rewrite natToWord_plus. rewrite natToWord_pow2. apply wplus_unit. elimtype False; apply H. nomega. Qed. Corollary wordToNat_wminus'' : forall sz (x y: nat), (x < pow2 sz)%nat -> (y <= x)%nat -> wordToNat ($ x ^- natToWord sz y) = x - y. intros. rewrite wordToNat_wminus by nomega. rewrite ! roundTrip; auto. Qed. Lemma mult_S : forall x y, (x <= x * S y)%nat. intros; rewrite mult_comm; simpl; apply le_plus_l. Qed. Local Hint Resolve mult_S. Local Hint Resolve mult_comm. Lemma wordToNat_wmult : forall (w u : W), goodSize (wordToNat w * wordToNat u) -> wordToNat (w ^* u) = wordToNat w * wordToNat u. intros. rewrite wmult_alt; unfold wmultN, wordBinN. 
apply wordToNat_natToWord_idempotent; auto. Qed. Corollary wordToNat_wmult_W : forall (x y: nat), goodSize (x * y)%nat -> wordToNat (natToW x ^* natToW y) = x * y. intros. unfold natToW in *. destruct x, y; simpl; auto. assert (wordToNat (natToWord 32 0) = 0). rewrite roundTrip by goodsize; auto. rewrite wordToNat_wmult. unfold natToW in *; rewrite H0; omega. rewrite H0; simpl; goodsize. assert (goodSize (S x)) by goodsize. assert (goodSize (S y)). { apply (goodSize_weaken _ _ H). rewrite mult_comm; auto. } rewrite wordToNat_wmult. rewrite ! roundTrip by goodsize; simpl; omega. rewrite ! roundTrip by goodsize; auto. Qed. (* ============================================================================ * natToWord and operators * ========================================================================= *) Lemma natToWord_mult : forall sz x y, natToWord sz (x * y) = natToWord _ x ^* natToWord _ y. unfold "^*", wordBin; intros. pre_nomega. rewrite <- Nat2N.inj_mul. rewrite NToWord_nat. pre_nomega. destruct (wordToNat_natToWord' sz x). rewrite <- H at 1. remember (wordToNat $ (x)) as x'. rewrite mult_plus_distr_r. rewrite natToWord_plus. replace (x0 * pow2 sz * y)%nat with ((x0 * y) * pow2 sz)%nat. rewrite natToWord_pow2_zero. rewrite <- natToWord_plus. rewrite plus_0_r. destruct (wordToNat_natToWord' sz y). rewrite <- H0 at 1. remember (wordToNat $ (y)) as y'. rewrite mult_plus_distr_l. rewrite natToWord_plus. replace (x' * (x1 * pow2 sz))%nat with ((x' * x1) * pow2 sz)%nat by apply mult_assoc_reverse. rewrite natToWord_pow2_zero. rewrite <- natToWord_plus; auto. rewrite 2 mult_assoc_reverse. f_equal. apply mult_comm. Qed. (* ============================================================================ * simplification tactic * ========================================================================= *) Ltac roundtrip := pre_nomega; unfold natToW in *; repeat match goal with | _ => rewrite wordToNat_natToWord_idempotent_W in * by goodsize | _ => rewrite wordToNat_wminus'' in * by goodsize | _ => rewrite wordToNat_wminus in * by nomega | _ => rewrite wordToNat_wplus'' in * by goodsize | _ => rewrite wordToNat_wplus' in * by goodsize | _ => rewrite wordToNat_wmult_W in * by goodsize | _ => rewrite wordToNat_wmult in * by goodsize | H: _ |- _ => rewrite <- natToW_minus in H by omega; unfold natToW in H | H: _ |- _ => rewrite <- natToWord_plus in H | H: _ |- _ => rewrite <- natToWord_mult in H | H: natToWord ?sz _ = natToWord ?sz _ |- _ => apply natToWord_inj with sz _ _ in H; try goodsize | H: not (natToWord ?sz _ = natToWord ?sz _) |- _ => apply natToWord_inj' with sz _ _ in H; try goodsize end. (* ============================================================================ * word equality lemmas * ========================================================================= *) Definition eq_W_dec : forall x y : W, { x = y } + { x <> y }. intros. destruct (Word.weqb x y) eqn:Heq; [apply weqb_sound in Heq | ]; auto. right; intro. apply weqb_true_iff in H; congruence. Qed. Lemma weqb_false_iff : forall sz (x y : word sz), Word.weqb x y = false <-> x <> y. intros. split; intros. intro Eq; apply weqb_true_iff in Eq; congruence. case_eq (Word.weqb x y); intro; auto. apply weqb_sound in H0; congruence. Qed. Lemma weqb_refl : forall w, weqb w w = true. intros; apply weqb_true_iff; auto. Qed. Hint Rewrite weqb_refl : N. Lemma weqb_refl' : forall x y, x = y -> weqb x y = true. intros; subst; autorewrite with N; auto. Qed. Hint Rewrite weqb_refl' using solve [auto; words] : N. 
Lemma weqb_diff : forall w1 w2, w1 <> w2 -> weqb w1 w2 = false. intros; apply weqb_false_iff; auto. Qed. Hint Rewrite weqb_diff using solve [auto; discriminate] : N. (* ============================================================================ * word operators * ========================================================================= *) Lemma wplus_unit_r : forall sz w, w ^+ natToWord sz 0 = w. intros; rewrite wplus_comm; rewrite wplus_unit; auto. Qed. Hint Rewrite wplus_unit wplus_unit_r : N. Lemma wminus_unit : forall sz w, w ^- natToWord sz 0 = w. intros. unfold "^-", "^~". roundtrip. rewrite N.sub_0_r. rewrite NToWord_nat. roundtrip. rewrite natToWord_pow2. autorewrite with N; auto. Qed. Lemma wmult_zero : forall w, natToW 0 ^* w = natToW 0. auto. Qed. Lemma wmult_zero_r : forall w, w ^* natToW 0 = natToW 0. intros; roundtrip; rewrite wmult_comm; auto. Qed. Hint Rewrite wmult_zero wmult_zero_r : N. (* ============================================================================ * natToW and operators * ========================================================================= *) Lemma natToW_S_wminus_1 : forall n, $ (S n) ^- $1 = natToW n. unfold natToW; intros. replace (S n) with (n + 1) by omega; rewrite natToWord_plus. words. Qed. Hint Rewrite natToW_S_wminus_1 : N. (* ============================================================================ * goodSize lemmas * ========================================================================= *) Transparent goodSize. Lemma goodSize_dec : forall x, { goodSize x } + { ~ goodSize x }. intros. destruct (le_lt_dec (pow2 32) x); [right | left]. unfold goodSize; intro; contradict l. apply Lt.lt_not_le. apply Nlt_out in H. rewrite ! Nat2N.id in H. rewrite Npow2_nat in H; auto. unfold goodSize. apply Nlt_in. rewrite ! Nat2N.id. rewrite Npow2_nat. auto. Qed. Lemma not_goodSize_gt : forall x y, goodSize x -> ~ goodSize y -> (x < y)%nat. intros. unfold goodSize in *. apply N.nlt_ge in H0. assert (N.of_nat x < N.of_nat y)%N. eapply N.lt_le_trans; eassumption. apply Nlt_out in H1. rewrite ! Nat2N.id in *; auto. Qed. Opaque goodSize. (* ============================================================================ * word inequalities * ========================================================================= *) Lemma wle_wneq_wlt : forall i j:W, i <= j -> i <> j -> i < j. intros; destruct_words. apply wordToNat_inj' in H0. autorewrite with N in *; nomega. Qed. Lemma wle_wle_antisym : forall n m:W, n <= m -> m <= n -> n = m. intros; destruct_words; f_equal; nomega. Qed. Lemma lt_natToW : forall n (w : W), w < natToW n -> (wordToNat w < n)%nat. intros. destruct (goodSize_dec n). roundtrip; auto. destruct_words. roundtrip. apply not_goodSize_gt; auto. Qed.
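(* A hedged usage sketch (not part of the original file): once both summands are known to stay below [pow2 32], [wordToNat] commutes with [^+], so a concrete goal such as [Goal wordToNat (natToWord 32 3 ^+ natToWord 32 4) = 7.] should follow from [wordToNat_wplus''] (leaving the side condition [(3 + 4 < pow2 32)%nat]) or from the [roundtrip] tactic together with [omega]. *)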
open import Formalization.PredicateLogic.Signature module Formalization.PredicateLogic.Classical.Semantics.Satisfaction (𝔏 : Signature) {ℓₘ} where open Signature(𝔏) import Lvl open import Data open import Data.Boolean open import Data.Boolean.Stmt open import Data.ListSized import Data.ListSized.Functions as List open import Formalization.PredicateLogic.Classical.Semantics(𝔏){ℓₘ} open import Formalization.PredicateLogic.Syntax(𝔏) open import Functional using (_∘_ ; _∘₂_) import Logic.Propositional as Logic import Logic.Predicate as Logic open import Numeral.Finite open import Numeral.Finite.Bound open import Numeral.Natural open import Relator.Equals open import Sets.PredicateSet using (PredSet) open Sets.PredicateSet.BoundedQuantifiers open import Syntax.Function open import Type.Dependent renaming (intro to _,_) open import Type.Properties.Decidable open import Type private variable ℓ ℓ₁ ℓ₂ : Lvl.Level private variable P : Type{ℓₚ} private variable args n vars : ℕ private variable 𝔐 : Model -- A `VarMapping(vars)(𝔐)` maps `vars` number of variables to objects in the domain of the model `𝔐`. -- Also called: Variable assignment. VarMapping : ℕ → Model → Type VarMapping(vars)(𝔐) = 𝕟(vars) → Model.Domain(𝔐) module VarMapping where -- Adds a mapping to an object in the domain of the model `𝔐`. add0 : VarMapping(vars)(𝔐) → Model.Domain(𝔐) → VarMapping(𝐒(vars))(𝔐) add0 𝔰 t 𝟎 = t add0 𝔰 t (𝐒(v)) = 𝔰(v) private variable 𝔰 : VarMapping(vars)(𝔐) module _ ((𝔐 , 𝔰) : Σ Model (VarMapping(vars))) where -- Maps terms to objects in the domain given a model and a variable mapping. val : Term(vars) → Model.Domain(𝔐) val₊ : List(Term(vars))(n) → List(Model.Domain(𝔐))(n) val(var v) = 𝔰(v) val(func f x) = Model.function 𝔐 f (val₊ x) val₊ {0} ∅ = ∅ val₊ {𝐒(n)} (t ⊰ ts) = (val t ⊰ val₊ {n} ts) --val₊ = List.map val -- Satisfication relation. -- ((𝔐 , 𝔰) ⊧ φ) means that the formula φ is satisfied in the model 𝔐 with the variable mapping. -- Or in other words: A formula is true in the model 𝔐. _⊧_ : (Σ Model (VarMapping(vars))) → Formula(vars) → Type{ℓₘ} (𝔐 , 𝔰) ⊧ (f $ x) = Lvl.Up(IsTrue(Model.relation 𝔐 f (val₊(𝔐 , 𝔰) x))) -- A model decides whether a relation is satisfied. (𝔐 , 𝔰) ⊧ ⊤ = Unit -- All models satisfy top. (𝔐 , 𝔰) ⊧ ⊥ = Empty -- No model satisfies bottom. (𝔐 , 𝔰) ⊧ (φ ∧ ψ) = ((𝔐 , 𝔰) ⊧ φ) Logic.∧ ((𝔐 , 𝔰) ⊧ ψ) -- A model satisfies a conjunction when it satisfies both of the propositions. (𝔐 , 𝔰) ⊧ (φ ∨ ψ) = ((𝔐 , 𝔰) ⊧ φ) Logic.∨ ((𝔐 , 𝔰) ⊧ ψ) -- A model satisfies a disjunction when it satisfies any one of the propositions. (𝔐 , 𝔰) ⊧ (φ ⟶ ψ) = Logic.¬((𝔐 , 𝔰) ⊧ φ) Logic.∨ ((𝔐 , 𝔰) ⊧ ψ) (𝔐 , 𝔰) ⊧ (Ɐ φ) = Logic.∀ₗ(t ↦ (𝔐 , VarMapping.add0{𝔐 = 𝔐} 𝔰 t) ⊧ φ) (𝔐 , 𝔰) ⊧ (∃ φ) = Logic.∃(t ↦ (𝔐 , VarMapping.add0{𝔐 = 𝔐} 𝔰 t) ⊧ φ) -- Satisfication of a set of formulas. -- This means that a model satisfies all formulas at the same time. _⊧₊_ : (Σ Model (VarMapping(vars))) → PredSet{ℓ}(Formula(vars)) → Type 𝔐 ⊧₊ Γ = ∀ₛ(Γ) (𝔐 ⊧_) -- Validity of a formula. -- A formula is valid when it is true independent of any model (is satisfied by all models). -- Examples: -- Valid(⊤) -- Valid(⊥ ⟶ ⊥) -- ¬ Valid(⊥) -- ¬ Valid(P) where P : Prop(0) Valid : Formula(vars) → Type Valid(φ) = Logic.∀ₗ(_⊧ φ) -- Satisfiability of sets of formulas. -- A set of formulas is satisfiable when there is a model that satisfies all of them at the same time. Satisfiable : PredSet{ℓ}(Formula(vars)) → Type Satisfiable(Γ) = Logic.∃(_⊧₊ Γ) -- Unsatisfiability of sets of formulas. 
Unsatisfiable : PredSet{ℓ}(Formula(vars)) → Type Unsatisfiable{ℓ} = Logic.¬_ ∘ Satisfiable{ℓ} -- Semantic entailment of a formula. -- A hypothetical statement: if a model satisfies all formulas in Γ, then the same model satisfies the formula φ. _⊨_ : PredSet{ℓ}(Formula(vars)) → Formula(vars) → Type Γ ⊨ φ = ∀{𝔐} → (𝔐 ⊧₊ Γ) → (𝔐 ⊧ φ) _⊭_ : PredSet{ℓ}(Formula(vars)) → Formula(vars) → Type _⊭_ = (Logic.¬_) ∘₂ (_⊨_) -- Axiomatization of a theory by a set of axioms. -- A set of axioms is a set of formulas. -- A theory is the closure of a set of axioms. -- An axiomatization is a subset of formulas of the theory which entails all formulas in the axiomatized theory. _axiomatizes_ : PredSet{ℓ₁}(Formula(vars)) → PredSet{ℓ₂}(Formula(vars)) → Type Γ₁ axiomatizes Γ₂ = ∀{φ} → (Γ₁ ⊨ φ) → Γ₂(φ) -- A set of formulas is closed when it includes all formulas that it entails. Closed : PredSet{ℓ}(Formula(vars)) → Type Closed(Γ) = Γ axiomatizes Γ _⊨₊_ : PredSet{ℓ₁}(Formula(vars)) → PredSet{ℓ₂}(Formula(vars)) → Type Γ₁ ⊨₊ Γ₂ = ∀{𝔐} → (𝔐 ⊧₊ Γ₁) → (𝔐 ⊧₊ Γ₂) _⊭₊_ : PredSet{ℓ₁}(Formula(vars)) → PredSet{ℓ₂}(Formula(vars)) → Type _⊭₊_ = (Logic.¬_) ∘₂ (_⊨₊_)
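-- A hedged sketch (not in the original module): no model satisfies ⊥, so by
-- the material-implication clause of _⊧_ above, every model satisfies ⊥ ⟶ ⊥
-- via the left disjunct, giving Valid(⊥ ⟶ ⊥). Dually, 𝔐 ⊧₊ {⊥} is impossible
-- for any 𝔐, so a singleton set containing ⊥ is Unsatisfiable, and ∅ ⊨ φ
-- collapses to Valid(φ), since every model vacuously satisfies the empty set.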
{-# LANGUAGE DisambiguateRecordFields #-} {-# LANGUAGE DuplicateRecordFields #-} {-# LANGUAGE NamedFieldPuns #-} module SimulatedAnnealing ( State, energy, metric, perturb, Args(..), CoolingTemp(..), anneal ) where import Metric (MetricSpace, Metric, metric) import qualified Metric import Prelude hiding (min) import Control.Monad.Random (evalRand) import Control.Monad.Random.Class (MonadRandom, getRandom) import Data.Function ((&)) import Data.Functor ((<&>)) import Data.Int (Int32) import Numeric.GSL.SimulatedAnnealing (SimulatedAnnealingParams(..), simanSolve) import Numeric.LinearAlgebra.Data (Vector, (!)) import System.Random (mkStdGen, StdGen) import Debug.Trace class MetricSpace a => State a where energy :: a -> Double perturb :: MonadRandom m => Double -> a -> m a data Args = Args { numTries :: Word, numItersPerTemp :: Word, maxStepSize :: Double, boltzmannK :: Double, coolingTemp :: CoolingTemp } data CoolingTemp = CoolingTemp { initial :: Double, rate :: Double, min :: Double } argsToParams :: Args -> SimulatedAnnealingParams argsToParams Args { numTries, numItersPerTemp, maxStepSize, boltzmannK, coolingTemp = CoolingTemp { initial, rate, min } } = SimulatedAnnealingParams { n_tries = fromIntegral numTries, iters_fixed_T = fromIntegral numItersPerTemp, step_size = maxStepSize, boltzmann_k = boltzmannK, cooling_t_initial = initial, cooling_mu_t = rate, cooling_t_min = min } data Annealer a = Annealer { seed :: Int, numRandoms :: Int, params :: SimulatedAnnealingParams, initialState :: a, energy' :: a -> Double, metric' :: a -> a -> Double, step :: Vector Double -> Double -> a -> a, maybeShow :: Maybe (a -> String) } rawAnneal :: Annealer a -> a rawAnneal Annealer { seed, numRandoms, params, initialState, energy', metric', step, maybeShow } = simanSolve seed numRandoms params initialState energy' metric' step maybeShow anneal :: (State a, MonadRandom m) => a -> Args -> Maybe (a -> String) -> m a anneal initialState args maybeShow = do seed <- getRandom Annealer { seed, numRandoms = 1, params = argsToParams args, initialState, energy' = energy, metric' = metric, step, maybeShow } & rawAnneal & return where -- It's too hard to thread the monad through the SA, -- b/c the FFI library isn't designed with monads. -- So I'm doing it manually here, seeding a new StdGen using mkStdGen -- with a seed from the MonadRandom. step vectors stepSize s = s' where seed = (vectors ! 0) * (fromIntegral (maxBound :: Int32)) & truncate g = mkStdGen seed rand = perturb stepSize s s' = evalRand rand g realAnneal :: (State a, MonadRandom m) => a -> Args -> m a realAnneal initialState args = do undefined
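-- A hedged usage sketch (not part of this module; the toy type below and the
-- assumption that `metric` is the MetricSpace class method are illustrative):
--
--   newtype X = X Double
--   instance MetricSpace X where
--     metric (X a) (X b) = abs (a - b)
--   instance State X where
--     energy (X x) = (x - 3) ^ 2              -- minimized at x = 3
--     perturb stepSize (X x) = do
--       d <- getRandomR (negate stepSize, stepSize)  -- from MonadRandom
--       pure (X (x + d))
--
-- then `anneal (X 0) args Nothing` runs the solver in any MonadRandom,
-- drawing a single seed from the ambient monad as `anneal` above describes.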
Set Implicit Arguments. Require Import RelationClasses. From sflib Require Import sflib. From Paco Require Import paco. From PromisingLib Require Import Basic. From PromisingLib Require Import Loc. From PromisingLib Require Import Language. From PromisingLib Require Import Axioms. From PromisingLib Require Import Event. Require Export Program. Require Import Sequential. Require Import FlagAux. Ltac contra := match goal with [H: forall _: is_true true, is_true false |- _] => exploit H; ss end. Ltac clearb := repeat match goal with | [H: forall _: is_true false, is_true false |- _] => clear H | [H: forall _: is_true true, is_true true |- _] => clear H end. Section REDAT. Lemma perm_meet_high: forall p1 p2 (MEET: Perm.meet p1 p2 = Perm.high), (p1 = Perm.high) /\ (p2 = Perm.high). Proof. i. split. - hexploit Perm.meet_le_l. i. erewrite MEET in H. destruct p1; ss. - hexploit Perm.meet_le_r. i. erewrite MEET in H. destruct p2; ss. Qed. Lemma input_transl_access0: forall i0 i (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (ACC: Oracle.in_access i0 = None), SeqEvent.in_access i = None. Proof. i. unfold Oracle.input_le in INPUTLE. des. ss. clear ACQUIRE RELEASE. destruct i0, i; ss. destruct in_access, in_access0; ss. Qed. Lemma input_transl_access1: forall i0 i (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) l v f (ACC: Oracle.in_access i0 = Some (l, v, f)), exists v1 f1 v2, SeqEvent.in_access i = Some (l, v1, f1, v2). Proof. i. unfold Oracle.input_le in INPUTLE. des. ss. clear ACQUIRE RELEASE. destruct i0, i; ss. destruct in_access, in_access0; ss. clarify. destruct p0; ss. des_ifs. des; clarify. eauto. Qed. Lemma input_transl_acquire0: forall i0 i (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (ACC: Oracle.in_acquire i0 = None), SeqEvent.in_acquire i = None. Proof. i. unfold Oracle.input_le in INPUTLE. des. ss. clear ACCESS RELEASE. destruct i0, i; ss. destruct in_acquire, in_acquire0; ss. Qed. Lemma input_transl_acquire1: forall i0 i (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (ACC: Oracle.in_acquire i0 = Some ()), exists f, SeqEvent.in_acquire i = Some f. Proof. i. unfold Oracle.input_le in INPUTLE. des. ss. clear ACCESS RELEASE. destruct i0, i; ss. destruct in_acquire, in_acquire0; ss. eauto. Qed. Lemma input_transl_release0: forall i0 i (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (ACC: Oracle.in_release i0 = None), SeqEvent.in_release i = None. Proof. i. unfold Oracle.input_le in INPUTLE. des. ss. clear ACCESS ACQUIRE. destruct i0, i; ss. destruct in_release, in_release0; ss. Qed. Lemma input_transl_release1: forall i0 i (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (ACC: Oracle.in_release i0 = Some ()), exists v f, SeqEvent.in_release i = Some (v, f). Proof. i. unfold Oracle.input_le in INPUTLE. des. ss. clear ACCESS ACQUIRE. destruct i0, i; ss. destruct in_release, in_release0; ss. destruct p, u; ss. eauto. Qed. 
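(* The [red_*] lemmas below invert a single [SeqEvent.step]: for an atomic event they case-split on whether the event accesses a location and whether it is an acquire and/or a release, and in each case they pin down the oracle output [o], the input [i], and the resulting permissions [p1] and memory [m1]. *)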
Lemma red_rlx_full: forall p m l v ev i o p1 m1 (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (NOREL: is_release ev = false) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists p_new v_new, (<<EVACC: is_accessing ev = Some (l, v_new)>>) /\ (<<OO: o = Oracle.mk_output (Some p_new) None None>>) /\ (<<IN: i = SeqEvent.mk_input (Some (l, (SeqMemory.value_map m l), (SeqMemory.flags m l), v_new)) None None>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = ValueMap.write l v_new m.(SeqMemory.value_map)>>) /\ (<<MEMF: m1.(SeqMemory.flags) = Flags.update l false m.(SeqMemory.flags)>>) /\ (<<PERM: p1 = Perms.update l p_new p>>). Proof. i. unfold SeqEvent.wf_input in EVENT. des. inv STEP. inv REL. 2:{ hexploit RELEASE. rewrite <- H0. ss. i; clarify. } clear RELEASE RELEASE0. inv ACQ. 2:{ hexploit ACQUIRE. rewrite <- H2; ss. i. clarify. } clear ACQUIRE ACQUIRE0. inv UPD. { hexploit UPDATE; clear UPDATE. i; des. clear H1. hexploit H6; clear H6. rewrite ACC. eauto. i. des. rewrite H1 in H4. ss. } hexploit UPDATE; clear UPDATE. i; des. clear H6. hexploit H1; clear H1. erewrite <- H4. eauto. i; des; clarify. inv MEM. ss. esplits; ss; eauto. destruct o; ss; clarify. destruct i; ss; clarify. Qed. Lemma red_rlx: forall p m l v ev i o p1 m1 (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (NOREL: is_release ev = false) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) loc (NEQ: l <> loc) , (<<MEMV: SeqMemory.read loc m = SeqMemory.read loc m1>>) /\ (<<MEMF: SeqMemory.flags m loc = SeqMemory.flags m1 loc>>) /\ (<<PERM: p loc = p1 loc>>). Proof. i. hexploit red_rlx_full; eauto. i; des. destruct m, m1; ss. rewrite MEMV, MEMF, PERM. unfold ValueMap.write , Flags.update, Perms.update. rewrite ! Loc.eq_dec_neq; auto. Qed. Lemma red_acq_full: forall p m l v ev i o p1 m1 (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (NOREL: is_release ev = false) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists p_new v_new (p_acq : Perms.t) (v_acq : ValueMap.t), (<<EVACC: is_accessing ev = Some (l, v_new)>>) /\ (<<OO: o = Oracle.mk_output (Some p_new) (Some (p_acq, v_acq)) None>>) /\ (<<IN: i = SeqEvent.mk_input (Some (l, (SeqMemory.value_map m l), (SeqMemory.flags m l), v_new)) (Some (Flags.update l false m.(SeqMemory.flags))) None>>) /\ (<<PERM: p1 = Perms.join (Perms.update l p_new p) p_acq>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = ValueMap.acquire (Perms.acquired (Perms.update l p_new p) p_acq) v_acq (ValueMap.write l v_new m.(SeqMemory.value_map))>>) /\ (<<MEMF: m1.(SeqMemory.flags) = Flags.update l false m.(SeqMemory.flags)>>). Proof. i. unfold SeqEvent.wf_input in EVENT. des. inv STEP. inv REL. 2:{ hexploit RELEASE. rewrite <- H0. ss. i; clarify. } clear RELEASE RELEASE0. inv UPD. { hexploit UPDATE; clear UPDATE. i; des. clear H1. hexploit H4; clear H4. rewrite ACC. eauto. i. des. rewrite H1 in H2. ss. } hexploit UPDATE; clear UPDATE. i; des. clear H4. hexploit H1; clear H1. erewrite <- H2. eauto. i; des; clarify. inv ACQ. { hexploit ACQUIRE0; auto. i. rewrite <- H5 in H4. ss. } clear ACQUIRE ACQUIRE0. inv MEM. inv MEM0. ss. esplits; eauto. destruct o; ss; clarify. destruct i; ss; clarify. Qed. 
Lemma red_acq: forall p m l v ev i o p1 m1 (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (NOREL: is_release ev = false) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_acq : Perms.t) (v_acq : ValueMap.t) (f_acq : Flags.t), forall loc (NEQ: l <> loc), (<<ACQFLAG: Flag.le (f_acq loc) (SeqMemory.flags m loc)>>) /\ (<<ACQPERM: p1 loc = Perm.join (p loc) (p_acq loc)>>) /\ (<<ACQMEMV: SeqMemory.read loc m1 = ValueMap.read loc (ValueMap.acquire (Perms.acquired p p_acq) v_acq (SeqMemory.value_map m))>>) /\ (<<ACQMEMF: SeqMemory.flags m1 loc = SeqMemory.flags m loc>>). Proof. i. hexploit red_acq_full; eauto. i; des. destruct m, m1; ss. rewrite MEMV, MEMF, PERM. esplits. i. unfold ValueMap.write , Flags.update, Perms.update. splits. - refl. - unfold Perms.join, Perms.update. rewrite Loc.eq_dec_neq; auto. - unfold ValueMap.acquire, ValueMap.read, Perms.acquired, Perms.update, ValueMap.write. rewrite ! Loc.eq_dec_neq; auto. eapply NEQ. - unfold Flags.update. rewrite Loc.eq_dec_neq; auto. Unshelve. all: ss. Qed. Lemma red_rel_full: forall p m l v ev i o p1 m1 (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (ISREL: is_release ev = true) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists p_new v_new (p_rel : Perms.t), (<<EVACC: is_accessing ev = Some (l, v_new)>>) /\ (<<OO: o = Oracle.mk_output (Some p_new) None (Some p_rel)>>) /\ (<<IN: i = SeqEvent.mk_input (Some (l, (SeqMemory.value_map m l), (SeqMemory.flags m l), v_new)) None (Some (ValueMap.write l v_new (SeqMemory.value_map m), Flags.update l false (SeqMemory.flags m)))>>) /\ (<<PERM: p1 = Perms.meet (Perms.update l p_new p) p_rel>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = (ValueMap.write l v_new m.(SeqMemory.value_map))>>) /\ (<<MEMF: m1.(SeqMemory.flags) = Flags.bot>>). Proof. i. unfold SeqEvent.wf_input in EVENT. des. inv STEP. clarify. inv ACQ. 2:{ hexploit ACQUIRE. rewrite <- H0. ss. i; clarify. } clear ACQUIRE ACQUIRE0. inv UPD. { hexploit UPDATE; clear UPDATE. i; des. clear H1. hexploit H4; clear H4. rewrite ACC. eauto. i. des. rewrite H1 in H2. ss. } hexploit UPDATE; clear UPDATE. i; des. clear H4. hexploit H1; clear H1. erewrite <- H2. eauto. i; des; clarify. inv REL. { hexploit RELEASE0; auto. i. rewrite <- H5 in H4. ss. } clear RELEASE RELEASE0. inv MEM. inv MEM0. ss. esplits; eauto. destruct o; ss; clarify. destruct i; ss; clarify. Qed. Lemma red_rel: forall p m l v ev i o p1 m1 (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (ISREL: is_release ev = true) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_rel : Perms.t), forall loc (NEQ: l <> loc), (<<RELPERM: p1 loc = Perm.meet (p loc) (p_rel loc)>>) /\ (<<RELMEMV: SeqMemory.read loc m1 = SeqMemory.read loc m>>) /\ (<<RELMEMF: SeqMemory.flags m1 loc = false>>). Proof. i. hexploit red_rel_full; eauto. i; des. destruct m, m1; ss. rewrite MEMV, MEMF, PERM. esplits. i. splits. - unfold Perms.meet, Perms.update. rewrite Loc.eq_dec_neq; auto. - unfold ValueMap.write. rewrite Loc.eq_dec_neq; auto. - ss. Qed. 
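(* The combined acquire-release case below stacks both effects: permissions are first joined with the acquired [p_acq] and then met with the released [p_rel], the value map is acquired on top of the freshly written location, and all flags end up at [Flags.bot]. *)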
Lemma red_acq_rel_full: forall p m l v ev i o p1 m1 (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (ISREL: is_release ev = true) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists p_new v_new (p_acq p_rel : Perms.t) (v_acq : ValueMap.t), (<<EVACC: is_accessing ev = Some (l, v_new)>>) /\ (<<OO: o = Oracle.mk_output (Some p_new) (Some (p_acq, v_acq)) (Some p_rel)>>) /\ (<<IN: i = SeqEvent.mk_input (Some (l, (SeqMemory.value_map m l), (SeqMemory.flags m l), v_new)) (Some (Flags.update l false m.(SeqMemory.flags))) (Some (ValueMap.acquire (Perms.acquired (Perms.update l p_new p) p_acq) v_acq (ValueMap.write l v_new (SeqMemory.value_map m)), Flags.update l false (SeqMemory.flags m)))>>) /\ (<<PERM: p1 = Perms.meet (Perms.join (Perms.update l p_new p) p_acq) p_rel>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = ValueMap.acquire (Perms.acquired (Perms.update l p_new p) p_acq) v_acq (ValueMap.write l v_new m.(SeqMemory.value_map))>>) /\ (<<MEMF: m1.(SeqMemory.flags) = Flags.bot>>). Proof. i. unfold SeqEvent.wf_input in EVENT. des. inv STEP. clarify. inv ACQ. { hexploit ACQUIRE0; auto. i. rewrite <- H0 in H1. ss. } clear ACQUIRE ACQUIRE0. inv REL. { hexploit RELEASE0; auto. i. rewrite <- H2 in H1. ss. } clear RELEASE RELEASE0. inv UPD. { hexploit UPDATE; clear UPDATE. i; des. clear H1. hexploit H6; clear H6. rewrite ACC. eauto. i. des. rewrite H1 in H4. ss. } hexploit UPDATE; clear UPDATE. i; des. hexploit H1; clear H1. erewrite <- H4. eauto. i; des; clarify. inv MEM. inv MEM0. inv MEM1. ss. esplits; eauto. destruct o; ss; clarify. destruct i; ss; clarify. Qed. Lemma red_acq_rel: forall p m l v ev i o p1 m1 (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (ISREL: is_release ev = true) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_acq p_rel : Perms.t) (v_acq : ValueMap.t) (f_acq : Flags.t), forall loc (NEQ: l <> loc), (<<ACQFLAG: Flag.le (f_acq loc) (SeqMemory.flags m loc)>>) /\ (<<PERM1: p1 loc = Perm.meet (Perm.join (p loc) (p_acq loc)) (p_rel loc)>>) /\ (<<MEMV1: SeqMemory.read loc m1 = ValueMap.read loc (ValueMap.acquire (Perms.acquired p p_acq) v_acq (SeqMemory.value_map m))>>) /\ (<<MEMF1: SeqMemory.flags m1 loc = false>>). Proof. i. hexploit red_acq_rel_full; eauto. i; des. destruct m, m1; ss. rewrite MEMV, MEMF, PERM. esplits. i. splits. - refl. - unfold Perms.join, Perms.meet, Perms.update. rewrite Loc.eq_dec_neq; auto. - unfold ValueMap.acquire, ValueMap.read, Perms.acquired, Perms.update, ValueMap.write. rewrite ! Loc.eq_dec_neq; auto. eapply NEQ. - ss. Unshelve. all: ss. Qed. Lemma red_rlx2_oracle: forall p m ev0 i0 i o p1 m1 (ACC: Oracle.in_access i0 = None) (NOACQ: Oracle.in_acquire i0 = None) (NOREL: Oracle.in_release i0 = None) (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (EVENT: Oracle.wf_input ev0 i0) (STEP: SeqEvent.step i o p m p1 m1) , (<<OO: o = Oracle.mk_output None None None>>) /\ (<<IN: i = SeqEvent.mk_input None None None>>) /\ (<<MEM: m = m1>>) /\ (<<PERM: p = p1>>). Proof. i. unfold Oracle.wf_input in EVENT. des. inv STEP. hexploit input_transl_release0; eauto. intro IREL; des. hexploit input_transl_acquire0; eauto. intro IACQ; des. hexploit input_transl_access0; eauto. intro IACC; des. inv REL. 2:{ rewrite IREL in H0; ss. } clear RELEASE RELEASE0. inv ACQ. 2:{ rewrite IACQ in H2; ss. } clear ACQUIRE ACQUIRE0. inv UPD. 2:{ rewrite IACC in H4; ss. } splits; auto. destruct o; ss; clarify. 
destruct i; ss; clarify. Qed. Lemma red_rlx2_full: forall p m ev i o p1 m1 (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (NOREL: is_release ev = false) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , (<<OO: o = Oracle.mk_output None None None>>) /\ (<<IN: i = SeqEvent.mk_input None None None>>) /\ (<<MEM: m = m1>>) /\ (<<PERM: p = p1>>). Proof. i. unfold SeqEvent.wf_input in EVENT. des. inv STEP. inv REL. 2:{ hexploit RELEASE. rewrite <- H0. ss. i; clarify. } clear RELEASE RELEASE0. inv ACQ. 2:{ hexploit ACQUIRE. rewrite <- H2; ss. i. clarify. } clear ACQUIRE ACQUIRE0. inv UPD. 2:{ hexploit UPDATE. rewrite <- H4; ss. i. rewrite ACC in H1. des. hexploit H1. do 2 eexists. refl. i; des; ss. } splits; auto. destruct o; ss. rewrite <- H; rewrite <- H3; rewrite <- H5. ss. destruct i; ss. rewrite <- H0; rewrite <- H2; rewrite <- H4. ss. Qed. Lemma red_rlx2: forall p m ev i o p1 m1 (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (NOREL: is_release ev = false) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , (<<MEM: m = m1>>) /\ (<<PERM: p = p1>>). Proof. i. hexploit red_rlx2_full; eauto. i; des. destruct m, m1; ss. Qed. Lemma red_acq2_oracle: forall p m ev0 i0 i o p1 m1 (ACC: Oracle.in_access i0 = None) (ISACQ: Oracle.in_acquire i0 = Some ()) (NOREL: Oracle.in_release i0 = None) (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (EVENT: Oracle.wf_input ev0 i0) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_acq : Perms.t) (v_acq : ValueMap.t), (<<OO: o = Oracle.mk_output None (Some (p_acq, v_acq)) None>>) /\ (<<IN: i = SeqEvent.mk_input None (Some m.(SeqMemory.flags)) None>>) /\ (<<PERM: p1 = Perms.join p p_acq>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = ValueMap.acquire (Perms.acquired p p_acq) v_acq m.(SeqMemory.value_map)>>) /\ (<<MEMF: m1.(SeqMemory.flags) = m.(SeqMemory.flags)>>). Proof. i. unfold Oracle.wf_input in EVENT. des. inv STEP. hexploit input_transl_release0; eauto. intro IREL; des. hexploit input_transl_acquire1; eauto. intro IACQ; des. hexploit input_transl_access0; eauto. intro IACC; des. inv REL. 2:{ rewrite IREL in H0; ss. } clear RELEASE RELEASE0. inv UPD. 2:{ rewrite IACC in H2; ss. } inv ACQ. { rewrite IACQ in H4; ss. } clear ACQUIRE ACQUIRE0. inv MEM. ss. esplits; auto. destruct o; ss; clarify. destruct i; ss; clarify. Qed. Lemma red_acq2_full: forall p m ev i o p1 m1 (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (NOREL: is_release ev = false) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_acq : Perms.t) (v_acq : ValueMap.t), (<<OO: o = Oracle.mk_output None (Some (p_acq, v_acq)) None>>) /\ (<<IN: i = SeqEvent.mk_input None (Some m.(SeqMemory.flags)) None>>) /\ (<<PERM: p1 = Perms.join p p_acq>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = ValueMap.acquire (Perms.acquired p p_acq) v_acq m.(SeqMemory.value_map)>>) /\ (<<MEMF: m1.(SeqMemory.flags) = m.(SeqMemory.flags)>>). Proof. i. unfold SeqEvent.wf_input in EVENT. des. inv STEP. inv REL. 2:{ hexploit RELEASE. rewrite <- H0. ss. i; clarify. } clear RELEASE RELEASE0. inv UPD. 2:{ hexploit UPDATE. rewrite <- H2; ss. i. rewrite ACC in H1. des. hexploit H1. do 2 eexists. refl. i; des; ss. } inv ACQ. { clarify. hexploit ACQUIRE0; auto. i. rewrite <- H4 in H1. ss. } clear ACQUIRE ACQUIRE0. inv MEM. ss. esplits; auto. destruct o; ss. rewrite <- H; rewrite <- H3; rewrite <- H5. ss. destruct i; ss. 
rewrite <- H0; rewrite <- H2; rewrite <- H4. ss. Qed. Lemma red_acq2: forall p m ev i o p1 m1 (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (NOREL: is_release ev = false) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_acq : Perms.t) (v_acq : ValueMap.t) (f_acq : Flags.t), (<<ACQFLAG: Flags.le (f_acq) (SeqMemory.flags m)>>) /\ (<<ACQPERM: p1 = Perms.join (p) (p_acq)>>) /\ (<<ACQMEMV: SeqMemory.value_map m1 = (ValueMap.acquire (Perms.acquired p p_acq) v_acq (SeqMemory.value_map m))>>) /\ (<<ACQMEMF: SeqMemory.flags m1 = SeqMemory.flags m>>). Proof. i. hexploit red_acq2_full; eauto. i; des. destruct m, m1; ss. do 3 eexists. splits; eauto. refl. Qed. Lemma red_rel2_oracle: forall p m ev0 i0 i o p1 m1 (ACC: Oracle.in_access i0 = None) (NOACQ: Oracle.in_acquire i0 = None) (ISREL: Oracle.in_release i0 = Some ()) (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (EVENT: Oracle.wf_input ev0 i0) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_rel : Perms.t), (<<OO: o = Oracle.mk_output None None (Some p_rel)>>) /\ (<<IN: i = SeqEvent.mk_input None None (Some (SeqMemory.value_map m, SeqMemory.flags m))>>) /\ (<<PERM: p1 = Perms.meet p p_rel>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = m.(SeqMemory.value_map)>>) /\ (<<MEMF: m1.(SeqMemory.flags) = Flags.bot>>). Proof. i. unfold Oracle.wf_input in EVENT. des. inv STEP. hexploit input_transl_release1; eauto. intro IREL; des. hexploit input_transl_acquire0; eauto. intro IACQ; des. hexploit input_transl_access0; eauto. intro IACC; des. inv REL. { rewrite IREL in H0; ss. } clear RELEASE RELEASE0. inv UPD. 2:{ rewrite IACC in H2; ss. } inv ACQ. 2:{ rewrite IACQ in H4; ss. } clear ACQUIRE ACQUIRE0. inv MEM. ss. esplits; auto. destruct o; ss; clarify. destruct i; ss; clarify. Qed. Lemma red_rel2_full: forall p m ev i o p1 m1 (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (ISREL: is_release ev = true) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_rel : Perms.t), (<<OO: o = Oracle.mk_output None None (Some p_rel)>>) /\ (<<IN: i = SeqEvent.mk_input None None (Some (SeqMemory.value_map m, SeqMemory.flags m))>>) /\ (<<PERM: p1 = Perms.meet p p_rel>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = m.(SeqMemory.value_map)>>) /\ (<<MEMF: m1.(SeqMemory.flags) = Flags.bot>>). Proof. i. unfold SeqEvent.wf_input in EVENT. des. inv STEP. clarify. inv ACQ. 2:{ hexploit ACQUIRE. rewrite <- H0. ss. i; clarify. } clear ACQUIRE ACQUIRE0. inv UPD. 2:{ hexploit UPDATE. rewrite <- H2; ss. i. rewrite ACC in H1. des. hexploit H1. do 2 eexists. refl. i; des; ss. } inv REL. { hexploit RELEASE0; auto. i. rewrite <- H4 in H1. ss. } clear RELEASE RELEASE0. inv MEM. ss. do 1 eexists. splits; auto. destruct o; ss. rewrite <- H; rewrite <- H3; rewrite <- H5. ss. destruct i; ss. rewrite <- H0; rewrite <- H2; rewrite <- H4. ss. Qed. Lemma red_rel2: forall p m ev i o p1 m1 (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (ISREL: is_release ev = true) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_rel : Perms.t), (<<RELPERM: p1 = Perms.meet (p) (p_rel)>>) /\ (<<RELMEMV: SeqMemory.value_map m1 = SeqMemory.value_map m>>) /\ (<<RELMEMF: SeqMemory.flags m1 = Flags.bot>>). Proof. i. hexploit red_rel2_full; eauto. i; des. destruct m, m1; ss. do 1 eexists. splits; eauto. Qed. 
Lemma red_acq_rel2_oracle: forall p m ev0 i0 i o p1 m1 (ACC: Oracle.in_access i0 = None) (ISACQ: Oracle.in_acquire i0 = Some ()) (ISREL: Oracle.in_release i0 = Some ()) (INPUTLE: Oracle.input_le i0 (SeqEvent.get_oracle_input i)) (EVENT: Oracle.wf_input ev0 i0) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_acq p_rel : Perms.t) (v_acq : ValueMap.t), (<<OO: o = Oracle.mk_output None (Some (p_acq, v_acq)) (Some p_rel)>>) /\ (<<IN: i = SeqEvent.mk_input None (Some m.(SeqMemory.flags)) (Some (ValueMap.acquire (Perms.acquired p p_acq) v_acq (SeqMemory.value_map m), (SeqMemory.flags m)))>>) /\ (<<PERM: p1 = Perms.meet (Perms.join p p_acq) p_rel>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = ValueMap.acquire (Perms.acquired p p_acq) v_acq m.(SeqMemory.value_map)>>) /\ (<<MEMF: m1.(SeqMemory.flags) = Flags.bot>>). Proof. i. unfold Oracle.wf_input in EVENT. des. inv STEP. hexploit input_transl_release1; eauto. intro IREL; des. hexploit input_transl_acquire1; eauto. intro IACQ; des. hexploit input_transl_access0; eauto. intro IACC; des. inv REL. { rewrite IREL in H0; ss. } clear RELEASE RELEASE0. inv UPD. 2:{ rewrite IACC in H2; ss. } inv ACQ. { rewrite IACQ in H4; ss. } clear ACQUIRE ACQUIRE0. inv MEM. inv MEM0. ss. esplits; auto. destruct o; ss; clarify. destruct i; ss; clarify. Qed. Lemma red_acq_rel2_full: forall p m ev i o p1 m1 (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (ISREL: is_release ev = true) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_acq p_rel : Perms.t) (v_acq : ValueMap.t), (<<OO: o = Oracle.mk_output None (Some (p_acq, v_acq)) (Some p_rel)>>) /\ (<<IN: i = SeqEvent.mk_input None (Some m.(SeqMemory.flags)) (Some (ValueMap.acquire (Perms.acquired p p_acq) v_acq (SeqMemory.value_map m), (SeqMemory.flags m)))>>) /\ (<<PERM: p1 = Perms.meet (Perms.join p p_acq) p_rel>>) /\ (<<MEMV: m1.(SeqMemory.value_map) = ValueMap.acquire (Perms.acquired p p_acq) v_acq m.(SeqMemory.value_map)>>) /\ (<<MEMF: m1.(SeqMemory.flags) = Flags.bot>>). Proof. i. unfold SeqEvent.wf_input in EVENT. des. inv STEP. clarify. inv ACQ. { hexploit ACQUIRE0; auto. i. rewrite <- H0 in H1. ss. } clear ACQUIRE ACQUIRE0. inv REL. { hexploit RELEASE0; auto. i. rewrite <- H2 in H1. ss. } clear RELEASE RELEASE0. inv UPD. 2:{ hexploit UPDATE. rewrite <- H4; ss. i. rewrite ACC in H1. des. hexploit H1. do 2 eexists. refl. i; des; ss. } inv MEM. inv MEM0. ss. do 3 eexists. splits; auto. destruct o; ss. rewrite <- H; rewrite <- H3; rewrite <- H5. ss. destruct i; ss. rewrite <- H0; rewrite <- H2; rewrite <- H4. ss. Qed. Lemma red_acq_rel2: forall p m ev i o p1 m1 (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (ISREL: is_release ev = true) (EVENT: SeqEvent.wf_input ev i) (STEP: SeqEvent.step i o p m p1 m1) , exists (p_acq p_rel : Perms.t) (v_acq : ValueMap.t) (f_acq : Flags.t), (<<ACQFLAG: Flags.le (f_acq) (SeqMemory.flags m)>>) /\ (<<PERM1: p1 = Perms.meet (Perms.join (p) (p_acq)) (p_rel)>>) /\ (<<MEMV1: SeqMemory.value_map m1 = (ValueMap.acquire (Perms.acquired p p_acq) v_acq (SeqMemory.value_map m))>>) /\ (<<MEMF1: SeqMemory.flags m1 = Flags.bot>>). Proof. i. hexploit red_acq_rel2_full; eauto. i; des. destruct m, m1; ss. do 3 eexists. esplits; eauto. refl. Qed. End REDAT. Section REDSTEP. 
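(* Section REDSTEP lifts the reductions to a simulation argument: given a target-side [SeqEvent.step] with well-formed input and output, each [step_*] lemma constructs a source-side input [i_src] and memory [mem_src] such that the source takes a step with the same output [o] and ends in the same permissions [p1]. *)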
Lemma step_rlx: forall ev l v i_tgt o p src_m tgt_m p1 mem_tgt (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (NOREL: is_release ev = false) (INPUT: SeqEvent.wf_input ev i_tgt) (OUTPUT: Oracle.wf_output ev o) (STEP_TGT: SeqEvent.step i_tgt o p tgt_m p1 mem_tgt) , exists (i_src : SeqEvent.input) (mem_src : SeqMemory.t), (<<IN_SRC: SeqEvent.wf_input ev i_src>>) /\ (<<STEP_SRC: SeqEvent.step i_src o p src_m p1 mem_src>>). Proof. i. hexploit red_rlx_full; eauto. i; des. subst o i_tgt. ss. inv INPUT. ss. des. hexploit H. i; des. clear H0. hexploit H1; clear H1. rewrite ACC. eauto. i; des; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. 2:{ repeat econs; eauto. } repeat econs; i; ss; des; clarify; eauto. Qed. Lemma step_acq: forall ev l v i_tgt o p src_m tgt_m p1 mem_tgt (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (NOREL: is_release ev = false) (INPUT: SeqEvent.wf_input ev i_tgt) (OUTPUT: Oracle.wf_output ev o) (STEP_TGT: SeqEvent.step i_tgt o p tgt_m p1 mem_tgt) , exists (i_src : SeqEvent.input) (mem_src : SeqMemory.t), (<<IN_SRC: SeqEvent.wf_input ev i_src>>) /\ (<<STEP_SRC: SeqEvent.step i_src o p src_m p1 mem_src>>). Proof. i. hexploit red_acq_full; eauto. i; des. subst o i_tgt. ss. inv INPUT. ss. des. hexploit H. i; des. clear H0. hexploit H1; clear H1. rewrite ACC. eauto. i; des; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. 2:{ repeat econs; eauto. } repeat econs; i; ss; des; clarify; eauto. Qed. Lemma step_rel: forall ev l v i_tgt o p src_m tgt_m p1 mem_tgt (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (ISREL: is_release ev = true) (INPUT: SeqEvent.wf_input ev i_tgt) (OUTPUT: Oracle.wf_output ev o) (STEP_TGT: SeqEvent.step i_tgt o p tgt_m p1 mem_tgt) , exists (i_src : SeqEvent.input) (mem_src : SeqMemory.t), (<<IN_SRC: SeqEvent.wf_input ev i_src>>) /\ (<<STEP_SRC: SeqEvent.step i_src o p src_m p1 mem_src>>). Proof. i. hexploit red_rel_full; eauto. i; des. subst o i_tgt. ss. inv INPUT. ss. des. hexploit H. i; des. clear H0. hexploit H1; clear H1. rewrite ACC. eauto. i; des; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. 2:{ repeat econs; eauto. } repeat econs; i; ss; des; clarify; eauto. Qed. Lemma step_acq_rel: forall ev l v i_tgt o p src_m tgt_m p1 mem_tgt (ACC: is_accessing ev = Some (l, v)) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (ISREL: is_release ev = true) (INPUT: SeqEvent.wf_input ev i_tgt) (OUTPUT: Oracle.wf_output ev o) (STEP_TGT: SeqEvent.step i_tgt o p tgt_m p1 mem_tgt) , exists (i_src : SeqEvent.input) (mem_src : SeqMemory.t), (<<IN_SRC: SeqEvent.wf_input ev i_src>>) /\ (<<STEP_SRC: SeqEvent.step i_src o p src_m p1 mem_src>>). Proof. i. hexploit red_acq_rel_full; eauto. i; des. subst o i_tgt. ss. inv INPUT. ss. des. hexploit H. i; des. clear H0. hexploit H1; clear H1. rewrite ACC. eauto. i; des; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. 2:{ repeat econs; eauto. } repeat econs; i; ss; des; clarify; eauto. Qed. Lemma step_rlx2: forall ev i_tgt o p src_m tgt_m p1 mem_tgt (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (NOREL: is_release ev = false) (INPUT: SeqEvent.wf_input ev i_tgt) (OUTPUT: Oracle.wf_output ev o) (STEP_TGT: SeqEvent.step i_tgt o p tgt_m p1 mem_tgt) , exists (i_src : SeqEvent.input) (mem_src : SeqMemory.t), (<<IN_SRC: SeqEvent.wf_input ev i_src>>) /\ (<<STEP_SRC: SeqEvent.step i_src o p src_m p1 mem_src>>). Proof. i. 
hexploit red_rlx2_full; eauto. i; des. subst o i_tgt. ss. inv INPUT. ss. des. clarify. eexists (SeqEvent.mk_input _ _ _). esplits. 2:{ repeat econs; eauto. } repeat econs; i; ss; des; clarify; eauto. Unshelve. all: ss. all: try exact 0. exact false. Qed. Lemma step_acq2: forall ev i_tgt o p src_m tgt_m p1 mem_tgt (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (NOREL: is_release ev = false) (INPUT: SeqEvent.wf_input ev i_tgt) (OUTPUT: Oracle.wf_output ev o) (STEP_TGT: SeqEvent.step i_tgt o p tgt_m p1 mem_tgt) , exists (i_src : SeqEvent.input) (mem_src : SeqMemory.t), (<<IN_SRC: SeqEvent.wf_input ev i_src>>) /\ (<<STEP_SRC: SeqEvent.step i_src o p src_m p1 mem_src>>). Proof. i. hexploit red_acq2_full; eauto. i; des. subst o i_tgt. ss. inv INPUT. ss. des. clarify. eexists (SeqEvent.mk_input _ _ _). esplits. 2:{ repeat econs; eauto. } repeat econs; i; ss; des; clarify; eauto. Unshelve. all: ss. all: try exact 0. exact false. Qed. Lemma step_rel2: forall ev i_tgt o p src_m tgt_m p1 mem_tgt (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (NOACQ: is_acquire ev = false) (ISREL: is_release ev = true) (INPUT: SeqEvent.wf_input ev i_tgt) (OUTPUT: Oracle.wf_output ev o) (STEP_TGT: SeqEvent.step i_tgt o p tgt_m p1 mem_tgt) , exists (i_src : SeqEvent.input) (mem_src : SeqMemory.t), (<<IN_SRC: SeqEvent.wf_input ev i_src>>) /\ (<<STEP_SRC: SeqEvent.step i_src o p src_m p1 mem_src>>). Proof. i. hexploit red_rel2_full; eauto. i; des. subst o i_tgt. ss. inv INPUT. ss. des. clarify. eexists (SeqEvent.mk_input _ _ _). esplits. 2:{ repeat econs; eauto. } repeat econs; i; ss; des; clarify; eauto. Unshelve. all: ss. all: try exact 0. exact false. Qed. Lemma step_acq_rel2: forall ev i_tgt o p src_m tgt_m p1 mem_tgt (ACC: is_accessing ev = None) (ATOMIC: is_atomic_event ev) (ISACQ: is_acquire ev = true) (ISREL: is_release ev = true) (INPUT: SeqEvent.wf_input ev i_tgt) (OUTPUT: Oracle.wf_output ev o) (STEP_TGT: SeqEvent.step i_tgt o p tgt_m p1 mem_tgt) , exists (i_src : SeqEvent.input) (mem_src : SeqMemory.t), (<<IN_SRC: SeqEvent.wf_input ev i_src>>) /\ (<<STEP_SRC: SeqEvent.step i_src o p src_m p1 mem_src>>). Proof. i. hexploit red_acq_rel2_full; eauto. i; des. subst o i_tgt. ss. inv INPUT. ss. des. clarify. eexists (SeqEvent.mk_input _ _ _). esplits. 2:{ repeat econs; eauto. } repeat econs; i; ss; des; clarify; eauto. Unshelve. all: ss. all: try exact 0. exact false. Qed. Lemma exists_input_no_acq: forall pe (NOACQ: is_acquire pe = false) m0 p0, (exists i, (forall o (WFO: Oracle.wf_output pe o), exists m1 p1, (SeqEvent.step i o p0 m0 p1 m1)) /\ (Oracle.wf_input pe (SeqEvent.get_oracle_input i)) /\ (SeqEvent.wf_input pe i)). Proof. i. destruct (is_accessing pe) eqn:ACC, (is_release pe) eqn:REL. - destruct p. eexists (SeqEvent.mk_input (Some (t, _, _, _)) None (Some _)). splits. 2:{ econs; ss; eauto. ii. rewrite ACC. split; i; des; clarify; eauto. clarify. } 2:{ econs; ss; eauto. ii. rewrite ACC. split; i; des; clarify; eauto. clarify. } i. unfold Oracle.wf_output in WFO. des. clarify. destruct o; ss. destruct out_access, out_acquire, out_release; ss; clearb; try by contra. 2:{ hexploit UPDATE0. rewrite ACC; ss. i; clarify. } do 2 eexists. econs; ss. econs; eauto. econs; eauto. econs; eauto. econs; eauto. econs. - destruct p. eexists (SeqEvent.mk_input (Some (t, _, _, _)) None None). splits. 2:{ econs; ss; eauto. ii. rewrite ACC. split; i; des; clarify; eauto. clarify. } 2:{ econs; ss; eauto. ii. rewrite ACC. split; i; des; clarify; eauto. clarify. } i. 
unfold Oracle.wf_output in WFO. des. clarify. destruct o; ss. destruct out_access, out_acquire, out_release; ss; clearb; try by contra. 2:{ hexploit UPDATE0. rewrite ACC; ss. i; clarify. } do 2 eexists. econs; ss. econs; eauto. econs; eauto. econs; eauto. econs; eauto. - eexists (SeqEvent.mk_input None None (Some _)). splits. 2:{ econs; ss; eauto. ii. split; i; des; clarify; eauto. clarify. } 2:{ econs; ss; eauto. ii. split; i; des; clarify; eauto. clarify. } i. unfold Oracle.wf_output in WFO. des. clarify. destruct o; ss. destruct out_access, out_acquire, out_release; ss; clearb; try by contra. { hexploit UPDATE; auto. rewrite ACC; ss. } do 2 eexists. econs; ss. econs; eauto. econs; eauto. econs; eauto. econs; eauto. - eexists (SeqEvent.mk_input None None None). splits. 2:{ econs; ss; eauto. ii. split; i; des; clarify; eauto. clarify. } 2:{ econs; ss; eauto. ii. split; i; des; clarify; eauto. clarify. } i. unfold Oracle.wf_output in WFO. des. clarify. destruct o; ss. destruct out_access, out_acquire, out_release; ss; clearb; try by contra. { hexploit UPDATE; auto. rewrite ACC; ss. } do 2 eexists. econs; ss. econs; eauto. econs; eauto. econs; eauto. Qed. End REDSTEP. Section ORACLE. Lemma oracle_no_acquire: forall e0 e i0 (ACQUIRE : ~ is_acquire e) (EVENT : ProgramEvent.le e0 e) (INPUT0 : Oracle.wf_input e0 i0) , Oracle.in_acquire i0 = None. Proof. i. unfold Oracle.wf_input in INPUT0. des. unfold ProgramEvent.le in EVENT. clear UPDATE RELEASE RELEASE0. des_ifs; destruct (Oracle.in_acquire i0); ss; try by contra. - hexploit ACQUIRE0; auto; i. destruct ord; ss. - des; clarify. hexploit ACQUIRE0; auto; i. destruct ordr0; ss. - hexploit ACQUIRE0; auto; i. destruct ordw; destruct ordr; ss. Qed. Lemma oracle_access_none: forall e0 e i0 (ORACLE: Oracle.in_access i0 = None) (EVENT: ProgramEvent.le e0 e) (INPUT0 : Oracle.wf_input e0 i0) , is_accessing e = None. Proof. i. unfold Oracle.wf_input in INPUT0. des. unfold ProgramEvent.le in EVENT. clear ACQUIRE ACQUIRE0 RELEASE RELEASE0. des_ifs; destruct (Oracle.in_access i0); ss; clarify; try by contra. all: hexploit UPDATE; i; des; hexploit H0; i; des; ss; eauto. Qed. Lemma oracle_access_same: forall e0 e i0 t0 t1 t2 (ORACLE: Oracle.in_access i0 = Some (t0, t1, t2)) (EVENT: ProgramEvent.le e0 e) (INPUT0 : Oracle.wf_input e0 i0) , exists val, is_accessing e = Some (t0, val). Proof. i. unfold Oracle.wf_input in INPUT0. des. unfold ProgramEvent.le in EVENT. clear ACQUIRE ACQUIRE0 RELEASE RELEASE0. des_ifs; destruct (Oracle.in_access i0); ss; clarify; try by contra. 1,5,6,7: hexploit UPDATE; i; des; hexploit H; eauto. - hexploit UPDATE. i; des. hexploit H0; eauto. i; des. clarify; eauto. - des; clarify. hexploit UPDATE. i; des. hexploit H0; eauto. i; des. clarify; eauto. - des; clarify. hexploit UPDATE. i; des. hexploit H0; eauto. i; des. clarify; eauto. Qed. End ORACLE. Section UPTO. Section LANG. Variable lang_src: language. Variable lang_tgt: language. Variable sim_terminal: forall (st_src:(Language.state lang_src)) (st_tgt:(Language.state lang_tgt)), Prop. Lemma event_step_flags i o p0 p1 m0 m1 (STEP: SeqEvent.step i o p0 m0 p1 m1) : Flags.le (SeqMemory.flags m0) (Flags.join (SeqMemory.flags m1) (SeqEvent.written i)). Proof. inv STEP. inv ACQ. { clear H. inv UPD. { clear H. inv REL. - clear H. unfold SeqEvent.written. rewrite <- H1. rewrite <- H2. rewrite flags_join_bot_l. rewrite flags_join_bot_r. refl. - clear H. unfold SeqEvent.written. rewrite <- H1. rewrite <- H2. rewrite flags_join_bot_l. inv MEM. ss. rewrite flags_join_bot_l. refl. } { clear H. 
inv REL. - clear H. unfold SeqEvent.written. rewrite <- H1. rewrite <- H2. inv MEM. ss. des_ifs. + ii. unfold Flags.update, Flags.add, Flags.join. des_ifs. apply Flag.join_ge_l. + ii. unfold Flags.update, Flags.add, Flags.join. des_ifs. apply Flag.join_ge_l. - clear H. unfold SeqEvent.written. rewrite <- H1. rewrite <- H2. inv MEM. ss. inv MEM0. ss. des_ifs. + ii. unfold Flags.update, Flags.add, Flags.join. des_ifs. do 2 rewrite flag_join_bot_l. refl. + ii. unfold Flags.update, Flags.add, Flags.join. des_ifs. do 2 rewrite flag_join_bot_l. refl. } } { clear H. inv MEM. ss. inv UPD; ss. { clear H. inv REL; ss. - clear H. apply Flags.join_ge_l. - clear H. inv MEM; ss. unfold SeqEvent.written. rewrite <- H1. rewrite <- H2. do 2 rewrite flags_join_bot_l. refl. } { clear H. inv REL; ss. - clear H. unfold SeqEvent.written. rewrite <- H1. rewrite <- H2. inv MEM. ss. des_ifs. + ii. unfold Flags.update, Flags.add, Flags.join. des_ifs. apply Flag.join_ge_l. + ii. unfold Flags.update, Flags.add, Flags.join. des_ifs. apply Flag.join_ge_l. - clear H. unfold SeqEvent.written. rewrite <- H1. rewrite <- H2. inv MEM. ss. inv MEM0. ss. des_ifs. + ii. unfold Flags.update, Flags.add, Flags.join. des_ifs. do 2 rewrite flag_join_bot_l. refl. + ii. unfold Flags.update, Flags.add, Flags.join. des_ifs. do 2 rewrite flag_join_bot_l. refl. } } Qed. Lemma at_step_flags e i o (th0 th1: SeqThread.t (lang_src)) (STEP: SeqThread.at_step e i o th0 th1) : Flags.le (SeqMemory.flags (SeqState.memory (SeqThread.state th0))) (Flags.join (SeqMemory.flags (SeqState.memory (SeqThread.state th1))) (SeqEvent.written i)). Proof. inv STEP. ss. eapply event_step_flags; eauto. Qed. Lemma one_na_step_flags (st1 st2: SeqState.t (lang_src)) p (STEP: SeqState.na_step p MachineEvent.silent st1 st2) : Flags.le st1.(SeqState.memory).(SeqMemory.flags) st2.(SeqState.memory).(SeqMemory.flags). Proof. inv STEP. inv LOCAL; try refl. des_ifs. destruct m0; ss. ii. unfold Flags.update. condtac. - destruct (flags loc0); ss. - refl. Qed. Lemma na_step_flags (th0 th1: SeqThread.t (lang_src)) (STEP: SeqThread.na_step (@SeqState.na_step lang_src) MachineEvent.silent th0 th1) : Flags.le (SeqMemory.flags (SeqState.memory (SeqThread.state th0))) (SeqMemory.flags (SeqState.memory (SeqThread.state th1))). Proof. inv STEP. eapply one_na_step_flags; eauto. Qed. Lemma na_steps_flags (st1 st2: SeqState.t (lang_src)) p (STEPS: rtc (SeqState.na_step p MachineEvent.silent) st1 st2) : Flags.le st1.(SeqState.memory).(SeqMemory.flags) st2.(SeqState.memory).(SeqMemory.flags). Proof. induction STEPS. { refl. } hexploit one_na_step_flags; eauto. i. etrans; eauto. Qed. Lemma one_na_step_flags_events (st1 st2: SeqState.t (lang_src)) p e (STEP: SeqState.na_step p e st1 st2) : Flags.le st1.(SeqState.memory).(SeqMemory.flags) st2.(SeqState.memory).(SeqMemory.flags). Proof. destruct e. { eapply one_na_step_flags; eauto. } - inv STEP. inv LOCAL. des_ifs. - inv STEP. inv LOCAL; ss; try refl. des_ifs. ii. unfold Flags.update. clear Heq. des_ifs. destruct (SeqMemory.flags m0 loc); ss. refl. Qed. Lemma opt_na_step_flags_events (st1 st2: SeqState.t (lang_src)) p e (STEP: SeqState.na_opt_step p e st1 st2) : Flags.le st1.(SeqState.memory).(SeqMemory.flags) st2.(SeqState.memory).(SeqMemory.flags). Proof. inv STEP. eapply one_na_step_flags_events; eauto. refl. Qed. 
Lemma partial_step_flags tr w th0 th1 (STEPS: SeqThread.steps (@SeqState.na_step lang_src) tr th0 th1) (WF: Oracle.wf th0.(SeqThread.oracle)) (TRACE: SeqThread.writing_trace tr w) : (Flags.le (th0.(SeqThread.state).(SeqState.memory).(SeqMemory.flags)) (Flags.join th1.(SeqThread.state).(SeqState.memory).(SeqMemory.flags) w)). Proof. depgen w. induction STEPS; i; ss. { inv TRACE. rewrite flags_join_bot_r. refl. } { hexploit IHSTEPS; clear IHSTEPS; eauto. { inv STEP. ss. } { i. hexploit na_step_flags; eauto. i. etrans. 2:eauto. auto. } } inv TRACE. hexploit IHSTEPS; clear IHSTEPS; eauto. { inv STEP. ss. unfold Oracle.wf in WF. punfold WF. 2: eapply Oracle.wf_mon. inv WF. hexploit WF0; clear WF0; eauto. i; des. pclearbot. auto. } i. rename H into IH. hexploit at_step_flags; eauto. i. etrans. eauto. clear H. match goal with | [|-_ _ (_ ?a (_ ?b ?c))] => replace (Flags.join a (Flags.join b c)) with (Flags.join (Flags.join a c) b) end. 2:{ rewrite flags_join_comm. rewrite flags_join_assoc. symmetry. rewrite flags_join_assoc. f_equal. rewrite flags_join_comm. auto. } apply Flags.join_mon_l. auto. Qed. End LANG. Variant deferred_le_sf_ctx (sim_seq: forall (lang_src: language) (lang_tgt: language) (sim_terminal: forall (st_src:(Language.state lang_src)) (st_tgt:(Language.state lang_tgt)), Prop) (p0: Perms.t) (d0: Flags.t) (st_src0: SeqState.t lang_src) (st_tgt0: SeqState.t lang_tgt), Prop) (lang_src: language) (lang_tgt: language) (sim_terminal: forall (st_src:(Language.state lang_src)) (st_tgt:(Language.state lang_tgt)), Prop) (p0: Perms.t) (d0: Flags.t) (st_src0: SeqState.t lang_src) (st_tgt0: SeqState.t lang_tgt): Prop := | deferred_le_sf_ctx_intro d1 (LESF: Flags.le d0 (Flags.join d1 (st_src0.(SeqState.memory).(SeqMemory.flags)))) (SIM: @sim_seq _ _ sim_terminal p0 d1 st_src0 st_tgt0). Lemma deferred_le_sf_ctx_mon: monotone7 deferred_le_sf_ctx. Proof. ii. inv IN. econs 1; eauto. Qed. Hint Resolve deferred_le_sf_ctx_mon: paco. Lemma deferred_le_sf_ctx_wrespectful: wrespectful7 _sim_seq deferred_le_sf_ctx. Proof. econs; eauto with paco. ii. inv PR. dup SIM. apply GF in SIM. inv SIM. 2:{ econs 2. unfold sim_seq_failure_case in *. i. hexploit FAILURE; clear FAILURE; eauto. } econs 1. 4:{ unfold sim_seq_partial_case in PARTIAL. ii. hexploit PARTIAL; clear PARTIAL; eauto. i; des. - esplits; eauto. hexploit partial_step_flags; eauto. i. ss. left. etrans. eapply Flags.join_mon_l. eapply LESF. etrans. rewrite <- flags_join_assoc. match goal with | [|- _ (_ _ (Flags.join ?a ?b)) _] => replace (Flags.join a b) with (Flags.join b a) end. 2:{ apply flags_join_comm. } rewrite flags_join_assoc. eapply Flags.join_mon_l. eapply FLAGS. rewrite flags_join_comm. apply Flags.join_spec; auto. rewrite flags_join_comm. auto. refl. - esplits; eauto. } { clear NASTEP ATSTEP PARTIAL. unfold sim_seq_terminal_case in *. i. hexploit TERMINAL; clear TERMINAL; eauto. i; des. eexists. splits; eauto. etrans. eapply Flags.join_mon_l. eapply LESF. rewrite <- flags_join_assoc. match goal with | [|- _ (_ _ (Flags.join ?a ?b)) _] => replace (Flags.join a b) with (Flags.join b a) end. 2:{ apply flags_join_comm. } rewrite flags_join_assoc. etrans. eapply Flags.join_mon_l. eapply FLAG. hexploit na_steps_flags; eauto. i. rewrite flags_join_comm. hexploit Flags.join_spec. eapply H. refl. i; auto. } { clear TERMINAL ATSTEP PARTIAL. unfold sim_seq_na_step_case in *. i. hexploit NASTEP; clear NASTEP; eauto. i; des. do 2 eexists. splits; eauto. eapply rclo7_clo_base. econs; eauto. hexploit opt_na_step_flags_events; eauto. i. 
hexploit na_steps_flags; eauto. i. etrans. eapply LESF. apply Flags.join_mon_r. etrans. eapply H0. auto. } { clear TERMINAL NASTEP PARTIAL. unfold sim_seq_at_step_case in *. i. hexploit ATSTEP; clear ATSTEP; eauto. i; des. do 3 eexists. splits; eauto. i. hexploit SIM; clear SIM; eauto. i; des. do 2 eexists. eexists. esplits; eauto. 2:{ eapply rclo7_clo_base. econs. refl. eauto. } ss. eapply SeqEvent.input_match_mon. 3: refl. { eapply SeqEvent.step_input_match. eapply STEP_SRC. eapply MATCH. } etrans. eapply LESF. apply Flags.join_mon_r. eapply na_steps_flags; eauto. } Qed. Lemma deferred_le_sf_ctx_spec: deferred_le_sf_ctx <8= gupaco7 _sim_seq (cpn7 _sim_seq). Proof. i. eapply wrespect7_uclo; eauto with paco. eapply deferred_le_sf_ctx_wrespectful. Qed. Lemma sim_seq_upto_deferred g p d0 d1 lang_src lang_tgt src tgt sim_terminal (LE: Flags.le d0 d1) (SIM: gupaco7 _sim_seq (cpn7 _sim_seq) g lang_src lang_tgt sim_terminal p d1 src tgt) : gupaco7 _sim_seq (cpn7 _sim_seq) g lang_src lang_tgt sim_terminal p d0 src tgt. Proof. guclo deferred_le_sf_ctx_spec. econs; eauto. etrans; eauto. apply Flags.join_ge_l. Qed. Variant seqevent_in_access_le (i0 i1: SeqEvent.input) : Prop := | in_access_none (IN0: i0.(SeqEvent.in_access) = None) (IN1: i1.(SeqEvent.in_access) = None) | in_access_some l v0 f0 v1 f1 vn (VAL: Const.le v0 v1) (FLAG: Flag.le f0 f1) (IN0: i0.(SeqEvent.in_access) = Some (l, v0, f0, vn)) (IN1: i1.(SeqEvent.in_access) = Some (l, v1, f1, vn)) . Variant seqevent_in_acquire_le (i0 i1: SeqEvent.input) : Prop := | in_acquire_none (IN0: i0.(SeqEvent.in_acquire) = None) (IN1: i1.(SeqEvent.in_acquire) = None) | in_acquire_some f0 f1 (FLAG: Flags.le f0 f1) (IN0: i0.(SeqEvent.in_acquire) = Some f0) (IN1: i1.(SeqEvent.in_acquire) = Some f1) . Variant seqevent_in_release_le (i0 i1: SeqEvent.input) : Prop := | in_release_none (IN0: i0.(SeqEvent.in_release) = None) (IN1: i1.(SeqEvent.in_release) = None) | in_release_some v0 f0 v1 f1 (VAL: ValueMap.le v0 v1) (FLAG: Flags.le f0 f1) (IN0: i0.(SeqEvent.in_release) = Some (v0, f0)) (IN1: i1.(SeqEvent.in_release) = Some (v1, f1)) . Definition seqevent_input_le (i0 i1: SeqEvent.input) := (<<LEINACC: seqevent_in_access_le i0 i1>>) /\ (<<LEINACQ: seqevent_in_acquire_le i0 i1>>) /\ (<<LEINREL: seqevent_in_release_le i0 i1>>). Lemma input_le_same_oracle_input i0 i1 i2 (LE: seqevent_input_le i1 i2) (INPUT: Oracle.input_le i0 (SeqEvent.get_oracle_input i1)) : Oracle.input_le i0 (SeqEvent.get_oracle_input i2). Proof. destruct i0, i1, i2. unfold seqevent_input_le in LE. unfold Oracle.input_le in *. des. ss. splits. 2:{ destruct in_acquire0, in_acquire1, in_acquire; ss. inv LEINACQ; ss. inv LEINACQ; ss. } 2:{ destruct in_release0, in_release1, in_release; ss. inv LEINREL; ss. inv LEINREL; ss. } clear ACQUIRE RELEASE LEINACQ LEINREL. inv LEINACC; ss; subst; ss. destruct in_access; ss. unfold Oracle.in_access_le in *. des_ifs. des; clarify. splits; auto. - etrans; eauto. - etrans; eauto. Qed. Lemma input_le_wf ev i1 i2 (LE: seqevent_input_le i1 i2) (INPUT: SeqEvent.wf_input ev i1) : SeqEvent.wf_input ev i2. Proof. destruct i1, i2. unfold seqevent_input_le in LE. unfold SeqEvent.wf_input in *. des. ss. splits. 2:{ destruct in_acquire0, in_acquire, (is_acquire ev); ss. inv LEINACQ; ss. inv LEINACQ; ss. } 2:{ destruct in_release0, in_release, (is_release ev); ss. inv LEINREL; ss. inv LEINREL; ss. } clear ACQUIRE ACQUIRE0 RELEASE RELEASE0 LEINACQ LEINREL. inv LEINACC; ss; subst; ss. i. split; i. - des. clarify. eapply UPDATE; eauto. - hexploit UPDATE; clear UPDATE. i; des. 
clear H0. hexploit H1; clear H1; eauto. i; des. inv H0. eauto. Qed. Lemma input_le_written_le i1 i2 (LEIN : seqevent_input_le i1 i2) : Flags.le (SeqEvent.written i1) (SeqEvent.written i2). Proof. destruct i1, i2. unfold seqevent_input_le in LEIN. des; ss. unfold SeqEvent.written. ss. clear LEINACQ. inv LEINACC; ss; subst; ss. - rewrite ! flags_join_bot_l. inv LEINREL; ss; subst ;ss. - inv LEINREL; ss; subst ;ss. + des_ifs. refl. + des_ifs. * apply Flags.join_mon_r. auto. * rewrite ! flags_join_bot_l. etrans. eauto. apply Flags.join_ge_r. Qed. Lemma input_le_match x1 d1 i1 i2 i_tgt (MATCH : SeqEvent.input_match x1 d1 i1 i_tgt) (INLE : seqevent_input_le i1 i2) : SeqEvent.input_match x1 d1 i2 i_tgt. Proof. destruct i1, i2. unfold seqevent_input_le in INLE. des; ss. inv MATCH. ss. inv LEINACC; ss. { inv LEINACQ; ss. { inv LEINREL; ss; clarify. - econs; ss; eauto. - econs; ss; eauto. inv RELEASE. econs; eauto. + i. etrans; eauto. + etrans; eauto. apply Flags.join_mon_l; auto. } { inv LEINREL; ss; clarify. - econs; ss; eauto. inv ACQUIRE; ss. econs; eauto. etrans; eauto. - inv RELEASE; inv ACQUIRE. ss. econs; ss; eauto. + rewrite <- H3. econs; eauto. etrans; eauto. + rewrite <- H4. econs; eauto. * i. etrans; eauto. * etrans; eauto. apply Flags.join_mon_l; auto. } } { inv LEINACQ; ss. { inv ACCESS; ss. inv LEINREL; ss; subst. - econs; ss; eauto. rewrite <- H6. econs; eauto. etrans; eauto. etrans; eauto. - inv RELEASE; ss. econs; ss; eauto. + rewrite <- H6. econs; eauto. etrans; eauto. etrans; eauto. + rewrite <- H4. econs; eauto. * i. etrans; eauto. * etrans; eauto. apply Flags.join_mon_l; auto. } { inv ACCESS; ss. inv ACQUIRE; ss. inv LEINREL; ss; subst. - econs; ss; eauto. + rewrite <- H6. econs; eauto. etrans; eauto. etrans; eauto. + rewrite <- H3. econs; eauto. etrans; eauto. - inv RELEASE; ss. econs; ss; eauto. + rewrite <- H6. econs; eauto. etrans; eauto. etrans; eauto. + rewrite <- H3. econs; eauto. etrans; eauto. + rewrite <- H5. econs; eauto. * i. etrans; eauto. * etrans; eauto. apply Flags.join_mon_l; auto. } } Qed. Definition trace_le0 (t0 t1: (ProgramEvent.t * SeqEvent.input * Oracle.output)) : Prop := let '(pe0, i0, o0) := t0 in let '(pe1, i1, o1) := t1 in (<<LEPE: pe0 = pe1>>) /\ (<<LEIN: seqevent_input_le i0 i1>>) /\ (o0 = o1). Definition trace_le (tr0 tr1: list (ProgramEvent.t * SeqEvent.input * Oracle.output)) := List.Forall2 trace_le0 tr0 tr1. Lemma writing_trace_mon_on_trace tr0 tr w (TRACE: SeqThread.writing_trace tr w) (LE: trace_le tr tr0) : exists w0, (SeqThread.writing_trace tr0 w0) /\ (Flags.le w w0). Proof. depgen tr0. induction TRACE; i; ss. { inv LE. exists Flags.bot. split. econs. refl. } inv LE. rename l' into tr2. destruct y as [y o2]. destruct y as [e2 i2]. eapply IHTRACE in H3. des. clear IHTRACE. unfold trace_le0 in *. des. clarify. eexists. split. econs; eauto. etrans. eapply Flags.join_mon_r. eauto. apply Flags.join_mon_l. apply input_le_written_le; auto. Qed. Definition mem_le (m0 m1: SeqMemory.t) : Prop := (<<LEF: Flags.le m0.(SeqMemory.flags) m1.(SeqMemory.flags)>>) /\ (<<LEV: ValueMap.le m0.(SeqMemory.value_map) m1.(SeqMemory.value_map)>>). Ltac unfold_many2 := unfold SeqMemory.write in *; unfold_many; ss. 
Lemma thread_na_step_le lang_src memory1 memory0 state o p th1 (LE: mem_le memory1 memory0) (STEPS: SeqThread.na_step (SeqState.na_step (lang:=lang_src)) MachineEvent.silent {| SeqThread.state := {| SeqState.state := state; SeqState.memory := memory1 |}; SeqThread.perm := p; SeqThread.oracle := o |} th1) : exists th0, (<<STEPS: SeqThread.na_step (SeqState.na_step (lang:=lang_src)) MachineEvent.silent {| SeqThread.state := {| SeqState.state := state; SeqState.memory := memory0 |}; SeqThread.perm := p; SeqThread.oracle := o |} th0>>) /\ (<<STATE: (th1.(SeqThread.state).(SeqState.state)) = (th0.(SeqThread.state).(SeqState.state))>>) /\ (<<PERM: (th1.(SeqThread.perm)) = (th0.(SeqThread.perm))>>) /\ (<<ORACLE: (th1.(SeqThread.oracle)) = (th0.(SeqThread.oracle))>>) /\ (<<MEMLE: mem_le (th1.(SeqThread.state).(SeqState.memory)) (th0.(SeqThread.state).(SeqState.memory))>>). Proof. unfold mem_le in LE. des. inv STEPS. inv STEP. inv LOCAL; ss. - esplits. econs. econs. eauto. econs. all: ss. - esplits. econs. econs. eauto. econs; auto. i. destruct (p loc); ss. etrans. eapply VAL; eauto. eapply LEV. all: ss. - esplits. econs. econs. eauto. econs; auto. all: ss. econs; eauto. + ii. unfold_many2. des_ifs. + ii. unfold_many2. des_ifs. refl. Qed. Lemma state_na_step_le lang_src st_src1 x2 x0 ev (MEMLE: mem_le (SeqState.memory st_src1) (SeqState.memory x2)) (STATE: SeqState.state st_src1 = SeqState.state x2) (st_src0: SeqState.t lang_src) (STEP : SeqState.na_step x0 ev st_src1 st_src0) : exists st_src2 : SeqState.t lang_src, (<<STEPS: SeqState.na_step x0 ev x2 st_src2>>) /\ (<<STATE: SeqState.state st_src2 = SeqState.state st_src0>>) /\ (<<MEMLE: mem_le (SeqState.memory st_src0) (SeqState.memory st_src2)>>). Proof. destruct st_src1, x2; ss. inv STEP. inv LOCAL; ss; subst. - esplits. { econs; eauto. econs. } all: ss. - esplits. { econs; eauto. econs; eauto. i. destruct (x0 loc); ss. etrans. eapply VAL; eauto. unfold mem_le in MEMLE; des. apply LEV. } all: ss. - destruct (x0 loc) eqn:PERMCASE; ss. + esplits. { econs; eauto. econs; eauto. rewrite PERMCASE. ss. } all: ss. unfold mem_le in *. des. unfold_many2. split; ii. * des_ifs. * des_ifs. refl. + esplits. { econs; eauto. econs; eauto. rewrite PERMCASE. ss. } all: ss. unfold mem_le in *. des. unfold_many2. split; ii. * des_ifs. * des_ifs. refl. - esplits. { econs; eauto. econs. } all: ss. - esplits. { econs; eauto. econs. auto. } all: ss. Qed. Lemma state_na_steps_le lang_src st_src1 x2 x0 (MEMLE: mem_le (SeqState.memory st_src1) (SeqState.memory x2)) (STATE: SeqState.state st_src1 = SeqState.state x2) (st_src0: SeqState.t lang_src) (STEPS : rtc (SeqState.na_step x0 MachineEvent.silent) st_src1 st_src0) : exists st_src2 : SeqState.t lang_src, (<<STEPS: rtc (SeqState.na_step x0 MachineEvent.silent) x2 st_src2>>) /\ (<<STATE: SeqState.state st_src2 = SeqState.state st_src0>>) /\ (<<MEMLE: mem_le (SeqState.memory st_src0) (SeqState.memory st_src2)>>). Proof. depgen x2. induction STEPS; i; ss. { esplits. refl. all: auto. } destruct x, x2; ss. inv H. inv LOCAL; ss; subst. - specialize IHSTEPS with {| SeqState.state := st1; SeqState.memory := memory0 |}. hexploit IHSTEPS; clear IHSTEPS; ss. i; des. esplits. { econs 2. econs. eauto. econs. eapply STEPS0. } all: auto. - specialize IHSTEPS with {| SeqState.state := st1; SeqState.memory := memory0 |}. hexploit IHSTEPS; clear IHSTEPS; ss. i; des. esplits. { econs 2. econs. eauto. econs; auto. i. destruct (x0 loc); ss. { unfold mem_le in MEMLE. des. etrans. eapply VAL; auto. apply LEV. } eapply STEPS0. } all: auto. 
- destruct (x0 loc) eqn:PERMCASE; ss. specialize IHSTEPS with {| SeqState.state := st1; SeqState.memory := SeqMemory.write loc val memory0 |}. hexploit IHSTEPS; clear IHSTEPS; ss. { unfold mem_le in *. des. unfold_many2. split; ss. - ii. des_ifs. - ii. des_ifs. refl. } i; des. esplits. { econs 2. econs. eauto. econs; auto. rewrite PERMCASE. ss. eapply STEPS0. } all: auto. Qed. Lemma seqevent_step_le memory1 memory0 p i1 oo p1 m1 (LE: mem_le memory1 memory0) (STEP: SeqEvent.step i1 oo p memory1 p1 m1) : exists i0 m0, (<<STEPS: SeqEvent.step i0 oo p memory0 p1 m0>>) /\ (<<INLE: seqevent_input_le i1 i0>>) /\ (<<MEMLE: mem_le m1 m0>>). Proof. unfold mem_le in LE. des. destruct (SeqEvent.in_access i1) eqn:IACC, (SeqEvent.in_acquire i1) eqn:IACQ, (SeqEvent.in_release i1) eqn:IREL. { destruct p0 as [p0 vn]. destruct p0 as [p0 f]. destruct p0 as [l v]. destruct p2 as [vm fs]. inv STEP. rewrite IACC in UPD. rewrite IACQ in ACQ. rewrite IREL in REL. inv UPD. inv REL. inv ACQ. destruct oo; ss; clarify. inv MEM. inv MEM0. inv MEM1. ss. destruct i1; ss; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. - econs; ss. + econs; eauto. econs; eauto. + econs; eauto. econs; eauto. + econs; eauto. econs; eauto. - ss. unfold seqevent_input_le. splits; ss. + econs 2. 3,4: ss. 1,2: eauto. + econs 2. 2,3: ss. ii. unfold_flags. des_ifs. + econs 2. 3,4: ss. * ii. unfold_many2. des_ifs. refl. refl. * ii. unfold_flags. des_ifs. - ss. econs; ss. ii. unfold_many2. des_ifs. refl. refl. } { destruct p0 as [p0 vn]. destruct p0 as [p0 f]. destruct p0 as [l v]. inv STEP. rewrite IACC in UPD. rewrite IACQ in ACQ. rewrite IREL in REL. inv UPD. inv REL. inv ACQ. destruct oo; ss; clarify. inv MEM. inv MEM0. ss. destruct i1; ss; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. - econs; ss. + econs; eauto. econs; eauto. + econs; eauto. econs; eauto. + econs; eauto. - ss. unfold seqevent_input_le. splits; ss. + econs 2. 3,4: ss. 1,2: eauto. + econs 2. 2,3: ss. ii. unfold_flags. des_ifs. + econs 1. all: ss. - ss. econs; ss. + ii. unfold_flags. des_ifs. + ii. unfold_many2. des_ifs. refl. refl. } { destruct p0 as [p0 vn]. destruct p0 as [p0 f]. destruct p0 as [l v]. destruct p2 as [vm fs]. inv STEP. rewrite IACC in UPD. rewrite IACQ in ACQ. rewrite IREL in REL. inv UPD. inv REL. inv ACQ. destruct oo; ss; clarify. inv MEM. inv MEM0. ss. destruct i1; ss; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. - econs; ss. + econs; eauto. econs; eauto. + econs; eauto. + econs; eauto. econs; eauto. - ss. unfold seqevent_input_le. splits; ss. + econs 2. 3,4: ss. 1,2: eauto. + econs 1. all: ss. + econs 2. 3,4: ss. * ii. unfold_many2. des_ifs. refl. * ii. unfold_flags. des_ifs. - ss. econs; ss. ii. unfold_many2. des_ifs. refl. } { destruct p0 as [p0 vn]. destruct p0 as [p0 f]. destruct p0 as [l v]. inv STEP. rewrite IACC in UPD. rewrite IACQ in ACQ. rewrite IREL in REL. inv UPD. inv REL. inv ACQ. destruct oo; ss; clarify. inv MEM. ss. destruct i1; ss; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. - econs; ss. + econs; eauto. econs; eauto. + econs; eauto. + econs; eauto. - ss. unfold seqevent_input_le. splits; ss. + econs 2. 3,4: ss. 1,2: eauto. + econs 1. all: ss. + econs 1. all: ss. - ss. econs; ss. + ii. unfold_flags. des_ifs. + ii. unfold_many2. des_ifs. refl. } { destruct p0 as [vm fs]. inv STEP. rewrite IACC in UPD. rewrite IACQ in ACQ. rewrite IREL in REL. inv UPD. inv REL. inv ACQ. destruct oo; ss; clarify. inv MEM. inv MEM0. ss. destruct i1; ss; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. - econs; ss. 
+ econs; eauto. + econs; eauto. econs; eauto. + econs; eauto. econs; eauto. - ss. unfold seqevent_input_le. splits; ss. + econs 1. all: ss. + econs 2. 2,3: ss. ii. unfold_flags. des_ifs. + econs 2. 3,4: ss. * ii. unfold_many2. des_ifs. refl. * ii. unfold_flags. des_ifs. - ss. econs; ss. ii. unfold_many2. des_ifs. refl. } { inv STEP. rewrite IACC in UPD. rewrite IACQ in ACQ. rewrite IREL in REL. inv UPD. inv REL. inv ACQ. destruct oo; ss; clarify. inv MEM. ss. destruct i1; ss; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. - econs; ss. + econs; eauto. + econs; eauto. econs; eauto. + econs; eauto. - ss. unfold seqevent_input_le. splits; ss. + econs 1. all: ss. + econs 2. 2,3: ss. ii. unfold_flags. des_ifs. + econs 1. all: ss. - ss. econs; ss. ii. unfold_many2. des_ifs. refl. } { destruct p0 as [vm fs]. inv STEP. rewrite IACC in UPD. rewrite IACQ in ACQ. rewrite IREL in REL. inv UPD. inv REL. inv ACQ. destruct oo; ss; clarify. inv MEM. ss. destruct i1; ss; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. - econs; ss. + econs; eauto. + econs; eauto. + econs; eauto. econs; eauto. - ss. unfold seqevent_input_le. splits; ss. + econs 1. all: ss. + econs 1. all: ss. + econs 2. 3,4: ss. all: auto. - ss. } { inv STEP. rewrite IACC in UPD. rewrite IACQ in ACQ. rewrite IREL in REL. inv UPD. inv REL. inv ACQ. destruct oo; ss; clarify. ss. destruct i1; ss; clarify. eexists (SeqEvent.mk_input _ _ _). esplits. - econs; ss. + econs; eauto. + econs; eauto. + econs; eauto. - ss. unfold seqevent_input_le. splits; ss. + econs 1. all: ss. + econs 1. all: ss. + econs 1. all: ss. - ss. } Qed. Lemma thread_at_step_le lang_src memory1 memory0 state p o ev i1 oo th1 (* (WF: Oracle.wf o) *) (LE: mem_le memory1 memory0) (STEPS: SeqThread.at_step (lang:=lang_src) ev i1 oo {| SeqThread.state := {| SeqState.state := state; SeqState.memory := memory1 |}; SeqThread.perm := p; SeqThread.oracle := o |} th1) : exists i0 th0, (<<STEPS: SeqThread.at_step (lang:=lang_src) ev i0 oo {| SeqThread.state := {| SeqState.state := state; SeqState.memory := memory0 |}; SeqThread.perm := p; SeqThread.oracle := o |} th0>>) /\ (<<INLE: seqevent_input_le i1 i0>>) /\ (<<STATE: (th1.(SeqThread.state).(SeqState.state)) = (th0.(SeqThread.state).(SeqState.state))>>) /\ (<<PERM: (th1.(SeqThread.perm)) = (th0.(SeqThread.perm))>>) /\ (<<ORACLE: (th1.(SeqThread.oracle)) = (th0.(SeqThread.oracle))>>) /\ (<<MEMLE: mem_le (th1.(SeqThread.state).(SeqState.memory)) (th0.(SeqThread.state).(SeqState.memory))>>). Proof. dup LE. rename LE0 into MEMLE. unfold mem_le in LE. des. inv STEPS. destruct (is_accessing ev) eqn:ACC, (is_acquire ev) eqn:ACQ, (is_release ev) eqn:REL. { destruct p0 as [l v]. hexploit red_acq_rel_full; eauto. i; des. ss. hexploit seqevent_step_le; eauto. i; des. esplits; eauto. { econs; eauto. eapply input_le_same_oracle_input; eauto. eapply input_le_wf; eauto. } all: ss. } { destruct p0 as [l v]. hexploit red_acq_full; eauto. i; des. ss. hexploit seqevent_step_le; eauto. i; des. esplits; eauto. { econs; eauto. eapply input_le_same_oracle_input; eauto. eapply input_le_wf; eauto. } all: ss. } { destruct p0 as [l v]. hexploit red_rel_full; eauto. i; des. ss. hexploit seqevent_step_le; eauto. i; des. esplits; eauto. { econs; eauto. eapply input_le_same_oracle_input; eauto. eapply input_le_wf; eauto. } all: ss. } { destruct p0 as [l v]. hexploit red_rlx_full; eauto. i; des. ss. hexploit seqevent_step_le; eauto. i; des. esplits; eauto. { econs; eauto. eapply input_le_same_oracle_input; eauto. eapply input_le_wf; eauto. } all: ss. 
} { hexploit red_acq_rel2_full; eauto. i; des. ss. hexploit seqevent_step_le; eauto. i; des. esplits; eauto. { econs; eauto. eapply input_le_same_oracle_input; eauto. eapply input_le_wf; eauto. } all: ss. } { hexploit red_acq2_full; eauto. i; des. ss. hexploit seqevent_step_le; eauto. i; des. esplits; eauto. { econs; eauto. eapply input_le_same_oracle_input; eauto. eapply input_le_wf; eauto. } all: ss. } { hexploit red_rel2_full; eauto. i; des. ss. hexploit seqevent_step_le; eauto. i; des. esplits; eauto. { econs; eauto. eapply input_le_same_oracle_input; eauto. eapply input_le_wf; eauto. } all: ss. } { hexploit red_rlx2_full; eauto. i; des. ss. hexploit seqevent_step_le; eauto. i; des. esplits; eauto. { econs; eauto. eapply input_le_same_oracle_input; eauto. eapply input_le_wf; eauto. } all: ss. } Qed. Lemma thread_steps_le lang_src tr1 th1 th2 th0 (WF: Oracle.wf th1.(SeqThread.oracle)) (STATE: th1.(SeqThread.state).(SeqState.state) = th0.(SeqThread.state).(SeqState.state)) (MEMLE: mem_le th1.(SeqThread.state).(SeqState.memory) th0.(SeqThread.state).(SeqState.memory)) (PERM: th1.(SeqThread.perm) = th0.(SeqThread.perm)) (ORACLE: th1.(SeqThread.oracle) = th0.(SeqThread.oracle)) (STEPS: SeqThread.steps (SeqState.na_step (lang:=lang_src)) tr1 th1 th2) : exists tr0 th3, (<<STEPS: SeqThread.steps (SeqState.na_step (lang:=lang_src)) tr0 th0 th3>>) /\ (<<STATE: (th3.(SeqThread.state).(SeqState.state)) = (th2.(SeqThread.state).(SeqState.state))>>) /\ (<<PERM: (th3.(SeqThread.perm)) = (th2.(SeqThread.perm))>>) /\ (<<ORACLE: (th3.(SeqThread.oracle)) = (th2.(SeqThread.oracle))>>) /\ (<<TRACE: trace_le tr1 tr0>>) /\ (<<MEMLE: mem_le th2.(SeqThread.state).(SeqState.memory) th3.(SeqThread.state).(SeqState.memory)>>). Proof. depgen th0. depgen WF. induction STEPS; i. - esplits. econs 1. all: ss. - destruct th0, th3; ss; clarify. destruct state, state0; ss; clarify. hexploit thread_na_step_le; eauto. i; des. hexploit IHSTEPS. { inv STEP; ss. } eapply STATE. all: auto. i; des. esplits; eauto. econs 2. eapply STEPS0. eauto. - destruct th0, th3; ss; clarify. destruct state, state0; ss; clarify. hexploit thread_at_step_le; eauto. i; des. hexploit IHSTEPS. { inv STEP; ss. clarify. punfold WF. 2:eapply Oracle.wf_mon. inv WF. hexploit WF0; eauto. i; des. pclearbot. auto. } eapply STATE. all: auto. i; des. esplits; eauto. econs 3. eapply STEPS0. eauto. econs 2; eauto. ss. Qed. Lemma seqthread_failure_diff_mem lang_src th th3 (FAILURE : SeqThread.failure (SeqState.na_step (lang:=lang_src)) th) (STATE : SeqState.state (SeqThread.state th3) = SeqState.state (SeqThread.state th)) (PERM : SeqThread.perm th3 = SeqThread.perm th) (* (ORACLE : SeqThread.oracle th3 = SeqThread.oracle th) *) (* (MEMLE : mem_le (SeqState.memory (SeqThread.state th)) (SeqState.memory (SeqThread.state th3))) *) : SeqThread.failure (SeqState.na_step (lang:=lang_src)) th3. Proof. inv FAILURE. inv H. inv STEP. ss. destruct th3. ss. destruct state. ss. clarify. inv LOCAL. - econs. econs. econs; eauto. econs; eauto. - econs. econs. econs; eauto. econs; eauto. - econs. econs. econs; eauto. econs; eauto. Qed. 
Variant mem_le_ctx (sim_seq: forall (lang_src: language) (lang_tgt: language) (sim_terminal: forall (st_src:(Language.state lang_src)) (st_tgt:(Language.state lang_tgt)), Prop) (p0: Perms.t) (d0: Flags.t) (st_src0: SeqState.t lang_src) (st_tgt0: SeqState.t lang_tgt), Prop) (lang_src: language) (lang_tgt: language) (sim_terminal: forall (st_src:(Language.state lang_src)) (st_tgt:(Language.state lang_tgt)), Prop) (p0: Perms.t) (d0: Flags.t) (st_src0: SeqState.t lang_src) (st_tgt0: SeqState.t lang_tgt): Prop := | flags_le_ctx_intro st_src1 (MEMLE: mem_le st_src1.(SeqState.memory) st_src0.(SeqState.memory)) (STATE: st_src1.(SeqState.state) = st_src0.(SeqState.state)) (SIM: sim_seq _ _ sim_terminal p0 d0 st_src1 st_tgt0). Lemma mem_le_ctx_mon: monotone7 mem_le_ctx. Proof. ii. inv IN. econs 1; eauto. Qed. Hint Resolve mem_le_ctx_mon: paco. Lemma mem_le_ctx_wrespectful: wrespectful7 _sim_seq mem_le_ctx. Proof. econs; eauto with paco. ii. inv PR. dup SIM. apply GF in SIM. inv SIM. 2:{ econs 2. unfold sim_seq_failure_case in *. i. hexploit FAILURE; clear FAILURE; eauto. i; des. hexploit thread_steps_le. 6: eauto. all: ss. instantiate (1:= {| SeqThread.state := x5; SeqThread.perm := x3; SeqThread.oracle := o |}). all: ss. i; des. hexploit writing_trace_mon_on_trace. eauto. eauto. i; des. esplits. eapply STEPS0. eauto. eapply seqthread_failure_diff_mem; eauto. } econs 1. 4:{ unfold sim_seq_partial_case in PARTIAL. ii. hexploit PARTIAL; clear PARTIAL; eauto. i; des. - hexploit thread_steps_le. 6: eauto. all: ss. instantiate (1:= {| SeqThread.state := x5; SeqThread.perm := x3; SeqThread.oracle := o |}). all: ss. i; des. hexploit writing_trace_mon_on_trace. eauto. eauto. i; des. esplits; eauto. left. depgen FLAGS. depgen MEMLE0. depgen H0. clear; i. etrans; eauto. etrans. eapply Flags.join_mon_l. eauto. apply Flags.join_mon_r. unfold mem_le in MEMLE0. des. auto. - hexploit thread_steps_le. 6: eauto. all: ss. instantiate (1:= {| SeqThread.state := x5; SeqThread.perm := x3; SeqThread.oracle := o |}). all: ss. i; des. hexploit writing_trace_mon_on_trace. eauto. eauto. i; des. esplits; eauto. right. eapply seqthread_failure_diff_mem; eauto. } { clear NASTEP ATSTEP PARTIAL. unfold sim_seq_terminal_case in *. i. hexploit TERMINAL; clear TERMINAL; eauto. i; des. hexploit state_na_steps_le; eauto. i; des. unfold mem_le in *; des. exists st_src2. splits; auto. rewrite STATE0; auto. rewrite STATE0; auto. etrans; eauto. etrans; eauto. } { clear TERMINAL ATSTEP PARTIAL. unfold sim_seq_na_step_case in *. i. hexploit NASTEP; clear NASTEP; eauto. i; des. hexploit state_na_steps_le; eauto. i; des. unfold mem_le in *; des. inv STEP. - hexploit state_na_step_le. 3: eapply STEP0. instantiate (1:=st_src3). all: ss. i; des. exists st_src3, st_src4. splits; auto. econs 1; auto. eapply rclo7_clo_base. econs; eauto. - esplits. eapply STEPS0. econs 2. eapply rclo7_clo_base. econs. 3: eauto. all: auto. econs; eauto. } { clear TERMINAL NASTEP PARTIAL. unfold sim_seq_at_step_case in *. i. hexploit ATSTEP; clear ATSTEP; eauto. i; des. hexploit state_na_steps_le; eauto. i; des. esplits; eauto. rewrite STATE0; eauto. i. hexploit SIM; clear SIM; eauto. i; des. hexploit seqevent_step_le. 2: eapply STEP_SRC. eapply MEMLE0. i; des. hexploit input_le_match; eauto. i; des. esplits. eapply STEPS1. { eauto. } { eapply input_le_wf; eauto. } apply rclo7_clo_base. econs. 3: eauto. all: ss. } Qed. Lemma mem_le_ctx_spec: mem_le_ctx <8= gupaco7 _sim_seq (cpn7 _sim_seq). Proof. i. eapply wrespect7_uclo; eauto with paco. 
eapply mem_le_ctx_wrespectful. Qed. Lemma sim_seq_upto_mem lang_src lang_tgt sim_terminal g p d st_src0 st_src1 tgt (MEMLE: mem_le st_src1.(SeqState.memory) st_src0.(SeqState.memory)) (STATE: st_src1.(SeqState.state) = st_src0.(SeqState.state)) (SIM: gupaco7 _sim_seq (cpn7 _sim_seq) g lang_src lang_tgt sim_terminal p d st_src1 tgt) : gupaco7 _sim_seq (cpn7 _sim_seq) g lang_src lang_tgt sim_terminal p d st_src0 tgt. Proof. guclo mem_le_ctx_spec. econs; eauto. Qed. End UPTO.
import Numeric.LinearAlgebra
import Numeric.LinearAlgebra.LAPACK

-- 3x3 test matrix (row-major order)
m :: Matrix Double
m = (3><3) [ 7.589183,  1.703609, -4.477162,
            -4.597851,  9.434889, -6.543450,
             0.4588202, -6.115153,  1.331191]

-- 3x1 right-hand-side column vector
v :: Matrix Double
v = (3><1) [1.745005, -4.448092, -4.160842]
variables p q r s: Prop

-- commutativity of ∧ and ∨
theorem and_switch (h: p ∧ q): q ∧ p :=
  and.intro h.right h.left

example: p ∧ q ↔ q ∧ p :=
  iff.intro (and_switch p q) (and_switch q p)

theorem or_switch (h: p ∨ q): q ∨ p :=
  or.elim h
    (assume hp, or.inr hp)
    (assume hq, or.inl hq)

example: p ∨ q ↔ q ∨ p :=
  iff.intro (or_switch p q) (or_switch q p)

-- distributivity of ∧ over ∨
example: p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) :=
  iff.intro
    (assume h: p ∧ (q ∨ r),
      have hp: p, from h.left,
      or.elim h.right
        (assume hq: q,
          have hpq: p ∧ q, from and.intro hp hq,
          or.inl hpq)
        (assume hr: r,
          have hpr: p ∧ r, from and.intro hp hr,
          or.inr hpr))
    (assume h: (p ∧ q) ∨ (p ∧ r),
      or.elim h
        (assume hpq: p ∧ q,
          have hp: p, from hpq.left,
          have hqr: q ∨ r, from or.inl hpq.right,
          and.intro hp hqr)
        (assume hpr: p ∧ r,
          have hp: p, from hpr.left,
          have hqr: q ∨ r, from or.inr hpr.right,
          and.intro hp hqr))

-- associativity of ∧
example: (p ∧ q) ∧ r ↔ p ∧ (q ∧ r) :=
  iff.intro
    (assume h: (p ∧ q) ∧ r,
      have hpq: p ∧ q, from h.left,
      have hp: p, from hpq.left,
      have hq: q, from hpq.right,
      have hr: r, from h.right,
      ⟨hp, ⟨hq, hr⟩⟩)
    (assume h: p ∧ (q ∧ r),
      have hp: p, from h.left,
      have hqr: q ∧ r, from h.right,
      have hq: q, from hqr.left,
      have hr: r, from hqr.right,
      ⟨⟨hp, hq⟩, hr⟩)
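-- A companion sketch (our addition, in the same Lean 3 term-proof style;
-- it assumes the `variables` declaration above is in scope): the dual law,
-- distributivity of ∨ over ∧, proved with the same case-analysis pattern.
example: p ∨ (q ∧ r) ↔ (p ∨ q) ∧ (p ∨ r) :=
  iff.intro
    (assume h: p ∨ (q ∧ r),
      or.elim h
        (assume hp: p, ⟨or.inl hp, or.inl hp⟩)
        (assume hqr: q ∧ r, ⟨or.inr hqr.left, or.inr hqr.right⟩))
    (assume h: (p ∨ q) ∧ (p ∨ r),
      or.elim h.left
        (assume hp: p, or.inl hp)
        (assume hq: q,
          or.elim h.right
            (assume hp: p, or.inl hp)
            (assume hr: r, or.inr ⟨hq, hr⟩)))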
from math import exp
import torch
from torch.autograd import Function, Variable
from torch.nn.modules.loss import _Loss
import numpy as np
import torch.nn.functional as F
from saveNet import *

class weighted_mse(_Loss):
    """Weighted mean squared error, normalized by the total number of elements."""
    def __init__(self):
        super(weighted_mse, self).__init__()

    def forward(self, input, output, weight):
        # element-wise weighted squared error, averaged over all elements
        return torch.sum(weight * (input - output) ** 2) / input.numel()

class weighted_mae(_Loss):
    """Weighted mean absolute error, normalized by the number of positive weights."""
    def __init__(self):
        super(weighted_mae, self).__init__()

    def forward(self, input, output, weight):
        # average only over the entries that actually carry weight
        tmp = weight[weight > 0]
        return torch.sum(weight * torch.abs(input - output)) / tmp.numel()
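# A minimal usage sketch (our illustration, not part of the original file):
# both losses take (input, output, weight) tensors of identical shape.
if __name__ == "__main__":
    pred = torch.randn(8, 1, 32, 32)
    target = torch.randn(8, 1, 32, 32)
    mask = (torch.rand(8, 1, 32, 32) > 0.5).float()  # 1 = valid, 0 = ignored
    print(weighted_mse()(pred, target, mask))  # averaged over all elements
    print(weighted_mae()(pred, target, mask))  # averaged over entries with mask > 0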
[STATEMENT] lemma REL_IS_ID_trigger: "R=Id \<Longrightarrow> REL_IS_ID R" [PROOF STATE] proof (prove) goal (1 subgoal): 1. R = Id \<Longrightarrow> REL_IS_ID R [PROOF STEP] by simp
(* Property from Productive Use of Failure in Inductive Proof, Andrew Ireland and Alan Bundy, JAR 1996. This Isabelle theory is produced using the TIP tool offered at the following website: https://github.com/tip-org/tools This file was originally provided as part of TIP benchmark at the following website: https://github.com/tip-org/benchmarks Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly to make it compatible with Isabelle2017. Some proofs were added by Yutaka Nagashima.*) theory TIP_prop_30 imports "../../Test_Base" begin datatype 'a list = nil2 | cons2 "'a" "'a list" fun x :: "'a list => 'a list => 'a list" where "x (nil2) z = z" | "x (cons2 z2 xs) z = cons2 z2 (x xs z)" fun rev :: "'a list => 'a list" where "rev (nil2) = nil2" | "rev (cons2 z xs) = x (rev xs) (cons2 z (nil2))" lemma app_nil: "x y nil2 = y" by (induct y, auto) lemma app_assoc: "x (x y z) w = x y (x z w)" by (induction y, auto) lemma rev_app: "rev (x y z) = x (rev z) (rev y)" apply(induction y, auto) apply(simp add: app_nil) using app_assoc apply(auto) done lemma revrev: "rev (rev y) = y" apply(induction y, auto) apply(simp add: rev_app) done theorem property0 : "((rev (x (rev y) (nil2))) = y)" apply(simp add: app_nil revrev) done end
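(* Annotation (our addition, not part of the original TIP benchmark): in
   conventional list notation, where the TIP function "x" is append (++),
   the lemmas above read
     app_nil:   ys ++ [] = ys
     app_assoc: (ys ++ zs) ++ ws = ys ++ (zs ++ ws)
     rev_app:   rev (ys ++ zs) = rev zs ++ rev ys
     revrev:    rev (rev ys) = ys
   and property0 states rev ((rev ys) ++ []) = ys, which follows directly
   from app_nil and revrev. *)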
(* * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause *) theory Crunch_Test_Trace imports Lib.Crunch_Instances_Trace Crunch_Test_Qualified_Trace Lib.Defs begin text \<open>Test cases for crunch\<close> definition "crunch_foo1 (x :: nat) \<equiv> do modify ((+) x); modify ((+) x) od" definition "crunch_foo2 \<equiv> do crunch_foo1 12; crunch_foo1 13 od" crunch_ignore (add: crunch_foo1) crunch gt: crunch_foo2 "\<lambda>x. x > y" (ignore: modify bind ignore_del: crunch_foo1) crunch_ignore (del: crunch_foo1) definition "crunch_always_true (x :: nat) \<equiv> \<lambda>y :: nat. True" lemma crunch_foo1_at_2: "True \<Longrightarrow> \<lbrace>crunch_always_true 3 and crunch_always_true 2\<rbrace> crunch_foo1 x \<lbrace>\<lambda>rv. crunch_always_true 2 and K True\<rbrace>" by (simp add: crunch_always_true_def, wp) lemma crunch_foo1_at_3[wp]: "\<lbrace>crunch_always_true 3\<rbrace> crunch_foo1 x \<lbrace>\<lambda>rv. crunch_always_true 3\<rbrace>" by (simp add: crunch_always_true_def, wp) lemma no_fail_crunch_foo1: "True \<Longrightarrow> no_fail (crunch_always_true 2 and crunch_always_true 3) (crunch_foo1 x)" apply (simp add:crunch_always_true_def crunch_foo1_def) apply (rule no_fail_pre) apply (wp, simp) done crunch (no_fail) no_fail: crunch_foo2 (ignore: modify bind wp: crunch_foo1_at_2[simplified]) crunch (valid) at_2: crunch_foo2 "crunch_always_true 2" (ignore: modify bind wp: crunch_foo1_at_2[simplified]) fun crunch_foo3 :: "nat => nat => 'a => (nat,unit) tmonad" where "crunch_foo3 0 x _ = crunch_foo1 x" | "crunch_foo3 (Suc n) x y = crunch_foo3 n x y" crunch gt2: crunch_foo3 "\<lambda>x. x > y" (ignore: modify bind) class foo_class = fixes stuff :: 'a begin fun crunch_foo4 :: "nat => nat => 'a => (nat,unit) tmonad" where "crunch_foo4 0 x _ = crunch_foo1 x" | "crunch_foo4 (Suc n) x y = crunch_foo4 n x y" definition "crunch_foo5 x (y::'a) \<equiv> crunch_foo1 x" end lemma crunch_foo4_alt: "crunch_foo4 n x y \<equiv> crunch_foo1 x" apply (induct n) apply simp+ done crunch gt3: crunch_foo4 "\<lambda>x. x > y" (ignore: modify bind) crunch (no_fail) no_fail2: crunch_foo4 (rule: crunch_foo4_alt ignore: modify bind) crunch gt3': crunch_foo4 "\<lambda>x. x > y" (rule: crunch_foo4_alt ignore: modify bind) crunch gt4: crunch_foo5 "\<lambda>x. x > y" (ignore: modify bind) (* Test cases for crunch in locales *) definition "crunch_foo6 \<equiv> return () >>= (\<lambda>_. return ())" locale test_locale = fixes fixed_return_unit :: "(unit, unit) tmonad" begin definition "crunch_foo7 \<equiv> return () >>= (\<lambda>_. return ())" (* crunch works on a global constant within a locale *) crunch test[wp]: crunch_foo6 P (ignore: bind) (* crunch works on a locale constant *) crunch test[wp]: crunch_foo7 P (ignore: bind) definition "crunch_foo8 \<equiv> fixed_return_unit >>= (\<lambda>_. fixed_return_unit)" definition "crunch_foo9 (x :: nat) \<equiv> do modify ((+) x); modify ((+) x) od" crunch test: crunch_foo9 "\<lambda>x. x > y" (ignore: bind) definition "crunch_foo10 (x :: nat) \<equiv> do modify ((+) x); modify ((+) x) od" (*crunch_def attribute overrides definition lookup *) lemma crunch_foo10_def2[crunch_def]: "crunch_foo10 = crunch_foo9" unfolding crunch_foo10_def[abs_def] crunch_foo9_def[abs_def] by simp crunch test[wp]: crunch_foo10 "\<lambda>x. x > y" (* crunch_ignore works within a locale *) crunch_ignore (add: bind) crunch test': crunch_foo9 "\<lambda>x. x > y" end interpretation test_locale "return ()" . 
(* interpretation promotes the wp attribute from the locale *) lemma "\<lbrace>Q\<rbrace> crunch_foo7 \<lbrace>\<lambda>_. Q\<rbrace>" by wp (* crunch still works on an interpreted locale constant *) crunch test2: crunch_foo7 P (wp_del: crunch_foo7_test) locale test_sublocale sublocale test_sublocale < test_locale "return ()" . context test_sublocale begin (* crunch works on a locale constant with a fixed locale parameter *) crunch test[wp]: crunch_foo8 P end (* check that qualified names are handled properly. *) consts foo_const :: "(unit, unit) tmonad" defs foo_const_def: "foo_const \<equiv> Crunch_Test_Qualified_Trace.foo_const" crunch test: foo_const P (* check that the grid-style crunch is working *) crunches crunch_foo3, crunch_foo4, crunch_foo5 for silly: "\<lambda>s. True \<noteq> False" and (no_fail) nf (ignore: modify bind rule: crunch_foo4_alt wp_del: hoare_vcg_prop) end
# <center> École des Ponts ParisTech</center>
## <center> SPH for Hydraulics </center>
### <center> SPyH Tutorial Session 3</center>
#### <center> Confined Flow: Viscous Forces, Wall Conditions </center>
<center> Rémi Carmigniani and Damien Violeau </center>
Goal of Tutorial Session 3
--
In this session, we solve the Navier-Stokes equations for a weakly compressible fluid using the SPH method. We consider the plane Poiseuille flow. The equations read:
\begin{align} \frac{d\mathbf{v}}{dt} &= -\frac{1}{\rho} \mathbf{\nabla} p + \mathbf{g} + \nu \Delta \mathbf{v},\\ \frac{d\rho}{dt} &= -\rho \nabla\cdot \mathbf{v},\\ p &= B \left(\frac{\rho^\gamma}{\rho_0^\gamma} - 1\right), \end{align}
where the last equation is the equation of state of a polytropic gas with $B ={\rho_0} {c_0}^2/\gamma$.
```python
%load_ext autoreload
%autoreload 2
import numpy as np
from sys import exit
import os.path
from os import path
import csv
import time
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['text.usetex'] = True
from src.spyh import *
from src.checkTD3 import *
from src.sphvar import *
from src.plotParticles import *
from src.state import *
from src.contrib import *
from src.analytical_solutions import *
```
Part 1: A bit of theory and construction of the test case
---
We consider the flow, between two infinite plates separated by a distance $2e$, of a fluid of density $\rho_0 = 1000$ kg.m$^{-3}$ in a gravity field $ \mathbf{g} = g \mathbf{e}_x$ m.s$^{-2}$ (which plays the role of the pressure gradient driving the flow). The configuration is shown in the figure below. The flow is assumed steady along $\mathbf{e}_x$, so we can use a domain that is periodic in this direction.
**1-a) Recall the solution of the Poiseuille flow in the steady regime.** We define $Re = \rho_0 U_0 e/\mu$, where $U_0$ is the maximum velocity, $e$ the half width and $\mu$ the dynamic viscosity. **Show that one must have:**
**\begin{equation} g = \frac{2U_0\mu}{\rho_0 e^2} \end{equation}**
**1-b) In the next cell, define $\mu$ and $g$ as functions of the other parameters of the problem.**
```python
#POISEUILLE PARAMETERS
e = 0.5 #half width in meters
U0 = 1 # maximum velocity m/s
Re = 1 # Reynolds number
#FLUID PARAMETERS
rhoF = 1000
#TODO : COMPLETE HERE
mu = rhoF*U0*e/Re #(Pa.s)
grav = np.array([1,0])*2*U0*mu/(rhoF*e**2)
#END
q1b(grav,mu)
```
Your values : g = 4.00 mu = 500.00
1-b) All good!
The next cell defines the fluid, geometry and simulation parameters.
```python
#OTHER FLUID PARAMETERS
c0 = 10*U0
gamma = 7
B = rhoF*c0**2/gamma
#DENSITY & SHEPARD THRESHOLDS :
shepardMin = 10**(-6)
rhoMin = 0.5*rhoF
rhoMax = 1.5*rhoF
#PARTICLES & SPACES PARAMETERS :
N = 4
dr = 2*e/(4*N)
h = smthfc*dr
m=dr*dr*rhoF
lspace = 2*h
#COMPUTATION DOMAIN :
xOrigin = 0
yOrigin = -e-nBound*dr
xSize = 4*lspace
ySize = 2*e+2*nBound*dr
xMax = xOrigin+xSize
yMax = yOrigin+ySize
```
**1-c) Build the geometry using the *addBox* function and the FLAGs: FLUID for fluid particles and BOUND for wall particles. Note that the base is $4\times l_s$ long, where $l_s = 2h$.**
The FLAGs are defined in the file [src/sphvar.py](src/sphvar.py)
The walls consist of a layer of *nBound = 4* *fictitious* particles. This number is sufficient to prevent particles from penetrating the wall.
To add particles to the *part* array, use the *addBox* function:
```python
part = addBox(part,[x_0,y_0,L_x,L_y],FLAG,dr,rhoF)
```
this command adds to part particles of type FLAG in the rectangular domain: $\left[x_0,x_0+L_x\right]\times\left[y_0,y_0+L_y\right]$
```python
#INIT PART:
part = init_particles()
```
```python
#% COMPLETE HERE
part = addBox(part,[0,-e,4*lspace,2*e],FLUID,dr,rhoF)
part = addBox(part,[0,-e-nBound*dr,4*lspace,nBound*dr],BOUND,dr,rhoF)
part = addBox(part,[0,e,4*lspace,nBound*dr],BOUND,dr,rhoF)
# END
```
```python
len(part)
```
384
You should have 384 particles with dr = 2e/16, i.e. N=4.
**Plot of the horizontal velocity**
```python
%matplotlib notebook
Umax= 1
tabUx = part[:,VEL[0]]
domain = [xOrigin,xMax,yOrigin,yMax,0,Umax]
plotPropertiesWithBound(part,tabUx,r'$U_x$',domain,dr,1)
```
<IPython.core.display.Javascript object>
**1-d) The domain we consider is periodic along $\mathbf{e}_x$. To define this periodicity in the code, we need to specify the neighboring *spaces*. In particular, for *space* 0: at resolution $N=15$ its neighbors are *spaces* 1, 6 and 7, but also *spaces* 18 and 19 (see figure)! You can check that the construction is correct by varying the number of particles.**
```python
#PERIODICITY VECTOR
vecPer = np.array([4*lspace,0])
posSpace,neibSpace,partSpace,listNeibSpace = \
init_spaces(xOrigin,yOrigin,xSize,ySize,lspace,dr,vecPer)
```
```python
plotSpaces(posSpace,'k',lspace,1)
neibSpace[0][neibSpace[0]>-1]
```
array([ 0, 1, 6, 7, 18, 19])
```python
spacesOutline(posSpace[neibSpace[0][neibSpace[0]>-1]],'r',lspace,1)
```
```python
f = plt.figure(1)
figName = 'Figures/periodic.pdf'
f.savefig(figName,bbox_inches='tight')
```
Part 2: Computing the viscous forces: the Morris *et al.* and Monaghan & Gingold models
---
We will add two viscosity models: Morris *et al.*, then Monaghan \& Gingold. We recall:
\begin{align} \frac{1}{\rho_i}\mu \mathbf{L}^{visc,Mor}_i \left\{ \mathbf{v}_j \right\}&=\sum_j 2 \mu \frac{m}{\rho_i\rho_j} (\mathbf{v}_i-\mathbf{v}_j) \frac{\mathbf{r}_{ij} \cdot \nabla w_{ij}}{r_{ij}^2} \\ &=\sum_j \mathbf{F}^{Visc,Mor}_{ij} \end{align}
\begin{align} \frac{1}{\rho_i}\mu \mathbf{L}^{visc,M\&G}_i \left\{ \mathbf{v}_j \right\}&=\sum_j 2\left(d+2\right) \mu \frac{m}{\rho_i\rho_j} \frac{\mathbf{r}_{ij}\cdot (\mathbf{v}_i-\mathbf{v}_j)}{r^2_{ij}}\nabla w_{ij}\\ &=\sum_j \mathbf{F}^{Visc,M\&G}_{ij} \end{align}
**2-a) Complete *contrib.py* to create two functions:**
* **MonaghanViscContrib**
* **MorrisViscContrib**
**Both functions take as arguments:**
```python
F = xxxViscContrib(mu,rho_i, rho_j,dwdr,rVel,rPos,m)
```
**They return the force $F_{i,j}$.**
**Hints:** you can take inspiration from what is done for the artificial viscosity. You can test your implementation with the next cell.
```python
q2a()
```
Morris is correctly implemented
Monaghan is correctly implemented
Once your functions are correctly implemented, add @njit before each function to enable compilation and restart the kernel.
**2-b) Taking inspiration from *computeForcesARTPeriodicX* in *spyh.py*, create functions *computeForcesMorrisPeriodicX* and *computeForcesMonaghanPeriodicX* to compute the forces on the particles with the different viscous forces.**
**Note:** We have added the periodic-along-X case.
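For reference, one possible shape of the 2-a contributions is sketched below. This is a non-authoritative sketch, not the implementation checked by `q2a`: it assumes the conventions `rPos` $=\mathbf{r}_{ij}$, `rVel` $=\mathbf{v}_i-\mathbf{v}_j$, and `dwdr` $=\partial w/\partial r$ at $r_{ij}$, so that $\nabla w_{ij} = (\texttt{dwdr}/r_{ij})\,\mathbf{r}_{ij}$; the actual argument conventions of *src/contrib.py* may differ.

```python
import numpy as np
from numba import njit

@njit
def MorrisViscContrib(mu, rho_i, rho_j, dwdr, rVel, rPos, m):
    # Morris et al.: with grad w_ij = (dwdr/r)*rPos (assumed convention),
    # the factor (r_ij . grad w_ij)/r_ij**2 reduces to dwdr/r
    r = np.sqrt(rPos[0]**2 + rPos[1]**2)
    return 2.0*mu*m/(rho_i*rho_j)*rVel*dwdr/r

@njit
def MonaghanViscContrib(mu, rho_i, rho_j, dwdr, rVel, rPos, m):
    # Monaghan & Gingold: 2*(d+2) = 8 in 2D; the scalar (r_ij . v_ij)/r_ij**2
    # multiplies the kernel gradient (dwdr/r)*rPos
    r = np.sqrt(rPos[0]**2 + rPos[1]**2)
    rdotv = rPos[0]*rVel[0] + rPos[1]*rVel[1]
    return 8.0*mu*m/(rho_i*rho_j)*rdotv/(r*r)*(dwdr/r)*rPos
```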
```python
part,partSpace = sortPart(part,posSpace,partSpace,xOrigin,yOrigin,xSize,ySize,lspace,dr)
listNeibSpace= getListNeib(partSpace,neibSpace,listNeibSpace)
```
```python
dt=CFLConditions(part[:,VEL],h,c0,grav,rhoF,mu)
part[:,RHO],part[:,VEL] =interpolateBoundaryPeriodicX((part[:,INFO]==BOUND),\
    part[:,SPID],\
    part[:,POS],\
    part[:,VEL],\
    part[:,RHO],\
    listNeibSpace,\
    aW,h,m,B,rhoF,gamma,grav,vecPer[0],shepardMin)
part[:,FORCES],part[:,DRHODT] = computeForcesMorrisPeriodicX((part[:,INFO]==FLUID),\
    part[:,SPID],\
    part[:,POS],\
    part[:,VEL],\
    part[:,RHO],\
    listNeibSpace,\
    aW,h,m,B,rhoF,gamma,grav,mu,vecPer[0])
part[:,POS],part[:,VEL],part[:,RHO] = integrationStepPeriodicX((part[:,INFO]==FLUID),\
    part[:,POS],\
    part[:,VEL],\
    part[:,RHO],\
    part[:,FORCES],\
    part[:,DRHODT],\
    0,vecPer[0])
```
```python
current_directory = os.getcwd()
case_directory = os.path.join(current_directory, r'Results/Poiseuille_'+time.strftime("%Y%m%d_%H%M%S"))
os.mkdir(case_directory)
data_directory = os.path.join(case_directory,r'Data')
figures_directory = os.path.join(case_directory,r'Figures')
os.mkdir(data_directory)
os.mkdir(figures_directory)
```
```python
# Here we specify the output frequencies
dt_figure = 0.025*e**2*rhoF/mu
t_print = 0
#final time :
t_end = 0.7
t=0
it=0
im_count=0
ytab = np.linspace(-1,1,100)
timetab = np.linspace(0,5,60)
tau = 2*e/U0
Uan_with_time = analyticalPoiseuilleFlow(0,Re,timetab*rhoF/mu*e**2/tau)
Utab = 1-ytab**2
timeTabU = np.empty((0,2),float)
```
```python
%matplotlib notebook
while t<t_end:
    #STEP1 : Compute the CFL time step
    dt = CFLConditions(part[:,VEL],h,c0,grav,rhoF,mu)
    #STEP2 : Interpolate the boundary conditions
    part[:,RHO],part[:,VEL] =interpolateBoundaryPeriodicX((part[:,INFO]==BOUND),\
        part[:,SPID],\
        part[:,POS],\
        part[:,VEL],\
        part[:,RHO],\
        listNeibSpace,\
        aW,h,m,B,rhoF,gamma,grav,vecPer[0],shepardMin)
    #STEP3 : Compute the forces and the density terms
    part[:,FORCES],part[:,DRHODT] = computeForcesMorrisPeriodicX((part[:,INFO]==FLUID),\
        part[:,SPID],\
        part[:,POS],\
        part[:,VEL],\
        part[:,RHO],\
        listNeibSpace,\
        aW,h,m,B,rhoF,gamma,grav,mu,vecPer[0])
    #STEP4 : Time integration
    part[:,POS],part[:,VEL],part[:,RHO] = integrationStepPeriodicX((part[:,INFO]==FLUID),\
        part[:,POS],\
        part[:,VEL],\
        part[:,RHO],\
        part[:,FORCES],\
        part[:,DRHODT],\
        dt,vecPer[0])
    #STEP5 : Clip densities that fall out of bounds
    part[:,RHO] = checkDensity(part[:,RHO],rhoMin,rhoMax)
    #STEP6 : Update the neighbors (not necessarily needed at every time step)
    part,partSpace = sortPart(part,posSpace,partSpace,xOrigin,yOrigin,xSize,ySize,lspace,dr)
    listNeibSpace= getListNeib(partSpace,neibSpace,listNeibSpace)
    t +=dt
    it +=1
    if t>=t_print:
        fig = plt.figure(1)
        plt.clf()
        plt.title(r'$t\nu/e^2 = %2.2f$'%(t*mu/rhoF/e**2))
        velMagn = (part[:,VEL[0]]*part[:,VEL[0]]+part[:,VEL[1]]*part[:,VEL[1]])**.5
        domain = [xOrigin,xMax,yOrigin,yMax,0,1]
        plotPropertiesWithBound(part,velMagn,r'$u/U_0$',domain,dr,1)
        figname = os.path.join(figures_directory,r'vel_%06d.png'%im_count)
        fig.savefig(figname,bbox_inches='tight')
        fig.canvas.draw()
        plt.pause(0.01)
        #FIGURE DISPLAY
        fig2 = plt.figure(2)
        plt.clf()
        plt.plot(part[:,POS[1]]/e,part[:,VEL[0]]/U0,'bo',label=r'$U_x$ SPH')
        plt.plot(part[:,POS[1]]/e,part[:,VEL[1]]/U0,'ro',label=r'$U_y$ SPH')
        plt.plot(ytab,Utab,'--k',label=r'Asymptotic')
        uan = analyticalPoiseuilleFlow(ytab,Re,t/tau)
        plt.plot(ytab,uan,'--r',label='Analytical')
        plt.xlabel('$z/e$',fontsize=18)
        plt.ylabel('$u/U_0$',fontsize=18)
        plt.xlim(-1-nBound*dr/e,1+nBound*dr/e)
        plt.ylim(-0.2,1.2)
        plt.legend(loc='upper left')
        plt.tight_layout()
        ax = plt.gca()
        ax.tick_params(axis = 'both', which = 'major', labelsize = 18)
        ax.xaxis.set_major_locator(plt.MaxNLocator(5))
        ax.yaxis.set_major_locator(plt.MaxNLocator(5))
        plt.tight_layout()
        plt.show(block=False)
        plt.draw()
        figname = os.path.join(figures_directory,r'UV_%06d.png'%im_count)
        fig2.savefig(figname,bbox_inches='tight')
        fig2.canvas.draw()
        plt.pause(0.01)
        #Figure
        fig3 = plt.figure(3)
        plt.clf()
        timeTabU = np.append(timeTabU,[[t*mu/rhoF/e**2,part[:,VEL[0]].max()/U0]],axis=0)
        plt.plot(timeTabU[:,0],timeTabU[:,1],'b.',label='SPH')
        plt.plot(timetab,Uan_with_time,'--r',label='Analytical')
        plt.xlim(0,3)
        plt.ylim(0,1.2)
        plt.xlabel(r'$t\nu/e^2$',fontsize=18)
        plt.ylabel(r'$u_{max}/U_0$',fontsize=18)
        plt.legend(loc='lower right')
        plt.tight_layout()
        ax = plt.gca()
        ax.tick_params(axis = 'both', which = 'major', labelsize = 18)
        ax.xaxis.set_major_locator(plt.MaxNLocator(5))
        ax.yaxis.set_major_locator(plt.MaxNLocator(5))
        plt.tight_layout()
        plt.show(block=False)
        plt.draw()
        figname = os.path.join(figures_directory,'VelMaxDT.png')
        fig3.savefig(figname,bbox_inches='tight')
        fig3.canvas.draw()
        plt.pause(0.01)
        im_count = im_count+1
        t_print +=dt_figure
```
You should see that the simulation does not give the expected result. This comes from the boundary conditions being incorrectly implemented. We will fix this problem in the next file. Open the file [main_part2](main_part2.ipynb)
```python

```
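As an aside, the steady target profile of question 1-a can be checked independently of the SPH machinery. The sketch below is a minimal standalone check (our illustration, reusing the parameter values defined above): it integrates the momentum balance $\mu u''(y) + \rho g = 0$ with $u(\pm e) = 0$ and verifies that the result is $u(y) = U_0 (1 - y^2/e^2)$ with $g = 2U_0\mu/(\rho_0 e^2)$.

```python
import numpy as np

# steady plane Poiseuille flow driven by a body force g e_x
e, U0, rhoF = 0.5, 1.0, 1000.0
mu = rhoF * U0 * e            # Re = 1
g = 2 * U0 * mu / (rhoF * e**2)

y = np.linspace(-e, e, 201)
u = rhoF * g / (2 * mu) * (e**2 - y**2)      # solution of mu*u'' + rho*g = 0
assert np.allclose(u, U0 * (1 - (y / e)**2))  # matches the target profile
print(u.max())                                # -> 1.0 = U0
```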
Require Import Nat.
Require Import PeanoNat.

(* invert hypothesis H, substitute the generated equations and clear H *)
Ltac inv H := inversion H; subst; clear H.

(* destruct a match in a hypothesis *)
Ltac dmh := match goal with | H : context[match ?x with | _ => _ end] |- _ => destruct x eqn:?E end.
(* destruct a match in the goal *)
Ltac dmg := match goal with | |- context[match ?x with | _ => _ end] => destruct x eqn:?E end.
(* destruct the first match found, in a hypothesis or in the goal *)
Ltac dm := (first [dmh | dmg]); auto.

Lemma false_not_true : forall(b : bool), b = false <-> not(b = true).
Proof.
  intros b. split.
  - intros H. destruct b.
    + discriminate.
    + unfold not. intros C. discriminate.
  - intros H. destruct b.
    + contradiction.
    + reflexivity.
Qed.

(* inject pair/Some equalities found in hypotheses, then substitute *)
Ltac inj_all :=
  match goal with
  | H:context [ (_, _) = (_, _) ] |- _ => injection H; intros; subst; clear H
  | H:context [ Some _ = Some _ ] |- _ => injection H; intros; subst; clear H
  end.

(* turn boolean equality tests (_ =? _) into propositional equalities *)
Ltac eqb_eq_all :=
  match goal with
  | H:context [ (_ =? _) = _ ] |- _ => try(rewrite false_not_true in H); rewrite Nat.eqb_eq in H
  end.

(* turn boolean comparisons (_ <? _) into propositional inequalities *)
Ltac ltb_lt_all :=
  match goal with
  | H:context [ (_ <? _) = _ ] |- _ => try(rewrite false_not_true in H); rewrite Nat.ltb_lt in H
  end.
# Realization of Non-Recursive Filters *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).* ## Fast Convolution The straightforward convolution of two finite-length signals $x[k]$ and $h[k]$ is a numerically complex task. This has led to the development of various techniques with considerably lower complexity. The basic concept of the *fast convolution* is to exploit the correspondence between the convolution and the scalar multiplication in the frequency domain. ### Convolution of Finite-Length Signals The convolution of a causal signal $x_L[k]$ of length $L$ with a causal impulse response $h_N[k]$ of length $N$ is given as \begin{equation} y[k] = x_L[k] * h_N[k] = \sum_{\kappa = 0}^{L-1} x_L[\kappa] \; h_N[k - \kappa] = \sum_{\kappa = 0}^{N-1} h_N[\kappa] \; x_L[k - \kappa] \end{equation} where $x_L[k] = 0$ for $k<0 \wedge k \geq L$ and $h_N[k] = 0$ for $k<0 \wedge k \geq N$. The resulting signal $y[k]$ is of finite length $M = N+L-1$. The computation of $y[k]$ for $k=0,1, \dots, M-1$ requires $M \cdot N$ multiplications and $M \cdot (N-1)$ additions. The computational complexity of the convolution is consequently [in the order of](https://en.wikipedia.org/wiki/Big_O_notation) $\mathcal{O}(M \cdot N)$. Discrete-time Fourier transformation (DTFT) of above relation yields \begin{equation} Y(e^{j \Omega}) = X_L(e^{j \Omega}) \cdot H_N(e^{j \Omega}) \end{equation} Discarding the effort of transformation, the computationally complex convolution is replaced by a scalar multiplication with respect to the frequency $\Omega$. However, $\Omega$ is a continuous frequency variable which limits the numerical evaluation of this scalar multiplication. In practice, the DTFT is replaced by the discrete Fourier transformation (DFT). Two aspects have to be considered before a straightforward application of the DFT 1. The DFTs $X_L[\mu]$ and $H_N[\mu]$ are of length $L$ and $N$ respectively and cannot be multiplied straightforwardly 2. For $N = L$, the multiplication of the two spectra $X_L[\mu]$ and $H_L[\mu]$ would result in the [periodic/circular convolution](https://en.wikipedia.org/wiki/Circular_convolution) $x_L[k] \circledast h_L[k]$ due to the periodicity of the DFT. Since we aim at realizing the linear convolution $x_L[k] * h_N[k]$ with the DFT, special care has to be taken to avoid cyclic effects. ### Linear Convolution by Periodic Convolution The periodic convolution of the two signals $x_L[k]$ and $h_N[k]$ is defined as \begin{equation} x_L[k] \circledast h_N[k] = \sum_{\kappa=0}^{M-1} \tilde{x}_M[k - \kappa] \; \tilde{h}_M[\kappa] \end{equation} where the periodic continuations $\tilde{x}_M[k]$ of $x_L[k]$ and $\tilde{h}_M[k]$ of $h_N[k]$ with period $M$ are given as \begin{align} \tilde{x}_M[k] &= \sum_{m = -\infty}^{\infty} x_L[m \cdot M + k] \\ \tilde{h}_M[k] &= \sum_{m = -\infty}^{\infty} h_N[m \cdot M + k] \end{align} The result of the circular convolution has a periodicity of $M$. To compute the linear convolution by the periodic convolution one has to take care that the result of the linear convolution fits into one period of the periodic convolution. Hence, the periodicity has to be chosen as $M \geq N+L-1$. 
This can be achieved by zero-padding of $x_L[k]$ and $h_N[k]$ to a total length of $M$ \begin{align} x_M[k] &= \begin{cases} x_L[k] & \mathrm{for} \; k=0, 1, \dots, L-1 \\ 0 & \mathrm{for} \; k=L, L+1, \dots, M-1 \end{cases} \\ h_M[k] &= \begin{cases} h_N[k] & \mathrm{for} \; k=0, 1, \dots, N-1 \\ 0 & \mathrm{for} \; k=N, N+1, \dots, M-1 \end{cases} \end{align} This results in the desired equality of linear and periodic convolution \begin{equation} x_L[k] * h_N[k] = x_M[k] \circledast h_M[k] \end{equation} for $k = 0,1,\dots, M-1$ with $M = N+L-1$. #### Example - Linear by periodic convolution The following example computes the linear, periodic and linear by periodic convolution of a rectangular signal $x[k] = \text{rect}_L[k]$ of length $L$ with a triangular signal $h[k] = \Lambda_N[k]$ of length $N$. ```python %matplotlib inline import numpy as np import matplotlib.pyplot as plt import scipy.signal as sig L = 32 # length of signal x[k] N = 16 # length of signal h[k] M = 16 # periodicity of periodic convolution def periodic_summation(x, N): "Zero-padding to length N or periodic summation with period N." M = len(x) rows = int(np.ceil(M/N)) if (M < int(N*rows)): x = np.pad(x, (0, int(N*rows-M)), 'constant') x = np.reshape(x, (rows, N)) return np.sum(x, axis=0) def periodic_convolve(x, y, P): "Periodic convolution of two signals x and y with period P." x = periodic_summation(x, P) h = periodic_summation(y, P) return np.array([np.dot(np.roll(x[::-1], k+1), h) for k in range(P)], float) # generate signals x = np.ones(L) h = sig.triang(N) # linear convolution y1 = np.convolve(x, h, 'full') # periodic convolution y2 = periodic_convolve(x, h, M) # linear convolution via periodic convolution xp = np.append(x, np.zeros(N-1)) hp = np.append(h, np.zeros(L-1)) y3 = periodic_convolve(xp, hp, L+N-1) # plot results def plot_signal(x): plt.figure(figsize = (10, 3)) plt.stem(x) plt.xlabel(r'$k$') plt.ylabel(r'$y[k]$') plt.axis([0, N+L, 0, 1.1*x.max()]) plot_signal(x) plt.title('Signal $x[k]$') plot_signal(y1) plt.title('Linear convolution') plot_signal(y2) plt.title('Periodic convolution with period M = %d' %M) plot_signal(y3) plt.title('Linear convolution by periodic convolution'); ``` **Exercise** * Change the lengths `L`, `N` and `M` and check how the results for the different convolutions change ### The Fast Convolution Using the above derived equality of the linear and periodic convolution one can express the linear convolution $y[k] = x_L[k] * h_N[k]$ by the DFT as \begin{equation} y[k] = \text{IDFT}_M \{ \; \text{DFT}_M\{ x_M[k] \} \cdot \text{DFT}_M\{ h_M[k] \} \; \} \end{equation} This operation requires three DFTs of length $M$ and $M$ complex multiplications. On first sight this does not seem to be an improvement, since one DFT/IDFT requires $M^2$ complex multiplications and $M \cdot (M-1)$ complex additions. The overall numerical complexity is hence in the order of $\mathcal{O}(M^2)$. The DFT can be realized efficiently by the [fast Fourier transformation](https://en.wikipedia.org/wiki/Fast_Fourier_transform) (FFT), which lowers the computational complexity to $\mathcal{O}(M \log_2 M)$. The resulting algorithm is known as *fast convolution* due to its computational efficiency. The fast convolution algorithm is composed of the following steps 1. Zero-padding of the two input signals $x_L[k]$ and $h_N[k]$ to at least a total length of $M \geq N+L-1$ 2. Computation of the DFTs $X[\mu]$ and $H[\mu]$ using a FFT of length $M$ 3. Multiplication of the spectra $Y[\mu] = X[\mu] \cdot H[\mu]$ 4. 
The overall complexity depends on the particular implementation of the FFT. Many FFTs are most efficient for lengths which are a power of two. It can therefore make sense, in terms of computational complexity, to choose $M$ as a power of two instead of the shortest possible length $N+L-1$. For real-valued signals $x[k] \in \mathbb{R}$ and $h[k] \in \mathbb{R}$, the computational complexity can be reduced significantly by using a real-valued FFT.

#### Example - Fast convolution

The implementation of the fast convolution algorithm is straightforward. Most implementations of the FFT include zero-padding to a given length $M$, e.g. in `numpy` by `numpy.fft.fft(x, M)`. In the following example an implementation of the fast convolution is shown. For illustration, the convolution of a rectangular signal $x[k] = \text{rect}_L[k]$ of length $L$ with a triangular signal $h[k] = \Lambda_N[k]$ of length $N$ is considered.

```python
L = 16  # length of signal x[k]
N = 16  # length of signal h[k]
M = N+L-1

# generate signals
x = np.ones(L)
h = sig.triang(N)

# linear convolution
y1 = np.convolve(x, h, 'full')
# fast convolution (the residual imaginary part of the IFFT is numerical noise)
y2 = np.real(np.fft.ifft(np.fft.fft(x, M) * np.fft.fft(h, M)))

plt.figure(figsize=(10, 6))
plt.subplot(211)
plt.stem(y1)
plt.xlabel(r'$k$')
plt.ylabel(r'$y[k] = x_L[k] * h_N[k]$')
plt.title('Result of linear convolution')

plt.subplot(212)
plt.stem(y2)
plt.xlabel(r'$k$')
plt.ylabel(r'$y[k] = x_L[k] * h_N[k]$')
plt.title('Result of fast convolution')
plt.tight_layout()
```

#### Example - Numerical complexity

It was already argued that the numerical complexity of the fast convolution is considerably lower due to the usage of the FFT. The gain with respect to the direct convolution is evaluated in the following. In order to measure the execution times of both algorithms, the `timeit` module is used. The algorithms are evaluated for the convolution of two random signals $x_L[k]$ and $h_N[k]$ of length $L=N=2^n$ for $n=0, 1, \dots, 16$.

```python
import timeit

n = np.arange(17)  # lengths 2**n to evaluate
reps = 20  # number of repetitions for timeit

gain = np.zeros(len(n))
for N in n:
    length = 2**N
    # setup environment for timeit
    tsetup = ('import numpy as np; from numpy.fft import rfft, irfft; '
              'x=np.random.randn(%d); h=np.random.randn(%d)' % (length, length))
    # direct convolution
    tc = timeit.timeit('np.convolve(x, h, mode="full")', setup=tsetup, number=reps)
    # fast convolution
    tf = timeit.timeit('irfft(rfft(x, %d) * rfft(h, %d))' % (2*length, 2*length),
                       setup=tsetup, number=reps)
    # speedup by using the fast convolution
    gain[N] = tc/tf

# show the results
plt.figure(figsize=(15, 10))
plt.barh(n, gain, log=True)
plt.plot([1, 1], [-1, n[-1]+1], 'r-')
plt.yticks(n, 2**n)
plt.xlabel('Gain of fast convolution')
plt.ylabel('Length of signals')
plt.title('Comparison between direct and fast convolution')
plt.grid()
```

**Exercise**

* When is the fast convolution more efficient/faster than a direct convolution?
* Why is it slower below a given signal length?
* Is the trend of the gain as expected from the numerical complexity of the FFT?

Solution: The gain in execution time of the fast convolution over a direct implementation of the convolution for different signal lengths depends heavily on the particular implementation and hardware used. The fast convolution in this example is faster for two signals having a length equal to or larger than 1024 samples; below this length, the overhead of computing the transformations outweighs the savings in arithmetic operations. Discarding the outliers and short lengths, the overall trend of the gain follows the ratio of the computational complexities $\mathcal{O}(M \cdot N)$ and $\mathcal{O}(M \log_2 M)$ derived above. Which realization is preferable for a given pair of signals can also be determined programmatically, as sketched below.
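As a side note (a minimal sketch, not part of the original notebook; it assumes a `scipy` version that provides this function, i.e. 0.19 or later), `scipy.signal.choose_conv_method` estimates which realization is expected to be faster:

```python
import numpy as np
import scipy.signal as sig

rng = np.random.default_rng(42)
x_short = rng.standard_normal(2**5)
x_long = rng.standard_normal(2**14)

# returns 'direct' or 'fft'; with measure=True the decision is based on
# actual timings instead of precomputed heuristics
print(sig.choose_conv_method(x_short, x_short, mode='full'))  # typically 'direct'
print(sig.choose_conv_method(x_long, x_long, mode='full'))    # typically 'fft'
```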
**Copyright**

This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2018*.
# Goal: To read in files produced by CMIE's "Business Beacon".
# This assumes you have made a file of MONTHLY data using CMIE's
# Business Beacon program. This contains 2 columns: M3 and M0.
A <- read.table(
                # Generic to all BB files --
                sep="|",                     # CMIE's .txt file is pipe delimited
                skip=3,                      # Skip the first 3 lines
                na.strings=c("N.A.","Err"),  # The ways they encode missing data
                # Specific to your immediate situation --
                file="bb_data.text",
                col.names=c("junk", "date", "M3", "M0")
                )
A$junk <- NULL                               # Blow away this column

# Parse the CMIE-style "Mmm yy" date string that's used on monthly data.
# Prepend a day of month so as.Date() can parse it; "%y" matches the
# two-digit year in "Mmm yy".
A$date <- as.Date(paste("1", as.character(A$date)), format="%d %b %y")
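# A quick sanity check of the parsed data (illustrative only; assumes the
# read.table() call above succeeded on your copy of the file):
str(A)                              # A$date should now be of class "Date"
summary(A[, c("M3", "M0")])
plot(M3 ~ date, data=A, type="l")   # eyeball the monthly M3 series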
theory Variable
imports Main
begin

datatype var = V nat

primrec fresh' :: "var set \<Rightarrow> nat \<Rightarrow> nat" where
  "fresh' xs 0 = 0"
| "fresh' xs (Suc x) = (if V (Suc x) \<in> xs then fresh' (xs - {V (Suc x)}) x else Suc x)"

definition fresh :: "var set \<Rightarrow> var" where
  "fresh xs = V (fresh' xs (card xs))"

abbreviation extend_set :: "var set \<Rightarrow> var set" where
  "extend_set vs \<equiv> insert (fresh vs) vs"

lemma [simp]: "finite xs \<Longrightarrow> fresh' xs x \<noteq> Suc x"
proof -
  assume "finite xs"
  hence "fresh' xs x < Suc x" by simp
  thus ?thesis by simp
qed

lemma [simp]: "finite xs \<Longrightarrow> x = card xs \<Longrightarrow> V (fresh' xs x) \<notin> xs"
proof (induction x arbitrary: xs)
  case (Suc x)
  moreover hence "finite (xs - {V (Suc x)})" by simp
  moreover from Suc have "V (Suc x) \<in> xs \<Longrightarrow> x = card (xs - {V (Suc x)})" by simp
  ultimately have "V (Suc x) \<in> xs \<Longrightarrow> V (fresh' (xs - {V (Suc x)}) x) \<notin> xs - {V (Suc x)}"
    by metis
  moreover from Suc(2) have "fresh' (xs - {V (Suc x)}) x \<noteq> Suc x" by simp
  ultimately show ?case by simp
qed simp_all

lemma fresh_is_fresh [simp]: "finite xs \<Longrightarrow> fresh xs \<notin> xs"
  by (simp add: fresh_def)

end