(** * JCC (rel) instruction *) Require Import ssreflect ssrbool ssrnat ssrfun eqtype seq fintype tuple. Require Import procstate procstatemonad bitsops bitsprops bitsopsprops. Require Import spec SPred septac spec safe triple basic basicprog spectac. Require Import instr instrcodec eval monad monadinst reader pointsto cursor. Require Import Setoid RelationClasses Morphisms. Set Implicit Arguments. Unset Strict Implicit. Import Prenex Implicits. Require Import Relations. Require Import instrsyntax. Local Open Scope instr_scope. Require Import x86.instrrules.core. (** For convenience, the [~~b] branch is not under a [|>] operator since [q] will never be equal to [p], and thus there is no risk of recursion. *) Lemma JCCrel_rule rel cc cv (b:bool) (p q: DWORD) : |-- ( |> safe @ (b == cv /\\ EIP ~= (addB q rel) ** ConditionIs cc b) //\\ safe @ (b == (~~cv) /\\ EIP ~= q ** ConditionIs cc b) -->> safe @ (EIP ~= p ** ConditionIs cc b) ) <@ (p -- q :-> JCCrel cc cv (mkTgt rel)). Proof. rewrite ->(spec_later_weaken (safe @ (b == (~~ cv) /\\ EIP~=q ** ConditionIs cc b))). rewrite <-spec_later_and. rewrite ->spec_at_and_or; last apply _. apply TRIPLE_safe => R. rewrite /evalInstr. triple_apply triple_letGetCondition. replace (b == (~~cv)) with (~~(b == cv)); last first. { case: b; case: cv; reflexivity. } case: (b == cv). { instrrule_triple_bazooka using do [ progress sbazooka | apply: lorR1 ]. } { instrrule_triple_bazooka using do [ progress sbazooka | apply: lorR2 ]. } Qed.
#include "sac_fit.h" /// PROJECT #include <csapex/model/node_modifier.h> #include <csapex/msg/generic_value_message.hpp> #include <csapex/msg/generic_vector_message.hpp> #include <csapex/msg/io.h> #include <csapex/param/parameter_factory.h> #include <csapex/utility/register_apex_plugin.h> #include <csapex_point_cloud/msg/indices_message.h> /// #include "sac_segmentation.hpp" /// SYSTEM // clang-format off #include <csapex/utility/suppress_warnings_start.h> #include <boost/mpl/for_each.hpp> #include <pcl/segmentation/sac_segmentation.h> #include <tf/tf.h> #include <csapex/utility/suppress_warnings_end.h> // clang-format on CSAPEX_REGISTER_CLASS(csapex::SacFit, csapex::Node) using namespace csapex; using namespace csapex::connection_types; using namespace std; SacFit::SacFit() { } void SacFit::setupParameters(Parameterizable& parameters) { parameters.addParameter(param::factory::declareRange("iterations", 1, 20000, 5000, 200), max_iterations_); parameters.addParameter(param::factory::declareRange("min inliers", 5, 20000, 100, 100), min_inliers_); parameters.addParameter(param::factory::declareRange("normal distance weight", 0.0, 2.0, 0.085, 0.001), normal_distance_weight_); parameters.addParameter(param::factory::declareRange("distance threshold", 0.0, 2.0, 0.009, 0.001), distance_threshold_); parameters.addParameter(param::factory::declareValue<double>("model_main_axis_x", 0), model_main_axis_x_); parameters.addParameter(param::factory::declareValue<double>("model_main_axis_y", 0), model_main_axis_y_); parameters.addParameter(param::factory::declareValue<double>("model_main_axis_z", 0), model_main_axis_z_); parameters.addParameter(param::factory::declareValue<double>("model_angle_offset", 0), model_angle_offset_); parameters.addParameter(param::factory::declareRange("sphere min radius", 0.0, 2.0, 0.02, 0.005), sphere_r_max_); parameters.addParameter(param::factory::declareRange("sphere max radius", 0.0, 2.0, 0.8, 0.005), sphere_r_min_); parameters.addParameter(param::factory::declareBool("from normals", false), from_normals_); parameters.addParameter(param::factory::declareBool("optimize coefficients", true), optimize_coefficients_); std::map<std::string, int> model_types = { { "CIRCLE2D", pcl::SACMODEL_CIRCLE2D }, { "CIRCLE3D", pcl::SACMODEL_CIRCLE3D }, { "CONE", (int)pcl::SACMODEL_CONE }, { "CYLINDER", (int)pcl::SACMODEL_CYLINDER }, { "LINE", pcl::SACMODEL_LINE }, { "NORMAL_PARALLEL_PLANE", (int)pcl::SACMODEL_NORMAL_PARALLEL_PLANE }, { "NORMAL_PLANE", pcl::SACMODEL_NORMAL_PLANE }, { "PARALLEL_LINE", pcl::SACMODEL_PARALLEL_LINE }, { "PARALLEL_LINES", pcl::SACMODEL_PARALLEL_LINES }, { "PARALLEL_PLANE", pcl::SACMODEL_PARALLEL_PLANE }, { "PLANE", (int)pcl::SACMODEL_PLANE }, { "PERPENDICULAR_PLANE", pcl::SACMODEL_PERPENDICULAR_PLANE }, { "SPHERE", (int)pcl::SACMODEL_SPHERE }, { "STICK", pcl::SACMODEL_STICK }, { "TORUS", pcl::SACMODEL_TORUS } }; parameters.addParameter(param::factory::declareParameterSet<int>("models", model_types, (int)pcl::SACMODEL_PLANE), model_type_); std::map<std::string, int> ransac_types = { { "LMEDS", pcl::SAC_LMEDS }, { "MLESAC", pcl::SAC_MLESAC }, { "MSAC", pcl::SAC_MSAC }, { "PROSAC", pcl::SAC_PROSAC }, { "RANSAC", pcl::SAC_RANSAC }, { "RMSAC", pcl::SAC_RMSAC }, { "RRANSAC", pcl::SAC_RRANSAC }, }; parameters.addParameter(param::factory::declareParameterSet<int>("ransac type", ransac_types, (int)pcl::SAC_RANSAC), ransac_type_); } void SacFit::process() { PointCloudMessage::ConstPtr msg(msg::getMessage<PointCloudMessage>(in_cloud_)); 
boost::apply_visitor(PointCloudMessage::Dispatch<SacFit>(this, msg), msg->value); } void SacFit::setup(NodeModifier& node_modifier) { in_cloud_ = node_modifier.addInput<PointCloudMessage>("PointCloud"); in_indices_ = node_modifier.addOptionalInput<GenericVectorMessage, pcl::PointIndices>("Indices"); // optional input out_models_ = node_modifier.addOutput<GenericVectorMessage, ModelMessage>("Models"); out_indices_ = node_modifier.addOutput<GenericVectorMessage, pcl::PointIndices>("Model Points"); } template <class PointT> void SacFit::inputCloud(typename pcl::PointCloud<PointT>::ConstPtr cloud) { std::shared_ptr<std::vector<pcl::PointIndices>> out_indices(new std::vector<pcl::PointIndices>); std::shared_ptr<std::vector<ModelMessage>> out_models(new std::vector<ModelMessage>); /// configure the segmentation pcl::SACSegmentation<PointT>* segmenter; bool normals_needed = need_normals(); // some SAC models need normals / access conditional if (from_normals_ || normals_needed) { pcl::SACSegmentationFromNormals<PointT, pcl::Normal>* normal_segmenter; normal_segmenter = new pcl::SACSegmentationFromNormals<PointT, pcl::Normal>; normal_segmenter->setNormalDistanceWeight(normal_distance_weight_); pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>); estimateNormals<PointT>(cloud, normals); /// input indices ?? normal_segmenter->setInputNormals(normals); segmenter = normal_segmenter; } else { segmenter = new pcl::SACSegmentation<PointT>; } segmenter->setOptimizeCoefficients(optimize_coefficients_); segmenter->setModelType(model_type_); segmenter->setMethodType(ransac_type_); segmenter->setMaxIterations(max_iterations_); segmenter->setDistanceThreshold(distance_threshold_); // segmenter->setRadiusLimits (sphere_r_min_, sphere_r_max_); // // circular objects / access conditional // segmenter->setMinMaxOpeningAngle(sphere_r_min_, sphere_r_max_); // cones // / access conditional if (model_main_axis_x_ != 0 || model_main_axis_y_ != 0 || model_main_axis_z_ != 0) { Eigen::Vector3f axis(model_main_axis_x_, model_main_axis_y_, model_main_axis_z_); segmenter->setAxis(axis); segmenter->setEpsAngle(model_angle_offset_); /// angle to access conditional } /// let's fit segmenter->setInputCloud(cloud); if (msg::hasMessage(in_indices_)) { std::shared_ptr<std::vector<pcl::PointIndices> const> in_indices = msg::getMessage<GenericVectorMessage, pcl::PointIndices>(in_indices_); for (const pcl::PointIndices& indices : *in_indices) { pcl::ModelCoefficients::Ptr model_coefficients(new pcl::ModelCoefficients); pcl::PointIndices::Ptr indices_ptr(new pcl::PointIndices(indices)); segmenter->setIndices(indices_ptr); pcl::PointIndices inliers; segmenter->segment(inliers, *model_coefficients); if ((int)inliers.indices.size() > min_inliers_) { out_indices->emplace_back(inliers); ModelMessage model; model.coefficients = model_coefficients; model.probability = segmenter->getProbability(); model.frame_id = cloud->header.frame_id; model.model_type = (pcl::SacModel)model_type_; out_models->emplace_back(model); } } } else { /// modify for more than one model pcl::ModelCoefficients::Ptr model_coefficients(new pcl::ModelCoefficients); pcl::PointIndices inliers; segmenter->segment(inliers, *model_coefficients); if ((int)inliers.indices.size() > min_inliers_) { out_indices->emplace_back(inliers); ModelMessage model; model.coefficients = model_coefficients; model.probability = segmenter->getProbability(); model.frame_id = cloud->header.frame_id; model.model_type = (pcl::SacModel)model_type_; 
out_models->emplace_back(model); } } msg::publish<GenericVectorMessage, pcl::PointIndices>(out_indices_, out_indices); msg::publish<GenericVectorMessage, ModelMessage>(out_models_, out_models); delete segmenter; } template <class PointT> void SacFit::estimateNormals(typename pcl::PointCloud<PointT>::ConstPtr cloud, pcl::PointCloud<pcl::Normal>::Ptr normals) { typename pcl::NormalEstimation<PointT, pcl::Normal> normal_estimation; typename pcl::search::KdTree<PointT>::Ptr tree(new pcl::search::KdTree<PointT>); normal_estimation.setSearchMethod(tree); normal_estimation.setInputCloud(cloud); normal_estimation.setKSearch(50); normal_estimation.compute(*normals); } bool SacFit::need_normals() { switch (model_type_) { case pcl::SACMODEL_NORMAL_PARALLEL_PLANE: return true; case pcl::SACMODEL_NORMAL_PLANE: return true; case pcl::SACMODEL_CONE: return true; default: return false; } }
module MyInterpolations function my_lin_interp(grid, vals) function func(x) if x < grid[1] || x > grid[end] error("x is outside the grid") end if x == grid[end] return vals[end] end i = searchsortedlast(grid, x) return vals[i] + (vals[i+1] - vals[i])*(x - grid[i])/(grid[i+1] - grid[i]) end return func end export my_lin_interp end
{ \begin{doublespacing} \begin{flushleft} \section{Overview} This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. This section is an overview. \end{flushleft} \end{doublespacing} }
/* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrift/lib/cpp/test/loadgen/loadgen.h> #include <thrift/lib/cpp/test/loadgen/Controller.h> #include <thrift/lib/cpp/test/loadgen/LatencyMonitor.h> #include <boost/scoped_ptr.hpp> using namespace boost; namespace apache { namespace thrift { namespace loadgen { void runLoadGen(WorkerFactory* factory, const std::shared_ptr<LoadConfig>& config, double interval, Monitor* monitor, apache::thrift::concurrency::PosixThreadFactory* threadFactory) { scoped_ptr<LatencyMonitor> defaultMonitor; if (monitor == nullptr) { defaultMonitor.reset(new LatencyMonitor(config)); monitor = defaultMonitor.get(); } Controller controller(factory, monitor, config, threadFactory); controller.run(config->getNumWorkerThreads(), config->getMaxWorkerThreads(), interval); } }}} // apache::thrift::loadgen
function fx = p34_fun ( n, x ) %*****************************************************************************80 % %% P34_FUN evaluates the integrand for problem 34. % % Interval: % % 0 <= x <= 1 % % Integrand: % % ( 10 * x - 1 ) * ( 10 * x - 1.1 ) * ( 10 * x - 1.2 ) * ( 10 * x - 1.3 ) % % Exact Integral: % % 1627879 / 1500 % % Approximate Integral (20 digits): % % 1085.2526666666666666... % % Licensing: % % This code is distributed under the GNU LGPL license. % % Modified: % % 04 November 2009 % % Author: % % John Burkardt % % Reference: % % Hermann Engels, % Numerical Quadrature and Cubature, % Academic Press, 1980. % % Parameters: % % Input, integer N, the number of evaluation points. % % Input, real X(N), the evaluation points. % % Output, real FX(N), the integrand values. % fx = ( 10.0 * x - 1.0 ) .* ( 10.0 * x - 1.1 ) .* ( 10.0 * x - 1.2 ) ... .* ( 10.0 * x - 1.3 ); return end
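As a quick cross-check of the exact value quoted in the header above, here is a short symbolic verification; it is a minimal sketch, not part of the original MATLAB file, and assumes Python with SymPy is available.

# Verify that the integral of (10x-1)(10x-1.1)(10x-1.2)(10x-1.3) over [0,1]
# equals 1627879/1500, the exact value stated in the header comment.
from sympy import Rational, Symbol, integrate

x = Symbol('x')
fx = (10*x - 1) * (10*x - Rational(11, 10)) * (10*x - Rational(12, 10)) * (10*x - Rational(13, 10))
exact = integrate(fx, (x, 0, 1))
assert exact == Rational(1627879, 1500)
print(exact, float(exact))  # 1627879/1500 ~ 1085.2526666666666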
import ring_theory.principal_ideal_domain data.equiv.basic set_lemmas variables {M : Type*} {R : Type*} [integral_domain R] [is_principal_ideal_ring R] [add_comm_group M] [module R M] {A : submodule R M} {α : Type*} {β : Type*} {γ : Type*} [add_comm_group α] [add_comm_group β] open_locale classical open finset lemma sum_union_zero {s t : finset α} {f : α → β} (h : s ∩ t ⊆ {0}) (h0 : f 0 = 0) : finset.sum (s ∪ t) f = finset.sum s f + finset.sum t f := begin rw ←sum_union_inter, cases classical.em (s ∩ t = ∅) with hst hst, rw hst, rw sum_empty, rw add_zero, have : s ∩ t = {0}, from finset.subset.antisymm h (by {cases finset.nonempty_of_ne_empty hst with x hx, rw finset.singleton_subset_iff, convert hx, symmetry, exact finset.mem_singleton.1 (h hx)}), rw this, rw sum_singleton, rw h0, rw add_zero, end lemma sum_bind_zero {f : α → β} (h0 : f 0 = 0) {s : finset γ} {t : γ → finset α} : (∀x∈s, ∀y∈s, x ≠ y → (t x ∩ t y ⊆ {0})) → (finset.sum (s.bind t) f) = finset.sum s (λ x, finset.sum (t x) f) := finset.induction_on s (λ _, by simp only [bind_empty, finset.sum_empty]) (assume x s hxs ih hd, have hd' : ∀x∈s, ∀y∈s, x ≠ y → (t x ∩ t y ⊆ {0}), from assume _ hx _ hy, hd _ (mem_insert_of_mem hx) _ (mem_insert_of_mem hy), have ∀y∈s, x ≠ y, from assume _ hy h, by rw [←h] at hy; contradiction, have ∀y∈s, (t x ∩ t y ⊆ {0}), from assume _ hy, hd _ (mem_insert_self _ _) _ (mem_insert_of_mem hy) (this _ hy), have (t x) ∩ (finset.bind s t) ⊆ {0}, by {intros z hz, rw finset.mem_inter at hz, rcases finset.mem_bind.1 hz.2 with ⟨y, hym, hy⟩, exact this y hym (finset.mem_inter.2 ⟨hz.1, hy⟩)}, by simp only [bind_insert, sum_insert hxs, sum_union_zero this h0, ih hd']) lemma pairwise_disjoint_of_disjoint (S : set (submodule R A)) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) {x y} (hx : x ∈ S) (hy : y ∈ S) (hxy : x ≠ y) : x ⊓ y ≤ ⊥ := begin rw ←Hb x hx, refine inf_le_inf_left _ _, rw submodule.span_Union, convert le_supr _ (⟨(y : submodule R A), (set.mem_diff _).2 ⟨hy, ne.symm hxy⟩⟩ : S \ {x}), exact (submodule.span_eq y).symm, end noncomputable def sum_mk {S : set (submodule R A)} (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (x : A) : A →₀ R := classical.some $ (finsupp.mem_span_iff_total R).1 (show x ∈ submodule.span R (id '' (set.Union (λ y : S, y.1.1))), by {rw set.image_id, rw Ht, trivial}) lemma sum_mk_mem {S : set (submodule R A)} (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) {x : A} : sum_mk Ht x ∈ finsupp.supported R R (set.Union (λ x : S, x.1.1)) := classical.some $ classical.some_spec $ (finsupp.mem_span_iff_total R).1 (show x ∈ submodule.span R (id '' (set.Union (λ y : S, y.1.1))), by {rw set.image_id, rw Ht, trivial}) lemma sum_mk_eq {S : set (submodule R A)} (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (x : A) : finsupp.total _ _ _ id (sum_mk Ht x) = x := classical.some_spec $ classical.some_spec $ (finsupp.mem_span_iff_total R).1 (show x ∈ submodule.span R (id '' (set.Union (λ y : S, y.1.1))), by {rw set.image_id, rw Ht, trivial}) noncomputable def G (S : set (submodule R A)) (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) (x : A) : Π (X : S), A →₀ R := λ X, { support := finset.filter (X : set A) (sum_mk Ht x).1, to_fun := λ y, if y ∈ (X : submodule R A) then (sum_mk Ht x) y else 0, mem_support_to_fun := λ y, ⟨λ hx, by {erw if_pos (finset.mem_filter.1 hx).2, exact ((sum_mk Ht x).3 y).1 (finset.mem_filter.1 hx).1 }, λ hx, by {rw finset.mem_filter, split, 
rw finsupp.mem_support_iff, intro hl0, apply hx, split_ifs, exact hl0, refl, show y ∈ (X : submodule R A), apply classical.not_not.1, intro hnot, apply hx, rw if_neg hnot, }⟩ } noncomputable def v (S : set (submodule R A)) (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) (x : A) : S → A := λ s, finsupp.total A A R id (G S Ht Hb x s) theorem exists_finsupp_of_mem_direct_sum (S : set (submodule R A)) (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) (x : A) : ∃ (l : S →₀ R), finsupp.total S A R (v S Ht Hb x) l = x := begin let F : (set.Union (λ x : S, x.1.1)) → S := λ s, classical.some (set.mem_Union.1 s.2), have hF : ∀ s : (set.Union (λ x : S, x.1.1)), (s : A) ∈ (F s : submodule R A) := λ s, classical.some_spec (set.mem_Union.1 s.2), let rly : finset (set.Union (λ x : S, x.1.1)) := subtype_mk' (sum_mk Ht x).support _ (sum_mk_mem Ht), have hunique : ∀ (z : (set.Union (λ x : S, x.1.1))) (M N : S), (z : A) ∈ (M : submodule R A) → (z : A) ∈ (N : submodule R A) → ((z : A) ≠ 0) → (M = N) := λ z B C hB hC h0, by {have hbB := Hb B B.2, have hbN := Hb C C.2, apply classical.not_not.1, intro hne, apply h0, rw ←@submodule.mem_bot R A _ _ _ z, rw ←hbB, split, exact hB, apply submodule.subset_span, rw set.mem_Union, use ⟨C, ⟨C.2, λ hmem, hne $ (subtype.ext hmem).symm⟩⟩, exact hC,}, let lSset : finset S := finset.image F rly, let Lfun : S → R := λ X, if X ∈ lSset then 1 else 0, let L : S →₀ R := ⟨lSset, Lfun, λ y, by {split, intro hy, rw (show Lfun y = 1, from if_pos hy), exact one_ne_zero, intro hy, refine classical.not_not.1 _, intros hnot, apply hy, exact if_neg hnot, }⟩, use L, conv_rhs {rw ←(sum_mk_eq Ht x)}, rw finsupp.total_apply, rw finsupp.total_apply, unfold finsupp.sum, have huh : (λ X : S, L X • v S Ht Hb x X) = (λ X : S, if X ∈ lSset then v S Ht Hb x X else 0), by {funext, split_ifs, convert one_smul R (v S Ht Hb x X), exact if_pos h, convert zero_smul R (v S Ht Hb x X), exact if_neg h}, rw huh, erw ←finset.sum_filter, show (finset.filter (λ X : S, X ∈ lSset) L.support).sum (λ X : S, (finsupp.total A A R id) (G S Ht Hb x X)) = _, simp only [finsupp.total_apply], have hse : (λ X : S, (G S Ht Hb x X).sum (λ (i : A) (a : R), a • id i)) = (λ X : S, (filter (X : set A) (sum_mk Ht x).1).sum (λ (i : A), (sum_mk Ht x) i • id i)) := by {funext, apply finset.sum_congr rfl, intros x hx, congr' 1, exact if_pos (finset.mem_filter.1 hx).2}, rw hse, rw finset.filter_true_of_mem, rw ←sum_bind_zero, apply finset.sum_congr, rw finset.image_bind, ext y, rw finset.mem_bind, split, rintro ⟨z, hzm, hz⟩, exact (finset.mem_filter.1 hz).1, intro hy, use ⟨y, sum_mk_mem Ht hy⟩, split, rw mem_subtype_mk', exact hy, rw finset.mem_filter, split, exact hy, exact hF ⟨y, sum_mk_mem Ht hy⟩, intros, refl, exact smul_zero _, intros x hx y hy hxy, rw ←finset.filter_and, intros z hz, rw finset.mem_singleton, exact pairwise_disjoint_of_disjoint _ Hb x.2 y.2 (λ hnxy, hxy $ subtype.ext hnxy) (finset.mem_filter.1 hz).2, intros x hx, exact hx, end noncomputable def gen_coeffs (S : set (submodule R A)) (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) (x) : S →₀ R := classical.some $ exists_finsupp_of_mem_direct_sum S Ht Hb x theorem gen_sum_spec {S : set (submodule R A)} (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, 
x.1.1)) = ⊥) (x : A) : finsupp.total S A R (v S Ht Hb x) (gen_coeffs S Ht Hb x) = x := classical.some_spec $ exists_finsupp_of_mem_direct_sum S Ht Hb x theorem mem_gen_supp {S : set (submodule R A)} (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) (x : A) : x ∈ submodule.span R (set.Union (λ y : (↑(gen_coeffs S Ht Hb x).support : set S), y.1.1.1)) := begin have := gen_sum_spec Ht Hb x, conv_lhs {rw ←this}, rw finsupp.total_apply, unfold finsupp.sum, refine submodule.sum_mem _ _, intros c hc, refine submodule.smul_mem _ _ _, unfold v, rw finsupp.total_apply, unfold finsupp.sum, refine submodule.sum_mem _ _, intros d hd, refine submodule.smul_mem _ _ _, change d ∈ finset.filter _ _ at hd, rw finset.mem_filter at hd, show d ∈ _, apply submodule.subset_span, rw set.mem_Union, use c, exact hc, exact hd.2, end def T {s : finset M} {S : set (submodule R A)} (Hs : submodule.span R (↑s : set M) = A) (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) : set S := ↑(finset.bind (subtype_mk' s A (subset_span' Hs)) (λ x : A, (gen_coeffs S Ht Hb x).support)) lemma T_mem {s : finset M} {S : set (submodule R A)} (Hs : submodule.span R (↑s : set M) = A) (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) {x : S} : x ∈ T Hs Ht Hb ↔ ∃ z ∈ s, x ∈ (gen_coeffs S Ht Hb ⟨z, subset_span' Hs H⟩).support := begin split, intro hx, rcases finset.mem_bind.1 hx with ⟨y, hym, hy⟩, rw mem_subtype_mk' (subset_span' Hs) at hym, use (y : M), use hym, rw subtype.coe_eta, exact hy, rintro ⟨z, hzm, hz⟩, apply finset.mem_bind.2, use ⟨z, subset_span' Hs hzm⟩, split, rw mem_subtype_mk' (subset_span' Hs), exact hzm, exact hz, end theorem subset_gen_supp {S : set (submodule R A)} (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) {s : finset M} (Hs : submodule.span R (↑s : set M) = A) : (↑s : set M) ⊆ (submodule.span R (set.Union (λ X : T Hs Ht Hb, X.1.1.1))).map A.subtype := begin intros x hx, have := mem_gen_supp Ht Hb ⟨x, subset_span' Hs hx⟩, use ⟨x, subset_span' Hs hx⟩, split, refine submodule.span_mono _ this, intros z hz, rw set.mem_Union at *, cases hz with i hi, use ⟨(i : S), by {rw T_mem Hs Ht Hb, use [x, hx, i.2]}⟩, convert hi, refl, end lemma T_subset {S : set (submodule R A)} (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) {s : finset M} (Hs : submodule.span R (↑s : set M) = A) : subtype.val '' T Hs Ht Hb ⊆ S := begin intros x hx, rcases hx with ⟨y, hym, hy⟩, rw ←hy, exact y.2, end #check set.finite /-- Given an f.g. R-module A and a set S of submodules whose union generates all of A, if for all `N ∈ S`, the intersection of N and the submodule generated by union of the elements of `S \ N` is trivial then `S` is finite. 
-/ theorem finite_summands_of_fg (s : finset M) (Hs : submodule.span R (↑s : set M) = A) (S : set (submodule R A)) (Ht : submodule.span R (set.Union (λ x : S, x.1.1)) = ⊤) (Hb : ∀ N ∈ S, N ⊓ submodule.span R (set.Union (λ x : S \ {N}, x.1.1)) = ⊥) : set.finite S := begin suffices : S ⊆ insert ⊥ (subtype.val '' T Hs Ht Hb), by {refine set.finite.subset _ this, apply set.finite.insert, apply set.finite.image, exact finset.finite_to_set _}, intros x hxS, have hTt : (submodule.span R (set.Union (λ X : T Hs Ht Hb, X.1.1.1))) = ⊤ := by {apply linear_map.map_injective A.ker_subtype, rw submodule.map_subtype_top, ext y, split, rintro ⟨z, hzm, hz⟩, rw ←hz, exact z.2, intro hy, rw ←Hs at hy, refine submodule.span_le.2 _ hy, exact (subset_gen_supp Ht Hb Hs)}, have hTb : ∀ N ∈ subtype.val '' T Hs Ht Hb, N ⊓ submodule.span R (set.Union (λ x : subtype.val '' T Hs Ht Hb \ {N}, x.1.1)) = ⊥ := λ N hN, by {rw eq_bot_iff, refine le_trans _ (eq_bot_iff.1 $ Hb N (T_subset Ht Hb Hs hN)), refine inf_le_inf_left _ _, apply submodule.span_mono, intros X hX, rw set.mem_Union at hX, rcases hX with ⟨Y, hY⟩, rw set.mem_Union, use ⟨Y.1, (set.mem_diff _).2 $ ⟨T_subset Ht Hb Hs $ ((set.mem_diff _).1 Y.2).1, ((set.mem_diff _).1 Y.2).2⟩⟩, exact hY,}, cases classical.em (x = ⊥), left, exact h, right, apply classical.not_not.1, intro hnot, apply h, have := Hb x hxS, rw ←@inf_top_eq _ _ x, convert this, symmetry, rw eq_top_iff, rw ←hTt, apply submodule.span_mono, intros Y HY, rw set.mem_Union at HY, rcases HY with ⟨Z, hZ⟩, rw set.mem_Union, use ⟨Z, (set.mem_diff _).1 $ ⟨by {apply T_subset Ht Hb Hs, use Z, split, exact Z.2, refl}, λ hnotZ, by {apply hnot, use Z, split, exact Z.2, exact hnotZ}⟩⟩, exact hZ, end
export move function move(x::Array{Float64,2}; by=[0.0, 0.0, 0.0]) x .+= vec(by) return x end function move(obj::T; by=[0.0, 0.0, 0.0]) where T <: Union{Shape, PostOpObj} # x, v, y, vol, type function func(out) x = out[:x] x = move(x, by=by) return repack!(out, [:x], [x]) end if isa(obj, Shape) return PostOpObj(obj, func) elseif isa(obj, PostOpObj) push!(obj.operations, func) return obj else error("Not allowed.") end end
The fold function over the coefficients of a polynomial with zero as the initial value is the identity function.
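One concrete way to read this statement: folding "add c * X^i" over the coefficient/exponent pairs of a polynomial, starting from the zero polynomial, rebuilds the same polynomial. The sketch below illustrates this in Python with a plain coefficient-list representation (index = exponent); the representation and helper names are ours, not taken from the original source.

# Polynomials as coefficient lists: [5, 0, -3, 2] means 5 - 3*X**2 + 2*X**3.
from functools import reduce

def add_term(poly, term):
    # Add the monomial c * X**i, given as (i, c), to a coefficient list.
    i, c = term
    out = poly + [0] * (i + 1 - len(poly))  # pad with zeros up to degree i
    out[i] += c
    return out

def fold_coeffs(p):
    # Fold over the coefficients of p, starting from the zero polynomial [].
    return reduce(add_term, enumerate(p), [])

p = [5, 0, -3, 2]
assert fold_coeffs(p) == p  # the fold acts as the identity on p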
module D03 ( Claim (..), mkClaims, claimsIntersectionSize, isolatedClaim ) where import Data.List import qualified Data.HashMap.Strict as HM import Data.Complex import Text.Regex data Claim = Claim { identifier :: Int, topLeft :: Complex Int, bottomRight :: Complex Int } deriving (Show, Eq) type World = HM.HashMap (Complex Int) [Claim] isolatedClaim :: String -> Int isolatedClaim input = identifier $ head $ claims \\ badClaims where badClaims = concat $ HM.elems $ HM.filter (\cs -> (length cs) > 1) $ buildWorld claims claims = mkClaims input claimsIntersectionSize :: String -> Int claimsIntersectionSize = length . filter (> 1) . map length . HM.elems . buildWorld . mkClaims buildWorld :: [Claim] -> World buildWorld = foldl addClaimToWorld HM.empty addClaimToWorld :: World -> Claim -> World addClaimToWorld world claim = foldl addPoint world [x:+y | x<-[(realPart p1)..(realPart p2)], y<-[(imagPart p1)..(imagPart p2)]] where p1 = topLeft claim p2 = bottomRight claim addPoint w p = HM.insertWith (++) p [claim] w mkClaims :: String -> [Claim] mkClaims = map mkClaim . lines mkClaim :: String -> Claim mkClaim def = Claim (params !! 0) (left :+ top) ((left+width-1) :+ (top+height-1)) where top = params !! 2 left = params !! 1 width = params !! 3 height = params !! 4 params = extract $ matchRegexAll (mkRegex "#([0-9]+) @ ([0-9]+),([0-9]+): ([0-9]+)x([0-9]+)") def extract (Just (_,_,_,subs)) = map read subs
import category_theory.functor.basic import category_theory.eq_to_hom namespace category_theory open category namespace functor lemma congr_map_conjugate {C D : Type*} [category C] [category D] {F₁ F₂ : C ⥤ D} (h : F₁ = F₂) {X Y : C} (f : X ⟶ Y) : F₁.map f = eq_to_hom (by rw h) ≫ F₂.map f ≫ eq_to_hom (by rw h) := begin subst h, simp only [eq_to_hom_refl, comp_id, id_comp], end end functor lemma is_iso_map_iff_of_nat_iso {C D : Type*} [category C] [category D] {F₁ F₂ : C ⥤ D} (e : F₁ ≅ F₂) {X Y : C} (f : X ⟶ Y) : is_iso (F₁.map f) ↔ is_iso (F₂.map f) := begin revert F₁ F₂, suffices : ∀ {F₁ F₂ : C ⥤ D} (e : F₁ ≅ F₂) (hf : is_iso (F₁.map f)), is_iso (F₂.map f), { exact λ F₁ F₂ e, ⟨this e, this e.symm⟩, }, introsI F₁ F₂ e hf, refine is_iso.mk ⟨e.inv.app Y ≫ category_theory.inv (F₁.map f) ≫ e.hom.app X, _, _⟩, { simp only [nat_trans.naturality_assoc, is_iso.hom_inv_id_assoc, iso.inv_hom_id_app], }, { simp only [assoc, ← e.hom.naturality, is_iso.inv_hom_id_assoc, iso.inv_hom_id_app], }, end end category_theory
structure point (α : Type) := mk :: (x : α) (y : α) #print point #print prefix point def point.smul (n : ℕ) (p : point ℕ) := point.mk (n * p.x) (n * p.y) def p : point ℕ := point.mk 1 2 #reduce p.smul 3 -- {x := 3, y := 6} #check { point . x := 10, y := 20 } -- point ℕ #check { point . y := 20, x := 10 } #check ({x := 10, y := 20} : point _) example : point _ := { y := 20, x := 10 } structure my_struct := mk :: {α : Type} {β : Type} (a : α) (b : β) #check { my_struct . a := 10, b := true } #reduce {y := 3, ..p} -- {x := 1, y := 3} #reduce {x := 4, ..p} -- {x := 4, y := 2} structure point3 (α : Type) := mk :: (x : α) (y : α) (z : α) def q : point3 nat := {x := 5, y := 5, z := 5} def r : point3 nat := {x := 6, ..p, ..q} #print r -- {x := 6, y := p.y, z := q.z} #reduce r -- {x := 6, y := 2, z := 5} inductive color | red | green | blue structure color_point (α : Type) extends point α := mk :: (c : color) #print color_point #print prefix color_point structure rgb_val := (red : nat) (green : nat) (blue : nat) structure red_green_point (α : Type) extends point3 α, rgb_val := (no_blue : blue = 0) def p' : point3 nat := {x := 10, y := 10, z := 20} def rgp : red_green_point nat := {red := 200, green := 40, blue := 0, no_blue := rfl, ..p'} example : rgp.x = 10 := rfl example : rgp.red = 200 := rfl #print red_green_point
program TeaBreak; begin ; ; ; ; ; ; ; ; ; ; writeln('Say "BREAK!!" '); ; ; ; ; ; ; ; ; ; ; end.
lemma AE_distrD: assumes f: "f \<in> measurable M M'" and AE: "AE x in distr M M' f. P x" shows "AE x in M. P (f x)"
/* Copyright (c) 2013, David C Horton Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef __QUEUE_H__ #define __QUEUE_H__ #include <boost/noncopyable.hpp> #include <boost/function.hpp> #include <boost/thread.hpp> #include <sofia-sip/su_wait.h> namespace drachtio { class TimerQueue ; typedef boost::function<void (void*)> TimerFunc ; struct queueEntry_t { queueEntry_t(TimerQueue* queue, TimerFunc f, void* functionArgs, su_time_t when) ; TimerQueue* m_queue ; queueEntry_t* m_next ; queueEntry_t* m_prev ; TimerFunc m_function ; void* m_functionArgs ; su_time_t m_when ; } ; typedef queueEntry_t * TimerEventHandle ; class TimerQueue : public boost::noncopyable { public: TimerQueue(su_root_t* root, const char*szName = NULL) ; ~TimerQueue() ; TimerEventHandle add( TimerFunc f, void* functionArgs, uint32_t milliseconds ) ; TimerEventHandle add( TimerFunc f, void* functionArgs, uint32_t milliseconds, su_time_t now ) ; void remove( TimerEventHandle handle) ; bool isEmpty(void) { return 0 == m_length; } int size(void) { return m_length; } int positionOf(TimerEventHandle handle) ; void doTimer(su_timer_t* timer) ; protected: int numberOfElements(void) ; su_root_t* m_root ; std::string m_name ; su_timer_t* m_timer ; queueEntry_t* m_head ; queueEntry_t* m_tail ; int m_length ; unsigned m_in_timer:1; /**< Set when executing timers */ } ; } #endif
State Before: t : Type u → Type u → Type u inst✝⁵ : Bitraversable t β : Type u F G : Type u → Type u inst✝⁴ : Applicative F inst✝³ : Applicative G inst✝² : IsLawfulBitraversable t inst✝¹ : LawfulApplicative F inst✝ : LawfulApplicative G α₀ α₁ β₀ β₁ : Type u f : α₀ → F α₁ f' : β₀ → G β₁ x : t α₀ β₀ ⊢ Comp.mk (tfst f <$> tsnd f' x) = bitraverse (Comp.mk ∘ pure ∘ f) (Comp.mk ∘ map pure ∘ f') x State After: t : Type u → Type u → Type u inst✝⁵ : Bitraversable t β : Type u F G : Type u → Type u inst✝⁴ : Applicative F inst✝³ : Applicative G inst✝² : IsLawfulBitraversable t inst✝¹ : LawfulApplicative F inst✝ : LawfulApplicative G α₀ α₁ β₀ β₁ : Type u f : α₀ → F α₁ f' : β₀ → G β₁ x : t α₀ β₀ ⊢ bitraverse (Comp.mk ∘ map f ∘ pure) (Comp.mk ∘ map pure ∘ f') x = bitraverse (Comp.mk ∘ pure ∘ f) (Comp.mk ∘ map pure ∘ f') x Tactic: rw [← comp_bitraverse] State Before: t : Type u → Type u → Type u inst✝⁵ : Bitraversable t β : Type u F G : Type u → Type u inst✝⁴ : Applicative F inst✝³ : Applicative G inst✝² : IsLawfulBitraversable t inst✝¹ : LawfulApplicative F inst✝ : LawfulApplicative G α₀ α₁ β₀ β₁ : Type u f : α₀ → F α₁ f' : β₀ → G β₁ x : t α₀ β₀ ⊢ bitraverse (Comp.mk ∘ map f ∘ pure) (Comp.mk ∘ map pure ∘ f') x = bitraverse (Comp.mk ∘ pure ∘ f) (Comp.mk ∘ map pure ∘ f') x State After: no goals Tactic: simp only [Function.comp, map_pure]
# --- # title: Colorant conversion and channel layout # cover: layout.gif # description: a set of commonly used basic operations that are wrapped by Augmentor # --- # Augmentor wraps some commonly used basic operations that you can use to build the augmentation # pipeline. The `internal` column is what you'd probably do outside of `Augmentor`. # | Category | internal | Augmentor | # | --- | --- | --- | # | Conversion | `T.(img)` | `ConvertEltype(T)` | # | Information Layout | `ImageCore.channelview` | `SplitChannels` | # | Information Layout | `ImageCore.colorview` | `CombineChannels` | # | Information Layout | `Base.permutedims` | `PermuteDims` | # | Information Layout | `Base.reshape` | `Reshape` | # It is not uncommon for machine learning frameworks to require the data in a specific form and layout. # For example, many deep learning frameworks expect the color channel of the images to be encoded in # the third dimension of a 4-dimensional array. Augmentor allows converting from (and to) these # different layouts using special operations that are mainly useful at the beginning or end of an # augmentation pipeline. using Augmentor using ImageCore ## 300×400 Matrix{RGB{N0f8}} => 300×400×3 Array{Float32, 3} img = testpattern(RGB, ratio=0.5) img_in = augment(img, SplitChannels() |> PermuteDims(2, 3, 1) |> ConvertEltype(Float32)) ## 300×400×3 Array{Float32, 3} => 300×400 Matrix{RGB{N0f8}} img_out = augment(img_in, ConvertEltype(N0f8) |> PermuteDims(3, 1, 2) |> CombineChannels(RGB)) img_out == img # ## References #md # ```@docs #md # ConvertEltype #md # SplitChannels #md # CombineChannels #md # PermuteDims #md # Reshape #md # ``` ## save covers #src using ImageMagick #src include(joinpath("..", "assets", "utilities.jl")) #src cover = make_gif(testpattern(RGB, ratio=0.5), ConvertEltype(Gray{N0f8}), 2) #src ImageMagick.save("layout.gif", cover; fps=1) #src
Can we talk about how romantic this crochet crop top is?! This open lace cropped tank is 100% cotton and 100% ready for summer. Throw it on over your fav bikini after a day at the beach or sport it over a simple bralette for an ultra-summery evening vibe.
import topology.basic /- In this problem you will look at proving that the composition of two continuous maps is continuous Some things you should know: - A subset of a space `X` is an element of the type `set X` in Lean - The notation for the preimage of a set `U` along a map `f` is `f ⁻¹' U` -/ /- Axiom : continuous_def : ∀ {α : Type} {β : Type} [topological_space α] [topological_space β] {f : α → β}, continuous f ↔ ∀ (s : set β),is_open s → is_open (f ⁻¹' s) -/ /- Axiom : set.preimage_comp : ∀ {α β γ : Type} {f : α → β} {g : β → γ} {s : set γ}, g ∘ f ⁻¹' s = f ⁻¹' (g ⁻¹' s) -/ /- You will need to use the tactics intros, rewrite, and apply for this problem! And some new lemmas that are in the sidebar for you. -/ /- Lemma : -/ lemma image_compact (X Y Z : Type) [topological_space X] [topological_space Y] [topological_space Z] (f : X → Y) (g : Y → Z) (hf : continuous f) (hg : continuous g) : continuous (g ∘ f) := begin rw continuous_def at *, intros S hS, rw [set.preimage_comp], apply hf, apply hg, apply hS, end
[GOAL] α : Type u_1 β : Type u_2 s : Finset α t : Finset β ⊢ _root_.Disjoint (map Embedding.inl s) (map Embedding.inr t) [PROOFSTEP] simp_rw [disjoint_left, mem_map] [GOAL] α : Type u_1 β : Type u_2 s : Finset α t : Finset β ⊢ ∀ ⦃a : α ⊕ β⦄, (∃ a_1, a_1 ∈ s ∧ ↑Embedding.inl a_1 = a) → ¬∃ a_2, a_2 ∈ t ∧ ↑Embedding.inr a_2 = a [PROOFSTEP] rintro x ⟨a, _, rfl⟩ ⟨b, _, ⟨⟩⟩
The merger of two neutron stars, shown in an image from a computer simulation (credit: Stephan Rosswog and Enrico Ramirez-Ruiz). Astronomy and Astrophysics General Funds: Your gift can help provide scholarships for our students and fund research, travel to scholarly meetings, or areas that require urgent one-time support and other needs not covered by the University. The James Edward Keeler Fund for Excellence in Astronomy and Astrophysics: The fund, established in 2016 by long-time UCO staff researcher Arnold Klemola, supports current graduate students and helps the department attract outstanding applicants. The Keeler fund aims to help students from all backgrounds, particularly those who are the first in their family to enter higher education. Klemola selflessly named the fund after James Edward Keeler, who established the technology for large reflecting telescopes and used the Crossley reflector at Lick Observatory to take the first deep images of the cosmos, revealing the universe of extragalactic nebulae. Other Worlds Laboratory: The Other Worlds Laboratory (OWL) is a new research initiative that builds on UCSC's historic strength in understanding the planets of our solar system and planets around other stars (exoplanets). We are in an amazing era where thousands of exoplanets are being found nearby in our galaxy! Within the OWL we aim to understand how these planets form and evolve, using computer simulations, and how we can develop and utilize new instruments on telescopes to find and characterize these worlds. Directed by Professor Jonathan Fortney, the goal of the OWL is to put the Earth in the context of the worlds of our solar system, as well as the amazing diversity of billions of worlds in our galaxy. The Whitford Prize: The Whitford Prize, named for the famous Lick astronomer and former director Albert Whitford, is awarded to the Astronomy and Astrophysics graduate student who, in the judgment of the faculty, attains the highest achievement in research, coursework, teaching, and the preliminary exam. The award includes a $500 cash prize, and is awarded on an annual basis. Mandel Lecture Series: This lively and engaging lecture series is designed for the general public. The series has hosted many prominent scientists, including British cosmologist and former President of the Royal Society, Martin Rees, the first American female astronaut, Sally Ride, and UC Berkeley astrophysicist and contributor to the History Channel's "Universe" programs, Alex Filippenko. The generous support of private donors maintains the lecture series, bringing the message and excitement of astronomy to scientists and lay citizens alike, while presenting the work of external and UCSC scientists to the community. The Donald E. and Irene Osterbrock Leadership Fellowships: The Osterbrock Fellowships are the first privately endowed fellowships in our history. This prestigious award underscores our determination to attract, train, and mentor the world’s finest graduate students in astronomy and astrophysics, and to cultivate the skills that will enable them to become the nation's future leaders in their field, as well as in science in general. The fellowships honor two outstanding individuals. The eminent American astronomer Donald Osterbrock pioneered the study of gaseous nebulae, helped to discover that the Milky Way is a spiral galaxy, originated the classification scheme for active galactic nuclei, and served as one of Lick Observatory’s most influential directors.
Don was a gifted teacher and advisor, and his doctoral students now comprise a significant portion of America’s astronomical leadership. His wife, Irene, worked tirelessly to help establish the UCSC Library’s Mary Lea Shane Archives of the Lick Observatory, which serve as a world-renowned repository of U.S. astronomical history. Private donors are encouraged to support these very special fellowships, which will facilitate UCSC's ongoing leadership in research and education in the field of Astronomy and Astrophysics. Our faculty members themselves are pledging generously to match each private gift, dollar for dollar, to hasten establishment of this worthwhile tribute to leadership, past and future. Learn more about the Osterbrock Program. Theoretical Astrophysics Santa Cruz (TASC): TASC is a vibrant new institute, spanning four UCSC departments that are involved in research in astrophysics and planetary sciences. Its fourteen participating faculty members make up the largest group of computational astrophysicists in the world. Building on this strength, TASC has begun a unique new interdisciplinary program in scientific computation and visualization in collaboration with members of UCSC's Digital Arts and New Media Center. TASC is also spearheading an effort to create a Ph.D. program in high-performance supercomputing, in partnership with the Baskin School of Engineering's Department of Applied Mathematics. Current TASC Director, Prof. Enrico Ramirez-Ruíz, has also emphasized the involvement of undergraduates in TASC activities, as well as ambitious education, recruitment, and other programs that reach out to the local Latino community. Key components include intensive research instruction through summer internships, academic counseling and mentoring, and workshops. Supporters of this program can also invest in undergraduate and graduate fellowships, internships, research support funds, computer and image visualization hardware, and an endowment. Other Opportunities: Many other opportunities for supporting UCSC's world-renowned programs in astronomy and astrophysics are available. For more information on giving, call the Division's Development Office (831-459-2192) or email us.
! Test assumed length character functions. character*(*) function f() f = "Hello" end function character*6 function g() g = "World" end function program straret character*6 f, g character*12 v v = f() // g() if (v .ne. "Hello World ") call abort () end program
# The doughnut function permits to draw a donut plot doughnut <- function (x, labels = names(x), edges = 200, outer.radius = 0.8, inner.radius=0.6, clockwise = FALSE, init.angle = if (clockwise) 90 else 0, density = NULL, angle = 45, col = NULL, border = FALSE, lty = NULL, main = NULL, ...) { if (!is.numeric(x) || any(is.na(x) | x < 0)) stop("'x' values must be positive.") if (is.null(labels)) labels <- as.character(seq_along(x)) else labels <- as.graphicsAnnot(labels) x <- c(0, cumsum(x)/sum(x)) dx <- diff(x) nx <- length(dx) plot.new() pin <- par("pin") xlim <- ylim <- c(-1, 1) if (pin[1L] > pin[2L]) xlim <- (pin[1L]/pin[2L]) * xlim else ylim <- (pin[2L]/pin[1L]) * ylim plot.window(xlim, ylim, "", asp = 1) if (is.null(col)) col <- if (is.null(density)) palette() else par("fg") col <- rep(col, length.out = nx) border <- rep(border, length.out = nx) lty <- rep(lty, length.out = nx) angle <- rep(angle, length.out = nx) density <- rep(density, length.out = nx) twopi <- if (clockwise) -2 * pi else 2 * pi t2xy <- function(t, radius) { t2p <- twopi * t + init.angle * pi/180 list(x = radius * cos(t2p), y = radius * sin(t2p)) } for (i in 1L:nx) { n <- max(2, floor(edges * dx[i])) P <- t2xy(seq.int(x[i], x[i + 1], length.out = n), outer.radius) polygon(c(P$x, 0), c(P$y, 0), density = density[i], angle = angle[i], border = border[i], col = col[i], lty = lty[i]) Pout <- t2xy(mean(x[i + 0:1]), outer.radius) lab <- as.character(labels[i]) if (!is.na(lab) && nzchar(lab)) { lines(c(1, 1.05) * Pout$x, c(1, 1.05) * Pout$y) text(1.1 * Pout$x, 1.1 * Pout$y, labels[i], xpd = TRUE, adj = ifelse(Pout$x < 0, 1, 0), ...) } ## Add white disc Pin <- t2xy(seq.int(0, 1, length.out = n*nx), inner.radius) polygon(Pin$x, Pin$y, density = density[i], angle = angle[i], border = border[i], col = "white", lty = lty[i]) } title(main = main, ...) invisible(NULL) }
Definition NonEmpty_foldr1 {a} (f : a -> a -> a) (x: GHC.Base.NonEmpty a) : a := match x with | GHC.Base.NEcons a as_ => List.fold_right f a as_ end. Definition NonEmpty_maximum {a} `{GHC.Base.Ord a} (x:GHC.Base.NonEmpty a) : a := NonEmpty_foldr1 GHC.Base.max x. Definition NonEmpty_minimum {a} `{GHC.Base.Ord a} (x:GHC.Base.NonEmpty a) : a := NonEmpty_foldr1 GHC.Base.min x. Definition toList {a} : GHC.Base.NonEmpty a -> list a := fun arg_0__ => match arg_0__ with | GHC.Base.NEcons a as_ => cons a as_ end. Definition List_size {a} : list a -> nat := List.fold_right (fun x y => S y) O . Definition NonEmpty_size {a} : GHC.Base.NonEmpty a -> nat := fun arg_0__ => match arg_0__ with | GHC.Base.NEcons _ xs => 1 + List_size xs end. Program Fixpoint insertBy {a} (cmp: a -> a -> comparison) (x : a) (xs : GHC.Base.NonEmpty a) {measure (NonEmpty_size xs)} : GHC.Base.NonEmpty a := match xs with | GHC.Base.NEcons x nil => GHC.Base.NEcons x nil | (GHC.Base.NEcons y ((cons y1 ys') as ys)) => match cmp x y with | Gt => GHC.Base.NEcons y (toList (insertBy cmp x (GHC.Base.NEcons y1 ys'))) | _ => GHC.Base.NEcons x ys end end. Program Fixpoint insertBy' {a} (cmp: a -> a -> comparison) (x : a) (xs : list a) {measure (List_size xs)} : GHC.Base.NonEmpty a := match xs with | nil => GHC.Base.NEcons x nil | cons x nil => GHC.Base.NEcons x nil | (cons y ((cons y1 ys') as ys)) => match cmp x y with | Gt => GHC.Base.NEcons y (toList (insertBy' cmp x (cons y1 ys'))) | _ => GHC.Base.NEcons x ys end end. Definition insert {a} `{GHC.Base.Ord a} : a -> GHC.Base.NonEmpty a -> GHC.Base.NonEmpty a := insertBy GHC.Base.compare. Definition sortBy {a} : (a -> a -> comparison) -> GHC.Base.NonEmpty a -> GHC.Base.NonEmpty a := fun f ne => match ne with | GHC.Base.NEcons x xs => insertBy' f x (Data.OldList.sortBy f xs) end. Definition sort {a} `{GHC.Base.Ord a} : GHC.Base.NonEmpty a -> GHC.Base.NonEmpty a := sortBy GHC.Base.compare.
module Regression ( simpleRegression ) where import Data.List import Data.Maybe import Control.Monad import Statistics simpleRegression :: (Floating a) => [a] -> [a] -> (Maybe a, Maybe a) simpleRegression x y = (slope, intercept) where slope = (/) <$> covariance x y <*> variance x intercept = subtract <$> mean y <*> ((*) <$> slope <*> mean x)
-- root finding examples import Numeric.GSL import Numeric.LinearAlgebra import Text.Printf(printf) rosenbrock a b [x,y] = [ a*(1-x), b*(y-x^2) ] test method = do print method let (s,p) = root method 1E-7 30 (rosenbrock 1 10) [-10,-5] print s -- solution disp p -- evolution of the algorithm jacobian a b [x,y] = [ [-a , 0] , [-2*b*x, b] ] testJ method = do print method let (s,p) = rootJ method 1E-7 30 (rosenbrock 1 10) (jacobian 1 10) [-10,-5] print s disp p disp = putStrLn . format " " (printf "%.3f") main = do test Hybrids test Hybrid test DNewton test Broyden mapM_ testJ [HybridsJ .. GNewton]
Require Import Coq.Lists.List. Require Export ParDB.Spec. Require Export ParDB.Lemmas. Require Export ParDB.Inst. Require Export SpecSyntax. Require Export SpecTyping. Require Export SpecEvaluation. Require Export LemmasTyping. Require Export Agreement. Require Export Consistency. Local Ltac crush := intros; repeat (cbn in *; repeat crushIH; repeat crushSyntaxMatch; repeat crushDbSyntaxMatchH; repeat crushDbLemmasRewriteH; repeat crushSyntaxRefold; repeat crushTypingMatchH; subst*; try discriminate; eauto with ws; destruct_conjs; idtac ). (******************************************************************************) (* Preservation *) (******************************************************************************) Lemma preservation {Γ t τ} (wΓ: WfEnv Γ) (clΓ: CoVarClosed Γ) (wt: ⟨ Γ ⊢ t : τ ⟩) : ∀ {t'}, t --> t' → ⟨ Γ ⊢ t' : τ ⟩. Proof. induction wt; intros t' r; repeat crushIH; inversion r; crush; try match goal with | H: UValue ?t |- _ => destruct t; crush end; try (consistency; crush; fail); try (repeat (econstructor; crush); fail). - replace τ2 with τ2[wkm Exp][beta1 s2] by crush. eapply tm_sub; crush. constructor; crush. - assert (k0 = k) by consistency; subst k0. repeat (econstructor; crush). - replace τ3 with τ3[wkm Exp][beta1 γ] by crush. eapply tm_sub; crush. constructor; crush. - assert (k0 = k) by consistency; subst k0. repeat (econstructor; crush). Qed.
subroutine runY_00i1i2i3i4(k,l,i1,i2,i3,i4,Xtwiddle,Gtwiddle, . Shat6,N0) implicit none C--- Expression for extension of Eq. 5.60c C--- Calculates D00i1i2i3i4, requires D00li1i2i3 C--- Small terms of order Xtwiddle(0,k)*Diiiii,Xtwiddle(0,0)*Diiiiii C--- Denominator Gtwiddle(k,l) include 'pvDnames.f' include 'pvDv.f' include 'Darraydef.f' include 'Darrays.f' integer ep,N0,k,l,i1,i2,i3,i4,np parameter(np=3) double precision Xtwiddle(0:np,0:np),Gtwiddle(np,np) double complex Shat6(np,z5max,-2:0) if ( (i1.eq.l) .or. (i2.eq.l) .or. (i3.eq.l) .or. (i4.eq.l) . .or. (i1.eq.0) .or. (i2.eq.0) .or. (i3.eq.0) .or. (i4.eq.0)) then return endif do ep=-2,0 Dv(dzziiii(z4(i1,i2,i3,i4))+N0,ep)= .(-2d0*Gtwiddle(k,i1)*Dv(dzziiii(z4(l,i2,i3,i4))+N0,ep) . -2d0*Gtwiddle(k,i2)*Dv(dzziiii(z4(l,i1,i3,i4))+N0,ep) . -2d0*Gtwiddle(k,i3)*Dv(dzziiii(z4(l,i1,i2,i4))+N0,ep) . -2d0*Gtwiddle(k,i4)*Dv(dzziiii(z4(l,i1,i2,i3))+N0,ep) . +Gtwiddle(k,1)*Shat6(1,z5(l,i1,i2,i3,i4),ep) . +Gtwiddle(k,2)*Shat6(2,z5(l,i1,i2,i3,i4),ep) . +Gtwiddle(k,3)*Shat6(3,z5(l,i1,i2,i3,i4),ep) . +Xtwiddle(k,0)*Dv(diiiii(z5(l,i1,i2,i3,i4))+N0,ep) . -Xtwiddle(0,0)*Dv(diiiiii(z6(k,l,i1,i2,i3,i4))+N0,ep)) . /(2d0*Gtwiddle(k,l)) enddo return end
DPTSV Example Program Results Solution 2.5000 2.0000 1.0000 -1.0000 3.0000 Diagonal elements of the diagonal matrix D 4.0000 9.0000 25.0000 16.0000 1.0000 Subdiagonal elements of the Cholesky factor L -0.5000 -0.6667 0.6000 0.5000
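For readers not familiar with the DPTSV output format: the routine factors the symmetric positive definite tridiagonal matrix as A = L * D * L^T, where D is the printed diagonal and L is unit lower bidiagonal with the printed subdiagonal. The sketch below, which assumes Python with NumPy and simply transcribes the (rounded) numbers printed above — the original A and right-hand side are not shown here — reconstructs A and the right-hand side implied by the printed solution.

import numpy as np

d = np.array([4.0, 9.0, 25.0, 16.0, 1.0])   # diagonal of D, as printed
l = np.array([-0.5, -0.6667, 0.6, 0.5])     # subdiagonal of L, as printed
x = np.array([2.5, 2.0, 1.0, -1.0, 3.0])    # printed solution

L = np.eye(5) + np.diag(l, -1)              # unit lower bidiagonal factor
A = L @ np.diag(d) @ L.T                    # reconstructed tridiagonal matrix
print(np.round(A, 2))                       # tridiagonal, as expected
print(np.round(A @ x, 2))                   # right-hand side b that was solved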
import Smt import Smt.Data.BitVec theorem xor_comm_2 (x y : BitVec 2) : x ^^^ y = y ^^^ x := by smt sorry theorem xor_comm_4p4 (x y : BitVec (4+4)) : x ^^^ y = y ^^^ x := by smt sorry
module Flexidisc.Tutorial import Flexidisc person0 : Record String ["Firstname" ::: String] person0 = ["Firstname" := "John"] ||| From here, we can access rows by name. ||| ||| Fields are verified at compile time: if we try to access a field that is ||| not defined for our record, we obtain a compilation error, not a runtime ||| error. ||| ||| One of the key contributions of Flexidisc is that you can't declare the ||| same field twice (no, it's not that easy). ||| If we add another field 'Firstname', even with a different type, ||| we'll obtain a compilation error. person0Name : String person0Name = get "Firstname" person0 ||| or with the infix notation: person0Name' : String person0Name' = person0 !! "Firstname" ||| you can even look up fields that may or may not be there person0Age : Maybe Nat person0Age = Read.lookup "Age" person0 ||| We can of course extend records: person1 : Record String ["Age" ::: Nat, "Lastname" ::: String, "Firstname" ::: String] person1 = ("Lastname" := "Doe") :: ("Age" := the Nat 42) :: person0 ||| We can also project a record onto a smaller one person3 : Record String ["Firstname" ::: String, "Lastname" ::: String] person3 = project person1 ||| You can merge records idJohn : Record String [("ID", Nat), ("Firstname", String), ("Lastname", String), ("Age", Nat)] idJohn = ["ID" := the Nat 1, "Firstname" := "John"] ++ ["Lastname" := "Doe", "Age" := the Nat 42] ||| If you want to give explicitly the order of the fields you want to keep, ||| you can use `keep` person4 : Record String ["Firstname" ::: String, "Lastname" ::: String] person4 = keep ["Firstname", "Lastname"] person1 ||| If you want to list explicitly the fields you want to remove, ||| you can use `discard` person4' : Record String ["Age" ::: Nat] person4' = discard ["Firstname", "Lastname"] person1 ||| You can alternatively decide to drop a field by its name: person5 : Record String ["Firstname" ::: String, "Lastname" ::: String] person5 = drop "Age" person1 {- ||| You can also patch a record with another record: person6 : Record String ["Firstname" ::: String, "Lastname" ::: String, "Age" ::: Nat] person6 = patch person2 (namedRec ["Lastname" := "Biri", "Firstname" := "Nicolas"]) -} ||| Fields can be updated quite easily too olderPerson : Record String ["Firstname" ::: String, "Lastname" ::: String, "Age" ::: Nat] olderPerson = update "Age" (+1) person1 ||| What if we want a generic `birthday` function for records with an age? ||| The result type is a bit complex here. ||| Actually we just explain that we update the `"Age"` field, replacing ||| its content with a Nat. birthday : Record String xs -> {auto hasAge: Row "Age" Nat xs} -> Record String (changeType xs hasAge Nat) birthday rec = update "Age" (+1) rec ||| And we can check that it works on different types: olderPeople : ( Record String ["Age" ::: Nat] , Record String ["Firstname" ::: String, "Lastname" ::: String, "Age" ::: Nat] ) olderPeople = (birthday person4', birthday person1) ||| You can also ensure that several fields are there fullname : Record String xs -> {auto requiredFields : Sub [ "Lastname" ::: String , "Firstname" ::: String ] xs} -> String fullname xs = (xs !! "Firstname") ++ " " ++ (xs !!
"Lastname") ||| Or ensure that some row doesn't exist to create them addFullname : Record String xs -> {auto requiredFields : Sub [ "Firstname" ::: String , "Lastname" ::: String ] xs} -> {auto newFields : Disjoint [ "Fullname" ::: String ] xs} -> Record String (merge ["Fullname" ::: String] xs) addFullname r = ["Fullname" := fullname r] ++ r ||| We can also decide to merge records if there is no overlap twoPartsPerson : Record String [ "ID" ::: Nat , "Firstname" ::: String , "Lastname" ::: String , "Age" ::: Nat ] twoPartsPerson = ["ID" := the Nat 1, "Firstname" := "John"] ++ ["Lastname" := "Doe", "Age" := the Nat 42] eqExample : Bool eqExample = person1 == olderPerson
[STATEMENT] lemma KBasic_2[PLM]: "[\<^bold>\<box>(\<^bold>\<not>\<phi>) \<^bold>\<rightarrow> \<^bold>\<box>(\<phi> \<^bold>\<rightarrow> \<psi>) in v]" [PROOF STATE] proof (prove) goal (1 subgoal): 1. [\<^bold>\<box>\<^bold>\<not>\<phi> \<^bold>\<rightarrow> \<^bold>\<box>(\<phi> \<^bold>\<rightarrow> \<psi>) in v] [PROOF STEP] by (simp only: RM_1 useful_tautologies_3)
module Network.Curl.Prim.Multi ------------------------------------------------- -- Multi ------------------------------------------------- -- curl_multi_add_handle -- curl_multi_assign -- curl_multi_cleanup -- curl_multi_fdset -- curl_multi_info_read -- curl_multi_init -- curl_multi_perform -- curl_multi_remove_handle -- curl_multi_setopt -- curl_multi_socket_action -- curl_multi_strerror -- curl_multi_timeout -- curl_multi_poll -- curl_multi_wait -- curl_multi_wakeup ------------------------------------------------- -- import Network.Curl.Prim.Mem import Network.Curl.Prim.Other import Network.Curl.Types -- import Derive.Enum import Data.Buffer import Language.Reflection %language ElabReflection -- curl_multi_init %foreign "C:curl_multi_init,libcurl,curl/curl.h" prim_curl_multi_init : PrimIO (Ptr HandlePtr) export curl_multi_init : HasIO io => io (Maybe (CurlHandle Multi)) curl_multi_init = do r <- primIO prim_curl_multi_init pure $ if believe_me r == 0 then Nothing else Just (MkH r) -- curl_multi_cleanup %foreign "C:curl_multi_cleanup,libcurl,curl/curl.h" prim_curl_multi_cleanup : Ptr HandlePtr -> PrimIO () export curl_multi_cleanup : HasIO io => CurlHandle Multi -> io () curl_multi_cleanup (MkH ptr) = primIO (prim_curl_multi_cleanup ptr) {- curl_multi_socket_action CURLMcode curl_multi_socket_action(CURLM * multi_handle, curl_socket_t sockfd, int ev_bitmask, int *running_handles); -} %foreign "C:curl_multi_socket_action,libcurl,curl/curl.h" prim_curl_multi_socket_action : Ptr HandlePtr -> Int -> Int -> Ptr Int -> PrimIO (Ptr HandlePtr) export curl_multi_socket_action : HasIO io => CurlHandle Multi -> Int -> Int -> Ptr Int -> io CurlMCode -- return type assumed from the C prototype above (CURLMcode) curl_multi_socket_action = ?SDfsfd -- curl_multi_info_read %foreign "C:curl_multi_setopt,libcurl,curl/curl.h" prim_curl_multi_setopt_long : Ptr HandlePtr -> Int -> Int -> PrimIO Int %foreign "C:curl_multi_setopt,libcurl,curl/curl.h" prim_curl_multi_setopt_objptr : Ptr HandlePtr -> Int -> AnyPtr -> PrimIO Int %foreign "C:curl_multi_setopt,libcurl,curl/curl.h" prim_curl_multi_setopt_off_t : Ptr HandlePtr -> Int -> Bits64 -> PrimIO Int %foreign "C:curl_multi_setopt,libcurl,curl/curl.h" prim_curl_multi_setopt_blob : Ptr HandlePtr -> Int -> Buffer -> PrimIO Int -- mSetOptPrim is to fill the role of {- %foreign "C:curl_multi_setopt,libcurl,curl/curl.h" prim_curl_multi_setopt : Ptr HandlePtr -> Int -> any -> PrimIO CurlECode -} -- We can't pass 'any' or an arbitrary Type to a foreign function, but we can -- generate that function at the type we need. -- e.g. mSetOptPrim CURLMOPT_SOCKETFUNCTION would generate and declare: -- %foreign "C:curl_multi_setopt,libcurl,curl/curl.h" -- setOptPrim_CURLMOPT_SOCKETFUNCTION : Ptr HandlePtr -> Int -- -> (HandlePtr -> Int -> Int -> AnyPtr -> AnyPtr -> PrimIO Int) -- -> PrimIO Int -- Specializing `any` to `(HandlePtr -> Int -> Int -> AnyPtr -> AnyPtr -> PrimIO Int)` -- in accordance with the type given by `paramType CURLMOPT_SOCKETFUNCTION` -- mSetOptPrim then also inserts the name of the generated prim at the use-site. -- Below in `curl_multi_setopt` we use let to give it a place to generate to and -- then use it. It wasn't happy about being placed directly where `prim` is -- used instead of with the let. Possibly this is a parsing issue with %runElab -- or my own misunderstanding.
-- I'd like to have this take IO instead of PrimIO so users don't have to write -- toPrim but it's a fair bit more involved to massage the types since the ffi -- expects PrimIO %macro mSetOptPrim : {opty : _} -> (opt : CurlMOption opty) -> Elab (Ptr HandlePtr -> Int -> paramType opt -> PrimIO Int) mSetOptPrim opt = do let name = UN $ "setOptPrim_" ++ show opt z <- quote (paramType opt) str <- quote "C:curl_multi_setopt,libcurl,curl/curl.h" let ty = MkTy EmptyFC EmptyFC name `(Ptr HandlePtr -> Int -> ~z -> PrimIO Int) let claim = IClaim EmptyFC MW Private [ForeignFn [str]] ty declare [claim] -- generate prim check (IVar EmptyFC name) -- insert prim's name -- hideous but we only need to define it once export total curl_multi_setopt : HasIO io => CurlHandle Multi -> {ty : _} -> (opt : CurlMOption ty) -> paramType opt -> io CurlMCode curl_multi_setopt (MkH h) opt@CURLMOPT_SOCKETFUNCTION v = do let prim = mSetOptPrim opt pure (unsafeFromCode !(primIO $ prim h (toCode opt) v)) curl_multi_setopt (MkH h) opt@CURLMOPT_TIMERFUNCTION v = do let prim = mSetOptPrim opt pure (unsafeFromCode !(primIO $ prim h (toCode opt) v)) curl_multi_setopt (MkH h) opt@CURLMOPT_PUSHFUNCTION v = do let prim = mSetOptPrim opt pure (unsafeFromCode !(primIO $ prim h (toCode opt) v)) curl_multi_setopt (MkH h) opt@CURLMOPT_PIPELINING v = pure $ -- special, bitmask, TODO, determine if we care unsafeFromCode !(primIO $ prim_curl_multi_setopt_long h (toCode opt) v) curl_multi_setopt {ty = CURLOPTTYPE_LONG} (MkH h) opt v = pure $ unsafeFromCode !(primIO $ prim_curl_multi_setopt_long h (toCode opt) v) curl_multi_setopt {ty = CURLOPTTYPE_OBJECTPOINT} (MkH h) opt v = pure $ unsafeFromCode !(primIO $ prim_curl_multi_setopt_objptr h (toCode opt) v) curl_multi_setopt {ty = CURLOPTTYPE_OFF_T} (MkH h) opt v = pure $ unsafeFromCode !(primIO $ prim_curl_multi_setopt_off_t h (toCode opt) v) curl_multi_setopt {ty = CURLOPTTYPE_BLOB} (MkH h) opt v = pure $ unsafeFromCode !(primIO $ prim_curl_multi_setopt_blob h (toCode opt) v) curl_multi_setopt {ty = UnusedOptType} _ _ _ = pure CURLM_UNKNOWN_OPTION -- ^can't happen during normal use useoptest : IO () useoptest = do Just r <- curl_multi_init | _ => printLn "foo" CURLM_OK <- curl_multi_setopt r CURLMOPT_SOCKETFUNCTION $ \h,sock,info,dat,sdat => toPrim $ do pure 0 | o => printLn o -- curl_multi_setopt r CURLMOPT_LASTENTRY ?this_accepts_only_void putStrLn "was set" curl_multi_cleanup r
import condensed.Qprime_isoms . noncomputable theory universes v u u₁ u₂ open category_theory category_theory.limits breen_deligne opposite open bounded_homotopy_category namespace Condensed variables (BD : package) variables (M N : Condensed.{u} Ab.{u+1}) (f : M ⟶ N) lemma homology_functor_iso_natural' (C₁ C₂ : cochain_complex (Profinite.{u}ᵒᵖ ⥤ Ab.{u+1}) ℤ) (g : C₁ ⟶ C₂) (S : Profinite.{u}ᵒᵖ) (i : ℤ) : (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj S).homology_functor_iso (complex_shape.up ℤ) i).inv.app C₁ ≫ ((homology_functor (Profinite.{u}ᵒᵖ ⥤ Ab.{u+1}) (complex_shape.up ℤ) i).map g).app S = category_theory.functor.map _ g ≫ (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj S).homology_functor_iso (complex_shape.up ℤ) i).inv.app C₂ := ((((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj S).homology_functor_iso _ i).inv.naturality g).symm lemma homology_functor_iso_natural (S : ExtrDiscᵒᵖ) (i : ℤ) : (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).homology_functor_iso (complex_shape.up ℤ) i).inv.app ((BD.eval' freeFunc).obj (Condensed_Ab_to_presheaf.obj M)) ≫ ((homology_functor (Profiniteᵒᵖ ⥤ Ab) (complex_shape.up ℤ) i).map ((BD.eval' freeFunc).map (Condensed_Ab_to_presheaf.map f))).app (ExtrDisc_to_Profinite.op.obj S) = category_theory.functor.map _ (category_theory.functor.map _ (category_theory.functor.map _ f)) ≫ (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).homology_functor_iso (complex_shape.up ℤ) i).inv.app ((BD.eval' freeFunc).obj (Condensed_Ab_to_presheaf.obj N)) := homology_functor_iso_natural' _ _ _ _ _ . lemma eval_freeAb_iso_component_natural_zero (S : ExtrDiscᵒᵖ) : ((((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).map_homological_complex (complex_shape.up ℤ)).map ((BD.eval' freeFunc).map (Condensed_Ab_to_presheaf.map f))).f (int.of_nat 0) ≫ (eval_freeAb_iso.component_zero BD N (unop S)).hom = (eval_freeAb_iso.component_zero BD M (unop S)).hom ≫ ((BD.eval' (forget AddCommGroup ⋙ AddCommGroup.free)).map (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).map f.val)).f (int.of_nat 0) := begin dsimp only [eval_freeAb_iso.component_zero, functor.map_homological_complex_map_f, category_theory.evaluation_obj_map], erw [embed_f_0, embed_f_0], simp only [functor.map_biproduct, data.eval_functor_obj_map_f, whiskering_right_obj_map, whisker_right_app, functor.comp_map, functor.map_iso_hom, biproduct.unique_up_to_iso_hom, ← functor.map_comp], congr' 2, apply biproduct.hom_ext, intro j, simp only [category.assoc], erw [biproduct.lift_π, biproduct.map_π, biproduct.lift_π_assoc], simp only [functor.map_bicone_π, biproduct.bicone_π, evaluation_obj_map], simp only [← nat_trans.comp_app], congr' 1, rw [biproduct.map_π], refl, end lemma eval_freeAb_iso_component_natural_neg (S : ExtrDiscᵒᵖ) (n : ℕ) : ((((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).map_homological_complex (complex_shape.up ℤ)).map ((BD.eval' freeFunc).map (Condensed_Ab_to_presheaf.map f))).f -[1+ n] ≫ (eval_freeAb_iso.component_neg BD N (unop S) n).hom = (eval_freeAb_iso.component_neg BD M (unop S) n).hom ≫ ((BD.eval' (forget AddCommGroup ⋙ AddCommGroup.free)).map (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).map f.val)).f -[1+ n] := begin dsimp only [eval_freeAb_iso.component_neg, functor.map_homological_complex_map_f, category_theory.evaluation_obj_map], erw [embed_f_neg, embed_f_neg], simp only 
[functor.map_biproduct, data.eval_functor_obj_map_f, whiskering_right_obj_map, whisker_right_app, functor.comp_map, functor.map_iso_hom, biproduct.unique_up_to_iso_hom, ← functor.map_comp], congr' 2, apply biproduct.hom_ext, intro j, simp only [category.assoc], erw [biproduct.lift_π, biproduct.map_π, biproduct.lift_π_assoc], simp only [functor.map_bicone_π, biproduct.bicone_π, evaluation_obj_map], simp only [← nat_trans.comp_app], congr' 1, rw [biproduct.map_π], refl, end lemma eval_freeAb_iso_component_natural (S : ExtrDiscᵒᵖ) : (eval_freeAb_iso_component BD M (unop S)).inv ≫ (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).map_homological_complex (complex_shape.up ℤ)).map ((BD.eval' freeFunc).map (Condensed_Ab_to_presheaf.map f)) = (BD.eval' (forget AddCommGroup ⋙ AddCommGroup.free)).map (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).map f.val) ≫ (eval_freeAb_iso_component BD N (unop S)).inv := begin rw [iso.inv_comp_eq, ← category.assoc, iso.eq_comp_inv], ext ((_|n)|n) : 2, { apply eval_freeAb_iso_component_natural_zero }, { apply is_zero.eq_of_tgt, apply is_zero_zero, }, { apply eval_freeAb_iso_component_natural_neg }, end . lemma eval_freeAb_iso_component_natural_bis (S : ExtrDiscᵒᵖ) (i : ℤ) : (homology_functor AddCommGroup (complex_shape.up ℤ) i).map (eval_freeAb_iso_component BD M (unop S)).inv ≫ (((category_theory.evaluation Profiniteᵒᵖ Ab).obj (op (unop S).val)).map_homological_complex (complex_shape.up ℤ) ⋙ homology_functor Ab (complex_shape.up ℤ) i).map ((BD.eval' freeFunc).map (Condensed_Ab_to_presheaf.map f)) = category_theory.functor.map _ (category_theory.functor.map _ (((category_theory.evaluation Profinite.{u}ᵒᵖ Ab.{u+1}).obj (op (unop S).val)).map f.val)) ≫ (homology_functor AddCommGroup (complex_shape.up ℤ) i).map (eval_freeAb_iso_component BD N (unop S)).inv := begin rw [functor.comp_map, ← functor.map_comp, ← functor.map_comp], congr' 1, apply eval_freeAb_iso_component_natural, end lemma tensor_to_unsheafified_homology_natural'_aux (S : ExtrDiscᵒᵖ) (x) : ((AddCommGroup.adj.hom_equiv punit (N.val.obj (op (unop S).val))).symm) (point ((((ExtrSheaf_ExtrSheafProd_equiv Ab).functor.map ((Condensed_ExtrSheaf_equiv Ab).inverse.map f)).val.app S) x)) = ((AddCommGroup.adj.hom_equiv punit (M.val.obj (op (unop S).val))).symm) (point x) ≫ ((category_theory.evaluation Profiniteᵒᵖ Ab).obj (op (unop S).val)).map f.val := begin dsimp [AddCommGroup.adj, adjunction.mk_of_hom_equiv_hom_equiv], apply free_abelian_group.lift.ext, rintro ⟨⟩, rw [free_abelian_group.lift.of, comp_apply, free_abelian_group.lift.of], refl end lemma aaaahrg (i : ℤ) {A B : Ab} (f : A ⟶ B) : (homotopy_category.homology_functor AddCommGroup (complex_shape.up ℤ) i).map ((BD.eval (forget AddCommGroup ⋙ AddCommGroup.free)).map f) = (homology_functor AddCommGroup (complex_shape.up ℤ) i).map ((BD.eval' (forget AddCommGroup ⋙ AddCommGroup.free)).map f) := rfl lemma tensor_to_unsheafified_homology_natural' (i : ℤ) : tensor_to_unsheafified_homology BD M i ≫ whisker_left ExtrDisc_to_Profinite.op ((homology_functor (Profiniteᵒᵖ ⥤ Ab) (complex_shape.up ℤ) i).map ((BD.eval' freeFunc).map (Condensed_Ab_to_presheaf.map f))) = (ExtrSheafProd.map_tensor ((ExtrSheaf_ExtrSheafProd_equiv Ab).functor.map ((Condensed_ExtrSheaf_equiv Ab).inverse.map f)) (𝟙 (((BD.eval (forget AddCommGroup ⋙ AddCommGroup.free)).obj (AddCommGroup.free.obj punit)).val.as.homology i))).val ≫ tensor_to_unsheafified_homology BD N i := begin ext S : 2, dsimp only [tensor_to_unsheafified_homology, 
nat_trans.comp_app, whisker_left_app, ExtrSheafProd.map_tensor_val_app], apply AddCommGroup.tensor_ext, intros x y, simp only [comp_apply, id_apply, AddCommGroup.map_tensor, tensor_product.map_tmul, AddCommGroup.tensor_uncurry, linear_map.to_add_monoid_hom_coe, tensor_product.lift.tmul, add_monoid_hom.coe_mk, linear_map.comp_apply, add_monoid_hom.coe_to_int_linear_map], dsimp only [tensor_to_unsheafified_homology_component, add_monoid_hom.mk'_apply, tensor_to_unsheafified_homology_component_applied], simp only [← comp_apply, category.assoc], congr' 1, rw homology_functor_iso_natural, simp only [← category.assoc], congr' 1, simp only [category.assoc], rw eval_freeAb_iso_component_natural_bis, simp only [← category.assoc], congr' 1, rw [tensor_to_unsheafified_homology_natural'_aux], rw [aaaahrg, aaaahrg, ← category_theory.functor.map_comp, ← category_theory.functor.map_comp], end lemma tensor_to_homology_natural (i : ℤ) : tensor_to_homology.{u} BD M i ≫ (homology_functor (Condensed.{u} Ab.{u+1}) _ i).map ((BD.eval' freeCond').map f) = map_tensor f (𝟙 _) ≫ tensor_to_homology.{u} BD N i := begin simp only [tensor_to_homology, category.assoc, ← functor.map_comp, eval_freeCond'_iso_component_natural], simp only [functor.map_comp], simp only [← category.assoc], refine congr_arg2 _ _ rfl, simp only [category.assoc], have := (homology_functor_sheafification_iso (complex_shape.up ℤ) i).hom.naturality ((Condensed_Ab_to_presheaf ⋙ BD.eval' freeFunc).map f), erw [← this], clear this, simp only [← category.assoc], refine congr_arg2 _ _ rfl, simp only [category.assoc], dsimp only [iso.app_hom], have := (Condensed_ExtrSheaf_equiv Ab.{u+1}).counit_iso.hom.naturality ((homology_functor (Profinite.{u}ᵒᵖ ⥤ Ab.{u+1}) _ i ⋙ presheaf_to_Condensed_Ab).map ((Condensed_Ab_to_presheaf ⋙ BD.eval' freeFunc.{u u+1}).map f)), erw [← this], clear this, simp only [← category.assoc], refine congr_arg2 _ _ rfl, simp only [category.assoc], dsimp only [map_tensor, functor.comp_map], simp only [← functor.map_comp], congr' 1, have := ExtrDisc_sheafification_iso.hom.naturality ((homology_functor (Profinite.{u}ᵒᵖ ⥤ Ab.{u+1}) _ i).map ((BD.eval' freeFunc).map (Condensed_Ab_to_presheaf.map f))), erw [← this], clear this, simp only [← category.assoc], refine congr_arg2 _ _ rfl, ext1, dsimp only [tensor_to_homology_aux], simp only [functor.comp_map, whiskering_left_obj_map, Sheaf.category_theory.category_comp_val, presheaf_to_Sheaf_map_val, ExtrSheaf.map_tensor_val, grothendieck_topology.to_sheafify_naturality, category.assoc, grothendieck_topology.to_sheafify_naturality_assoc, ← grothendieck_topology.sheafify_map_comp], rw [tensor_to_unsheafified_homology_natural'], end lemma homology_bd_eval_natural [∀ S : ExtrDisc.{u}, no_zero_smul_divisors ℤ (M.val.obj (op S.val))] [∀ S : ExtrDisc.{u}, no_zero_smul_divisors ℤ (N.val.obj (op S.val))] (i : ℤ) : (homology_bd_eval BD M i).inv ≫ (homology_functor _ _ i).map ((BD.eval' freeCond').map f) = map_tensor f (𝟙 _) ≫ (homology_bd_eval BD N i).inv := tensor_to_homology_natural BD M N f i end Condensed
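Informally, `tensor_to_homology_natural` says the comparison map is natural in the condensed abelian group. Writing Q'_i for the i-th homology of the Breen–Deligne complex evaluated on ℤ[pt] (this shorthand is ours, not the project's), the lemma asserts that for every f : M ⟶ N the square

\[
\begin{array}{ccc}
M \otimes Q'_i & \longrightarrow & H_i\bigl((\mathrm{BD.eval'}\ \mathrm{freeCond'})\,M\bigr) \\
{\scriptstyle f \otimes \mathrm{id}}\ \big\downarrow & & \big\downarrow\ {\scriptstyle H_i((\mathrm{BD.eval'}\ \mathrm{freeCond'})\,f)} \\
N \otimes Q'_i & \longrightarrow & H_i\bigl((\mathrm{BD.eval'}\ \mathrm{freeCond'})\,N\bigr)
\end{array}
\]

commutes, with the horizontal arrows given by `tensor_to_homology`.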
[STATEMENT] lemma pflat_len_simps: shows "pflat_len (Seq v1 v2) (0#p) = pflat_len v1 p" and "pflat_len (Seq v1 v2) (Suc 0#p) = pflat_len v2 p" and "pflat_len (Left v) (0#p) = pflat_len v p" and "pflat_len (Left v) (Suc 0#p) = -1" and "pflat_len (Right v) (Suc 0#p) = pflat_len v p" and "pflat_len (Right v) (0#p) = -1" and "pflat_len (Stars (v#vs)) (Suc n#p) = pflat_len (Stars vs) (n#p)" and "pflat_len (Stars (v#vs)) (0#p) = pflat_len v p" and "pflat_len v [] = intlen (flat v)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ((pflat_len (Seq v1 v2) (0 # p) = pflat_len v1 p &&& pflat_len (Seq v1 v2) (Suc 0 # p) = pflat_len v2 p) &&& pflat_len (val.Left v) (0 # p) = pflat_len v p &&& pflat_len (val.Left v) (Suc 0 # p) = - 1) &&& (pflat_len (val.Right v) (Suc 0 # p) = pflat_len v p &&& pflat_len (val.Right v) (0 # p) = - 1) &&& pflat_len (Stars (v # vs)) (Suc n # p) = pflat_len (Stars vs) (n # p) &&& pflat_len (Stars (v # vs)) (0 # p) = pflat_len v p &&& pflat_len v [] = intlen (flat v) [PROOF STEP] by (auto simp add: pflat_len_def Pos_empty)
module Main import Uri lul : List String lul = [ "https://www.youtube.com/watch?v=YgmFIVOR1-I&list=RD2By3feOVw20&index=9&ab_channel=%E3%81%9A%E3%81%A3%E3%81%A8%E7%9C%9F%E5%A4%9C%E4%B8%AD%E3%81%A7%E3%81%84%E3%81%84%E3%81%AE%E3%81%AB%E3%80%82ZUTOMAYO", "ftp://ftp.is.co.za/rfc/rfc1808.txt", "gopher://spinaltap.micro.umn.edu/00/Weather/California/Los%20Angeles", "http://www.math.uio.no/faq/compression-faq/part1.html", "mailto:[email protected]", "news:comp.infosystems.www.servers.unix", "telnet://melvyl.ucop.edu/", "http://192.168.0:8080", "//www.cwi.nl:80/%7Eguido/Python.html", "www.cwi.nl/%7Eguido/Python.html", "help/Python.html", "http://www.cwi.nl/%7Eguido/Python.html", "example.com", "user:[email protected]:8080", "http://user:[email protected]:8080" ] main : IO () main = case (traverse decodeURI lul) of Right res => for_ res (\x => putStrLn (show res)) Left err => putStrLn "Err"
# a belief that just stores the previous observation # maintained by @zsunberg # policies based on the previous observation only are often pretty good # e.g. for the crying baby problem """ Updater that stores the most recent observation as the belief, missing if none is available. """ struct PreviousObservationUpdater <: Updater end initialize_belief(u::PreviousObservationUpdater, d::Any) = d update(bu::PreviousObservationUpdater, old_b, action, obs) = obs
%! Author = tstreule \section{Biosensors} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % \subsection{Biosensors Principle} % % % \begin{center} % \includegraphics[width=.3\columnwidth]{Biosensors_Principle} % \end{center} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Label assay vs. label-free} % \shortstack{Label assay\\(sandwich):} \includegraphics[width=.08\columnwidth]{Biosensors_Label_Assay} \qquad \shortstack{Label-free assay\\(OWLS):} \includegraphics[width=.08\columnwidth]{Biosensors_Label_Free} \par \shortstack{non-specific\\binding:} \includegraphics[width=.12\columnwidth]{Biosensors_Non_Specific} \shortstack[r]{other interactions that could happen,\\ except the angle binding.} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Rules of Chemistry and Physics} % \formtex{\textit{Bracket notation}}{indicates concentration} \ce{A +B <=>[$k_1$][$k_{-1}$] AR} \formbox{equilibrium constant}{K = \frac{k_{-\!1}}{k_1} = \frac{[A][R]}{[AR]}} \formula{affinity constant}{K_a = 1/K} \formula{cont. flowing cell}{[AR] \sim q * \text{signal},} $q=const$ \formula{~}{[R] \sim R_0 = \text{initial receptor density}} % % \includegraphics[width=.6\columnwidth]{Biosensors_Rules_of_Chemistry} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Sensitivity and Specificity} % % All biosensing techniques are limited by the non-specific interaction = “It is not possible to catch \underline{only} Nemo with a fishing net” % \formula{Sensitivity}{\text{true positive rate (\% of correctly identified $+$)}} \formula{Specificity}{\text{true negative rate (\% of correctly identified $-$)}} Compensate Sensitivity through LOD. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsubsection{Limit of Detection (LOD) \hfill \textnormal{(Sensitivity)}} % Limitation by non-specific binding (NSB) $\to$ noise at zero analyte \begin{minipage}{.65\columnwidth} \formula{LOD}{LOD = 3\cdot\textrm{noise} / \deriv{S}{[A]}} \formbox{~}{S_{LOD} = S_0 + 3\cdot\textrm{noise}} \end{minipage}% \begin{minipage}{.35\columnwidth} \includegraphics[width=\columnwidth]{Biosensors_LOD} \end{minipage} \formbox{LOD for intensity/signal}{LOD_{NSB} = \avg{I_{NSB}} + 3\;\sigma(I_{NSB})} \formula{Lowest detectable intensity}{\frac{ \avg{I_{POI}} }{ LOD_{NSB} } = 1} ($\!\!~_{POI}$: proteins of interest) \formula{Detectable \#proteins}{N\!\# = \frac{\Gamma}{m\ped{protein}}} \quad $[\Gamma] = \unitfrac{pg}{mm^2}$ detection limit
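A quick worked example of the LOD formula (the numbers here are hypothetical, not taken from the notes): with baseline noise $\sigma = 0.5$ signal units and sensitivity $\mathrm{d}S/\mathrm{d}[A] = 10$ units per nM,
\[
LOD = \frac{3 \cdot 0.5}{10} = 0.15\ \mathrm{nM},
\qquad
S_{LOD} = S_0 + 3 \cdot 0.5 = S_0 + 1.5 .
\]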
! { dg-do compile } program p use iso_c_binding character(len=1, kind=c_char), parameter :: z(2) = 'z' print *, sizeof(z(3)) ! { dg-warning "is out of bounds" } print *, c_sizeof(z(3)) ! { dg-warning "is out of bounds" } end
ISTANBUL (RNS) — Mustafa Ergin, a shopkeeper, sits in the back of a crowded audience in Istanbul listening to a panel of economists speaking about cryptocurrencies. With prohibitions on gambling and earning interest, Islamic teaching has a lot to say on what Muslims can do with their money. Turkey, like much of the world, has witnessed a dizzying rise in interest in bitcoin and the hundreds of other cryptocurrencies that have been modeled on it, driven by stories of working-class people striking it rich with a new technology that few seem to understand. And while Turkey accounts for a sliver of the global bitcoin trade, it is among the top 10 countries invested in the cryptocurrency. Bitcoin trade here has gone from about 9,000 transactions a month in May 2017 to 42,000 in December 2017. A handful of companies in construction, education and the food industry have begun accepting cryptocurrencies for payment, and there are daily seminars in cities such as Istanbul where financial experts appear before enamored audiences to explain how one can cash in.
def forceNat (a : Nat) := true def forceInt (a : Int) := false def f1 := /- The following example works, but it adds a coercion at `forceInt i`. The elaborated term is ``` fun (n i : Nat) => if n == i then forceNat n else forceInt (coe i) -/ fun n i => if n == i then forceNat n else forceInt i -- works def f2 := fun n i => if coe n == i then forceInt i else forceNat n -- works #check f1 -- Nat → Nat → Bool #check f2 -- Nat → Int → Bool def f3 := /- Fails. - `n == i` generates type constraint enforcing `n` and `i` to have the same type. - `forceInt i` forces `i` (and consequently `n`) to have type `Int`. - `forceNat n` fails because there is no coercion from `Nat` to `Int`. -/ fun n i => if n == i then forceInt i else forceNat n
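One common fix for the failing `f3`, assuming a reasonably recent Lean 4 toolchain, is to pin the binder types so that `n == i` is compared in `Int` and no (non-existent) `Int → Nat` coercion is ever demanded:

-- Self-contained sketch (mirrors `forceNat`/`forceInt` above); ascribing `n` to `Int`
-- up front lets both branches elaborate at their expected types.
def forceNat' (a : Nat) := true
def forceInt' (a : Int) := false

def f3' := fun (n : Nat) (i : Int) =>
  if (n : Int) == i then forceInt' i else forceNat' n

#check f3' -- Nat → Int → Bool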
Home to the edgy and mysteriously alluring, Alexander McQueen is an encounter of finesse and duskiness. This edgy camouflage messenger bag is one accessory that undoubtedly will break the monotony of your casual outfit. Fashioned in Italy, it is presented with black calf leather detailing, a flat body and an adjustable crossbody strap. The bag also features a top zip closure, a main internal compartment with zipped and slip pockets, a front logo patch and is finished with minor silver-tone hardware.
idsApplyRules { if (*rloc == "localhost") { msiAdmRetrieveRulesFromDBIntoStruct(*ruleBase, "0", *struct); msiAdmWriteRulesFromStructIntoFile(*outFileName, *struct); } else { remote(*rloc, "null") { msiAdmRetrieveRulesFromDBIntoStruct(*ruleBase, "0", *struct); msiAdmWriteRulesFromStructIntoFile(*outFileName, *struct); } } } INPUT *ruleBase="IDSbase", *outFileName="ids", *rloc="localhost" OUTPUT ruleExecOut
Music critic Oli Marlow reviewed the soundtrack in 2013, calling it a unique fusion of religious, folk, and classical music, with influences from around the world. He also commented on the sound design of the film, calling it psychedelic, and saying that there was "a lot of incredible incidental music" in the film that was not included in the soundtrack releases. In a 1999 paper submitted to London's Symposium on Sound in Cinema, film critic <unk> A. Chatterji said, "Sholay offers a model lesson on how sound can be used to signify the terror a character evokes. Sholay is also exemplary in its use of <unk> to jump cut to a different scene and time, without breaking the continuity of the narrative, yet, intensifying the drama."
Require Import Arith.
Require Import Omega.
Require Import Psatz.

(* Note: Is
   le_ge_dec n m : {n <= m} + {n >= m}.
   defined correctly? The two possibilities are not mutually exclusive, but at least one of them
   always holds. Then again, this is a dec, so perhaps a different name would be better? *)

(** Because of the definition of the successor, the sum has to recurse on the first element.
    Otherwise induction causes problems everywhere. **)

Fixpoint sum (n : nat) {struct n} : (forall i, i < n -> nat) -> nat :=
  match n with
  | 0 => (fun f => 0)
  | S m => (fun (f : forall i, i < S m -> nat) =>
              f m (le_n (S m)) +
              sum m (fun (i : nat) (p : i < m) => f i (Nat.lt_trans i m (S m) p (le_n (S m)))))
  end.

Definition sum' (n : nat) (f : nat -> nat) := sum n (fun i _ => f i).

(** Given a decidable predicate [P] on [nat], we can count how many numbers up to [n] satisfy [P]. *)
Definition count (n : nat) {P : nat -> Prop} (decP : forall x, {P x} + {~ P x}) :=
  sum' n (fun x => if decP x then 1 else 0).

(* AAA *)
(* probably to be deleted *)
Definition countA (n : nat) (P : nat -> Prop) (decP : forall x, {P x} + {~ P x}) :=
  sum' n (fun x => if decP x then 1 else 0).

Lemma change_sum (n : nat) (f g : forall i, i < n -> nat) :
  (forall j (p : j < n), f j p = g j p) -> sum n f = sum n g.
Proof.
  intro E.
  induction n.
  - reflexivity.
  - simpl.
    f_equal.
    + apply E.
    + apply IHn.
      intros j p.
      apply E.
Qed.

Lemma change_sum' (n : nat) (f g : nat -> nat) :
  (forall j (p : j < n), f j = g j) -> sum' n f = sum' n g.
Proof.
  unfold sum'.
  apply change_sum.
Qed.

Lemma sum'_S (n : nat) (f : nat -> nat) :
  sum' (S n) f = f n + sum' n f.
Proof.
  unfold sum'.
  reflexivity.
Qed.

Lemma same_sum' (n c : nat) (f : nat -> nat) :
  (forall j (p : j < n), f j = c) -> sum' n f = n * c.
Proof.
  induction n.
  - intro H. auto.
  - rewrite sum'_S.
    intro H.
    rewrite IHn.
    + rewrite H.
      * lia.
      * omega.
    + auto.
Qed.

Ltac simpl_sum :=
  try (rewrite sum'_S in *) ; simpl in *.

Lemma sum_n_krat_k (n : nat) (k : nat):
  sum' n (fun y => k) = n * k.
Proof.
  induction n.
  - auto.
  - simpl_sum.
    rewrite IHn.
    omega.
Qed.

Lemma sum_n_krat_1 (n : nat) (k : nat):
  sum' n (fun y => 1) = n.
Proof.
  rewrite (sum_n_krat_k n 1).
  omega.
Qed.

(* AAA *)
Lemma vsota_funkcij (n : nat) (f g : nat -> nat) :
  sum' n (fun x => (f x) + (g x)) = sum' n f + sum' n g.
Proof.
  induction n.
  - auto.
  - rewrite sum'_S.
    rewrite IHn.
    rewrite sum'_S.
    rewrite sum'_S.
    omega.
Qed.

(* it is a bit silly that I need this lemma just to mirror the statement across the equality;
   how could this be done differently? *)
Lemma vsota_funkcij_rv (n : nat) (f g : nat -> nat) :
  sum' n f + sum' n g = sum' n (fun x => (f x) + (g x)).
Proof.
  rewrite vsota_funkcij.
  auto.
Qed.

Lemma krajsanje_izraza (An_ A_n Ann M Q : nat) :
  A_n + M = Q -> An_ + A_n + Ann + M = Ann + An_ + Q.
Proof.
  omega.
Qed.

Lemma sum_sum_ULD_part (n : nat) (f : nat -> nat -> nat) :
  sum' n (fun x => (sum' x (fun y => f x y) + sum' x (fun y => f y x) + f x x)) =
  sum' n (fun x => sum' n (fun y => f x y)).
Proof.
  induction n.
  - auto.
  - rewrite sum'_S.
    rewrite IHn.
    rewrite sum'_S.
    rewrite sum'_S.
    set (Ann := f n n).
    set (An_ := sum' n (fun y : nat => f n y)).
    set (A_n := sum' n (fun y : nat => f y n)).
    set (M := sum' n (fun x : nat => sum' n (fun y : nat => f x y))).
    set (Q := sum' n (fun x : nat => sum' (S n) (fun y : nat => f x y))).
    apply (krajsanje_izraza An_ A_n Ann M Q).
    unfold A_n.
    unfold M.
    unfold Q.
    rewrite (vsota_funkcij_rv n (fun y : nat => f y n) (fun x : nat => sum' n (fun y : nat => f x y))).
    apply change_sum.
    intros j p.
    rewrite sum'_S.
    auto.
Qed.
Lemma sum_sum_ULD (n : nat) (f : nat -> nat -> nat) : sum' n (fun x => (sum' x (fun y => f x y))) + sum' n (fun x => (sum' x (fun y => f y x))) + sum' n (fun x => (f x x)) = sum' n (fun x => sum' n (fun y => f x y)). Proof. do 2 rewrite vsota_funkcij_rv. apply sum_sum_ULD_part. Qed.
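In conventional notation, `sum_sum_ULD` splits a full square sum into the entries below the diagonal, those above it, and the diagonal itself (recall that `sum' n g` is $\sum_{y=0}^{n-1} g(y)$):

\[
\sum_{x=0}^{n-1} \sum_{y=0}^{x-1} f(x,y)
\;+\;
\sum_{x=0}^{n-1} \sum_{y=0}^{x-1} f(y,x)
\;+\;
\sum_{x=0}^{n-1} f(x,x)
\;=\;
\sum_{x=0}^{n-1} \sum_{y=0}^{n-1} f(x,y).
\]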
/- Copyright (c) 2021 Floris van Doorn. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Floris van Doorn, Sébastien Gouëzel -/ import measure_theory.measure.lebesgue import measure_theory.measure.haar import linear_algebra.finite_dimensional import analysis.normed_space.pointwise import measure_theory.group.pointwise import measure_theory.measure.doubling /-! # Relationship between the Haar and Lebesgue measures We prove that the Haar measure and Lebesgue measure are equal on `ℝ` and on `ℝ^ι`, in `measure_theory.add_haar_measure_eq_volume` and `measure_theory.add_haar_measure_eq_volume_pi`. We deduce basic properties of any Haar measure on a finite dimensional real vector space: * `map_linear_map_add_haar_eq_smul_add_haar`: a linear map rescales the Haar measure by the absolute value of its determinant. * `add_haar_preimage_linear_map` : when `f` is a linear map with nonzero determinant, the measure of `f ⁻¹' s` is the measure of `s` multiplied by the absolute value of the inverse of the determinant of `f`. * `add_haar_image_linear_map` : when `f` is a linear map, the measure of `f '' s` is the measure of `s` multiplied by the absolute value of the determinant of `f`. * `add_haar_submodule` : a strict submodule has measure `0`. * `add_haar_smul` : the measure of `r • s` is `|r| ^ dim * μ s`. * `add_haar_ball`: the measure of `ball x r` is `r ^ dim * μ (ball 0 1)`. * `add_haar_closed_ball`: the measure of `closed_ball x r` is `r ^ dim * μ (ball 0 1)`. * `add_haar_sphere`: spheres have zero measure. This makes it possible to associate a Lebesgue measure to an `n`-alternating map in dimension `n`. This measure is called `alternating_map.measure`. Its main property is `ω.measure_parallelepiped v`, stating that the associated measure of the parallelepiped spanned by vectors `v₁, ..., vₙ` is given by `|ω v|`. We also show that a Lebesgue density point `x` of a set `s` (with respect to closed balls) has density one for the rescaled copies `{x} + r • t` of a given set `t` with positive measure, in `tendsto_add_haar_inter_smul_one_of_density_one`. In particular, `s` intersects `{x} + r • t` for small `r`, see `eventually_nonempty_inter_smul_of_density_one`. -/ open topological_space set filter metric open_locale ennreal pointwise topology nnreal /-- The interval `[0,1]` as a compact set with non-empty interior. -/ def topological_space.positive_compacts.Icc01 : positive_compacts ℝ := { carrier := Icc 0 1, is_compact' := is_compact_Icc, interior_nonempty' := by simp_rw [interior_Icc, nonempty_Ioo, zero_lt_one] } universe u /-- The set `[0,1]^ι` as a compact set with non-empty interior. -/ def topological_space.positive_compacts.pi_Icc01 (ι : Type*) [fintype ι] : positive_compacts (ι → ℝ) := { carrier := pi univ (λ i, Icc 0 1), is_compact' := is_compact_univ_pi (λ i, is_compact_Icc), interior_nonempty' := by simp only [interior_pi_set, set.to_finite, interior_Icc, univ_pi_nonempty_iff, nonempty_Ioo, implies_true_iff, zero_lt_one] } namespace measure_theory open measure topological_space.positive_compacts finite_dimensional /-! ### The Lebesgue measure is a Haar measure on `ℝ` and on `ℝ^ι`. -/ /-- The Haar measure equals the Lebesgue measure on `ℝ`. -/ lemma add_haar_measure_eq_volume : add_haar_measure Icc01 = volume := by { convert (add_haar_measure_unique volume Icc01).symm, simp [Icc01] } /-- The Haar measure equals the Lebesgue measure on `ℝ^ι`. 
-/ lemma add_haar_measure_eq_volume_pi (ι : Type*) [fintype ι] : add_haar_measure (pi_Icc01 ι) = volume := begin convert (add_haar_measure_unique volume (pi_Icc01 ι)).symm, simp only [pi_Icc01, volume_pi_pi (λ i, Icc (0 : ℝ) 1), positive_compacts.coe_mk, compacts.coe_mk, finset.prod_const_one, ennreal.of_real_one, real.volume_Icc, one_smul, sub_zero], end instance is_add_haar_measure_volume_pi (ι : Type*) [fintype ι] : is_add_haar_measure (volume : measure (ι → ℝ)) := by { rw ← add_haar_measure_eq_volume_pi, apply_instance } namespace measure /-! ### Strict subspaces have zero measure -/ /-- If a set is disjoint of its translates by infinitely many bounded vectors, then it has measure zero. This auxiliary lemma proves this assuming additionally that the set is bounded. -/ lemma add_haar_eq_zero_of_disjoint_translates_aux {E : Type*} [normed_add_comm_group E] [normed_space ℝ E] [measurable_space E] [borel_space E] [finite_dimensional ℝ E] (μ : measure E) [is_add_haar_measure μ] {s : set E} (u : ℕ → E) (sb : bounded s) (hu : bounded (range u)) (hs : pairwise (disjoint on (λ n, {u n} + s))) (h's : measurable_set s) : μ s = 0 := begin by_contra h, apply lt_irrefl ∞, calc ∞ = ∑' (n : ℕ), μ s : (ennreal.tsum_const_eq_top_of_ne_zero h).symm ... = ∑' (n : ℕ), μ ({u n} + s) : by { congr' 1, ext1 n, simp only [image_add_left, measure_preimage_add, singleton_add] } ... = μ (⋃ n, {u n} + s) : by rw measure_Union hs (λ n, by simpa only [image_add_left, singleton_add] using measurable_id.const_add _ h's) ... = μ (range u + s) : by rw [← Union_add, Union_singleton_eq_range] ... < ∞ : bounded.measure_lt_top (hu.add sb) end /-- If a set is disjoint of its translates by infinitely many bounded vectors, then it has measure zero. -/ lemma add_haar_eq_zero_of_disjoint_translates {E : Type*} [normed_add_comm_group E] [normed_space ℝ E] [measurable_space E] [borel_space E] [finite_dimensional ℝ E] (μ : measure E) [is_add_haar_measure μ] {s : set E} (u : ℕ → E) (hu : bounded (range u)) (hs : pairwise (disjoint on (λ n, {u n} + s))) (h's : measurable_set s) : μ s = 0 := begin suffices H : ∀ R, μ (s ∩ closed_ball 0 R) = 0, { apply le_antisymm _ (zero_le _), calc μ s ≤ ∑' (n : ℕ), μ (s ∩ closed_ball 0 n) : by { conv_lhs { rw ← Union_inter_closed_ball_nat s 0 }, exact measure_Union_le _ } ... = 0 : by simp only [H, tsum_zero] }, assume R, apply add_haar_eq_zero_of_disjoint_translates_aux μ u (bounded.mono (inter_subset_right _ _) bounded_closed_ball) hu _ (h's.inter (measurable_set_closed_ball)), apply pairwise_disjoint.mono hs (λ n, _), exact add_subset_add (subset.refl _) (inter_subset_left _ _) end /-- A strict vector subspace has measure zero. 
-/ lemma add_haar_submodule {E : Type*} [normed_add_comm_group E] [normed_space ℝ E] [measurable_space E] [borel_space E] [finite_dimensional ℝ E] (μ : measure E) [is_add_haar_measure μ] (s : submodule ℝ E) (hs : s ≠ ⊤) : μ s = 0 := begin obtain ⟨x, hx⟩ : ∃ x, x ∉ s, by simpa only [submodule.eq_top_iff', not_exists, ne.def, not_forall] using hs, obtain ⟨c, cpos, cone⟩ : ∃ (c : ℝ), 0 < c ∧ c < 1 := ⟨1/2, by norm_num, by norm_num⟩, have A : bounded (range (λ (n : ℕ), (c ^ n) • x)), { have : tendsto (λ (n : ℕ), (c ^ n) • x) at_top (𝓝 ((0 : ℝ) • x)) := (tendsto_pow_at_top_nhds_0_of_lt_1 cpos.le cone).smul_const x, exact bounded_range_of_tendsto _ this }, apply add_haar_eq_zero_of_disjoint_translates μ _ A _ (submodule.closed_of_finite_dimensional s).measurable_set, assume m n hmn, simp only [function.on_fun, image_add_left, singleton_add, disjoint_left, mem_preimage, set_like.mem_coe], assume y hym hyn, have A : (c ^ n - c ^ m) • x ∈ s, { convert s.sub_mem hym hyn, simp only [sub_smul, neg_sub_neg, add_sub_add_right_eq_sub] }, have H : c ^ n - c ^ m ≠ 0, by simpa only [sub_eq_zero, ne.def] using (strict_anti_pow cpos cone).injective.ne hmn.symm, have : x ∈ s, { convert s.smul_mem (c ^ n - c ^ m)⁻¹ A, rw [smul_smul, inv_mul_cancel H, one_smul] }, exact hx this end /-- A strict affine subspace has measure zero. -/ lemma add_haar_affine_subspace {E : Type*} [normed_add_comm_group E] [normed_space ℝ E] [measurable_space E] [borel_space E] [finite_dimensional ℝ E] (μ : measure E) [is_add_haar_measure μ] (s : affine_subspace ℝ E) (hs : s ≠ ⊤) : μ s = 0 := begin rcases s.eq_bot_or_nonempty with rfl|hne, { rw [affine_subspace.bot_coe, measure_empty] }, rw [ne.def, ← affine_subspace.direction_eq_top_iff_of_nonempty hne] at hs, rcases hne with ⟨x, hx : x ∈ s⟩, simpa only [affine_subspace.coe_direction_eq_vsub_set_right hx, vsub_eq_sub, sub_eq_add_neg, image_add_right, neg_neg, measure_preimage_add_right] using add_haar_submodule μ s.direction hs end /-! ### Applying a linear map rescales Haar measure by the determinant We first prove this on `ι → ℝ`, using that this is already known for the product Lebesgue measure (thanks to matrices computations). Then, we extend this to any finite-dimensional real vector space by using a linear equiv with a space of the form `ι → ℝ`, and arguing that such a linear equiv maps Haar measure to Haar measure. -/ lemma map_linear_map_add_haar_pi_eq_smul_add_haar {ι : Type*} [finite ι] {f : (ι → ℝ) →ₗ[ℝ] (ι → ℝ)} (hf : f.det ≠ 0) (μ : measure (ι → ℝ)) [is_add_haar_measure μ] : measure.map f μ = ennreal.of_real (abs (f.det)⁻¹) • μ := begin casesI nonempty_fintype ι, /- We have already proved the result for the Lebesgue product measure, using matrices. We deduce it for any Haar measure by uniqueness (up to scalar multiplication). -/ have := add_haar_measure_unique μ (pi_Icc01 ι), rw [this, add_haar_measure_eq_volume_pi, measure.map_smul, real.map_linear_map_volume_pi_eq_smul_volume_pi hf, smul_comm], end variables {E : Type*} [normed_add_comm_group E] [normed_space ℝ E] [measurable_space E] [borel_space E] [finite_dimensional ℝ E] (μ : measure E) [is_add_haar_measure μ] {F : Type*} [normed_add_comm_group F] [normed_space ℝ F] [complete_space F] lemma map_linear_map_add_haar_eq_smul_add_haar {f : E →ₗ[ℝ] E} (hf : f.det ≠ 0) : measure.map f μ = ennreal.of_real (abs (f.det)⁻¹) • μ := begin -- we reduce to the case of `E = ι → ℝ`, for which we have already proved the result using -- matrices in `map_linear_map_add_haar_pi_eq_smul_add_haar`. 
let ι := fin (finrank ℝ E), haveI : finite_dimensional ℝ (ι → ℝ) := by apply_instance, have : finrank ℝ E = finrank ℝ (ι → ℝ), by simp, have e : E ≃ₗ[ℝ] ι → ℝ := linear_equiv.of_finrank_eq E (ι → ℝ) this, -- next line is to avoid `g` getting reduced by `simp`. obtain ⟨g, hg⟩ : ∃ g, g = (e : E →ₗ[ℝ] (ι → ℝ)).comp (f.comp (e.symm : (ι → ℝ) →ₗ[ℝ] E)) := ⟨_, rfl⟩, have gdet : g.det = f.det, by { rw [hg], exact linear_map.det_conj f e }, rw ← gdet at hf ⊢, have fg : f = (e.symm : (ι → ℝ) →ₗ[ℝ] E).comp (g.comp (e : E →ₗ[ℝ] (ι → ℝ))), { ext x, simp only [linear_equiv.coe_coe, function.comp_app, linear_map.coe_comp, linear_equiv.symm_apply_apply, hg] }, simp only [fg, linear_equiv.coe_coe, linear_map.coe_comp], have Ce : continuous e := (e : E →ₗ[ℝ] (ι → ℝ)).continuous_of_finite_dimensional, have Cg : continuous g := linear_map.continuous_of_finite_dimensional g, have Cesymm : continuous e.symm := (e.symm : (ι → ℝ) →ₗ[ℝ] E).continuous_of_finite_dimensional, rw [← map_map Cesymm.measurable (Cg.comp Ce).measurable, ← map_map Cg.measurable Ce.measurable], haveI : is_add_haar_measure (map e μ) := (e : E ≃+ (ι → ℝ)).is_add_haar_measure_map μ Ce Cesymm, have ecomp : (e.symm) ∘ e = id, by { ext x, simp only [id.def, function.comp_app, linear_equiv.symm_apply_apply] }, rw [map_linear_map_add_haar_pi_eq_smul_add_haar hf (map e μ), measure.map_smul, map_map Cesymm.measurable Ce.measurable, ecomp, measure.map_id] end /-- The preimage of a set `s` under a linear map `f` with nonzero determinant has measure equal to `μ s` times the absolute value of the inverse of the determinant of `f`. -/ @[simp] lemma add_haar_preimage_linear_map {f : E →ₗ[ℝ] E} (hf : f.det ≠ 0) (s : set E) : μ (f ⁻¹' s) = ennreal.of_real (abs (f.det)⁻¹) * μ s := calc μ (f ⁻¹' s) = measure.map f μ s : ((f.equiv_of_det_ne_zero hf).to_continuous_linear_equiv.to_homeomorph .to_measurable_equiv.map_apply s).symm ... = ennreal.of_real (abs (f.det)⁻¹) * μ s : by { rw map_linear_map_add_haar_eq_smul_add_haar μ hf, refl } /-- The preimage of a set `s` under a continuous linear map `f` with nonzero determinant has measure equal to `μ s` times the absolute value of the inverse of the determinant of `f`. -/ @[simp] lemma add_haar_preimage_continuous_linear_map {f : E →L[ℝ] E} (hf : linear_map.det (f : E →ₗ[ℝ] E) ≠ 0) (s : set E) : μ (f ⁻¹' s) = ennreal.of_real (abs (linear_map.det (f : E →ₗ[ℝ] E))⁻¹) * μ s := add_haar_preimage_linear_map μ hf s /-- The preimage of a set `s` under a linear equiv `f` has measure equal to `μ s` times the absolute value of the inverse of the determinant of `f`. -/ @[simp] lemma add_haar_preimage_linear_equiv (f : E ≃ₗ[ℝ] E) (s : set E) : μ (f ⁻¹' s) = ennreal.of_real (abs (f.symm : E →ₗ[ℝ] E).det) * μ s := begin have A : (f : E →ₗ[ℝ] E).det ≠ 0 := (linear_equiv.is_unit_det' f).ne_zero, convert add_haar_preimage_linear_map μ A s, simp only [linear_equiv.det_coe_symm] end /-- The preimage of a set `s` under a continuous linear equiv `f` has measure equal to `μ s` times the absolute value of the inverse of the determinant of `f`. -/ @[simp] lemma add_haar_preimage_continuous_linear_equiv (f : E ≃L[ℝ] E) (s : set E) : μ (f ⁻¹' s) = ennreal.of_real (abs (f.symm : E →ₗ[ℝ] E).det) * μ s := add_haar_preimage_linear_equiv μ _ s /-- The image of a set `s` under a linear map `f` has measure equal to `μ s` times the absolute value of the determinant of `f`. 
-/ @[simp] lemma add_haar_image_linear_map (f : E →ₗ[ℝ] E) (s : set E) : μ (f '' s) = ennreal.of_real (abs f.det) * μ s := begin rcases ne_or_eq f.det 0 with hf|hf, { let g := (f.equiv_of_det_ne_zero hf).to_continuous_linear_equiv, change μ (g '' s) = _, rw [continuous_linear_equiv.image_eq_preimage g s, add_haar_preimage_continuous_linear_equiv], congr, ext x, simp only [linear_equiv.coe_to_continuous_linear_equiv, linear_equiv.of_is_unit_det_apply, linear_equiv.coe_coe, continuous_linear_equiv.symm_symm], }, { simp only [hf, zero_mul, ennreal.of_real_zero, abs_zero], have : μ f.range = 0 := add_haar_submodule μ _ (linear_map.range_lt_top_of_det_eq_zero hf).ne, exact le_antisymm (le_trans (measure_mono (image_subset_range _ _)) this.le) (zero_le _) } end /-- The image of a set `s` under a continuous linear map `f` has measure equal to `μ s` times the absolute value of the determinant of `f`. -/ @[simp] lemma add_haar_image_continuous_linear_map (f : E →L[ℝ] E) (s : set E) : μ (f '' s) = ennreal.of_real (abs (f : E →ₗ[ℝ] E).det) * μ s := add_haar_image_linear_map μ _ s /-- The image of a set `s` under a continuous linear equiv `f` has measure equal to `μ s` times the absolute value of the determinant of `f`. -/ @[simp] lemma add_haar_image_continuous_linear_equiv (f : E ≃L[ℝ] E) (s : set E) : μ (f '' s) = ennreal.of_real (abs (f : E →ₗ[ℝ] E).det) * μ s := μ.add_haar_image_linear_map (f : E →ₗ[ℝ] E) s /-! ### Basic properties of Haar measures on real vector spaces -/ lemma map_add_haar_smul {r : ℝ} (hr : r ≠ 0) : measure.map ((•) r) μ = ennreal.of_real (abs (r ^ (finrank ℝ E))⁻¹) • μ := begin let f : E →ₗ[ℝ] E := r • 1, change measure.map f μ = _, have hf : f.det ≠ 0, { simp only [mul_one, linear_map.det_smul, ne.def, monoid_hom.map_one], assume h, exact hr (pow_eq_zero h) }, simp only [map_linear_map_add_haar_eq_smul_add_haar μ hf, mul_one, linear_map.det_smul, monoid_hom.map_one], end @[simp] lemma add_haar_preimage_smul {r : ℝ} (hr : r ≠ 0) (s : set E) : μ (((•) r) ⁻¹' s) = ennreal.of_real (abs (r ^ (finrank ℝ E))⁻¹) * μ s := calc μ (((•) r) ⁻¹' s) = measure.map ((•) r) μ s : ((homeomorph.smul (is_unit_iff_ne_zero.2 hr).unit).to_measurable_equiv.map_apply s).symm ... = ennreal.of_real (abs (r^(finrank ℝ E))⁻¹) * μ s : by { rw map_add_haar_smul μ hr, refl } /-- Rescaling a set by a factor `r` multiplies its measure by `abs (r ^ dim)`. 
-/ @[simp] lemma add_haar_smul (r : ℝ) (s : set E) : μ (r • s) = ennreal.of_real (abs (r ^ (finrank ℝ E))) * μ s := begin rcases ne_or_eq r 0 with h|rfl, { rw [← preimage_smul_inv₀ h, add_haar_preimage_smul μ (inv_ne_zero h), inv_pow, inv_inv] }, rcases eq_empty_or_nonempty s with rfl|hs, { simp only [measure_empty, mul_zero, smul_set_empty] }, rw [zero_smul_set hs, ← singleton_zero], by_cases h : finrank ℝ E = 0, { haveI : subsingleton E := finrank_zero_iff.1 h, simp only [h, one_mul, ennreal.of_real_one, abs_one, subsingleton.eq_univ_of_nonempty hs, pow_zero, subsingleton.eq_univ_of_nonempty (singleton_nonempty (0 : E))] }, { haveI : nontrivial E := nontrivial_of_finrank_pos (bot_lt_iff_ne_bot.2 h), simp only [h, zero_mul, ennreal.of_real_zero, abs_zero, ne.def, not_false_iff, zero_pow', measure_singleton] } end lemma add_haar_smul_of_nonneg {r : ℝ} (hr : 0 ≤ r) (s : set E) : μ (r • s) = ennreal.of_real (r ^ finrank ℝ E) * μ s := by rw [add_haar_smul, abs_pow, abs_of_nonneg hr] variables {μ} {s : set E} -- Note: We might want to rename this once we acquire the lemma corresponding to -- `measurable_set.const_smul` lemma null_measurable_set.const_smul (hs : null_measurable_set s μ) (r : ℝ) : null_measurable_set (r • s) μ := begin obtain rfl | hs' := s.eq_empty_or_nonempty, { simp }, obtain rfl | hr := eq_or_ne r 0, { simpa [zero_smul_set hs'] using null_measurable_set_singleton _ }, obtain ⟨t, ht, hst⟩ := hs, refine ⟨_, ht.const_smul_of_ne_zero hr, _⟩, rw ←measure_symm_diff_eq_zero_iff at ⊢ hst, rw [←smul_set_symm_diff₀ hr, add_haar_smul μ, hst, mul_zero], end variables (μ) @[simp] lemma add_haar_image_homothety (x : E) (r : ℝ) (s : set E) : μ (affine_map.homothety x r '' s) = ennreal.of_real (abs (r ^ (finrank ℝ E))) * μ s := calc μ (affine_map.homothety x r '' s) = μ ((λ y, y + x) '' (r • ((λ y, y + (-x)) '' s))) : by { simp only [← image_smul, image_image, ← sub_eq_add_neg], refl } ... = ennreal.of_real (abs (r ^ (finrank ℝ E))) * μ s : by simp only [image_add_right, measure_preimage_add_right, add_haar_smul] /-- The integral of `f (R • x)` with respect to an additive Haar measure is a multiple of the integral of `f`. The formula we give works even when `f` is not integrable or `R = 0` thanks to the convention that a non-integrable function has integral zero. -/ lemma integral_comp_smul (f : E → F) (R : ℝ) : ∫ x, f (R • x) ∂μ = |(R ^ finrank ℝ E)⁻¹| • ∫ x, f x ∂μ := begin rcases eq_or_ne R 0 with rfl|hR, { simp only [zero_smul, integral_const], rcases nat.eq_zero_or_pos (finrank ℝ E) with hE|hE, { haveI : subsingleton E, from finrank_zero_iff.1 hE, have : f = (λ x, f 0), { ext x, rw subsingleton.elim x 0 }, conv_rhs { rw this }, simp only [hE, pow_zero, inv_one, abs_one, one_smul, integral_const] }, { haveI : nontrivial E, from finrank_pos_iff.1 hE, simp only [zero_pow hE, measure_univ_of_is_add_left_invariant, ennreal.top_to_real, zero_smul, inv_zero, abs_zero]} }, { calc ∫ x, f (R • x) ∂μ = ∫ y, f y ∂(measure.map (λ x, R • x) μ) : (integral_map_equiv (homeomorph.smul (is_unit_iff_ne_zero.2 hR).unit) .to_measurable_equiv f).symm ... = |(R ^ finrank ℝ E)⁻¹| • ∫ x, f x ∂μ : by simp only [map_add_haar_smul μ hR, integral_smul_measure, ennreal.to_real_of_real, abs_nonneg] } end /-- The integral of `f (R • x)` with respect to an additive Haar measure is a multiple of the integral of `f`. The formula we give works even when `f` is not integrable or `R = 0` thanks to the convention that a non-integrable function has integral zero. 
-/ lemma integral_comp_smul_of_nonneg (f : E → F) (R : ℝ) {hR : 0 ≤ R} : ∫ x, f (R • x) ∂μ = (R ^ finrank ℝ E)⁻¹ • ∫ x, f x ∂μ := by rw [integral_comp_smul μ f R, abs_of_nonneg (inv_nonneg.2 (pow_nonneg hR _))] /-- The integral of `f (R⁻¹ • x)` with respect to an additive Haar measure is a multiple of the integral of `f`. The formula we give works even when `f` is not integrable or `R = 0` thanks to the convention that a non-integrable function has integral zero. -/ lemma integral_comp_inv_smul (f : E → F) (R : ℝ) : ∫ x, f (R⁻¹ • x) ∂μ = |(R ^ finrank ℝ E)| • ∫ x, f x ∂μ := by rw [integral_comp_smul μ f (R⁻¹), inv_pow, inv_inv] /-- The integral of `f (R⁻¹ • x)` with respect to an additive Haar measure is a multiple of the integral of `f`. The formula we give works even when `f` is not integrable or `R = 0` thanks to the convention that a non-integrable function has integral zero. -/ lemma integral_comp_inv_smul_of_nonneg (f : E → F) {R : ℝ} (hR : 0 ≤ R) : ∫ x, f (R⁻¹ • x) ∂μ = R ^ finrank ℝ E • ∫ x, f x ∂μ := by rw [integral_comp_inv_smul μ f R, abs_of_nonneg ((pow_nonneg hR _))] /-! We don't need to state `map_add_haar_neg` here, because it has already been proved for general Haar measures on general commutative groups. -/ /-! ### Measure of balls -/ lemma add_haar_ball_center {E : Type*} [normed_add_comm_group E] [measurable_space E] [borel_space E] (μ : measure E) [is_add_haar_measure μ] (x : E) (r : ℝ) : μ (ball x r) = μ (ball (0 : E) r) := begin have : ball (0 : E) r = ((+) x) ⁻¹' (ball x r), by simp [preimage_add_ball], rw [this, measure_preimage_add] end lemma add_haar_closed_ball_center {E : Type*} [normed_add_comm_group E] [measurable_space E] [borel_space E] (μ : measure E) [is_add_haar_measure μ] (x : E) (r : ℝ) : μ (closed_ball x r) = μ (closed_ball (0 : E) r) := begin have : closed_ball (0 : E) r = ((+) x) ⁻¹' (closed_ball x r), by simp [preimage_add_closed_ball], rw [this, measure_preimage_add] end lemma add_haar_ball_mul_of_pos (x : E) {r : ℝ} (hr : 0 < r) (s : ℝ) : μ (ball x (r * s)) = ennreal.of_real (r ^ (finrank ℝ E)) * μ (ball 0 s) := begin have : ball (0 : E) (r * s) = r • ball 0 s, by simp only [smul_ball hr.ne' (0 : E) s, real.norm_eq_abs, abs_of_nonneg hr.le, smul_zero], simp only [this, add_haar_smul, abs_of_nonneg hr.le, add_haar_ball_center, abs_pow], end lemma add_haar_ball_of_pos (x : E) {r : ℝ} (hr : 0 < r) : μ (ball x r) = ennreal.of_real (r ^ (finrank ℝ E)) * μ (ball 0 1) := by rw [← add_haar_ball_mul_of_pos μ x hr, mul_one] lemma add_haar_ball_mul [nontrivial E] (x : E) {r : ℝ} (hr : 0 ≤ r) (s : ℝ) : μ (ball x (r * s)) = ennreal.of_real (r ^ (finrank ℝ E)) * μ (ball 0 s) := begin rcases has_le.le.eq_or_lt hr with h|h, { simp only [← h, zero_pow finrank_pos, measure_empty, zero_mul, ennreal.of_real_zero, ball_zero] }, { exact add_haar_ball_mul_of_pos μ x h s } end lemma add_haar_ball [nontrivial E] (x : E) {r : ℝ} (hr : 0 ≤ r) : μ (ball x r) = ennreal.of_real (r ^ (finrank ℝ E)) * μ (ball 0 1) := by rw [← add_haar_ball_mul μ x hr, mul_one] lemma add_haar_closed_ball_mul_of_pos (x : E) {r : ℝ} (hr : 0 < r) (s : ℝ) : μ (closed_ball x (r * s)) = ennreal.of_real (r ^ (finrank ℝ E)) * μ (closed_ball 0 s) := begin have : closed_ball (0 : E) (r * s) = r • closed_ball 0 s, by simp [smul_closed_ball' hr.ne' (0 : E), abs_of_nonneg hr.le], simp only [this, add_haar_smul, abs_of_nonneg hr.le, add_haar_closed_ball_center, abs_pow], end lemma add_haar_closed_ball_mul (x : E) {r : ℝ} (hr : 0 ≤ r) {s : ℝ} (hs : 0 ≤ s) : μ (closed_ball x (r * s)) = ennreal.of_real (r ^ 
(finrank ℝ E)) * μ (closed_ball 0 s) := begin have : closed_ball (0 : E) (r * s) = r • closed_ball 0 s, by simp [smul_closed_ball r (0 : E) hs, abs_of_nonneg hr], simp only [this, add_haar_smul, abs_of_nonneg hr, add_haar_closed_ball_center, abs_pow], end /-- The measure of a closed ball can be expressed in terms of the measure of the closed unit ball. Use instead `add_haar_closed_ball`, which uses the measure of the open unit ball as a standard form. -/ lemma add_haar_closed_unit_ball_eq_add_haar_unit_ball : μ (closed_ball (0 : E) 1) = μ (ball 0 1) := begin apply le_antisymm _ (measure_mono ball_subset_closed_ball), have A : tendsto (λ (r : ℝ), ennreal.of_real (r ^ (finrank ℝ E)) * μ (closed_ball (0 : E) 1)) (𝓝[<] 1) (𝓝 (ennreal.of_real (1 ^ (finrank ℝ E)) * μ (closed_ball (0 : E) 1))), { refine ennreal.tendsto.mul _ (by simp) tendsto_const_nhds (by simp), exact ennreal.tendsto_of_real ((tendsto_id'.2 nhds_within_le_nhds).pow _) }, simp only [one_pow, one_mul, ennreal.of_real_one] at A, refine le_of_tendsto A _, refine mem_nhds_within_Iio_iff_exists_Ioo_subset.2 ⟨(0 : ℝ), by simp, λ r hr, _⟩, dsimp, rw ← add_haar_closed_ball' μ (0 : E) hr.1.le, exact measure_mono (closed_ball_subset_ball hr.2) end lemma add_haar_closed_ball (x : E) {r : ℝ} (hr : 0 ≤ r) : μ (closed_ball x r) = ennreal.of_real (r ^ (finrank ℝ E)) * μ (ball 0 1) := by rw [add_haar_closed_ball' μ x hr, add_haar_closed_unit_ball_eq_add_haar_unit_ball] lemma add_haar_closed_ball_eq_add_haar_ball [nontrivial E] (x : E) (r : ℝ) : μ (closed_ball x r) = μ (ball x r) := begin by_cases h : r < 0, { rw [metric.closed_ball_eq_empty.mpr h, metric.ball_eq_empty.mpr h.le] }, push_neg at h, rw [add_haar_closed_ball μ x h, add_haar_ball μ x h], end lemma add_haar_sphere_of_ne_zero (x : E) {r : ℝ} (hr : r ≠ 0) : μ (sphere x r) = 0 := begin rcases hr.lt_or_lt with h|h, { simp only [empty_diff, measure_empty, ← closed_ball_diff_ball, closed_ball_eq_empty.2 h] }, { rw [← closed_ball_diff_ball, measure_diff ball_subset_closed_ball measurable_set_ball measure_ball_lt_top.ne, add_haar_ball_of_pos μ _ h, add_haar_closed_ball μ _ h.le, tsub_self]; apply_instance } end lemma add_haar_sphere [nontrivial E] (x : E) (r : ℝ) : μ (sphere x r) = 0 := begin rcases eq_or_ne r 0 with rfl|h, { rw [sphere_zero, measure_singleton] }, { exact add_haar_sphere_of_ne_zero μ x h } end lemma add_haar_singleton_add_smul_div_singleton_add_smul {r : ℝ} (hr : r ≠ 0) (x y : E) (s t : set E) : μ ({x} + r • s) / μ ({y} + r • t) = μ s / μ t := calc μ ({x} + r • s) / μ ({y} + r • t) = ennreal.of_real (|r| ^ finrank ℝ E) * μ s * (ennreal.of_real (|r| ^ finrank ℝ E) * μ t)⁻¹ : by simp only [div_eq_mul_inv, add_haar_smul, image_add_left, measure_preimage_add, abs_pow, singleton_add] ... = ennreal.of_real (|r| ^ finrank ℝ E) * (ennreal.of_real (|r| ^ finrank ℝ E))⁻¹ * (μ s * (μ t)⁻¹) : begin rw ennreal.mul_inv, { ring }, { simp only [pow_pos (abs_pos.mpr hr), ennreal.of_real_eq_zero, not_le, ne.def, true_or] }, { simp only [ennreal.of_real_ne_top, true_or, ne.def, not_false_iff] }, end ... 
= μ s / μ t : begin rw [ennreal.mul_inv_cancel, one_mul, div_eq_mul_inv], { simp only [pow_pos (abs_pos.mpr hr), ennreal.of_real_eq_zero, not_le, ne.def], }, { simp only [ennreal.of_real_ne_top, ne.def, not_false_iff] } end @[priority 100] instance is_doubling_measure_of_is_add_haar_measure : is_doubling_measure μ := begin refine ⟨⟨(2 : ℝ≥0) ^ (finrank ℝ E), _⟩⟩, filter_upwards [self_mem_nhds_within] with r hr x, rw [add_haar_closed_ball_mul μ x zero_le_two (le_of_lt hr), add_haar_closed_ball_center μ x, ennreal.of_real, real.to_nnreal_pow zero_le_two], simp only [real.to_nnreal_bit0, real.to_nnreal_one, le_refl], end section /-! ### The Lebesgue measure associated to an alternating map -/ variables {ι G : Type*} [fintype ι] [decidable_eq ι] [normed_add_comm_group G] [normed_space ℝ G] [measurable_space G] [borel_space G] lemma add_haar_parallelepiped (b : basis ι ℝ G) (v : ι → G) : b.add_haar (parallelepiped v) = ennreal.of_real (|b.det v|) := begin haveI : finite_dimensional ℝ G, from finite_dimensional.of_fintype_basis b, have A : parallelepiped v = (b.constr ℕ v) '' (parallelepiped b), { rw image_parallelepiped, congr' 1 with i, exact (b.constr_basis ℕ v i).symm }, rw [A, add_haar_image_linear_map, basis.add_haar_self, mul_one, ← linear_map.det_to_matrix b, ← basis.to_matrix_eq_to_matrix_constr], refl, end variables [finite_dimensional ℝ G] {n : ℕ} [_i : fact (finrank ℝ G = n)] include _i /-- The Lebesgue measure associated to an alternating map. It gives measure `|ω v|` to the parallelepiped spanned by the vectors `v₁, ..., vₙ`. Note that it is not always a Haar measure, as it can be zero, but it is always locally finite and translation invariant. -/ @[irreducible] noncomputable def _root_.alternating_map.measure (ω : alternating_map ℝ G ℝ (fin n)) : measure G := ‖ω (fin_basis_of_finrank_eq ℝ G _i.out)‖₊ • (fin_basis_of_finrank_eq ℝ G _i.out).add_haar lemma _root_.alternating_map.measure_parallelepiped (ω : alternating_map ℝ G ℝ (fin n)) (v : fin n → G) : ω.measure (parallelepiped v) = ennreal.of_real (|ω v|) := begin conv_rhs { rw ω.eq_smul_basis_det (fin_basis_of_finrank_eq ℝ G _i.out) }, simp only [add_haar_parallelepiped, alternating_map.measure, coe_nnreal_smul_apply, alternating_map.smul_apply, algebra.id.smul_eq_mul, abs_mul, ennreal.of_real_mul (abs_nonneg _), real.ennnorm_eq_of_real_abs] end instance (ω : alternating_map ℝ G ℝ (fin n)) : is_add_left_invariant ω.measure := by { rw [alternating_map.measure], apply_instance } instance (ω : alternating_map ℝ G ℝ (fin n)) : is_locally_finite_measure ω.measure := by { rw [alternating_map.measure], apply_instance } end /-! ### Density points Besicovitch covering theorem ensures that, for any locally finite measure on a finite-dimensional real vector space, almost every point of a set `s` is a density point, i.e., `μ (s ∩ closed_ball x r) / μ (closed_ball x r)` tends to `1` as `r` tends to `0` (see `besicovitch.ae_tendsto_measure_inter_div`). When `μ` is a Haar measure, one can deduce the same property for any rescaling sequence of sets, of the form `{x} + r • t` where `t` is a set with positive finite measure, instead of the sequence of closed balls. We argue first for the dual property, i.e., if `s` has density `0` at `x`, then `μ (s ∩ ({x} + r • t)) / μ ({x} + r • t)` tends to `0`. First when `t` is contained in the ball of radius `1`, in `tendsto_add_haar_inter_smul_zero_of_density_zero_aux1`, (by arguing by inclusion). 
Then when `t` is bounded, reducing to the previous one by rescaling, in `tendsto_add_haar_inter_smul_zero_of_density_zero_aux2`. Then for a general set `t`, by cutting it into a bounded part and a part with small measure, in `tendsto_add_haar_inter_smul_zero_of_density_zero`. Going to the complement, one obtains the desired property at points of density `1`, first when `s` is measurable in `tendsto_add_haar_inter_smul_one_of_density_one_aux`, and then without this assumption in `tendsto_add_haar_inter_smul_one_of_density_one` by applying the previous lemma to the measurable hull `to_measurable μ s` -/ lemma tendsto_add_haar_inter_smul_zero_of_density_zero_aux1 (s : set E) (x : E) (h : tendsto (λ r, μ (s ∩ closed_ball x r) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 0)) (t : set E) (u : set E) (h'u : μ u ≠ 0) (t_bound : t ⊆ closed_ball 0 1) : tendsto (λ (r : ℝ), μ (s ∩ ({x} + r • t)) / μ ({x} + r • u)) (𝓝[>] 0) (𝓝 0) := begin have A : tendsto (λ (r : ℝ), μ (s ∩ ({x} + r • t)) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 0), { apply tendsto_of_tendsto_of_tendsto_of_le_of_le' tendsto_const_nhds h (eventually_of_forall (λ b, zero_le _)), filter_upwards [self_mem_nhds_within], rintros r (rpos : 0 < r), apply mul_le_mul_right' (measure_mono (inter_subset_inter_right _ _)) _, assume y hy, have : y - x ∈ r • closed_ball (0 : E) 1, { apply smul_set_mono t_bound, simpa [neg_add_eq_sub] using hy }, simpa only [smul_closed_ball _ _ zero_le_one, real.norm_of_nonneg rpos.le, mem_closed_ball_iff_norm, mul_one, sub_zero, smul_zero] }, have B : tendsto (λ (r : ℝ), μ (closed_ball x r) / μ ({x} + r • u)) (𝓝[>] 0) (𝓝 (μ (closed_ball x 1) / μ ({x} + u))), { apply tendsto_const_nhds.congr' _, filter_upwards [self_mem_nhds_within], rintros r (rpos : 0 < r), have : closed_ball x r = {x} + r • closed_ball 0 1, by simp only [_root_.smul_closed_ball, real.norm_of_nonneg rpos.le, zero_le_one, add_zero, mul_one, singleton_add_closed_ball, smul_zero], simp only [this, add_haar_singleton_add_smul_div_singleton_add_smul μ rpos.ne'], simp only [add_haar_closed_ball_center, image_add_left, measure_preimage_add, singleton_add] }, have C : tendsto (λ (r : ℝ), (μ (s ∩ ({x} + r • t)) / μ (closed_ball x r)) * (μ (closed_ball x r) / μ ({x} + r • u))) (𝓝[>] 0) (𝓝 (0 * (μ (closed_ball x 1) / μ ({x} + u)))), { apply ennreal.tendsto.mul A _ B (or.inr ennreal.zero_ne_top), simp only [ennreal.div_eq_top, h'u, measure_closed_ball_lt_top.ne, false_or, image_add_left, eq_self_iff_true, not_true, ne.def, not_false_iff, measure_preimage_add, singleton_add, and_false, false_and] }, simp only [zero_mul] at C, apply C.congr' _, filter_upwards [self_mem_nhds_within], rintros r (rpos : 0 < r), calc μ (s ∩ ({x} + r • t)) / μ (closed_ball x r) * (μ (closed_ball x r) / μ ({x} + r • u)) = (μ (closed_ball x r) * (μ (closed_ball x r))⁻¹) * (μ (s ∩ ({x} + r • t)) / μ ({x} + r • u)) : by { simp only [div_eq_mul_inv], ring } ... 
= μ (s ∩ ({x} + r • t)) / μ ({x} + r • u) : by rw [ennreal.mul_inv_cancel (measure_closed_ball_pos μ x rpos).ne' measure_closed_ball_lt_top.ne, one_mul], end lemma tendsto_add_haar_inter_smul_zero_of_density_zero_aux2 (s : set E) (x : E) (h : tendsto (λ r, μ (s ∩ closed_ball x r) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 0)) (t : set E) (u : set E) (h'u : μ u ≠ 0) (R : ℝ) (Rpos : 0 < R) (t_bound : t ⊆ closed_ball 0 R) : tendsto (λ (r : ℝ), μ (s ∩ ({x} + r • t)) / μ ({x} + r • u)) (𝓝[>] 0) (𝓝 0) := begin set t' := R⁻¹ • t with ht', set u' := R⁻¹ • u with hu', have A : tendsto (λ (r : ℝ), μ (s ∩ ({x} + r • t')) / μ ({x} + r • u')) (𝓝[>] 0) (𝓝 0), { apply tendsto_add_haar_inter_smul_zero_of_density_zero_aux1 μ s x h t' u', { simp only [h'u, (pow_pos Rpos _).ne', abs_nonpos_iff, add_haar_smul, not_false_iff, ennreal.of_real_eq_zero, inv_eq_zero, inv_pow, ne.def, or_self, mul_eq_zero] }, { convert smul_set_mono t_bound, rw [smul_closed_ball _ _ Rpos.le, smul_zero, real.norm_of_nonneg (inv_nonneg.2 Rpos.le), inv_mul_cancel Rpos.ne'] } }, have B : tendsto (λ (r : ℝ), R * r) (𝓝[>] 0) (𝓝[>] (R * 0)), { apply tendsto_nhds_within_of_tendsto_nhds_of_eventually_within, { exact (tendsto_const_nhds.mul tendsto_id).mono_left nhds_within_le_nhds }, { filter_upwards [self_mem_nhds_within], assume r rpos, rw mul_zero, exact mul_pos Rpos rpos } }, rw mul_zero at B, apply (A.comp B).congr' _, filter_upwards [self_mem_nhds_within], rintros r (rpos : 0 < r), have T : (R * r) • t' = r • t, by rw [mul_comm, ht', smul_smul, mul_assoc, mul_inv_cancel Rpos.ne', mul_one], have U : (R * r) • u' = r • u, by rw [mul_comm, hu', smul_smul, mul_assoc, mul_inv_cancel Rpos.ne', mul_one], dsimp, rw [T, U], end /-- Consider a point `x` at which a set `s` has density zero, with respect to closed balls. Then it also has density zero with respect to any measurable set `t`: the proportion of points in `s` belonging to a rescaled copy `{x} + r • t` of `t` tends to zero as `r` tends to zero. -/ lemma tendsto_add_haar_inter_smul_zero_of_density_zero (s : set E) (x : E) (h : tendsto (λ r, μ (s ∩ closed_ball x r) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 0)) (t : set E) (ht : measurable_set t) (h''t : μ t ≠ ∞) : tendsto (λ (r : ℝ), μ (s ∩ ({x} + r • t)) / μ ({x} + r • t)) (𝓝[>] 0) (𝓝 0) := begin refine tendsto_order.2 ⟨λ a' ha', (ennreal.not_lt_zero ha').elim, λ ε (εpos : 0 < ε), _⟩, rcases eq_or_ne (μ t) 0 with h't|h't, { apply eventually_of_forall (λ r, _), suffices H : μ (s ∩ ({x} + r • t)) = 0, by { rw H, simpa only [ennreal.zero_div] using εpos }, apply le_antisymm _ (zero_le _), calc μ (s ∩ ({x} + r • t)) ≤ μ ({x} + r • t) : measure_mono (inter_subset_right _ _) ... 
= 0 : by simp only [h't, add_haar_smul, image_add_left, measure_preimage_add, singleton_add, mul_zero] }, obtain ⟨n, npos, hn⟩ : ∃ (n : ℕ), 0 < n ∧ μ (t \ closed_ball 0 n) < (ε / 2) * μ t, { have A : tendsto (λ (n : ℕ), μ (t \ closed_ball 0 n)) at_top (𝓝 (μ (⋂ (n : ℕ), t \ closed_ball 0 n))), { have N : ∃ (n : ℕ), μ (t \ closed_ball 0 n) ≠ ∞ := ⟨0, ((measure_mono (diff_subset t _)).trans_lt h''t.lt_top).ne⟩, refine tendsto_measure_Inter (λ n, ht.diff measurable_set_closed_ball) (λ m n hmn, _) N, exact diff_subset_diff subset.rfl (closed_ball_subset_closed_ball (nat.cast_le.2 hmn)) }, have : (⋂ (n : ℕ), t \ closed_ball 0 n) = ∅, by simp_rw [diff_eq, ← inter_Inter, Inter_eq_compl_Union_compl, compl_compl, Union_closed_ball_nat, compl_univ, inter_empty], simp only [this, measure_empty] at A, have I : 0 < (ε / 2) * μ t := ennreal.mul_pos (ennreal.half_pos εpos.ne').ne' h't, exact (eventually.and (Ioi_mem_at_top 0) ((tendsto_order.1 A).2 _ I)).exists }, have L : tendsto (λ (r : ℝ), μ (s ∩ ({x} + r • (t ∩ closed_ball 0 n))) / μ ({x} + r • t)) (𝓝[>] 0) (𝓝 0) := tendsto_add_haar_inter_smul_zero_of_density_zero_aux2 μ s x h _ t h't n (nat.cast_pos.2 npos) (inter_subset_right _ _), filter_upwards [(tendsto_order.1 L).2 _ (ennreal.half_pos εpos.ne'), self_mem_nhds_within], rintros r hr (rpos : 0 < r), have I : μ (s ∩ ({x} + r • t)) ≤ μ (s ∩ ({x} + r • (t ∩ closed_ball 0 n))) + μ ({x} + r • (t \ closed_ball 0 n)) := calc μ (s ∩ ({x} + r • t)) = μ ((s ∩ ({x} + r • (t ∩ closed_ball 0 n))) ∪ (s ∩ ({x} + r • (t \ closed_ball 0 n)))) : by rw [← inter_union_distrib_left, ← add_union, ← smul_set_union, inter_union_diff] ... ≤ μ (s ∩ ({x} + r • (t ∩ closed_ball 0 n))) + μ (s ∩ ({x} + r • (t \ closed_ball 0 n))) : measure_union_le _ _ ... ≤ μ (s ∩ ({x} + r • (t ∩ closed_ball 0 n))) + μ ({x} + r • (t \ closed_ball 0 n)) : add_le_add le_rfl (measure_mono (inter_subset_right _ _)), calc μ (s ∩ ({x} + r • t)) / μ ({x} + r • t) ≤ (μ (s ∩ ({x} + r • (t ∩ closed_ball 0 n))) + μ ({x} + r • (t \ closed_ball 0 n))) / μ ({x} + r • t) : mul_le_mul_right' I _ ... < ε / 2 + ε / 2 : begin rw ennreal.add_div, apply ennreal.add_lt_add hr _, rwa [add_haar_singleton_add_smul_div_singleton_add_smul μ rpos.ne', ennreal.div_lt_iff (or.inl h't) (or.inl h''t)], end ... 
= ε : ennreal.add_halves _ end lemma tendsto_add_haar_inter_smul_one_of_density_one_aux (s : set E) (hs : measurable_set s) (x : E) (h : tendsto (λ r, μ (s ∩ closed_ball x r) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 1)) (t : set E) (ht : measurable_set t) (h't : μ t ≠ 0) (h''t : μ t ≠ ∞) : tendsto (λ (r : ℝ), μ (s ∩ ({x} + r • t)) / μ ({x} + r • t)) (𝓝[>] 0) (𝓝 1) := begin have I : ∀ u v, μ u ≠ 0 → μ u ≠ ∞ → measurable_set v → μ u / μ u - μ (vᶜ ∩ u) / μ u = μ (v ∩ u) / μ u, { assume u v uzero utop vmeas, simp_rw [div_eq_mul_inv], rw ← ennreal.sub_mul, swap, { simp only [uzero, ennreal.inv_eq_top, implies_true_iff, ne.def, not_false_iff] }, congr' 1, apply ennreal.sub_eq_of_add_eq (ne_top_of_le_ne_top utop (measure_mono (inter_subset_right _ _))), rw [inter_comm _ u, inter_comm _ u], exact measure_inter_add_diff u vmeas }, have L : tendsto (λ r, μ (sᶜ ∩ closed_ball x r) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 0), { have A : tendsto (λ r, μ (closed_ball x r) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 1), { apply tendsto_const_nhds.congr' _, filter_upwards [self_mem_nhds_within], assume r hr, rw [div_eq_mul_inv, ennreal.mul_inv_cancel], { exact (measure_closed_ball_pos μ _ hr).ne' }, { exact measure_closed_ball_lt_top.ne } }, have B := ennreal.tendsto.sub A h (or.inl ennreal.one_ne_top), simp only [tsub_self] at B, apply B.congr' _, filter_upwards [self_mem_nhds_within], rintros r (rpos : 0 < r), convert I (closed_ball x r) sᶜ (measure_closed_ball_pos μ _ rpos).ne' (measure_closed_ball_lt_top).ne hs.compl, rw compl_compl }, have L' : tendsto (λ (r : ℝ), μ (sᶜ ∩ ({x} + r • t)) / μ ({x} + r • t)) (𝓝[>] 0) (𝓝 0) := tendsto_add_haar_inter_smul_zero_of_density_zero μ sᶜ x L t ht h''t, have L'' : tendsto (λ (r : ℝ), μ ({x} + r • t) / μ ({x} + r • t)) (𝓝[>] 0) (𝓝 1), { apply tendsto_const_nhds.congr' _, filter_upwards [self_mem_nhds_within], rintros r (rpos : 0 < r), rw [add_haar_singleton_add_smul_div_singleton_add_smul μ rpos.ne', ennreal.div_self h't h''t] }, have := ennreal.tendsto.sub L'' L' (or.inl ennreal.one_ne_top), simp only [tsub_zero] at this, apply this.congr' _, filter_upwards [self_mem_nhds_within], rintros r (rpos : 0 < r), refine I ({x} + r • t) s _ _ hs, { simp only [h't, abs_of_nonneg rpos.le, pow_pos rpos, add_haar_smul, image_add_left, ennreal.of_real_eq_zero, not_le, or_false, ne.def, measure_preimage_add, abs_pow, singleton_add, mul_eq_zero] }, { simp only [h''t, ennreal.of_real_ne_top, add_haar_smul, image_add_left, with_top.mul_eq_top_iff, ne.def, not_false_iff, measure_preimage_add, singleton_add, and_false, false_and, or_self] } end /-- Consider a point `x` at which a set `s` has density one, with respect to closed balls (i.e., a Lebesgue density point of `s`). Then `s` has also density one at `x` with respect to any measurable set `t`: the proportion of points in `s` belonging to a rescaled copy `{x} + r • t` of `t` tends to one as `r` tends to zero. 
-/ lemma tendsto_add_haar_inter_smul_one_of_density_one (s : set E) (x : E) (h : tendsto (λ r, μ (s ∩ closed_ball x r) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 1)) (t : set E) (ht : measurable_set t) (h't : μ t ≠ 0) (h''t : μ t ≠ ∞) : tendsto (λ (r : ℝ), μ (s ∩ ({x} + r • t)) / μ ({x} + r • t)) (𝓝[>] 0) (𝓝 1) := begin have : tendsto (λ (r : ℝ), μ (to_measurable μ s ∩ ({x} + r • t)) / μ ({x} + r • t)) (𝓝[>] 0) (𝓝 1), { apply tendsto_add_haar_inter_smul_one_of_density_one_aux μ _ (measurable_set_to_measurable _ _) _ _ t ht h't h''t, apply tendsto_of_tendsto_of_tendsto_of_le_of_le' h tendsto_const_nhds, { refine eventually_of_forall (λ r, mul_le_mul_right' _ _), exact measure_mono (inter_subset_inter_left _ (subset_to_measurable _ _)) }, { filter_upwards [self_mem_nhds_within], rintros r (rpos : 0 < r), apply ennreal.div_le_of_le_mul, rw one_mul, exact measure_mono (inter_subset_right _ _) } }, apply this.congr (λ r, _), congr' 1, apply measure_to_measurable_inter_of_sigma_finite, simp only [image_add_left, singleton_add], apply (continuous_add_left (-x)).measurable (ht.const_smul₀ r) end /-- Consider a point `x` at which a set `s` has density one, with respect to closed balls (i.e., a Lebesgue density point of `s`). Then `s` intersects the rescaled copies `{x} + r • t` of a given set `t` with positive measure, for any small enough `r`. -/ lemma eventually_nonempty_inter_smul_of_density_one (s : set E) (x : E) (h : tendsto (λ r, μ (s ∩ closed_ball x r) / μ (closed_ball x r)) (𝓝[>] 0) (𝓝 1)) (t : set E) (ht : measurable_set t) (h't : μ t ≠ 0) : ∀ᶠ r in 𝓝[>] (0 : ℝ), (s ∩ ({x} + r • t)).nonempty := begin obtain ⟨t', t'_meas, t't, t'pos, t'top⟩ : ∃ t', measurable_set t' ∧ t' ⊆ t ∧ 0 < μ t' ∧ μ t' < ⊤ := exists_subset_measure_lt_top ht h't.bot_lt, filter_upwards [(tendsto_order.1 (tendsto_add_haar_inter_smul_one_of_density_one μ s x h t' t'_meas t'pos.ne' t'top.ne)).1 0 zero_lt_one], assume r hr, have : μ (s ∩ ({x} + r • t')) ≠ 0 := λ h', by simpa only [ennreal.not_lt_zero, ennreal.zero_div, h'] using hr, have : (s ∩ ({x} + r • t')).nonempty := nonempty_of_measure_ne_zero this, apply this.mono (inter_subset_inter subset.rfl _), exact add_subset_add subset.rfl (smul_set_mono t't), end end measure end measure_theory
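/- Added summary (informal commentary, not part of the original mathlib file): writing `μ` for the additive Haar measure as above, the last two lemmas say that if `x` is a Lebesgue density point of `s`, i.e. $\mu(s \cap B(x,r)) / \mu(B(x,r)) \to 1$ as $r \to 0^+$, then for every measurable `t` with $0 < \mu(t) < \infty$ one also has $\mu(s \cap (\{x\} + r \cdot t)) / \mu(\{x\} + r \cdot t) \to 1$ as $r \to 0^+$, and, even without the finiteness assumption on `t`, the intersection `s ∩ ({x} + r • t)` is nonempty for all sufficiently small `r > 0`. -/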
(*| ======================== Weak fairness ======================== This file proves two big results about weak fairness. First, a key rule called "wf1" allows to prove that `p ~~> q` using the weak fairness of some action. Second, `wf_combine` gives a condition under which two weak fairness assumptions are equivalent to weak fairness of the composite action; that is, `WF(a1) ∧ WF(a2) == WF(a1 ∨ a2)` (with a slight abuse of notation in `a1 ∨ a2`, which needs to be `λ s s', a1 s s' ∨ a2 s s'`). These both require non-trivial proofs. |*) From TLA Require Import defs automation. From TLA Require Import propositional_ltl modalities. From TLA Require Import classical. From TLA.logic Require Import preds. Section TLA. Context [Σ: Type]. Notation exec := (exec Σ). Notation predicate := (predicate Σ). Implicit Types (e: exec) (p q: predicate) (a: action Σ). Theorem weak_fairness_alt1 a : weak_fairness a == □ ◇ ((! (tla_enabled a)) ∨ □ ◇ ⟨a⟩). Proof. unfold weak_fairness. rewrite implies_to_or. tla_simp. rewrite -!eventually_or. rewrite !always_eventually_distrib. tla_simp. Qed. Theorem weak_fairness_alt1' a : weak_fairness a == □ ◇ ((! (tla_enabled a)) ∨ ⟨a⟩). Proof. rewrite weak_fairness_alt1. rewrite !always_eventually_distrib. tla_simp. Qed. Theorem weak_fairness_alt2 a : weak_fairness a == (◇ □ (tla_enabled a) → □ ◇ ⟨a⟩). Proof. rewrite weak_fairness_alt1. rewrite implies_to_or. tla_simp. rewrite always_eventually_distrib. tla_simp. Qed. Theorem weak_fairness_alt3 a : weak_fairness a == (□◇ ⟨a⟩ ∨ □◇ !(tla_enabled a)). Proof. rewrite weak_fairness_alt2. rewrite implies_to_or. tla_simp. rewrite tla_or_comm //. Qed. (** This lemma is used to prove rule WF1. Loosely speaking it takes an assumption of the form [p ∧ ⟨next⟩ → later p ∨ later q] and turns it into a proof that either [p] always holds or eventually [q] holds. *) Lemma until_next (p q: predicate) (next: action Σ) (e: exec) : (* induction-like hypothesis: p is preserved until q *) (∀ e, p e ∧ next (e 0) (e 1) → p (drop 1 e) ∨ q (drop 1 e)) → (* this is □⟨next⟩ e *) (∀ k, next (e k) (e (S k))) → (* we can prove always p or eventually q, but only after some shifted execution satisfying [p] as a base case for the induction *) ∀ k, p (drop k e) → (∀ k', p (drop (k' + k) e)) ∨ (∃ k', q (drop (k' + k) e)). Proof. intros Hind Hnext k Hp. (*| This proof is highly classical - we immediately appeal to a double negation to deal with this disjunction. |*) apply classical.double_negation. rewrite classical.not_or classical.not_forall classical.not_exists. intros [[k' Hnotp] Hnotq]. (*| Classical reasoning gives a *specific* k' with `¬p` and `□ ¬q`. It turns out we'll always derive a contradiction from the `¬p`, by induction on the k'. This is what makes the proof so classical, this k' sort of comes out of nowhere. |*) apply Hnotp; clear Hnotp. (* .unfold *) generalize dependent e. generalize dependent k. induction k'. - intuition auto. - intros. destruct (Hind (drop k e)); eauto; swap 1 2. { rewrite drop_drop in H. exfalso; eapply Hnotq; apply H. } rewrite drop_drop in H. replace (S k' + k) with (k' + S k) by lia. eapply IHk'; eauto. intros. replace (x + S k) with (S x + k) by lia; eauto. Qed. (** WF1 exploits a weak fairness assumption to show a leads_to. This is the general rule as presented in the paper. [wf1] below specializes it to the situation where p and q are simple state predicates. 
*) Lemma tla_wf1 (p q: predicate) (next a: action Σ) : ∀ (Hpuntilq: ⊢ p ∧ ⟨next⟩ → later p ∨ later q) (Haq: ⊢ p ∧ ⟨next⟩ ∧ ⟨a⟩ → later q) (Henable: ⊢ p → tla_enabled a), (⊢ □ ⟨next⟩ ∧ weak_fairness a → p ~~> q). Proof. rewrite weak_fairness_alt1'. unseal. destruct H as [Hnext Hwf_alt]. (* [until_next] produces three goals. The first is simply the proof of [p (drop k e)] which is needed as a starting point. Then it leaves two possibilities: either [p] always holds, or [◇ q] (loosely speaking). The latter case is exactly the goal so it goes through immediately. *) edestruct (until_next p q next e Hpuntilq Hnext); [ eassumption | | by auto ]. (* in the former case we'll show that weak fairness gives us either that [a] is never enabled (false, because p implies it is enabled), or that [a] executes at some point, at which point [Haq] easily gives us [q]. *) destruct (Hwf_alt k) as [k' [Hnotenabled | Ha]]. { (* impossible, we have p everywhere after k *) contradiction Hnotenabled. apply Henable; eauto. } exists (S k'). change (S k' + k) with (1 + (k' + k)). rewrite -drop_drop. apply Haq; eauto. Qed. (*| **WF1**. This is an important rule that uses a weak fairness assumption to show that [p ~~> q]. This has the advantage of stating all assumptions as simple quantified statements, without temporal logic. Intuitively [wf1] is used to prove [p ~~> q] from the weak fairness of some action [a]. The second premise establishes the intuition that [a] which has "precondition" [p] and "postcondition" [q]: if [a] runs in a state satisfying [p], then [q] holds. The first premise says that [p] is preserved by [⟨next⟩], or [q] just magically becomes true, in which case [a] need not run and [p ~~> q] can still be true. (This is important because [q] might actually disable the action from running again.) The third premise says that [p] enables [a], so preserving it also preserves that the action is enabled. [p] is separate from [enabled a] in order to accomplish two things. First, it allows to strengthen the inductive hypothesis to show [Hpuntilq]. Second, it gives a stronger premise for [Haq], allowing to use some state-specific fact to establish a more specific postcondition [q] than [a] might have in general. |*) Lemma wf1 (a: action Σ) (next: action Σ) (p q: Σ → Prop) : ∀ (Hpuntilq: ∀ s s', p s → next s s' → p s' ∨ q s') (Haq: ∀ s s', p s → next s s' → a s s' → q s') (Henable: ∀ s, p s → enabled a s), (⊢ □ ⟨next⟩ ∧ weak_fairness a → ⌜p⌝ ~~> ⌜q⌝). Proof. intros. apply tla_wf1; unseal. rewrite /tla_enabled; tla_simp. eauto. Qed. Theorem enabled_or a1 a2 : ∀ s, enabled (λ s s', a1 s s' ∨ a2 s s') s ↔ (enabled a1 s ∨ enabled a2 s). Proof. unfold enabled. intuition (repeat deex; eauto). intuition eauto. Qed. Theorem tla_enabled_or a1 a2 : tla_enabled (λ s s', a1 s s' ∨ a2 s s')%type == (tla_enabled a1 ∨ tla_enabled a2). Proof. apply predicate_ext => e. rewrite /tla_enabled; tla_simp. rewrite enabled_or; tla_simp. Qed. Lemma not_wf (a: action Σ) : ! weak_fairness a == (◇ (□ tla_enabled a ∧ ! ◇ ⟨a⟩)). Proof. rewrite /weak_fairness; tla_simp. Qed. Lemma not_wf' (a: action Σ) : ! weak_fairness a == (◇ (□ (tla_enabled a ∧ !⟨a⟩))). Proof. rewrite /weak_fairness; tla_simp. rewrite always_and //. Qed. Lemma always_eventually_impl p q : (p ⊢ q) → (□◇ p ⊢ □◇ q). Proof. intros H. apply always_impl_proper. apply eventually_impl_proper. auto. Qed. Lemma eventually_always_impl p q : (p ⊢ q) → (◇□ p ⊢ ◇□ q). Proof. intros H. apply eventually_impl_proper. apply always_impl_proper. auto. Qed. 
Lemma eventually_always_weaken p : (◇□ p ⊢ □◇ p). Proof. unseal. repeat deex. exists k0; eauto. replace (k0 + k) with (k + k0) by lia. auto. Qed. Lemma action_to_enabled a : ⟨a⟩ ⊢ tla_enabled a. Proof. rewrite /tla_enabled /enabled. unseal. Qed. Lemma not_enabled_to_action a : !tla_enabled a ⊢ !⟨a⟩. Proof. apply not_impl. tla_simp. apply action_to_enabled. Qed. Lemma or_apply_not r q p : (p ⊢ □ r ∨ ◇q) → p ∧ □ !q ⊢ □ r. Proof. intros Hdr. rewrite -> Hdr; clear Hdr. intros e H. unseal. destruct H as [Hp Hnota]. destruct Hp as [Hp | Ha]. - pose proof (Hp 0) as Hp0; rewrite drop_0 in Hp0; auto. - destruct Ha as [k' Ha]. exfalso; eapply Hnota; eauto. Qed. Lemma or_apply_not' r q p : (p ⊢ □ r ∨ ◇q) → □ p ∧ □ !q ⊢ □ r. Proof. intros Hdr. rewrite -> (always_weaken p). apply or_apply_not; auto. Qed. Lemma or_implies_split p q r : (p ⊢ r) → (q ⊢ r) → (p ∨ q ⊢ r). Proof. unseal. Qed. Lemma wf_combine_impl' f (a b: action Σ) : (□f ⊢ tla_enabled a → □ !tla_enabled b ∨ ◇ ⟨a⟩) → (□f ⊢ tla_enabled b → □ !tla_enabled a ∨ ◇ ⟨b⟩) → □f ∧ (◇ □ (tla_enabled a ∧ ! ⟨a⟩) ∨ ◇ □ (tla_enabled b ∧ ! ⟨b⟩)) ⊢ ◇ □ ((tla_enabled a ∨ tla_enabled b) ∧ ! ⟨a⟩ ∧ ! ⟨b⟩). Proof. intros Hdr1 Hdr2. apply impl_intro'_iff in Hdr1. apply impl_intro'_iff in Hdr2. rewrite tla_and_distr_l. rewrite -(always_idem f). rewrite !always_and_eventually. rewrite always_and. apply or_implies_split; apply eventually_impl_proper. + apply or_apply_not' in Hdr1. rewrite always_and in Hdr1. autorewrite with tla in Hdr1. tla_pose Hdr1. rewrite -> not_enabled_to_action. unseal. + apply or_apply_not' in Hdr2. rewrite always_and in Hdr2. autorewrite with tla in Hdr2. rewrite always_and. tla_pose Hdr2. rewrite -> not_enabled_to_action. unseal. Qed. Lemma wf_combine_impl (a b: action Σ) : (tla_enabled a ⊢ □ !tla_enabled b ∨ ◇ ⟨a⟩) → (tla_enabled b ⊢ □ !tla_enabled a ∨ ◇ ⟨b⟩) → ◇ □ (tla_enabled a ∧ ! ⟨a⟩) ∨ ◇ □ (tla_enabled b ∧ ! ⟨b⟩) ⊢ ◇ □ ((tla_enabled a ∨ tla_enabled b) ∧ ! ⟨a⟩ ∧ ! ⟨b⟩). Proof. intros Hdr1 Hdr2. apply or_implies_split; apply eventually_impl_proper; rewrite always_and. + tla_pose (or_apply_not' _ _ _ Hdr1). rewrite not_enabled_to_action. (* TODO: why does tla_prop not work here? *) unseal. + tla_pose (or_apply_not' _ _ _ Hdr2). rewrite not_enabled_to_action. unseal. Qed. (* for some reason this direction of [wf_combine] seems more difficult and unstructured *) Lemma wf_split_impl (a b: action Σ) : (tla_enabled a ⊢ □ !tla_enabled b ∨ ◇ ⟨a⟩) → (tla_enabled b ⊢ □ !tla_enabled a ∨ ◇ ⟨b⟩) → ◇ □ ((tla_enabled a ∨ tla_enabled b) ∧ ! ⟨a⟩ ∧ ! ⟨b⟩) ⊢ ◇ □ (tla_enabled a ∧ ! ⟨a⟩) ∨ ◇ □ (tla_enabled b ∧ ! ⟨b⟩). Proof. intros Hdr1 Hdr2. intros e H. destruct H as [k H']. rewrite !always_and in H'. destruct H' as (Hab & Hnota & Hnotb). pose proof (Hab 0) as Hab0; rewrite drop_0 in Hab0; destruct Hab0 as [Ha | Hb]. + pose proof (or_apply_not _ _ _ Hdr1 (drop k e) ltac:(unseal)). left. exists k. rewrite always_and. split; eauto. intros k'; setoid_rewrite drop_drop. destruct (Hab k') as [Ha'|Hb']; eauto. { exfalso; eapply H; eauto. } + pose proof (or_apply_not _ _ _ Hdr2 (drop k e) ltac:(unseal)). right. exists k. rewrite always_and. split; eauto. intros k'; setoid_rewrite drop_drop. destruct (Hab k') as [Ha'|Hb']; eauto. { exfalso; eapply H; eauto. } Qed. (** This theorem comes from the book "Specifying Systems", theorem 8.20. It has a surprisingly complicated proof! 
*) Theorem wf_combine (a b: action Σ) : (tla_enabled a ⊢ □ !tla_enabled b ∨ ◇ ⟨a⟩) → (tla_enabled b ⊢ □ !tla_enabled a ∨ ◇ ⟨b⟩) → (weak_fairness a ∧ weak_fairness b) == weak_fairness (λ s s', a s s' ∨ b s s')%type. Proof. intros Hdr1 Hdr2. apply not_inj. rewrite not_and. rewrite !not_wf'. rewrite -!combine_or_actions. rewrite tla_enabled_or. rewrite not_or. tla_split. - apply wf_combine_impl; auto. - apply wf_split_impl; auto. Qed. Lemma always_drop_ge (p: predicate) k e : (□p)%L (drop k e) → (∀ k', k ≤ k' → p (drop k' e)). Proof. intros Hp k' Hle. replace k' with ((k' - k) + k) by lia. rewrite -drop_drop. apply Hp. Qed. Theorem tla_wf2 (n m a b: action Σ) (p f: predicate) : (⟨n⟩ ∧ ⟨b⟩ ⊢ ⟨m⟩) → (p ∧ later p ∧ ⟨n⟩ ∧ ⟨a⟩ ∧ tla_enabled m ⊢ ⟨b⟩) → (p ∧ tla_enabled m ⊢ tla_enabled a) → (□⟨λ s s', n s s' ∧ ¬b s s'⟩ ∧ weak_fairness a ∧ □f ∧ ◇□ (tla_enabled m) ⊢ ◇□p) → (□⟨n⟩ ∧ weak_fairness a ∧ □f ⊢ weak_fairness m). Proof. intros Hn_to_m Ha_to_b Hm_to_a_enabled Hp_while_not_b. apply tla_contra. rewrite (weak_fairness_alt3 m). tla_simp. do 2 rewrite -tla_and_assoc. rewrite -eventually_always_distrib. rewrite always_and. tla_simp. intros e (Hn & Hwf & Hf & Heventually). rewrite /tla_false /=. destruct Heventually as [k [Hnot_m Henabled_m]]. assert ((□⟨λ s s', n s s' ∧ ¬ b s s'⟩)%L (drop k e)). { unseal. split. - apply Hn. - intros Hb. contradiction (Hnot_m k0). apply Hn_to_m. rewrite drop_drop. split; eauto. } assert ((◇□ p)%L (drop k e)) as [k' Hp]. { apply Hp_while_not_b. split; eauto. split. { rewrite /weak_fairness. rewrite {1}/always; intros. rewrite drop_drop. apply Hwf. } split. { rewrite /always => k'. rewrite drop_drop. apply Hf. } exists 0. rewrite drop_0. apply Henabled_m. } rewrite drop_drop in Hp. assert ((□◇⟨a⟩)%L (drop (k' + k) e)) as Heventually_a. { intros k''. rewrite drop_drop. apply Hwf. intros k'''. apply Hm_to_a_enabled. rewrite drop_drop. split. - eapply always_drop_ge; [ eassumption | lia ]. - eapply always_drop_ge; [ eassumption | lia ]. } rewrite always_and in H; destruct H as [_ Hnotb]. destruct (Heventually_a 0) as [k'' Ha]. rewrite ?drop_0 !drop_drop in Ha. apply (Hnotb (k'' + k')). rewrite !drop_drop. apply Ha_to_b. repeat split; eauto. - eapply always_drop_ge; [ apply Hp | lia ]. - rewrite /later drop_drop. eapply always_drop_ge; [ apply Hp | lia ]. - replace (k'' + k' + k) with (k'' + (k' + k)) by lia; assumption. Qed. End TLA.
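(*| Added note (informal summary, not part of the original development): [wf_combine] packages the two directions proved above as [wf_combine_impl] and [wf_split_impl]. Its side conditions read: whenever [a] is enabled, either [b] stays disabled from then on or [a] eventually fires, and symmetrically with the roles of [a] and [b] swapped. Under these conditions the conjunction of the two fairness assumptions coincides with fairness of the combined action, WF(a) ∧ WF(b) == WF(λ s s', a s s' ∨ b s s'), which is Theorem 8.20 of "Specifying Systems". |*)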
text\<open> 21 October 2021: Exercise for Homework Assignment 07 in CS 511 \<close> text\<open> Your task to remove the invocations of the pre-defined method 'blast' by an equivalent sequence of 'apply' steps \<close> theory HW07_solution imports Main begin text\<open> 'blast' is invoked three times, once in the proof of each of lemmas B1, C1, and D1 below \<close> (* The proof of the next lemma is just an example of how to use the rules for manipulating quantifiers *) lemma preliminary : " (\<exists>z. P z) \<and> Q \<longrightarrow> (\<exists>y. P y \<and> Q)" apply (rule impI) apply (erule conjE) apply (erule exE) apply (rule_tac x="z" in exI) apply (rule conjI) apply assumption+ done (* Lemma A1 is the same in Exercise 2.3.9 (a), page 161, in [LCS] *) lemma A1 : "(\<exists>x. S \<longrightarrow> Q x) \<Longrightarrow> S \<longrightarrow> (\<exists>x. Q x)" apply (erule exE) apply (rule impI) apply (erule impE) apply assumption apply (rule_tac x="x" in exI) apply assumption done (* Lemma A2 is the same as lemma A1 but with a different proof *) lemma A2 : "(\<exists>x. S \<longrightarrow> Q x) \<Longrightarrow> S \<longrightarrow> (\<exists>x. Q x)" apply clarify apply (rule_tac x="x" in exI) apply assumption done (* Lemma B1 is the same in Exercise 2.3.9 (b), page 161, in [LCS] *) lemma B1 : "S \<longrightarrow> (\<exists>x. Q x) \<Longrightarrow> (\<exists>x. S \<longrightarrow> Q x)" by blast text \<open> lemma B1_by_Mattia : "S \<longrightarrow> (\<exists>x. Q x) \<Longrightarrow> (\<exists>x. S \<longrightarrow> Q x)" apply(rule_tac x="x" in exI) (* Sledgehammer fails or says it is unprovable *) apply(rule impI) apply(erule impE) apply assumption apply(erule exE) \<close> text\<open> Note: Copying in the secondary windows/panels works via the keyboard shortcuts Ctrl+c or Ctrl+INSERT, while jEdit menu actions always refer to the primary windown/panel. \<close> text\<open> The proof below consists of 'apply' steps only. The inserted comment after every step is the resulting 'proof state'. This proof is not the shortest or the most elegant, but understanding every step is a good exercise for how to apply the available pre-defined rules. \<close> lemma B2 : "S \<longrightarrow> (\<exists>x. Q x) \<Longrightarrow> (\<exists>x. S \<longrightarrow> Q x)" apply (rule exCI) (* S \<longrightarrow> (\<exists>x. Q x) \<Longrightarrow> \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> S \<longrightarrow> Q ?a *) apply (erule impE) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> S 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<longrightarrow> Q ?a *) apply (erule allE) (* 1. \<not> (S \<longrightarrow> Q ?x5) \<Longrightarrow> S 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<longrightarrow> Q ?a *) apply (rule contrapos_np) (* 1. \<not> (S \<longrightarrow> Q ?x5) \<Longrightarrow> \<not> ?Q7 2. \<not> (S \<longrightarrow> Q ?x5) \<Longrightarrow> \<not> S \<Longrightarrow> ?Q7 3. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<longrightarrow> Q ?a *) apply assumption (* 1. \<not> (S \<longrightarrow> Q ?x5) \<Longrightarrow> \<not> S \<Longrightarrow> S \<longrightarrow> Q ?x5 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<longrightarrow> Q ?a *) apply (rule impI) (* 1. 
\<not> (S \<longrightarrow> Q ?x5) \<Longrightarrow> \<not> S \<Longrightarrow> S \<Longrightarrow> Q ?x5 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<longrightarrow> Q ?a *) apply (erule notE)+ (* 1. S \<Longrightarrow> S 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<longrightarrow> Q ?a *) apply assumption (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<longrightarrow> Q ?a *) apply (rule impI) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> Q ?a *) apply (rule notE) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> \<not> ?P18 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> ?P18 *) apply (rule notI) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> ?P18 \<Longrightarrow> False 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> ?P18 *) apply (erule FalseE) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> False *) apply (rule contrapos_np) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> \<not> ?Q24 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> \<not> False \<Longrightarrow> ?Q24 *) apply (rule notI) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> ?Q24 \<Longrightarrow> False 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> \<not> False \<Longrightarrow> ?Q24 *) apply (erule notE) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> ?P29 2. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> \<not> False \<Longrightarrow> \<not> ?P29 *) apply assumption (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> \<not> False \<Longrightarrow> \<not> (\<forall>x. \<not> (S \<longrightarrow> Q x)) *) apply (rule notI) (* 1. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> \<exists>x. Q x \<Longrightarrow> S \<Longrightarrow> \<not> False \<Longrightarrow> \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> False *) apply (erule exE) (* 1. \<And>x. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> S \<Longrightarrow> \<not> False \<Longrightarrow> \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> Q x \<Longrightarrow> False *) apply (erule notE) (* 1. \<And>x. \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> S \<Longrightarrow> \<forall>x. \<not> (S \<longrightarrow> Q x) \<Longrightarrow> Q x \<Longrightarrow> False *) apply (erule allE)+ (* 1. \<And>x. S \<Longrightarrow> Q x \<Longrightarrow> \<not> (S \<longrightarrow> Q (?x37 x)) \<Longrightarrow> \<not> (S \<longrightarrow> Q (?x39 x)) \<Longrightarrow> False *) apply (erule notE)+ (* 1. 
\<And>x. S \<Longrightarrow> Q x \<Longrightarrow> S \<longrightarrow> Q (?x39 x) *) apply (rule impI) (* 1. \<And>x. S \<Longrightarrow> Q x \<Longrightarrow> S \<Longrightarrow> Q (?x39 x) *) apply assumption (* No subgoals! *) done (* Lemma C1 is the same in Exercise 2.3.9 (c), page 161, in [LCS] *) lemma C1 : "(\<exists>x. P x) \<longrightarrow> S \<Longrightarrow> \<forall>x. (P x \<longrightarrow> S)" by blast text\<open> The proof below consists of 'apply' steps only. \<close> lemma C2 : "(\<exists>x. P x) \<longrightarrow> S \<Longrightarrow> \<forall>x. (P x \<longrightarrow> S)" apply (rule allI) apply (rule impI) apply (erule impE) apply (rule_tac x="x" in exI) apply assumption+ done (* Lemma D1 is the same in Exercise 2.3.9 (d), page 161, in [LCS] *) lemma D1 : " (\<forall>x. P x) \<longrightarrow> S \<Longrightarrow> \<exists>x. (P x \<longrightarrow> S)" by blast text\<open> The proof below consists of 'apply' steps. The inserted comment after every step is the resulting 'proof state'. This proof is not the shortest or the most elegant, but understanding every step is a good exercise for how to apply the available pre-defined rules. \<close> lemma D2 : " (\<forall>x. P x) \<longrightarrow> S \<Longrightarrow> \<exists>x. (P x \<longrightarrow> S)" apply (rule exCI) (* 1. (\<forall>x. P x) \<longrightarrow> S \<Longrightarrow> \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> P ?a \<longrightarrow> S *) apply (erule impE) (* 1. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> \<forall>x. P x 2. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply (rule allI) (* 1. \<And>x. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> P x 2. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply (erule_tac x="x" in allE) (* 1. \<And>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> P x 2. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply (rule contrapos_np) (* 1. \<And>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> \<not> ?Q8 x 2. \<And>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> \<not> P x \<Longrightarrow> ?Q8 x 3. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply assumption (* 1. \<And>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> \<not> P x \<Longrightarrow> P x \<longrightarrow> S 2. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply (rule impI) (* 1. \<And>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> \<not> P x \<Longrightarrow> P x \<Longrightarrow> S 2. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply (erule notE) (* 1. \<And>x. \<not> P x \<Longrightarrow> P x \<Longrightarrow> P x \<longrightarrow> S 2. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply (rule impI) (* 1. \<And>x. \<not> P x \<Longrightarrow> P x \<Longrightarrow> P x \<Longrightarrow> S 2. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply (erule notE) (* 1. \<And>x. P x \<Longrightarrow> P x \<Longrightarrow> P x 2. \<forall>x. 
\<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply assumption (* 1. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<longrightarrow> S *) apply (rule impI) (* 1. \<forall>x. \<not> (P x \<longrightarrow> S) \<Longrightarrow> S \<Longrightarrow> P ?a \<Longrightarrow> S *) apply assumption (* No subgoals! *) done end
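(* Added note (not part of the original homework file): the 'blast'-free replacements above follow two patterns. C2 needs only the introduction and elimination rules allI, impI, impE and exI. B2 and D2 are classical: they introduce the existential witness with exCI, case-split on the implication in the premise with (erule impE), and handle the negated subgoals with contrapos_np and notE, closing the remaining goals by assumption. *)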
# --------------------------------------- # R-Script to plot real-time measurements # Author: Pavan Kumar Paluri # University of Houston @ RTLAB - 2020 # ---------------------------------------- # Set the working directory wd_dir=setwd("/Users/pavankumarpaluri/Documents/RRP_S/tcp_ip_charts") print(wd_dir) # extract the csv info latency_vector <- read.csv(file = "latency_result.csv", header = TRUE, sep = "\t") print(latency_vector) head(latency_vector) # read specific columns only [1:2] read.csv(file = "latency_result.csv", sep = "\t")[ ,1:2] # Plotting histogram for Mean columns {RTDS, ARINC, AAF, CREDIT} library(ggplot2) library(plyr) library(reshape2) # means.barplot <- qplot(x=latency_vector$Scheduler_name, # y=latency_vector$mean, position = "dodge") # Use a bar plot instead # pdf(file = "Latency_mean.pdf") #-------- MEAN VALUES PLOTTING ---------- require(grDevices) # required for rainbow colors par(mfrow=c(1,1)) barplot(latency_vector$mean, names.arg = latency_vector$Scheduler_name, xlab = "Scheduler", ylab = "Time (in ms)", col = rainbow(20), main = "Mean Latency for Xen schedulers",axes = FALSE, space=c(0.3,0.3,0.3,0.3), width=c(0.001,0.001,0.001,0.001) ) usr <- par("usr") par(usr=c(usr[1:2], 30, 60)) axis(2, at=seq(30,60,10)) # dev.off() # -------------------------------------- #------------- MEDIAN VALUE PLOTTING ------------ par(mfrow=c(1,1)) barplot(latency_vector$median, names.arg = latency_vector$Scheduler_name, xlab = "Scheduler", ylab = "Time (in ms)", col = rainbow(20), main = "Median Latency for Xen schedulers",axes = FALSE, space=c(0.3,0.3,0.3,0.3), width=c(0.001,0.001,0.001,0.001) ) usr <- par("usr") par(usr=c(usr[1:2], 20, 60)) axis(2, at=seq(20,60,20)) # ------------------------------------------- #--------- MAX LATENCY PLOTTING -------------- par(mfrow=c(1,1)) barplot(latency_vector$Max, names.arg = latency_vector$Scheduler_name, xlab = "Scheduler", ylab = "Max Latency (in ms)", col = rainbow(20), main = "Max Latency for Xen schedulers",axes = FALSE, space=c(0.3,0.3,0.3,0.3), width=c(0.001,0.001,0.001,0.001) ) usr <- par("usr") par(usr=c(usr[1:2], 100, 180)) axis(2, at=seq(100,180,20)) #-------------------------------------------- # print(means.barplot)
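# Added note (not part of the original script): each bar chart above is drawn with
# axes = FALSE and a y-axis is then added via par(usr = c(usr[1:2], lo, hi)) followed by
# axis(2, ...). Changing par("usr") after barplot() only affects subsequently drawn
# elements, so the tick labels need not line up with the heights of the already-drawn
# bars. A more direct way to control the visible range is to pass ylim = c(lo, hi)
# (optionally with xpd = FALSE) to barplot() itself and let axis(2, ...) use that scale.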
import data.real.irrational import topology.basic import algebra.order.floor --OUTPUT 1 theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℝ) (h1 : α ≠ 0) (h2 : α ∉ set.range (λ n, n⁻¹ : ℤ → ℝ)) : ∀ y ∈ Icc 0 1, ∃ (x : ℤ) (ε > 0), ε ≤ y ∧ ε + x * α ∈ Icc 0 1 := begin assume y h3 : y ∈ Icc 0 1, obtain ⟨n, h4⟩ : ∃ n : ℤ, y ≤ n, from exists_lt_of_le y, obtain ⟨m, h5⟩ : ∃ m : ℤ, y ≥ m, from exists_lt_of_lt y, let N1 := n + 1, let N2 := m - 1, have h6 : ∃ N : ℤ, (N1 ≤ N) ∧ (N ≤ N2), from exists_lt_of_le N1, cases h6 with N h7, have h8 : (N1 ≤ N) ∧ (N ≤ N2), from h7, have h9 : y < N + 1, from by auto [lt_iff_le_and_ne, le_add_right, h7, h4], have h10 : N < y + 1, from by auto [add_lt_add_iff_right, h7, h5], have h11 : y ≤ N, from by auto [le_of_lt, h9], have h12 : N ≤ y, from by auto [le_of_lt, h10], let ε := (y - N) * α, have h13 : ε > 0, from by auto [lt_of_le_of_lt, sub_pos.mpr, sub_nonneg.mpr, h11], have h14 : ε + N * α = y * α, from by auto [mul_sub_right_distrib, mul_self_cancel h1, one_mul], have h15 : ε + N * α = (N + 1) * α - α, from by auto [add_comm, mul_comm, h14, add_mul, mul_add, mul_comm, add_comm], have h16 : ε + N * α = (N + 1) * α - 1, from by auto [h15, mul_one], have h17 : ε + N * α = (N + 1) * α - 1, from by auto [add_comm, mul_comm, h14, add_mul, mul_add, mul_comm, add_comm], have h18 : ε + N * α = (N + 1) * α - 1, from by auto [h17, mul_one], have h19 : ε + N * α ∈ Icc 0 1, from by auto [Icc_subset_right, h9, h18, mul_nonneg.mpr, add_nonneg.mpr, mul_nonneg.mpr, h11], use N, use ε, use h13, use h19, end --OUTPUT 2 theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℝ) (ra : α ∉ ℚ) : {n : ℕ // n ≥ 1} → ∃! 
i : ℤ, ∀ j : ℤ, (i - j : ℝ) ≠ 0 → (n : ℝ) * i ≠ n * j := begin assume (n : ℕ) (h1 : n ≥ 1), have h2 : ∀ i j : ℤ, (i - j : ℝ) ≠ 0 → (n : ℝ) * i ≠ n * j, from by auto [mul_left_cancel], use n, show ∀ (j : ℤ), (n - j : ℝ) ≠ 0 → (n : ℝ) * n ≠ n * j, from by auto [h2, sub_eq_iff_eq_add], have h3 : ∀ i j : ℤ, (i - j : ℝ) ≠ 0 → (n : ℝ) * i ≠ n * j, from by auto [h2], assume (i : ℤ) (h4 : ∀ j : ℤ, (i - j : ℝ) ≠ 0 → (n : ℝ) * i ≠ n * j), assume (j : ℤ) (h5 : (i - j : ℝ) ≠ 0), have h6 : (n : ℝ) * i ≠ n * j, from by auto [h3, h5], show (n : ℝ) * i ≠ n * j, from by auto [h4, h5, h6], end --OUTPUT 3 theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=orbit_density (α : ℝ) : α ∉ ℚ → ∀ (ε : ℝ) (y : ℝ) (h : ε > 0), ∃ n : ℤ, |y - n * α| < ε := begin assume h1 (ε : ℝ) (y : ℝ) (h2 : ε > 0), have h3 : ∀ n : ℤ, ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h4 : ∀ n : ℤ, ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h5 : ∀ n : ℤ, ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h6 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h7 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h8 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h9 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h10 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h11 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h12 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h13 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h14 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h15 : ∃ x : ℝ, x ∈ 
(set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h16 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], have h17 : ∃ x : ℝ, x ∈ (set.range (λ (n : ℤ), n * α)) ∧ ((abs (x - y)) < ε), from by auto [set.subset_iff] using [abs_sub_lt_iff] using [mul_sub_left_distrib, lt_sub_iff_add_lt, abs_add, abs_mul, abs_nonneg, le_of_lt], cases h17 with x h18, cases h18 with h19 h20, have h21 : ∃ n, x = n * α, from by auto [set.mem_range] using [h19], cases h21 with n h22, have h23 : |y - n * α| < ε, from by auto [h22, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg, abs_of_nonneg] using [h20, abs_of_nonneg], use n, show |y - n * α| < ε, from by auto [h end --Needs more than 2000 tokens! --OUTPUT 4 theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_irrational_orbit (α : ℝ) : irrat α → ∀ ε > 0, ∃ N : ℤ, ∀ (i : ℤ), i > N → |((α * i) % 1) - 0| < ε := begin assume h1 : irrat α, assume (h2 : ε > 0), let S : set ℝ := {((n : ℤ) : ℝ) : ℝ | ∃ (i : ℤ), n = (α * i) % 1}, have h3 : ∀ (i j : ℤ), i ≠ j → ((α * i) % 1) ≠ ((α * j) % 1), from by auto [irrat.def, abs_add_lt_iff, abs_mul_lt_iff, abs_sub_lt_iff, one_mul, add_mul, sub_mul, mul_sub, mul_add, add_sub, mul_comm, sub_add_cancel, add_sub_cancel, mul_assoc, mul_one, sub_self, add_self] using [linarith], have h4 : ∀ (i : ℤ), ∃ (x : ℝ), x ∈ S, from by auto [exists.intro ((α * i) % 1), set.mem_set_of_eq], have h5 : ∀ (x : ℝ), ∃ (i : ℤ), x = ((α * i) % 1), from by auto [abs_lt_iff, abs_add_lt_iff, abs_mul_lt_iff, abs_sub_lt_iff, one_mul, add_mul, sub_mul, mul_sub, mul_add, add_sub, mul_comm, sub_add_cancel, add_sub_cancel, mul_assoc, mul_one, sub_self, add_self] using [linarith], have h6 : S.nonempty, from by auto [h4], have h7 : ∀ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ y ∈ S, from by auto [h3, h5], have h8 : ∃ (y : ℝ), y ∈ S ∧ y ∈ S, from by auto [h6, h7], have h9 : S.infinite, from by auto [set.infinite_iff_nonempty_of_inhabited_of_not_finite, h6, h7], have h10 : ∃ (y : ℝ), ∃ (z : ℝ), y ≠ z ∧ y ∈ S ∧ z ∈ S, from by auto [h8, h7], have h11 : S.finite, from by auto [set.finite_iff_card_lt_omega, h9, h10], have h12 : S.finite, from by auto [h11], have h13 : ∃ (x : ℝ), x ∈ S, from by auto [h4], have h14 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h10], --have h15 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h14, h7], have h16 : ∃ (x : ℝ), x ∈ S ∧ x ∈ S, from by auto [h8], have h17 : ∃ (x : ℝ), x ∈ S, from by auto [h16], have h18 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h14, h7], have h19 : S.infinite, from by auto [set.infinite_iff_nonempty_of_inhabited_of_not_finite, h17, h18], have h20 : ∃ (x : ℝ), x ∈ S, from by auto [h4], have h21 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h14, h7], have h22 : ∃ (x : ℝ), x ∈ S ∧ x ∈ S, from by auto [h8], have h23 : ∃ (x : ℝ), x ∈ S, from by auto [h22], have h24 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h21, h7], have h25 : S.infinite, from by auto [set.infinite_iff_nonempty_of_inhabited_of_not_finite, h23, h24], 
have h26 : ∃ (x : ℝ), x ∈ S, from by auto [h4], have h27 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h14, h7], have h28 : ∃ (x : ℝ), x ∈ S ∧ x ∈ S, from by auto [h8], have h29 : ∃ (x : ℝ), x ∈ S, from by auto [h28], have h30 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h27, h7], have h31 : S.infinite, from by auto [set.infinite_iff_nonempty_of_inhabited_of_not_finite, h29, h30], have h32 : ∃ (x : ℝ), x ∈ S, from by auto [h4], have h33 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h14, h7], have h34 : ∃ (x : ℝ), x ∈ S ∧ x ∈ S, from by auto [h8], have h35 : ∃ (x : ℝ), x ∈ S, from by auto [h34], have h36 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h33, h7], have h37 : S.infinite, from by auto [set.infinite_iff_nonempty_of_inhabited_of_not_finite, h35, h36], have h38 : ∃ (x : ℝ), x ∈ S, from by auto [h4], have h39 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h14, h7], have h40 : ∃ (x : ℝ), x ∈ S ∧ x ∈ S, from by auto [h8], have h41 : ∃ (x : ℝ), x ∈ S, from by auto [h40], have h42 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from by auto [h39, h7], have h43 : S.infinite, from by auto [set.infinite_iff_nonempty_of_inhabited_of_not_finite, h41, h42], have h44 : ∃ (x : ℝ), x ∈ S, from by auto [h4], have h45 : ∃ (x : ℝ), ∃ (y : ℝ), x ≠ y ∧ x ∈ S ∧ y ∈ S, from end --Needs more than 2000 tokens! --OUTPUT 5 theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit {x : ℝ} (hx : ¬(∃ n : ℕ, x = n)) : let orbit : ℤ → ℝ := λ (n : ℤ), n*x in let orbit_frac_part : ℤ → ℝ := λ (n : ℤ), n*x - ⌊n*x⌋ in let orbit_frac_part_set : set ℝ := { n*x - ⌊n*x⌋ | n : ℤ } in let orbit_frac_part_set_0_1 : set ℝ := orbit_frac_part_set ∩ Icc 0 1 in let orbit_frac_part_set_0_1_dense : Prop := ∀ x : ℝ, ∃ y : ℝ, y ∈ orbit_frac_part_set_0_1 ∧ |x - y| < 1 in orbit_frac_part_set_0_1_dense := begin assume orbit orbit_frac_part orbit_frac_part_set orbit_frac_part_set_0_1 orbit_frac_part_set_0_1_dense, have h1 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ≠ orbit_frac_part j, from by auto [orbit_frac_part, hx, eq_of_mul_eq_mul_left], have h2 : ∀ (i j : ℤ), i ≠ j → orbit i ≠ orbit j, from by auto [orbit, hx, eq_of_mul_eq_mul_left], have h3 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∉ orbit_frac_part_set_0_1 ↔ orbit_frac_part j ∉ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h1], have h4 : ∀ (i j : ℤ), i ≠ j → orbit i ∉ orbit_frac_part_set_0_1 ↔ orbit j ∉ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h2], have h5 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∉ orbit_frac_part_set ↔ orbit_frac_part j ∉ orbit_frac_part_set, from by auto [orbit_frac_part_set, h1], have h6 : ∀ (i j : ℤ), i ≠ j → orbit i ∉ orbit_frac_part_set ↔ orbit j ∉ orbit_frac_part_set, from by auto [orbit_frac_part_set, h2], have h7 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∈ orbit_frac_part_set ↔ orbit_frac_part j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h1], have h8 : ∀ (i j : ℤ), i ≠ j → orbit i ∈ orbit_frac_part_set ↔ orbit j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h2], have h9 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∈ orbit_frac_part_set_0_1 ↔ orbit_frac_part j ∈ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h1], have h10 : ∀ (i j : ℤ), i ≠ j → orbit i ∈ orbit_frac_part_set_0_1 ↔ orbit j ∈ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h2], have h11 : ∀ (i j : ℤ), i ≠ j → 
orbit_frac_part i ∈ orbit_frac_part_set ↔ orbit_frac_part j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h1], have h12 : ∀ (i j : ℤ), i ≠ j → orbit i ∈ orbit_frac_part_set ↔ orbit j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h2], have h13 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∉ orbit_frac_part_set_0_1 ↔ orbit_frac_part j ∉ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h1], have h14 : ∀ (i j : ℤ), i ≠ j → orbit i ∉ orbit_frac_part_set_0_1 ↔ orbit j ∉ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h2], have h15 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∈ orbit_frac_part_set ↔ orbit_frac_part j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h1], have h16 : ∀ (i j : ℤ), i ≠ j → orbit i ∈ orbit_frac_part_set ↔ orbit j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h2], have h17 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∈ orbit_frac_part_set_0_1 ↔ orbit_frac_part j ∈ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h1], have h18 : ∀ (i j : ℤ), i ≠ j → orbit i ∈ orbit_frac_part_set_0_1 ↔ orbit j ∈ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h2], have h19 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∈ orbit_frac_part_set ↔ orbit_frac_part j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h1], have h20 : ∀ (i j : ℤ), i ≠ j → orbit i ∈ orbit_frac_part_set ↔ orbit j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h2], have h21 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∈ orbit_frac_part_set_0_1 ↔ orbit_frac_part j ∈ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h1], have h22 : ∀ (i j : ℤ), i ≠ j → orbit i ∈ orbit_frac_part_set_0_1 ↔ orbit j ∈ orbit_frac_part_set_0_1, from by auto [orbit_frac_part_set_0_1, h2], have h23 : ∀ (i j : ℤ), i ≠ j → orbit_frac_part i ∈ orbit_frac_part_set ↔ orbit_frac_part j ∈ orbit_frac_part_set, from by auto [orbit_frac_part_set, h1], have h24 : ∀ (i j : ℤ), i ≠ j → orbit i ∈ orbit_frac_part_set ↔ orbit j ∈ orbit_frac_part_set, from by auto [orbit_frac_ end --Needs more than 2000 tokens! --OUTPUT 6 theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℝ) : let irrational : ℝ → Prop := λ x, ¬ x ∈ set.range (λ (n : ℕ), n : ℝ) in irrational α → let frac_part : ℝ → ℝ := λ x, x - (x.to_int : ℝ) in let S : set ℝ := λ x, ∃ i : ℤ, x = frac_part (i * α) in let is_dense : set ℝ → Prop := λ x, ∀ y : ℝ, ∃ z : ℝ, z ∈ x ∧ y < z ∧ z < y + 1 in is_dense S := begin assume (h1 : irrational α), assume (h3 : ∀ (y : ℝ), ∃ (z : ℝ), z ∈ S ∧ y < z ∧ z < y + 1), show ∀ (x : ℝ), ∃ (z : ℝ), z ∈ S ∧ x < z ∧ z < x + 1, from by auto [h1, h3] using [exists_unique.unique, exists_unique.exists, exists_unique.not_exists, exists_unique.not_exists_left, exists_unique.not_exists_right, exists_unique.ne, exists_unique.not_mem_iff, exists_unique.mem_iff], end --OUTPUT 7 theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_of_irrational_orbit (α : ℝ) (hα : ¬ ∃ r : ℚ, α = r) : ∀ x ∈ set.range (λ n, (n : ℕ) * α), ∃ y ∈ set.range (λ n, (n : ℕ) * α), y ≠ x ∧ abs (y - x) < 1 := begin assume (x : ℝ) (hx : x ∈ set.range (λ n, (n : ℕ) * α)), have hx1 : ∃ n, x = (n : ℕ) * α, from set.mem_range.1 hx, cases hx1 with n hn, subst x, have hn1 : ∃! 
r : ℚ, r * α = (n : ℕ) * α, from by auto using [exists_unique.exists, exists_unique.unique, eq_of_mul_eq_mul_left], have hn2 : ∃! r : ℚ, r * α = (n + 1) * α, from by auto using [exists_unique.exists, exists_unique.unique, eq_of_mul_eq_mul_left], have h1 : ¬ (∃ (r : ℚ), r * α = (n : ℕ) * α ∧ (∃ (r : ℚ), r * α = (n + 1) * α)), from by auto [hn1, hn2, classical.not_forall, classical.not_exists, hα, eq_of_mul_eq_mul_left], have h2 : ∀ (r : ℚ), r * α ≠ (n : ℕ) * α ∨ r * α ≠ (n + 1) * α, from by auto [h1, classical.not_and_iff_not_or_not, classical.not_forall, classical.not_exists], have h3 : ∀ (r : ℚ), r * α < (n : ℕ) * α ∨ r * α > (n + 1) * α, from by auto using [h2, eq_of_mul_eq_mul_left], have h4 : ∀ (r : ℚ), r * α < (n : ℕ) * α ∨ (n : ℕ) * α < r * α, from by auto [lt_or_gt], have h5 : ∀ (r : ℚ), r * α < (n : ℕ) * α ∨ (n : ℕ) * α < r * α ∨ (n : ℕ) * α = r * α, from by auto [lt_or_gt, eq_or_lt, lt_or_eq_of_le], have h6 : ∀ (r : ℚ), r * α < (n : ℕ) * α ∨ (n : ℕ) * α < r * α ∨ ((n : ℕ) * α = r * α), from by auto [h5, h2, eq_of_mul_eq_mul_left], have h7 : ∀ (r : ℚ), r * α < (n : ℕ) * α ∨ (n : ℕ) * α < r * α ∨ ((n : ℕ) * α = r * α), from by auto [h6, h3, not_or_distrib], have h8 : ∀ (r : ℚ), r * α < (n : ℕ) * α ∨ (n : ℕ) * α < r * α ∨ (n : ℕ) * α = r * α, from by auto [h7, eq_of_mul_eq_mul_left], have h9 : ∀ (r : ℚ), r * α < (n : ℕ) * α ∨ (n : ℕ) * α < r * α ∨ (n : ℕ) * α = r * α, from by auto [h8, h4], have h10 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ (n : ℕ) * α = r * α, from by auto [h9, lt_or_gt], have h11 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h10, eq_of_mul_eq_mul_left], have h12 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h11], have h13 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h12], have h14 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h13], have h15 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h14], have h16 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h15], have h17 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h16], have h18 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h17], have h19 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h18], have h20 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h19], have h21 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h20], have h22 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h21], have h23 : ∀ (r : ℚ), (n : ℕ) * α < r * α ∨ r * α < (n : ℕ) * α ∨ r * α = (n : ℕ) * α, from by auto [h22], have h24 : ∀ (r : ℚ), end --Needs more than 2000 tokens! 
--OUTPUT 8 theorem irrational_orbit_dense {α : ℝ} (hα_irrat : irrational α) : closure ((λ m : ℤ, int.fract (α * ↑m)) '' (@set.univ ℤ)) = set.Icc 0 1 :=density_irrational_orbit (α : ℝ) (h : α ∉ ℚ) : ∀ x ∈ set.range (λ n : ℤ, (n : ℝ) * α), ∃ y ∈ set.range (λ n : ℤ, (n : ℝ) * α), |x - y| < 1 := begin assume x hx, have h1 : ∀ (i : ℤ) (j : ℤ), i ≠ j → set.Ico 0 1 (i * α) ≠ set.Ico 0 1 (j * α), from by auto [not_iff_comm, mem_Ico] using [h, mul_mem_Ico], have h2 : ∀ (i : ℤ) (j : ℤ), i ≠ j → set.Ico 0 1 (i * α) ∩ set.Ico 0 1 (j * α) = ∅, from by auto [set.inter_eq_empty_of_disjoint h1], have h3 : ∀ (i : ℤ) (j : ℤ), i ≠ j → (i : ℝ) * α ≠ (j : ℝ) * α, from by auto [not_iff_comm, ne.def, mem_Ico] using [h, mul_mem_Ico], have h4 : ∀ (i : ℤ) (j : ℤ), i ≠ j → (i : ℝ) * α ∉ set.Ico 0 1 (j : ℝ) * α, from by auto [not_iff_comm, mem_Ico] using [h, mul_mem_Ico], have h5 : ∀ (i : ℤ) (j : ℤ), i ≠ j → (i : ℝ) * α ∉ set.Ico 0 1 (j : ℝ) * α, from by auto [not_iff_comm, mem_Ico] using [h, mul_mem_Ico], have h6 : ∀ (i : ℤ) (j : ℤ), i ≠ j → set.Ico 0 1 ((i : ℝ) * α) ∩ set.Ico 0 1 ((j : ℝ) * α) = ∅, from by auto [set.inter_eq_empty_of_disjoint h4], have h7 : ∀ (i : ℤ) (j : ℤ), i ≠ j → set.Ico 0 1 ((j : ℝ) * α) ∩ set.Ico 0 1 ((i : ℝ) * α) = ∅, from by auto [set.inter_eq_empty_of_disjoint h5], have h8 : ∀ i : ℤ, set.Ico 0 1 ((i : ℝ) * α) ∩ (set.Ico 0 1 ((-i : ℝ) * α)) = ∅, from by auto [h7, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h7], have h9 : ∀ i : ℤ, set.Ico 0 1 ((i : ℝ) * α) ∩ (set.Ico 0 1 ((-i : ℝ) * α)) = ∅, from by auto [h7, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h7], have h10 : ∀ i : ℤ, set.Ico 0 1 ((-i : ℝ) * α) ∩ (set.Ico 0 1 ((i : ℝ) * α)) = ∅, from by auto [h8, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h9], have h11 : ∀ i : ℤ, set.Ico 0 1 ((-i : ℝ) * α) ∩ (set.Ico 0 1 ((i : ℝ) * α)) = ∅, from by auto [h8, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h9], have h12 : ∀ i : ℤ, set.Ico 0 1 ((i : ℝ) * α) ∩ (set.Ico 0 1 ((-i : ℝ) * α)) = ∅, from by auto [h10, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h11], have h13 : ∀ i : ℤ, set.Ico 0 1 ((i : ℝ) * α) ∩ (set.Ico 0 1 ((-i : ℝ) * α)) = ∅, from by auto [h10, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h11], have h14 : ∀ i : ℤ, set.Ico 0 1 ((-i : ℝ) * α) ∩ (set.Ico 0 1 ((i : ℝ) * α)) = ∅, from by auto [h12, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h13], have h15 : ∀ i : ℤ, set.Ico 0 1 ((-i : ℝ) * α) ∩ (set.Ico 0 1 ((i : ℝ) * α)) = ∅, from by auto [h12, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h13], have h16 : ∀ i : ℤ, set.Ico 0 1 ((i : ℝ) * α) ∩ (set.Ico 0 1 (-(i : ℝ) * α)) = ∅, from by auto [h14, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h15], have h17 : ∀ i : ℤ, set.Ico 0 1 ((i : ℝ) * α) ∩ (set.Ico 0 1 (-(i : ℝ) * α)) = ∅, from by auto [h14, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h15], have h18 : ∀ i : ℤ, set.Ico 0 1 (-(i : ℝ) * α) ∩ (set.Ico 0 1 ((i : ℝ) * α)) = ∅, from by auto [h16, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h17], have h19 : ∀ i : ℤ, set.Ico 0 1 (-(i : ℝ) * α) ∩ (set.Ico 0 1 ((i : ℝ) * α)) = ∅, from by auto [h16, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h17], have h20 : ∀ i : ℤ, set.Ico 0 1 ((i : ℝ) * α) ∩ (set.Ico 0 1 (-(i : ℝ) * α)) = ∅, from by auto [h18, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h19], have h21 : ∀ i : ℤ, set.Ico 0 1 ((i : ℝ) * α) ∩ (set.Ico 0 1 (-(i : ℝ) * α)) = ∅, from by auto [h18, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h19], have h22 : ∀ i : ℤ, set.Ico 0 1 (-(i : ℝ) * α) ∩ (set.Ico 0 1 ((i : ℝ) * α)) = 
∅, from by auto [h20, neg_neg_eq_of_pos, set.inter_eq_empty_of_disjoint h21], have h23 : ∀ i : ℤ, set.Ico 0 1 (-(i : ℝ) * α) ∩ (set.Ico 0 1 ((i end --Needs more than 2000 tokens! /- FEW SHOT PROMPTS TO CODEX(START) /--`theorem` Power Set is Closed under Intersection Let $S$ be a set. Let $\powerset S$ be the power set of $S$. Then: :$\forall A, B \in \powerset S: A \cap B \in \powerset S$ `proof` Let $A, B \in \powerset S$. Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$. From Intersection is Subset we have that $A \cap B \subseteq A$. It follows from Subset Relation is Transitive that $A \cap B \subseteq S$. Thus $A \cap B \in \powerset S$ and closure is proved. {{qed}} -/ theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S := begin assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S), have h1 : (A ⊆ S) ∧ (B ⊆ S), from by auto [set.subset_of_mem_powerset, set.subset_of_mem_powerset], have h2 : (A ∩ B) ⊆ A, from by auto [set.inter_subset_left], have h3 : (A ∩ B) ⊆ S, from by auto [set.subset.trans], show (A ∩ B) ∈ 𝒫 S, from by auto [set.mem_powerset], end /--`theorem` Square of Sum :$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$ `proof` Follows from the distribution of multiplication over addition: {{begin-eqn}} {{eqn | l = \left({x + y}\right)^2 | r = \left({x + y}\right) \cdot \left({x + y}\right) }} {{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right) | c = Real Multiplication Distributes over Addition }} {{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y | c = Real Multiplication Distributes over Addition }} {{eqn | r = x^2 + 2xy + y^2 | c = }} {{end-eqn}} {{qed}} -/ theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) := begin calc (x + y)^2 = (x+y)*(x+y) : by auto [sq] ... = x*(x+y) + y*(x+y) : by auto [add_mul] ... = x*x + x*y + y*x + y*y : by auto [mul_comm, add_mul] using [ring] ... = x^2 + 2*x*y + y^2 : by auto [sq, mul_comm] using [ring] end /--`theorem` Identity of Group is Unique Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$. `proof` From Group has Latin Square Property, there exists a unique $x \in G$ such that: :$a x = b$ and there exists a unique $y \in G$ such that: :$y a = b$ Setting $b = a$, this becomes: There exists a unique $x \in G$ such that: :$a x = a$ and there exists a unique $y \in G$ such that: :$y a = a$ These $x$ and $y$ are both $e$, by definition of identity element. {{qed}} -/ theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a := begin have h1 : ∀ a b : G, ∃! x : G, a * x = b, from by auto using [use (a⁻¹ * b)], have h2 : ∀ a b : G, ∃! y : G, y * a = b, from by auto using [use b * a⁻¹], have h3 : ∀ a : G, ∃! x : G, a * x = a, from by auto [h1], have h4 : ∀ a : G, ∃! y : G, y * a = a, from by auto [h2], have h5 : ∀ a : G, classical.some (h3 a).exists = (1 : G), from by auto [exists_unique.unique, h3, classical.some_spec, exists_unique.exists, mul_one], have h6 : ∀ a : G, classical.some (h4 a).exists = (1 : G), from by auto [exists_unique.unique, h4, classical.some_spec, exists_unique.exists, one_mul], show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by auto [h3, h4, exists_unique.unique, classical.some_spec, exists_unique.exists] using [use (1 : G)], end /--`theorem` Squeeze Theorem for Real Numbers Let $\sequence {x_n}$, $\sequence {y_n}$ and $\sequence {z_n}$ be sequences in $\R$. 
Let $\sequence {y_n}$ and $\sequence {z_n}$ both be convergent to the following limit: :$\ds \lim_{n \mathop \to \infty} y_n = l, \lim_{n \mathop \to \infty} z_n = l$ Suppose that: :$\forall n \in \N: y_n \le x_n \le z_n$ Then: :$x_n \to l$ as $n \to \infty$ that is: :$\ds \lim_{n \mathop \to \infty} x_n = l$ `proof` From Negative of Absolute Value: :$\size {x - l} < \epsilon \iff l - \epsilon < x < l + \epsilon$ Let $\epsilon > 0$. We need to prove that: :$\exists N: \forall n > N: \size {x_n - l} < \epsilon$ As $\ds \lim_{n \mathop \to \infty} y_n = l$ we know that: :$\exists N_1: \forall n > N_1: \size {y_n - l} < \epsilon$ As $\ds \lim_{n \mathop \to \infty} z_n = l$ we know that: :$\exists N_2: \forall n > N_2: \size {z_n - l} < \epsilon$ Let $N = \max \set {N_1, N_2}$. Then if $n > N$, it follows that $n > N_1$ and $n > N_2$. So: :$\forall n > N: l - \epsilon < y_n < l + \epsilon$ :$\forall n > N: l - \epsilon < z_n < l + \epsilon$ But: :$\forall n \in \N: y_n \le x_n \le z_n$ So: :$\forall n > N: l - \epsilon < y_n \le x_n \le z_n < l + \epsilon$ and so: :$\forall n > N: l - \epsilon < x_n < l + \epsilon$ So: :$\forall n > N: \size {x_n - l} < \epsilon$ Hence the result. {{qed}} -/ theorem squeeze_theorem_real_numbers (x y z : ℕ → ℝ) (l : ℝ) : let seq_limit : (ℕ → ℝ) → ℝ → Prop := λ (u : ℕ → ℝ) (l : ℝ), ∀ ε > 0, ∃ N, ∀ n > N, |u n - l| < ε in seq_limit y l → seq_limit z l → (∀ n : ℕ, (y n) ≤ (x n) ∧ (x n) ≤ (z n)) → seq_limit x l := begin assume seq_limit (h2 : seq_limit y l) (h3 : seq_limit z l) (h4 : ∀ (n : ℕ), y n ≤ x n ∧ x n ≤ z n) (ε), have h5 : ∀ x, |x - l| < ε ↔ (((l - ε) < x) ∧ (x < (l + ε))), from by auto [abs_sub_lt_iff] using [linarith], assume (h7 : ε > 0), cases h2 ε h7 with N1 h8, cases h3 ε h7 with N2 h9, let N := max N1 N2, use N, have h10 : ∀ n > N, n > N1 ∧ n > N2 := by auto [lt_of_le_of_lt, le_max_left, le_max_right], have h11 : ∀ n > N, (((l - ε) < (y n)) ∧ ((y n) ≤ (x n))) ∧ (((x n) ≤ (z n)) ∧ ((z n) < l+ε)), from by auto [h8, h10, h5, h9], have h15 : ∀ n > N, ((l - ε) < (x n)) ∧ ((x n) < (l+ε)), from by auto [h11] using [linarith], show ∀ (n : ℕ), n > N → |x n - l| < ε, from by auto [h5, h15], end /--`theorem` Density of irrational orbit The fractional parts of the integer multiples of an irrational number form a dense subset of the unit interval `proof` Let $\alpha$ be an irrational number. Then for distinct $i, j \in \mathbb{Z}$, we must have $\{i \alpha\} \neq\{j \alpha\}$. If this were not true, then $$ i \alpha-\lfloor i \alpha\rfloor=\{i \alpha\}=\{j \alpha\}=j \alpha-\lfloor j \alpha\rfloor, $$ which yields the false statement $\alpha=\frac{\lfloor i \alpha\rfloor-\lfloor j \alpha\rfloor}{i-j} \in \mathbb{Q}$. Hence, $$ S:=\{\{i \alpha\} \mid i \in \mathbb{Z}\} $$ is an infinite subset of $\left[0,1\right]$. By the Bolzano-Weierstrass theorem, $S$ has a limit point in $[0, 1]$. One can thus find pairs of elements of $S$ that are arbitrarily close. Since (the absolute value of) the difference of any two elements of $S$ is also an element of $S$, it follows that $0$ is a limit point of $S$. To show that $S$ is dense in $[0, 1]$, consider $y \in[0,1]$, and $\epsilon>0$. Then by selecting $x \in S$ such that $\{x\}<\epsilon$ (which exists as $0$ is a limit point), and $N$ such that $N \cdot\{x\} \leq y<(N+1) \cdot\{x\}$, we get: $|y-\{N x\}|<\epsilon$. QED -/ theorem FEW SHOT PROMPTS TO CODEX(END)-/
From hahn Require Import Hahn. From PromisingLib Require Import Basic. From imm Require Import Events. From imm Require Import Prog. From imm Require Import ProgToExecution. Require Import Basics. Require Import Events. Require Import AuxDef. Module Expr. Inductive t := | val (v : value) | reg (r : Reg.t) | ereg (e : Event.t) . Definition eval (rf : RegFile.t) (erf : Event.t -> value) (e : t) : value := match e with | val v => v | reg r => RegFun.find r rf | ereg e => erf e end. Definition eval_ereg0 (rf : RegFile.t) (e : t) : value := eval rf (fun _ => 0) e. Definition used_regs (e : t) : list Reg.t := match e with | reg r => [r] | _ => nil end. Definition deps_set {A} (df : RegFun.t (A -> Prop)) expr e := exists r, << UREG : List.In r (used_regs expr) >> /\ << INDEPF : df r e >>. Definition deps_set_alt {A} (df : RegFun.t (A -> Prop)) (e : t) : A -> Prop := match e with | reg r => df r | _ => ∅ end. Lemma deps_set_deps_set_alt {A} (df : RegFun.t (A -> Prop)) e : deps_set df e ≡₁ deps_set_alt df e. Proof. unfold deps_set. induction e; simpls; basic_solver. Qed. Definition used_eregs (e : t) : list Event.t := match e with | ereg e => [e] | _ => nil end. Definition no_eregs (e : t) : Prop := match e with | ereg _ => False | _ => True end. Lemma no_eregs_used_eregs (e : t) (NOEREGS : no_eregs e) : used_eregs e = []. Proof. induction e; auto; simpls. Qed. Definition subst_reg (e : t) (r : Reg.t) (e' : t) : t := match e with | reg r' => if BinPos.Pos.eqb r r' then e' else e | _ => e end. Lemma eval_subst_reg regf eregf e r e' : let regf' := fun y => if BinPos.Pos.eqb r y then eval regf eregf e' else regf y in eval regf eregf (subst_reg e r e') = eval regf' eregf e. Proof. induction e; desf. unfold subst_reg. unfold eval. unfold RegFun.find. desf; basic_solver. Qed. Definition subst_ereg (e : t) (re : Event.t) (e' : t) : t := match e with | ereg re' => if Event.eq re re' then e' else e | _ => e end. Definition subst_ereg_val_list (e : t) (l : list (Event.t * value)) : t := List.fold_right (fun ev e' => subst_ereg e' (fst ev) (val (snd ev))) e l. Lemma subst_ereg_val_list_val v l : subst_ereg_val_list (val v) l = (val v). Proof. induction l; auto. simpl. rewrite IHl. basic_solver. Qed. Lemma no_ereg_subst_ereg m e v (NOEREGS: no_eregs m) : subst_ereg m e (val v) = m. Proof. induction m; basic_solver. Qed. Lemma subst_ereg_no_ereg m e v : ~ lset (used_eregs (subst_ereg m e (val v))) e. Proof. induction m; desf; ins. desf; ins. intro QQ; apply Heq. basic_solver. Qed. Lemma subst_ereg_val_list_no_eregs m l (NOEREGS: no_eregs m) : subst_ereg_val_list m l = m. Proof. induction l; auto. simpl. rewrite IHl. destruct m; ins. Qed. Lemma nin_subst_ereg_val_list e l (NIN : ~ (lset (List.map fst l)) e) : subst_ereg_val_list (ereg e) l = ereg e. Proof. induction l; auto. simpls. clarify_not. rewrite IHl; auto. unfold subst_ereg. desf; auto. basic_solver. Qed. Lemma subst_ereg_val_list_ereg e l : subst_ereg_val_list (ereg e) l = ereg e \/ exists v', In (e, v') l /\ subst_ereg_val_list (ereg e) l = val v'. Proof. induction l; auto; ins. destruct a; ins. desf. { rewrite IHl; ins. desf; desf; eauto. } rewrite IHl0; ins. eauto. Qed. Lemma eval_subst_ereg_val m regf eregf e v : let eregf' := fun e' => if Event.eq e e' then v else eregf e' in eval regf eregf (subst_ereg m e (val v)) = eval regf eregf' m. Proof. induction m; desf. unfold subst_ereg. unfold eval. desf; basic_solver. Qed. 
Lemma used_regs_subst_reg e r m : lset (used_regs (subst_reg e r m)) ≡₁ lset (used_regs e) \₁ eq r ∪₁ codom_rel ((lset (used_regs e) ∩₁ eq r) × lset (used_regs m)). Proof. induction e; auto; simpls. 2: { desf; rewrite lset_cons, lset_empty; basic_solver 10. } all: basic_solver. Qed. Lemma used_eregs_subst_reg e r m : lset (used_eregs (subst_reg e r m)) ≡₁ lset (used_eregs e) ∪₁ ifP (lset (used_regs e) r) then lset (used_eregs m) else ∅. Proof. induction e; simpls; desf; basic_solver. Qed. Lemma nin_subst_reg e r m (NO_REG : ~ (lset (used_regs e)) r) : subst_reg e r m = e. Proof. induction e; auto; simpls. clarify_not. desf; basic_solver. Qed. Lemma nin_subst_ereg m e v (NO_EREG : ~ (lset (used_eregs m)) e) : subst_ereg m e v = m. Proof. induction m; auto; simpls. clarify_not. desf; basic_solver. Qed. Lemma compose_subst_ereg_subst_reg expr r e v : subst_ereg (subst_reg expr r (ereg e)) e (val v) = subst_reg (subst_ereg expr e (val v)) r (val v). Proof. induction expr; simpls; eauto. { do 2 (desf; simpls). } do 2 desf. Qed. Lemma subst_reg_same_reg φ r : subst_reg φ r (Expr.reg r) = φ. Proof. induction φ; ins; desf; congruence. Qed. Lemma subst_ereg_val_idemp m e v v' : subst_ereg (subst_ereg m e (val v)) e (val v') = subst_ereg m e (val v). Proof. induction m; simpls; eauto. desf; basic_solver. Qed. Lemma subst_ereg_val_noereg e φ e' v' (NO_EREG : ~ lset (used_eregs φ) e) : ~ lset (used_eregs (subst_ereg φ e' (val v'))) e. Proof. induction φ; simpls; eauto. desf; basic_solver. Qed. Lemma subst_neq_ereg_val_commute φ e v e' v' (NEQ : ~ e = e') : subst_ereg (subst_ereg φ e (val v)) e' (val v') = subst_ereg (subst_ereg φ e' (val v')) e (val v). Proof. induction φ; simpls; eauto. desf; basic_solver. Qed. Lemma subst_ereg_val_subst_reg_commute m r m' e v (NOEREG : ~ lset (used_eregs m') e) : subst_ereg (subst_reg m r m') e (val v) = subst_reg (subst_ereg m e (val v)) r m'. Proof. destruct m'; auto. 1-2: by destruct m; basic_solver. destruct m; simpls. all: by clarify_not; desf; simpl; desf; basic_solver. Qed. Lemma compose_subst_ereg_val_list_subst_reg expr r e v l (IN : In (e, v) l) (FL : func_list l) : subst_ereg_val_list (subst_reg expr r (ereg e)) l = subst_reg (subst_ereg_val_list expr l) r (val v). Proof. induction l; ins. destruct (classic (In (e, v) l)) as [AA|AA]. { rewrite IHl; auto. 2: now eapply func_list_cons; eauto. destruct a; ins. now apply subst_ereg_val_subst_reg_commute. } desf; ins. clear IHl. assert (~ lset (map fst l) e) as NLL. { intros HH. red in HH. apply in_map_iff in HH. desf. enough (x = (fst x, v)) as DD. { destruct x; ins. inv DD. } apply FL; ins; auto. } induction expr; ins. { rewrite subst_ereg_val_list_val; ins. } { rewrite subst_ereg_val_list_no_eregs with (m:=reg r0); ins. desf; desf. 2: now rewrite subst_ereg_val_list_no_eregs with (m:=reg r0); ins. rewrite nin_subst_ereg_val_list; ins. desf; desf. } destruct (classic (e0 = e)) as [|NEE]; subst. { rewrite nin_subst_ereg_val_list; ins. desf; desf. } edestruct subst_ereg_val_list_ereg as [GG|GG]. { rewrite !GG; ins. desf; desf. } desf. rewrite GG0. ins. Qed. End Expr. Section Language. Local Open Scope program_scope. (* for ∘ *) (* Definition expression := Value.t. *) Definition reg_expr := Value.reg. Definition const_expr := Value.const. End Language. Module Stmt. Inductive t := | skip | assign (r : Reg.t) (m : Expr.t) | read (r : Reg.t) (L : location) (μ : mode) | write (L : location) (μ : mode) (m : Expr.t) | fence (ν : mode) | ite (m : Expr.t) (s1 s2 : t) | seq (s1 s2 : t) | par (s1 s2 : t) . 
Fixpoint used_regs (s : t) : list Reg.t := match s with | assign r m => r :: (Expr.used_regs m) | read r _ _ => [r] | write _ _ m => Expr.used_regs m | ite m s1 s2 => (Expr.used_regs m) ++ used_regs s1 ++ used_regs s2 | seq s1 s2 => used_regs s1 ++ used_regs s2 | par s1 s2 => used_regs s1 ++ used_regs s2 | _ => nil end. Fixpoint used_locs (s : t) : list location := match s with | read _ l _ => [l] | write l _ m => [l] | ite m s1 s2 => used_locs s1 ++ used_locs s2 | seq s1 s2 => used_locs s1 ++ used_locs s2 | par s1 s2 => used_locs s1 ++ used_locs s2 | _ => nil end. Fixpoint no_eregs (s : t) : Prop := match s with | assign r m => Expr.no_eregs m | write _ _ m => Expr.no_eregs m | ite m s1 s2 => Expr.no_eregs m /\ no_eregs s1 /\ no_eregs s2 | seq s1 s2 => no_eregs s1 /\ no_eregs s2 | par s1 s2 => no_eregs s1 /\ no_eregs s2 | _ => True end. End Stmt. Module Prog. Definition t := IdentMap.t Stmt.t. End Prog.
import tactic section trichotomy open tactic expr meta def tactic.trichotomy (a b : expr) (h : name) : tactic unit := do v ← mk_app ``lt_trichotomy [a, b], rcases none (pexpr.of_expr v) [[rcases_patt.one h], [rcases_patt.one h], [rcases_patt.one h]] namespace tactic.interactive setup_tactic_parser open interactive interactive.types expr meta def trichotomy (a b : parse parser.pexpr) (h : parse with_ident_list): tactic unit := do a ← to_expr a, b ← to_expr b, nm ← get_unused_name `h, let nm := if h = list.nil then nm else h.head, tactic.trichotomy a b nm end tactic.interactive end trichotomy example {α : Type*} {c d : α} [linear_order α] (h1 : ¬ c < d) (h2 : ¬ c = d) (h3 : ¬ d < c) : false := begin trichotomy c d, all_goals { contradiction }, end
State Before: ι : Type u s : Finset ι w z : ι → ℝ≥0 hw' : ∑ i in s, w i = 1 ⊢ ∏ i in s, z i ^ ↑(w i) ≤ ∑ i in s, w i * z i State After: no goals Tactic: exact_mod_cast Real.geom_mean_le_arith_mean_weighted _ _ _ (fun i _ => (w i).coe_nonneg) (by assumption_mod_cast) fun i _ => (z i).coe_nonneg State Before: ι : Type u s : Finset ι w z : ι → ℝ≥0 hw' : ∑ i in s, w i = 1 ⊢ ∑ i in s, ↑(w i) = 1 State After: no goals Tactic: assumption_mod_cast
/- Copyright (c) 2021 Johan Commelin. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johan Commelin ! This file was ported from Lean 3 source module data.nat.choose.vandermonde ! leanprover-community/mathlib commit d6814c584384ddf2825ff038e868451a7c956f31 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.Data.Polynomial.Coeff import Mathlib.Data.Nat.Choose.Basic /-! # Vandermonde's identity In this file we prove Vandermonde's identity (`nat.add_choose_eq`): `(m + n).choose k = ∑ (ij : ℕ × ℕ) in antidiagonal k, m.choose ij.1 * n.choose ij.2` We follow the algebraic proof from https://en.wikipedia.org/wiki/Vandermonde%27s_identity#Algebraic_proof . -/ open BigOperators open Polynomial Finset.Nat /-- Vandermonde's identity -/ theorem Nat.add_choose_eq (m n k : ℕ) : (m + n).choose k = ∑ ij : ℕ × ℕ in antidiagonal k, m.choose ij.1 * n.choose ij.2 := by calc (m + n).choose k = ((X + 1) ^ (m + n)).coeff k := by rw [coeff_X_add_one_pow, Nat.cast_id] _ = ((X + 1) ^ m * (X + 1) ^ n).coeff k := by rw [pow_add] _ = ∑ ij : ℕ × ℕ in antidiagonal k, m.choose ij.1 * n.choose ij.2 := by rw [coeff_mul, Finset.sum_congr rfl] simp only [coeff_X_add_one_pow, Nat.cast_id, eq_self_iff_true, imp_true_iff] #align nat.add_choose_eq Nat.add_choose_eq
/* * ext.cc * * Copyright (C) 2013 Diamond Light Source * * Author: James Parkhurst * * This code is distributed under the BSD license, a copy of which is * included in the root directory of this package. */ #include <boost/python.hpp> #include <boost/python/def.hpp> #include <iostream> namespace dials { namespace algorithms { namespace boost_python { using namespace boost::python; void export_sampler(); void export_modeller(); BOOST_PYTHON_MODULE(dials_algorithms_profile_model_modeller_ext) { export_sampler(); export_modeller(); } }}} // namespace dials::algorithms::boost_python
From here, SR 878 heads predominantly eastwards as a four-lane-wide expressway through residential neighborhoods for the remainder of its length, generally lying 0.5 miles (0.80 km) north of Kendall Drive. After approximately 0.4 miles (0.64 km), the Snapper Creek Expressway passes through the 87th Avenue toll gantry. It then meets Galloway Road (SR 973) shortly afterwards with a diamond interchange. The expressway then enters Glenvar Heights once it crosses SR 973 and remains in that district for the rest of its duration. Just before passing over the Palmetto Expressway (SR 826) without an interchange (approximately 1 mile (1.6 km) east of Galloway Road), SR 878 meets its second and final toll gantry.
theory Delete_Opt_Ext imports Delete_Opt Balance_Opt_LR_Ext Combine_Opt_Ext "../../Assertion_Tree_Lookup" "../../Utilities_Ext" begin context rbt_impl begin lemma del_opt_correct_ext': " llvm_htriple (rbt_assn_ext t {} ti ** \<upharpoonleft>key_assn k ki ** \<up>(is_rbt_node (rbt_of t))) (del_opt ki ti) (\<lambda>ti_res. EXS t_res. rbt_assn_ext t_res {} ti_res ** \<upharpoonleft>key_assn k ki ** \<up>(rbt_of t_res = rbt_del_ad k (rbt_of t)) ** ctx(rbt_sorted (rbt_of t_res)) ** \<up>((ptr_of_key t ti)(k := None) \<subseteq>\<^sub>m ptr_of_key t_res ti_res) ** \<up>((value_of_key t)(k := None) \<subseteq>\<^sub>m value_of_key t_res) ) " supply sep_context_pureI[fri_red_rules] proof(induct k "rbt_of t" arbitrary: t ki ti rule: rbt_del_ad.induct) case (1 k) then show ?case apply (subst del_opt.simps) apply vcg done next case (2 k c l kc vc r) from 2(5) show ?case apply (subst del_opt.simps) apply vcg subgoal for al ar ci li ki vi ri asf ra sa (*k < kc*) apply (cases ra) subgoal (*ra = 0*) supply 2(2)[simplified ctx_def, vcg_rules] supply rbt_del_ad_correct[simp] supply rbt_del_rbt_less[intro] supply rbt_del_rbt_sorted[intro] by vcg_vok subgoal (*ra = 1*) supply 2(1)[simplified ctx_def, vcg_rules] supply rbt_del_rbt_less[simp] supply rbt_del_ad_correct[simp] by vcg_vok done subgoal for al ar ci li ki vi ri asf ra s (*kc \<le> k*) apply vcg subgoal for rb sa (*kc < k*) apply (cases rb) subgoal (*rb = 0*) supply 2(4)[simplified ctx_def, vcg_rules] supply rbt_del_ad_correct[simp] supply rbt_del_rbt_greater[intro] supply rbt_del_rbt_sorted[intro] by vcg_vok subgoal (*rb = 1*) supply 2(3)[simplified ctx_def, vcg_rules] supply rbt_del_rbt_greater[simp] supply rbt_del_ad_correct[simp] by vcg_vok done subgoal (*kc = k*) by vcg_vok done done qed lemmas del_opt_ext_correct = del_opt_correct_ext'[simplified ctx_def rbt_del_ad_correct] lemma delete_opt_ext_correct: " llvm_htriple (\<upharpoonleft>key_assn k ki ** rbt_assn_ext t {} ti ** \<up>(is_rbt_node (rbt_of t))) (delete_opt ki ti) (\<lambda>ti_res. EXS t_res. rbt_assn_ext t_res {} ti_res ** \<upharpoonleft>key_assn k ki ** \<up>(rbt_of t_res = (rbt_delete k (rbt_of t))) ** ctx(rbt_sorted (rbt_of t_res)) ** \<up>((ptr_of_key t ti)(k := None) \<subseteq>\<^sub>m ptr_of_key t_res ti_res) ** \<up>((value_of_key t)(k := None) \<subseteq>\<^sub>m value_of_key t_res) ) " unfolding delete_opt_def rbt_delete_def paint_def supply del_opt_ext_correct[vcg_rules] sep_context_pureI[isep_red] apply vcg subgoal apply vcg_compat apply (sepEwith auto) (*rbt_of =*) apply (cases "rbt_del k (rbt_of t)") apply simp_all apply sep apply (metis rbt_sorted.simps(2)) apply (sepwith \<open>solves pok_solver | solves vok_solver\<close>) done done lemmas [vcg_rules] = delete_opt_ext_correct[simplified ctx_def] end end
\pdfvariable compresslevel = 0 \documentclass[a4paper]{article} \usepackage{flare} \begin{document} \section{Links and Destinations} \includegraphics[ scale=0.5, page=1 ]{dummy-2.pdf} \newpage \includegraphics[ scale=0.5, page=2 ]{dummy-2.pdf} \newpage If we include the same pages once again, we need to change the destination names. See options \texttt{flareDestPrefix} and \texttt{flareLinkPrefix}. \includegraphics[ scale=0.5, page=1, flareLinkPrefix=foo, flareDestPrefix=foo, ]{dummy-2.pdf} \newpage \includegraphics[ scale=0.5, page=2, flareLinkPrefix=foo, flareDestPrefix=foo, ]{dummy-2.pdf} \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: "example-1" %%% End:
import algebra.group import group_theory.subgroup variables {G: Type*} {H: set G} {a: G} theorem Q_04a [hG: group G] [hH: is_subgroup H]: is_subgroup ((λ h, a * h * a⁻¹) '' H) := { inv_mem := λ g ⟨h, ⟨hH, ha⟩⟩, begin -- inverse of aha⁻¹ is ah⁻¹a⁻¹ ... have inv_g: g⁻¹ = a * h⁻¹ * a⁻¹, from ha ▸ calc (a * h * a⁻¹)⁻¹ = (a⁻¹)⁻¹ * h⁻¹ * a⁻¹ : by rw [mul_inv_rev, mul_inv_rev, ←mul_assoc] ... = a * h⁻¹ * a⁻¹ : by rw inv_inv, -- ... and ah⁻¹a⁻¹ is in aHa⁻¹. have t: a * h⁻¹ * a⁻¹ ∈ ((λ h, a * h * a⁻¹) '' H), from ⟨h⁻¹, ⟨is_subgroup.inv_mem hH, rfl⟩⟩, exact inv_g.symm ▸ t, end, -- the identity is in aHa⁻¹: 1 = a1a⁻¹. one_mem := ⟨1, ⟨ hH.one_mem, calc a * 1 * a⁻¹ = 1: by rw [mul_one, mul_right_inv a] ⟩⟩, -- multiplication in aHa⁻¹ is closed because ah₁a⁻¹ * ah₂a⁻¹ = ah₁h₂a⁻¹. mul_mem := λ g₁ g₂ ⟨h₁, ⟨hh₁, hg₁⟩⟩ ⟨h₂, ⟨hh₂, hg₂⟩⟩, ⟨h₁ * h₂, ⟨ is_submonoid.mul_mem hh₁ hh₂, hg₁ ▸ hg₂ ▸ calc a * (h₁ * h₂) * a⁻¹ = a * h₁ * 1 * (h₂ * a⁻¹) : by rw [←mul_assoc, mul_assoc, mul_one] ... = a * h₁ * (a⁻¹ * a) * (h₂ * a⁻¹) : by rw mul_left_inv ... = (a * h₁ * a⁻¹) * a * (h₂ * a⁻¹) : by rw ←(mul_assoc (a * h₁) a⁻¹ a) ... = (a * h₁ * a⁻¹) * (a * h₂ * a⁻¹) : by rw [mul_assoc, mul_assoc a h₂ a⁻¹], ⟩⟩, }
% $Id$ % compile 3 times: latex tex4ht-mathplayer % or xhlatex tex4ht-mathplayer "html,3,sections+" % % Copyright (C) 2009-2010 TeX Users Group % Copyright (C) 1997-2009 Eitan M. Gurari % Released under LPPL 1.3c+. % See tex4ht-cpright.tex for license text. %%%%%%%%%%%%%%%%%% load style files %%%%%%%%%%%%%%%%%%%%%%%%%% \ifx \HTML\UnDef \def\HTML{mathplayer} \def\CONFIG{\jobname} \def\MAKETITLE{\author{Eitan M. Gurari}} \def\next{\input mktex4ht.4ht \endinput} \expandafter\next \fi \input{common} \input{tex4ht-cpright} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \chapter{Preamble} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \<mathplayer\><<< % mathplayer.4ht (|version), generated from |jobname.tex % Copyright (C) 2009-2010 TeX Users Group % Copyright (C) |CopyYear.2002. Eitan M. Gurari |<TeX4ht copywrite|> >>> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \chapter{Start Here} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%% \section{Header} %%%%%%%%%%%%%%%% \<configure mathplayer Preamble\><<< \edef\Preamble{\Preamble,xht} \Configure{ext}{xht} >>> % \Configure{mathml}{m:} \<dtd lang\><<< \expandafter \ifx \csname a:dtd-lang\endcsname\relax EN\else \csname a:dtd-lang\endcsname \fi >>> \<configure mathplayer tex4ht\><<< \Configure{DOCTYPE} {\HCode{<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//|<dtd lang|>"\Hnewline "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"\Hnewline [\Hnewline <!ENTITY \% MATHML.prefixed "INCLUDE">\Hnewline <!ENTITY \% MATHML.prefix "m">\Hnewline ]\Hnewline >\Hnewline }} \Configure{@BODY} {\ifvmode \EndP\fi \IgnorePar\HCode{<script type="text/javascript">\Hnewline<!--\Hnewline |<check for ie 5.5 and mathplayer|>\Hnewline-->\Hnewline </script>\Hnewline }\par\ShowPar} \Configure{@HTML} {xmlns:m="http://www.w3.org/1998/Math/MathML"\Hnewline % xmlns:dsi="http://www.dessci.com/mathml" \Hnewline } %\let\dsi:=\empty >>> application/xhtml+xml \<check for ie 5.5 and mathplayer\><<< if( navigator.appName=="Microsoft Internet Explorer" \Hnewline && navigator.platform=="Win32" \Hnewline ){ \Hnewline if( parseFloat(navigator.appVersion.substr( \Hnewline navigator.appVersion.indexOf("MSIE ")+5))>="5.5" \Hnewline ){ try { \Hnewline var oMP = new ActiveXObject("MathPlayer.Factory.1"); \Hnewline } \Hnewline catch(e) { alert("Can't find Design Science's MathPalyer" + \Hnewline "(http://www.dessci.com/webmath/mathplayer)");} \Hnewline } else { \Hnewline alert("Requires MSIE version 5.5 or later"); \Hnewline } } \Hnewline >>> %%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Wrapping in Tables} %%%%%%%%%%%%%%%%%%%%%%%%%%%% The attribute \verb+nowrap="nowrap"+ in table cells prevents Mathplayer from displaying them its own way, and one gets minimal cells, overlapped: it ought to be avoided altogether. 
\<configure mathplayer tex4ht\><<< \Configure{halignTD} {}{} {<}{\ifmathml \HCode{ columnalign="left"}\else \HCode{ style="text-align:left" }\fi} {-}{\ifmathml \HCode{ columnalign="center"}\else \HCode{ style="text-align:center" }\fi} {>}{\ifmathml \HCode{ columnalign="right"}\else \HCode{ style="text-align:right" }\fi} {^}{\ifmathml \HCode{ rowalign="top"}\else \HCode{ style="vertical-align:top" }\fi} {=}{\ifmathml \HCode{ rowalign="baseline"}\else \HCode{ style="vertical-align:baseline" }\fi} {||}{\ifmathml \HCode{ rowalign="center"}\else \HCode{ style="vertical-align:middle" }\fi} {_}{\ifmathml \HCode{ rowalign="bottom"}\else \HCode{ style="vertical-align:bottom" }\fi} {p}{\ifmathml \HCode{ columnalign="left"}\else \HCode{ style="text-align:left"}\fi} {} >>> % %%%%%%%%%%%%%%%%%%%%% % \section{Cross References} % %%%%%%%%%%%%%%%%%%%%% % % % \<configure mathplayer tex4htNO\><<< % \LinkCommand\mtxt:link{\a:mathml maction,% % actiontype="link" \dsi: href, id, % class="label"|<mtext ref decoration|>,,} % \LinkCommand\msp:link{\a:mathml maction,% % actiontype="link" \dsi: href, id, % class="label"|<mstyle ref decoration|>,,} % \def\Link{\ifmathml % \ifmtext |<end mtext|>\HCode{</\a:mathml mtext>}% % \expandafter\expandafter\expandafter\mtxt:link % \else \expandafter\expandafter\expandafter\msp:link\fi % \else \expandafter\M:Link\fi} % \def\EndLink{\ifmathml % \ifmtext |<end mtext|>|<end mtext ref decoration|>% % \Endmtxt:link \HCode{<\a:mathml mtext\Hnewline % class="endlabel">}|<start mtext|>% % \else \HCode{|<end mstyle ref decoration|></\a:mathml % maction><!--endlabel-->}\fi % \else \expandafter\M:EndLink\fi} % >>> % % % % % % % \<mtext ref decorationNO\><<< % ><\a:mathml mtext mathbackground="yellow"% % >>> % \<end mtext ref decorationNO\><<< % \HCode{</\a:mathml mtext>}% % >>> % \<mstyle ref decorationNO\><<< % ><\a:mathml mstyle mathbackground="yellow"% % >>> % \<end mstyle ref decorationNO\><<< % </\a:mathml mstyle>% % >>> % % \begin{verbatim} % JUST BACKGROUND (one could add colour, and it would be fine to include the % labels, if any) % <m:mstyle \a:mathml mathbackground="yellow"> <m:maction % actiontype="link" \dsi: href="#x1-2r1" class="label"><m:mrow % ><m:mn>1</m:mn><!--tex4ht:ref: equ1 --></m:mrow></m:maction></m:mstyle > % HIGHLIGHT ON MOUSEOVER % <m:maction % actiontype="highlight" \dsi: color="pink" ><m:maction % actiontype="link" \dsi: href="#x1-2r1" class="label"><m:mtext % background="yellow" >1<!--tex4ht:ref: % equ1 --></m:mtext></m:maction></m:maction> % TOOLTIP CLUE ON MOUSEOVER % <m:mrow> % <m:maction actiontype="tooltip" ><m:maction % actiontype="link" \dsi: href="#x1-2r1" class="label"><m:mrow ><m:mn % background="yellow" >1</m:mn></m:mrow></m:maction> % <m:mtext>click to jump!</m:mtext> % <!--tex4ht:ref: equ1 --></m:maction><!--endlabel--></m:mrow> % * <m:mtext m:mathbackground="yellow" > disables background colour in all % the (math-)links - this one coming from a typo in mathplayer.4ht, inserting % m: before mathbackground; % m: also problematic on actiontype % \end{verbatim} % \<start mtext\><<< % \ht:special{t4ht@,&\#x00A0;}% % >>> % \<end mtext\><<< % \ht:special{t4ht@,}% % >>> % % The area of the clickable links can be enlarged with a cpde similar to % the following one. 
% \begin{verbatim} % \let\svLink=\Link % \let\svEndLink=\EndLink % \Odef\Link[#1]#2#3{% % \svLink[#1]{#2}{#3}% % \ifmathml\ifmtext\else \HCode{<m:mpadded width="4">}\fi\fi} % \def\EndLink{% % \ifmathml\ifmtext\else \HCode{</m:mpadded>}\fi\fi % \svEndLink} % \end{verbatim} %%%%%%%%%%%%%%%%%% \section{Namespace Prefixes} %%%%%%%%%%%%%%%%%% \<configure mathplayer tex4ht\><<< \:CheckOption{xht} \if:Option \else \:CheckOption{xml} \if:Option \else \Configure{DviMath} {\a:DviMathML \mathmltrue \ifOption{mml-fonts}{}{|%\mml:htfsym|%\NoFonts}% \IgnoreRule \HCode{<!--l. \the\inputlineno-->}\HCode{<\a:mathml math\Hnewline}% |<disable a:mathml for math attr|>% \HCode{\csname a:math-xmlns\endcsname \Hnewline \a:@math>}% |<end disable a:mathml for math attr|>\a:math |<sv dvimath par|>\IgnorePar} {\Tg</\a:mathml math>\EndIgnoreRule \ifOption{mml-fonts}{}{|%\ext:htfsym|%\EndNoFonts}% \mathmlfalse \b:DviMathML |<recall dvimath par|>} \fi\fi >>> \<recall dvimath par\><<< \sv:ignore >>> \<sv dvimath par\><<< \edef\sv:ignore{\if:nopar \noexpand\IgnorePar\else \noexpand\ShowPar\fi}% >>> \<disable a:mathml for math attr\><<< \let\sva:mathml=\a:mathml \let\a:mathml=\empty >>> \<end disable a:mathml for math attr\><<< \let\a:mathml=\sva:mathml >>> %%%%%%%%%%%%%%%%%% \section{Color} %%%%%%%%%%%%%%%%%% \<configure mathplayer color\><<< \let\a:txt:textcolor\a:textcolor \let\b:txt:textcolor\b:textcolor \Configure{textcolor} {\ifmathml \Configure{color}% {\Configure{SetHColor} {\HCode{<m:mstyle color="\HColor">}}}% \else \a:txt:textcolor \fi } {\ifmathml \HCode{</m:mstyle>}\else \b:txt:textcolor \fi} >>> \endinput
Require Import Util LengthEq IL RenamedApart LabelsDefined OptionR. Require Import Keep Drop Take Restrict SetOperations OUnion. Require Import Annotation Liveness.Liveness Coherence Delocation. Require Import AddParam AddAdd MoreListSet DelocationAlgo. Set Implicit Arguments. Lemma computeParameters_isCalled_Some_F' b Lv ZL AP als D Z F s alb l k k' x0 x1 Zs (IH : forall k Zs, get F k Zs -> forall (ZL : 〔params〕) (Lv AP : 〔⦃var⦄〕) (lv : ann ⦃var⦄) (n : nat) (D : ⦃var⦄) (Z : params) (p : ؟ ⦃var⦄), live_sound Imperative ZL Lv (snd Zs) lv -> ❬AP❭ = ❬Lv❭ -> ❬Lv❭ = ❬ZL❭ -> isCalled b (snd Zs) (LabI n) -> get Lv n D -> get ZL n Z -> get (snd (computeParameters (Lv \\ ZL) AP (snd Zs) lv)) n p -> exists Za : ⦃var⦄, p = ⎣ Za ⎦ /\ D \ of_list Z \ Za ⊆ getAnn lv) (LEN1 : ❬AP❭ = ❬Lv❭) (LEN2 : ❬Lv❭ = ❬ZL❭) (LEN3 : ❬F❭ = ❬als❭) (GetDL : get (getAnn ⊝ als ++ Lv) l D) (GetZL : get (fst ⊝ F ++ ZL) l Z) (LS:live_sound Imperative (fst ⊝ F ++ ZL) (getAnn ⊝ als ++ Lv) s alb) (LSF : forall (n : nat) (Zs : params * stmt) (a : ann ⦃var⦄), get F n Zs -> get als n a -> live_sound Imperative (fst ⊝ F ++ ZL) (getAnn ⊝ als ++ Lv) (snd Zs) a) (INCL: forall (n : nat) (Zs : params * stmt) (a : ann ⦃var⦄), get F n Zs -> get als n a -> of_list (fst Zs) ⊆ getAnn a /\ True) (GetLV : get (olu F als Lv ZL AP s alb) l x0) (GetF : get F k Zs) (GetAls : get als k x1) (IC : isCalled b (snd Zs) (LabI k')) (CC: callChain (isCalled b) F (LabI k') (LabI l)) : exists Za : ⦃var⦄, addAdd (list_union (oget ⊝ take ❬F❭ (olu F als Lv ZL AP s alb)) ∪ list_union (fst ∘ of_list ⊝ F)) (D \ of_list Z) x0 = ⎣ Za ⎦ /\ D \ of_list Z \ Za ⊆ getAnn x1 \ of_list (fst Zs) \ (list_union (oget ⊝ take ❬F❭ (olu F als Lv ZL AP s alb)) ∪ list_union (fst ∘ of_list ⊝ F)). Proof. general induction CC. - destruct (@get_in_range _ (snd (computeParameters ((getAnn ⊝ als ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) (snd Zs) x1)) l0) as [pF GETpF]. rewrite computeParameters_length; [ |eauto | eauto with len | eauto with len]. eapply get_range in GetDL. eauto. edestruct (IH k Zs); try eapply GETpF; eauto using get_app_right, map_get_1 with len; dcr; subst. edestruct get_olist_union_A' as [? [? ?]]; try eapply GetLV; eauto using map_get_1, zip_get. eapply computeParametersF_length; eauto with len. rewrite computeParameters_length; eauto with len. subst; simpl. eexists; split; eauto. rewrite <- H0, <- H1. repeat rewrite minus_union. assert (of_list (fst Zs) ⊆ list_union (fst ∘ of_list ⊝ F)). { eapply incl_list_union. eapply map_get_1; eauto. reflexivity. } revert H; clear_all; cset_tac. - inv_get. exploit IHCC; try eapply H0; eauto. dcr. eexists; split; eauto. rewrite H5. destruct (@get_in_range _ (snd (computeParameters ((getAnn ⊝ als ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) (snd Zs0) x1)) k'0) as [pF' GETpF']. rewrite computeParameters_length; [ |eauto | eauto with len | eauto with len]. rewrite app_length, map_length. eapply get_range in H1. omega. exploit (IH k0 Zs0); try eapply GETpF'; eauto using get_app, map_get_1 with len. dcr; subst. rewrite <- H7. assert (x3 ⊆ list_union (oget ⊝ take ❬F❭ (olist_union (snd ⊝ computeParametersF F als Lv ZL AP) (snd (computeParameters ((getAnn ⊝ als ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) s alb))))). { exploit (@get_olist_union_A _ _ (snd ⊝ computeParametersF F als Lv ZL AP)); [| eapply GETpF' | | ]. instantiate (1:=k0). eapply map_get_1. eapply zip_get_eq; [| | reflexivity]. eauto. eauto. instantiate (1:=(snd (computeParameters ((getAnn ⊝ als ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) s alb))). 
rewrite computeParameters_length; eauto. eapply computeParametersF_length; eauto with len. eauto with len. eauto with len. dcr. eapply incl_list_union. eapply map_get_1. eapply get_take; try eapply H6; eauto using get_range. eauto. } rewrite H2. assert (of_list (fst Zs0) ⊆ list_union (fst ∘ of_list ⊝ F)). { eapply incl_list_union. eapply map_get_1. instantiate (1:=Zs0). eauto. eauto. } revert H3; clear_all; cset_tac. Qed. Lemma computeParameters_isCalled_Some b ZL Lv AP s lv n D Z p : live_sound Imperative ZL Lv s lv -> length AP = length Lv -> length Lv = length ZL -> isCalled b s (LabI n) -> get Lv n D -> get ZL n Z -> get (snd (computeParameters (Lv \\ ZL) AP s lv)) n p -> exists Za, p = Some Za /\ D \ of_list Z \ Za ⊆ (getAnn lv). Proof. revert ZL Lv AP lv n D Z p. sind s; destruct s; intros ZL Lv AP lv n D Z p LS LEN1 LEN2 IC GetDL GetZL GetLV; simpl in * |- *; inv LS; invt isCalled; repeat let_case_eq; repeat let_pair_case_eq; subst; simpl in *. - edestruct (IH s) as [Za [A B]]; try eapply GetLV; eauto with len; subst; simpl. eexists; split; eauto. inv_get. exploit (@computeParameters_AP_LV Lv ZL (addParam x (Lv \\ ZL) AP)); try eapply H2; eauto with len. PIR2_inv. unfold addParam in H3. inv_get. rewrite <- H7. revert H10 B. clear_all; cases; intros; cset_tac. - inv_get. edestruct (IH s1) as [? [? SUB]]; eauto; subst. setoid_rewrite <- H8. setoid_rewrite <- SUB. destruct x0; eexists; simpl; split; eauto; clear_all; cset_tac. - inv_get. edestruct (IH s2) as [? [? SUB]]; eauto; subst. setoid_rewrite <- H9. setoid_rewrite <- SUB. destruct x; eexists; simpl; split; eauto; clear_all; cset_tac. - simpl in *. unfold keep in GetLV. inv_get. cases; eauto. eexists; split; eauto. rewrite <- H3. eauto with cset. - lnorm. inv_get. invc H4. + exploit (computeParameters_length (tab {} ‖F‖ ++ AP) H1) as Len; [ eauto with len | eauto with len | ]. assert (LE:❬F❭ + n < ❬snd (computeParameters ((getAnn ⊝ als ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) s alb)❭). rewrite Len, app_length, map_length. exploit (get_range GetDL). omega. destruct (get_in_range _ LE) as [pF GETpF]. edestruct (IH s) with (AP:=tab {} ‖F‖ ++ AP); eauto. eauto with len. eauto with len. eapply get_app_right; eauto using map_get_1. eauto with len. eapply get_app_right; eauto using map_get_1. eauto with len. dcr; subst. edestruct (@get_olist_union_b _ _ (snd ⊝ computeParametersF F als Lv ZL AP)) as [? [? ?]]; try eapply GETpF. eapply computeParametersF_length; eauto. get_functional. eexists; split; try reflexivity. rewrite <- H0, <- H8, <- H4. clear_all; cset_tac. + inv_get. destruct (@get_in_range _ (snd (computeParameters ((getAnn ⊝ als ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) s alb)) k) as [ps GETps]; eauto. rewrite computeParameters_length; eauto with len. exploit (IH s); try eapply GETps; eauto using get_app, map_get_1 with len. dcr; subst. setoid_rewrite <- H8. setoid_rewrite <- H13. assert (x2 ⊆ list_union (oget ⊝ take ❬F❭ (olist_union (snd ⊝ computeParametersF F als Lv ZL AP) (snd (computeParameters ((getAnn ⊝ als ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) s alb)))) ∪ list_union (fst ∘ of_list ⊝ F)). { exploit (@get_olist_union_b _ _ (snd ⊝ computeParametersF F als Lv ZL AP)); try eapply GETps. eapply computeParametersF_length; eauto with len. rewrite computeParameters_length; eauto with len. dcr. eapply incl_union_left. eapply incl_list_union. eapply map_get_1. eapply get_take; eauto using get_range. eauto. } clear H8 H13 LS GETps. setoid_rewrite H10. clear H7 H10. eapply computeParameters_isCalled_Some_F'; eauto. 
intros. eapply (IH (snd Zs0)); eauto. eapply get_app_right; eauto. eauto with len. eapply get_app_right; eauto. eauto with len. intros; edestruct H6; eauto. Qed. Lemma computeParameters_isCalled_get_Some b Lv ZL AP s lv n p A D Z : live_sound Imperative ZL Lv s lv -> length AP = length Lv -> length Lv = length ZL -> isCalled b s (LabI n) -> n < ❬snd (computeParameters (Lv \\ ZL) AP s lv)❭ -> get Lv n D -> get ZL n Z -> get (olist_union A (snd (computeParameters (Lv \\ ZL) AP s lv))) n p -> (forall (n0 : nat) (a : 〔؟⦃var⦄〕), get A n0 a -> ❬a❭ = ❬snd (computeParameters (Lv \\ ZL) AP s lv)❭) -> exists Za, p = Some Za /\ D \ of_list Z \ Za ⊆ (getAnn lv). Proof. intros LS LEN1 LEN2 IC LE GETDL GETZL GET LEN3. destruct (get_in_range _ LE); eauto. edestruct computeParameters_isCalled_Some; eauto; dcr; subst. edestruct get_olist_union_b; eauto; dcr. get_functional. eexists; split; try reflexivity. rewrite <- H1, <- H2; eauto. Qed. Lemma computeParameters_isCalledFrom_get_Some b Lv ZL AP F alv s lv p Da Zs l (LSF : forall (n : nat) (Zs : params * stmt) (a : ann ⦃var⦄), get F n Zs -> get alv n a -> live_sound Imperative (fst ⊝ F ++ ZL) (getAnn ⊝ alv ++ Lv) (snd Zs) a) (INCL: forall (n : nat) (Zs : params * stmt) (a : ann ⦃var⦄), get F n Zs -> get alv n a -> of_list (fst Zs) ⊆ getAnn a /\ True) : live_sound Imperative (fst ⊝ F ++ ZL) (getAnn ⊝ alv ++ Lv) s lv -> length AP = length Lv -> length Lv = length ZL -> length F = length alv -> isCalledFrom (isCalled b) F s (LabI l) -> get alv l Da -> get F l Zs -> get (olist_union (snd ⊝ computeParametersF F alv Lv ZL AP) (snd (computeParameters ((getAnn ⊝ alv ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) s lv))) l p -> exists Za, p = Some Za /\ getAnn Da \ of_list (fst Zs) \ Za \ list_union (oget ⊝ take ❬F❭ (olu F alv Lv ZL AP s lv)) \ list_union (fst ∘ of_list ⊝ F) ⊆ (getAnn lv). Proof. intros LS LEN1 LEN2 LEN3 [[n] [IC CC]] GETDL GETZL GET. exploit callChain_range' as LE; eauto using get_range. simpl in *. assert (NLE:n < ❬snd (computeParameters ((getAnn ⊝ alv ++ Lv) \\ (fst ⊝ F ++ ZL)) (tab {} ‖F‖ ++ AP) s lv)❭). rewrite computeParameters_length; eauto with len. destruct (get_in_range _ NLE); eauto. assert (LE':n < ❬getAnn ⊝ alv ++ Lv❭). rewrite app_length, map_length. omega. destruct (get_in_range _ LE'); eauto. assert (LE'':n < ❬fst ⊝ F ++ ZL❭). rewrite app_length, map_length. omega. destruct (get_in_range _ LE''); eauto. edestruct computeParameters_isCalled_Some; try eapply g; eauto; dcr; subst. eauto with len. eauto with len. edestruct get_olist_union_b; eauto; dcr. intros. eapply computeParametersF_length; eauto. eapply computeParameters_length; eauto with len. setoid_rewrite <- H1. inv CC. - inv_get. eexists; split; eauto. rewrite H2. clear_all; cset_tac. - inv_get. exploit computeParameters_isCalled_Some_F'; try eapply H4; try eapply H5; eauto using get_app, map_get_1. intros. eapply computeParameters_isCalled_Some; eauto. dcr. destruct p; simpl in *; invc H8. eexists; split; [ reflexivity | ]. rewrite H2. assert (Incl:x ⊆ (list_union (oget ⊝ take ❬F❭ (olu F alv Lv ZL AP s lv)) ∪ list_union (fst ∘ of_list ⊝ F))). { eapply incl_union_left. eapply incl_list_union. eapply map_get_1. eapply get_take; eauto using get_range. reflexivity. } rewrite Incl. rewrite <- H9. rewrite union_comm. rewrite <- minus_union. clear_all; cset_tac. Qed.
The degree of a polynomial is the same as the degree of its negation.
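This reads like the informal statement of a formalized library lemma. A formal counterpart might look like the following Lean 4 sketch; the blanket import of Mathlib and the lemma name Polynomial.degree_neg are assumptions, not taken from the row above.

import Mathlib

open Polynomial

-- Sketch: negating a polynomial leaves its degree unchanged.
-- Mathlib carries this fact (essentially as `Polynomial.degree_neg`).
example {R : Type*} [Ring R] (p : R[X]) : (-p).degree = p.degree :=
  Polynomial.degree_neg p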
/* addmatrix_gsl.c */
/* Fill a 10x3 GSL matrix and print its entries; the second loop deliberately
   reads past the allocated rows to demonstrate GSL's range checking. */
// https://www.gnu.org/software/gsl/manual/html_node/Example-programs-for-matrices.html

#include <stdio.h>
#include <stdlib.h>
#include <gsl/gsl_matrix.h>

int main (void)
{
  int i, j;
  gsl_matrix * m = gsl_matrix_alloc(10, 3);

  for (i = 0; i < 10; i++)
    for (j = 0; j < 3; j++)
      gsl_matrix_set(m, i, j, 0.23 + 100*i + j);

  for (i = 0; i < 100; i++)  /* OUT OF RANGE ERROR */
    for (j = 0; j < 3; j++)
      printf("m(%d, %d) = %g\n", i, j, gsl_matrix_get(m, i, j));

  gsl_matrix_free(m);

  return 0;
}
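For comparison, a bounds-respecting print loop (a minimal sketch; the 100 bound above is the deliberate out-of-range access that GSL's range checking is meant to catch) iterates over the matrix's own dimensions instead of a hard-coded count:

  /* Sketch: print only the rows and columns that were actually allocated. */
  for (i = 0; i < (int) m->size1; i++)
    for (j = 0; j < (int) m->size2; j++)
      printf("m(%d, %d) = %g\n", i, j, gsl_matrix_get(m, i, j));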
Formal statement is: lemma Cauchy_theorem_quadrisection: assumes f: "continuous_on (convex hull {a,b,c}) f" and dist: "dist a b \<le> K" "dist b c \<le> K" "dist c a \<le> K" and e: "e * K^2 \<le> norm (contour_integral(linepath a b) f + contour_integral(linepath b c) f + contour_integral(linepath c a) f)" shows "\<exists>a' b' c'. a' \<in> convex hull {a,b,c} \<and> b' \<in> convex hull {a,b,c} \<and> c' \<in> convex hull {a,b,c} \<and> dist a' b' \<le> K/2 \<and> dist b' c' \<le> K/2 \<and> dist c' a' \<le> K/2 \<and> e * (K/2)^2 \<le> norm(contour_integral(linepath a' b') f + contour_integral(linepath b' c') f + contour_integral(linepath c' a') f)" (is "\<exists>x y z. ?\<Phi> x y z") Informal statement is: Suppose $f$ is a continuous function defined on the convex hull of three points $a$, $b$, and $c$. If the distances between these points are bounded by $K$, and if the integral of $f$ around the triangle formed by these points is at least $eK^2$, then there exists a triangle with vertices $a'$, $b'$, and $c'$ such that the distances between these points are bounded by $K/2$, and the integral of $f$ around this triangle is at least $e(K/2)^2$.
lemmas Zfun_mult_left = bounded_bilinear.Zfun_left [OF bounded_bilinear_mult]
### A Pluto.jl notebook ### # v0.12.11 using Markdown using InteractiveUtils # ╔═╡ 429806e2-ff34-11ea-184e-c772958b8fbd using Pkg, DrWatson # ╔═╡ 42984312-ff34-11ea-0ad3-2f757db72417 begin #@quickactivate "StatisticalRethinkingStan" using StanSample using StatisticalRethinking end # ╔═╡ 214f9dc2-ff34-11ea-07d3-793ec3053138 md"## Clip-06-18-19s.jl" # ╔═╡ 4298b220-ff34-11ea-2b17-e7f249b9e5d4 md"##### Not yet done." # ╔═╡ 429f6e6c-ff34-11ea-21a2-b9be4f78fbae md"##### Will for now be attempted using StructuralCausalModels.jl." # ╔═╡ 429fe194-ff34-11ea-0a4e-b3ce9a78ada5 md"## End of clip-06-18-19s.jl" # ╔═╡ Cell order: # ╟─214f9dc2-ff34-11ea-07d3-793ec3053138 # ╠═429806e2-ff34-11ea-184e-c772958b8fbd # ╠═42984312-ff34-11ea-0ad3-2f757db72417 # ╠═4298b220-ff34-11ea-2b17-e7f249b9e5d4 # ╠═429f6e6c-ff34-11ea-21a2-b9be4f78fbae # ╟─429fe194-ff34-11ea-0a4e-b3ce9a78ada5
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2017. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // -------------------------------------------------------------------------- // $Maintainer: Timo Sachsenberg $ // $Authors: $ // -------------------------------------------------------------------------- // #include <OpenMS/TRANSFORMATIONS/RAW2PEAK/OptimizePeakDeconvolution.h> #include <boost/math/special_functions/acosh.hpp> #ifdef DEBUG_DECONV #include <iostream> #include <fstream> #endif namespace OpenMS { const double OptimizePeakDeconvolution::dist_ = 1.003; //TODO: the operator() and the df function need heavy refactoring!!! struct OPDFunctor { int inputs() const { return m_inputs; } int values() const { return m_values; } OPDFunctor(unsigned dimensions, unsigned numDataPoints, const OptimizePeakDeconvolution::Data* data) : m_inputs(dimensions), m_values(numDataPoints), m_data(data){} int operator()(const Eigen::VectorXd& x, Eigen::VectorXd& fvec) { //TODO: holding the parameters to be optimized and additional values in the same vector is // most likely not the best idea. should be split in two vectors. // // x contains the parameters to be optimized. // The first two entries are the left and right width, respectively.They are equal // for all peaks. Then the height and position of all peaks are stored. // // m_data might contain any additional parameters. We handle these using class members // instead. // The vector f is supposed to contain the result when we return from this function. 
const std::vector<double>& signal = m_data->signal; const std::vector<double>& positions = m_data->positions; const std::vector<PeakShape>& peaks = m_data->peaks; const OptimizationFunctions::PenaltyFactorsIntensity& penalties = m_data->penalties; Int charge = m_data->charge; double leftwidth = x(0); double rightwidth = x(1); //double posP1 = x(2); // iterate over all points of the signal for (Size current_point = 0; current_point < positions.size(); current_point++) { double computed_signal = 0.; double current_position = positions[current_point]; double experimental_signal = signal[current_point]; //iterate over all peaks for (Size current_peak = 0; current_peak < peaks.size(); current_peak++) { //Store the current parameters for this peak double p_height = x(2 + 2 * current_peak); double p_position = x(2 + 2 * current_peak + 1); double p_width = (current_position <= p_position) ? leftwidth : rightwidth; //is it a Lorentz or a Sech - Peak? if (peaks[current_peak].type == PeakShape::LORENTZ_PEAK) { computed_signal += p_height / (1. + pow(p_width * (current_position - p_position), 2)); } else // It's a Sech - Peak { computed_signal += p_height / pow(cosh(p_width * (current_position - p_position)), 2); } } fvec(current_point) = computed_signal - experimental_signal; } // penalties : especially negative heights have to be penalised double penalty = 0.; double penalty_pos = penalties.pos; double penalty_lwidth = penalties.lWidth; double penalty_rwidth = penalties.rWidth; double penalty_intensity = penalties.height; //iterate over all peaks again to compute the penalties for (Size current_peak = 0; current_peak < peaks.size(); current_peak++) { double p_position = x(2 + 2 * current_peak + 1); if (current_peak < peaks.size() - 1) { double next_p_position = x(2 + 2 * current_peak + 3); // if distance between peaks does not match the peptide mass rule if (fabs(fabs(p_position - next_p_position) - 1.003 / charge) > 0.05) { // penalize it penalty += penalty_pos * 10000 * pow(fabs(fabs(p_position - next_p_position) - 1.003 / charge), 2); } } double old_position = peaks[current_peak].mz_position; double old_width_l = peaks[current_peak].left_width; double old_width_r = peaks[current_peak].right_width; double old_height = peaks[current_peak].height; double p_width_l = x(0); double p_width_r = x(1); double p_height = x(2 + 2 * current_peak); if (p_height < 1) { penalty += 100000* penalty_intensity* pow(fabs(p_height - old_height), 2); } if (p_width_l < 0) { penalty += penalty_lwidth * peaks.size() * 10000 * pow(fabs(p_width_l - old_width_l), 2); } else if (p_width_l < 1.5) penalty += 10000 * pow(fabs(p_width_l - old_width_l), 2); if (p_width_r < 0) { penalty += penalty_rwidth * peaks.size() * 10000 * pow(fabs(p_width_r - old_width_r), 2); } else if (p_width_r < 1.5) penalty += 10000 * pow(fabs(p_width_r - old_width_r), 2); if (fabs(old_position - p_position) > 0.1) { penalty += 10000* penalty_pos* pow(fabs(old_position - p_position), 2); } } fvec(fvec.size() - 1) = penalty; return 0; } // compute Jacobian matrix for the different parameters int df(const Eigen::VectorXd& x, Eigen::MatrixXd& J) { // For the conventions on x and params c.f. the commentary in residual() // // The matrix J is supposed to contain the result when we return from this function. 
// Note: Jacobian is expected as follows: // - each row corresponds to one data point // - each column corresponds to one parameter const std::vector<double>& positions = m_data->positions; const std::vector<PeakShape>& peaks = m_data->peaks; const OptimizationFunctions::PenaltyFactorsIntensity& penalties = m_data->penalties; Int charge = m_data->charge; double leftwidth = x(0); double rightwidth = x(1); //TODO: is the reset needed? J.setZero(); // iterate over all points of the signal for (Size current_point = 0; current_point < positions.size(); current_point++) { double current_position = positions[current_point]; // iterate over all peaks for (Size current_peak = 0; current_peak < peaks.size(); current_peak++) { //Store the current parameters for this peak double p_height = x(2 + 2 * current_peak); double p_position = x(2 + 2 * current_peak + 1); double p_width = (current_position <= p_position) ? leftwidth : rightwidth; //is it a Lorentz or a Sech - Peak? if (peaks[current_peak].type == PeakShape::LORENTZ_PEAK) { double diff = current_position - p_position; double denom_inv = 1. / (1. + pow(p_width * diff, 2)); double ddl_left = (current_position <= p_position) ? -2* p_height* pow(diff, 2) * p_width * pow(denom_inv, 2) : 0; double ddl_right = (current_position > p_position) ? -2* p_height* pow(diff, 2) * p_width * pow(denom_inv, 2) : 0; // left and right width are the same for all peaks, // the sums of the derivations over all peaks are stored in the first two columns J(current_point, 0) = J(current_point, 0) + ddl_left; J(current_point, 1) = J(current_point, 1) + ddl_right; double ddx0 = 2* p_height* pow(p_width, 2) * diff * pow(denom_inv, 2); // partial derivation with respect to intensity J(current_point, 2 + 2 * current_peak) = denom_inv; // partial derivation with respect to the mz-position J(current_point, 2 + 2 * current_peak + 1) = ddx0; } else // It's a Sech - Peak { double diff = current_position - p_position; double denom_inv = 1. / cosh(p_width * diff); // The remaining computations are not stable if denom_inv == 0. In that case, we are far away from the peak // and can assume that all derivatives vanish double sinh_term = (fabs(denom_inv) < 1e-6) ? 0.0 : sinh(p_width * diff); double ddl_left = (current_position <= p_position) ? -2* p_height* sinh_term* diff* pow(denom_inv, 3) : 0; double ddl_right = (current_position > p_position) ? -2* p_height* sinh_term* diff* pow(denom_inv, 3) : 0; J(current_point, 0) = J(current_point, 0) + ddl_left; J(current_point, 1) = J(current_point, 1) + ddl_right; double ddx0 = 2* p_height* p_width* sinh_term* pow(denom_inv, 3); J(current_point, 2 + 2 * current_peak) = pow(denom_inv, 2); J(current_point, 2 + 2 * current_peak + 1) = ddx0; } } } /** Now iterate over all peaks again to compute the * penalties. 
*/ for (Size current_peak = 0; current_peak < peaks.size(); current_peak++) { double penalty_p = 0; double p_position = x(2 + 2 * current_peak + 1); if (current_peak < peaks.size() - 1) { double next_p_position = x(2 + 2 * current_peak + 3); // if distance between peaks does not match the peptide mass rule if (fabs(fabs(p_position - next_p_position) - 1.003 / charge) > 0.05) { // penalize it penalty_p += penalties.pos * 20000 * fabs(fabs(p_position - next_p_position) - 1.003 / charge); } } std::cout << "Eigen penalty_p " << penalty_p << std::endl; double p_width_left = x(0); double p_width_right = x(1); double p_height = x(2 + 2 * current_peak); double old_position = peaks[current_peak].mz_position; double old_width_left = peaks[current_peak].left_width; double old_width_right = peaks[current_peak].right_width; double old_height = peaks[current_peak].height; double penalty_h = 0., penalty_l = 0., penalty_r = 0.; if (p_height < 1) { penalty_h += 100000 * 2 * penalties.height * (fabs(p_height) - fabs(old_height)); } if (p_width_left < 0) { penalty_l += peaks.size() * 2 * penalties.lWidth * 10000 * (fabs(p_width_left - old_width_left)); } else if (p_width_left < 1.5) penalty_l += 2 * penalties.lWidth * 10000 * pow(fabs(p_width_left - old_width_left), 2); if (p_width_right < 0) { penalty_r += peaks.size() * 2 * penalties.rWidth * 10000 * (fabs(p_width_right - old_width_right)); } else if (p_width_right < 1.5) penalty_r += 2 * penalties.rWidth * 10000 * pow(fabs(p_width_right - old_width_right), 2); if (fabs(old_position - p_position) > 0.1) { penalty_p += 10000 * penalties.pos * 2 * fabs(old_position - p_position); } J(positions.size(), 2 + 2 * current_peak) = 100 * penalty_h; J(positions.size(), 0) = 100 * penalty_l; J(positions.size(), 1) = 100 * penalty_r; J(positions.size(), 2 + 2 * current_peak + 1) = 100 * penalty_p; } for (int i = 0; i < J.rows(); ++i) { for (int j = 0; j < J.cols(); ++j) std::cout << J(i, j) << " "; std::cout << std::endl; } std::cout << std::endl; return 0; } const int m_inputs, m_values; const OptimizePeakDeconvolution::Data* m_data; }; OptimizePeakDeconvolution::OptimizePeakDeconvolution() : DefaultParamHandler("OptimizePeakDeconvolution"), charge_(1) { defaults_.setValue("max_iteration", 10, "maximal number of iterations for the fitting step"); defaults_.setValue("eps_abs", 1e-04, "if the absolute error gets smaller than this value the fitting is stopped", ListUtils::create<String>("advanced")); defaults_.setValue("eps_rel", 1e-04, "if the relative error gets smaller than this value the fitting is stopped", ListUtils::create<String>("advanced")); defaults_.setValue("penalties:left_width", 0.0, "penalty term for the fitting of the left width:" \ "If the left width gets too broad or negative during the fitting it can be penalized."); defaults_.setValue("penalties:right_width", 0.0, "penalty term for the fitting of the right width:" \ "If the right width gets too broad or negative during the fitting it can be penalized."); defaults_.setValue("penalties:height", 0.0, "penalty term for the fitting of the intensity:" \ "If it gets negative during the fitting it can be penalized."); defaults_.setValue("penalties:position", 0.0, "penalty term for the fitting of the peak position:" \ "If the position changes more than 0.5Da during the fitting it can be penalized as well as " \ "discrepancies of the peptide mass rule."); defaults_.setValue("fwhm_threshold", 1.0, "If a peaks is broader than fwhm_threshold, it is assumed that it contains another peaks and an additional peak is 
added."); defaultsToParam_(); } void OptimizePeakDeconvolution::updateMembers_() { penalties_.rWidth = (float)param_.getValue("penalties:right_width"); penalties_.lWidth = (float)param_.getValue("penalties:left_width"); penalties_.height = (float)param_.getValue("penalties:height"); penalties_.pos = (float)param_.getValue("penalties:position"); } bool OptimizePeakDeconvolution::optimize(std::vector<PeakShape>& peaks, Data& data) { if (peaks.empty()) return true; #ifdef DEBUG_DECONV std::cout << "peaksanzahl:" << peaks.size(); std::cout << "\tpeaks[0].mz_position:" << peaks[0].mz_position << std::endl; for (Size j = 0; j < peaks.size(); ++j) { std::cout << "\tpeaks[j].mz_position:" << peaks[j].mz_position; std::cout << "\tpeaks[j].height:" << peaks[j].height << std::endl; std::cout << "\tpeaks[j].left_width:" << peaks[j].left_width; std::cout << "\tpeaks[j].right_width:" << peaks[j].right_width << std::endl << std::endl; } for (Size j = 0; j < data.positions.size(); ++j) { std::cout << "positions[" << j << "]=" << data.positions[j] << std::endl; } #endif // the input peaks are stored in a temporary vector std::vector<PeakShape> temp_shapes = peaks; Size global_peak_number = 0; double min(std::numeric_limits<double>::max()); Int bestCharge = 0; Size bestNumPeaks = 0; Eigen::VectorXd bestResult(2 + 2 * data.peaks.size()); bestResult.setZero(); // try three different charge states : charge-1, charge, charge +1 // take the best solution Int chargeState = (charge_ - 1 > 1) ? charge_ - 1 : charge_; Int firstChargeState = chargeState; #ifdef DEBUG_DECONV std::cout << "charge " << chargeState << " max_charge" << charge_ + 1 << "\tpeaks.size() " << peaks.size() << std::endl; #endif bestCharge = chargeState; bestNumPeaks = peaks.size(); for (; chargeState < charge_ + 2; ++chargeState) { setNumberOfPeaks_(data, temp_shapes, chargeState); Eigen::VectorXd x_init(2 + 2 * data.peaks.size()); for (Size i = 0; i < data.peaks.size(); i++) { x_init(2 + 2 * i) = data.peaks[i].height; x_init(3 + 2 * i) = data.peaks[i].mz_position; } // Initialize the parameters for the optimization // all peaks shall have the same width double wl = data.peaks[0].left_width; double wr = data.peaks[0].right_width; if (boost::math::isnan(wl)) { for (Size i = 0; i < data.peaks.size(); ++i) { data.peaks[i].left_width = 1; } wl = 1.; } if (boost::math::isnan(wr)) { for (Size i = 0; i < data.peaks.size(); ++i) { data.peaks[i].right_width = 1; } wr = 1.; } x_init(0) = wl; x_init(1) = wr; data.penalties = penalties_; data.charge = chargeState; unsigned numDataPoints = std::max(data.positions.size() + 1, 2 + 2 * data.peaks.size()); OPDFunctor functor(2, numDataPoints, &data); Eigen::LevenbergMarquardt<OPDFunctor> lmSolver(functor); Eigen::LevenbergMarquardt<OPDFunctor>::Parameters config; config.maxfev = (Int)param_.getValue("max_iteration"); lmSolver.parameters = config; Eigen::LevenbergMarquardtSpace::Status status = lmSolver.minimize(x_init); //the states are poorly documented. after checking the source, we believe that //all states except NotStarted, Running and ImproperInputParameters are good //termination states. 
if (status <= Eigen::LevenbergMarquardtSpace::ImproperInputParameters) { throw Exception::UnableToFit(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "UnableToFit-OptimizePeakDeconvolution", "Could not fit the curve to the data: Error " + String(status)); } double chi = lmSolver.fnorm; if ((chargeState == firstChargeState) || (chi < min)) { bestResult = x_init; min = chi; bestCharge = chargeState; bestNumPeaks = data.peaks.size(); } } global_peak_number += bestNumPeaks; // iterate over all peaks and store the optimized values in peaks if (bestNumPeaks > 0) { peaks.resize(bestNumPeaks); for (Size current_peak = 0; current_peak < bestNumPeaks; current_peak++) { // Store the current parameters for this peak peaks[current_peak].left_width = bestResult(0); peaks[current_peak].right_width = bestResult(1); peaks[current_peak].height = bestResult(2 + 2 * current_peak); peaks[current_peak].mz_position = bestResult(2 + 2 * current_peak + 1); // compute the area // is it a Lorentz or a Sech - Peak? if (peaks[current_peak].type == PeakShape::LORENTZ_PEAK) { PeakShape p = peaks[current_peak]; double x_left_endpoint = p.mz_position + 1 / p.left_width * sqrt(p.height / 1 - 1); double x_right_endpoint = p.mz_position + 1 / p.right_width * sqrt(p.height / 1 - 1); #ifdef DEBUG_DECONV std::cout << "x_left_endpoint " << x_left_endpoint << " x_right_endpoint " << x_right_endpoint << std::endl; std::cout << "p.height" << p.height << std::endl; #endif double area_left = -p.height / p.left_width * atan(p.left_width * (x_left_endpoint - p.mz_position)); double area_right = -p.height / p.right_width * atan(p.right_width * (p.mz_position - x_right_endpoint)); peaks[current_peak].area = area_left + area_right; } else //It's a Sech - Peak { PeakShape p = peaks[current_peak]; double x_left_endpoint = p.mz_position + 1 / p.left_width * boost::math::acosh(sqrt(p.height / 0.001)); double x_right_endpoint = p.mz_position + 1 / p.right_width * boost::math::acosh(sqrt(p.height / 0.001)); #ifdef DEBUG_DECONV std::cout << "x_left_endpoint " << x_left_endpoint << " x_right_endpoint " << x_right_endpoint << std::endl; std::cout << "p.height" << p.height << std::endl; #endif double area_left = -p.height / p.left_width * (sinh(p.left_width * (p.mz_position - x_left_endpoint)) / cosh(p.left_width * (p.mz_position - x_left_endpoint))); double area_right = -p.height / p.right_width * (sinh(p.right_width * (p.mz_position - x_right_endpoint)) / cosh(p.right_width * (p.mz_position - x_right_endpoint))); peaks[current_peak].area = area_left + area_right; } } } charge_ = bestCharge; return true; } Size OptimizePeakDeconvolution::getNumberOfPeaks_(Int charge, std::vector<PeakShape>& temp_shapes, Data& data) { double dist = dist_ / charge; data.peaks.clear(); Size shape = 0; #ifdef DEBUG_DECONV std::cout << "temp_shapes[0].mz_position " << temp_shapes[0].mz_position << "\t dist " << dist << "\tp_index " << shape << std::endl; #endif // while the peak's position is smaller than the last considered position // take the peak for optimization while ((temp_shapes[0].mz_position + shape * dist < data.positions[data.positions.size() - 1]) && (shape < temp_shapes.size())) { data.peaks.push_back(temp_shapes[shape]); #ifdef DEBUG_DECONV std::cout << "temp_shapes[0].mz_position + p_index*dist = " << temp_shapes[0].mz_position + shape * dist << std::endl; #endif ++shape; } return shape; } void OptimizePeakDeconvolution::setNumberOfPeaks_(Data& data, const std::vector<PeakShape>& temp_shapes, Int charge) { double dist = dist_ / charge; data.peaks.clear(); 
Size shape = 0; #ifdef DEBUG_DECONV std::cout << "temp_shapes[0].mz_position " << temp_shapes[0].mz_position << "\t dist " << dist << "\tp_index " << shape << std::endl; #endif // while the peak's position is smaller than the last considered position // take the peak for optimization while ((temp_shapes[0].mz_position + shape * dist < data.positions[data.positions.size() - 1]) && (shape < temp_shapes.size())) { data.peaks.push_back(temp_shapes[shape]); #ifdef DEBUG_DECONV std::cout << "temp_shapes[0].mz_position + p_index*dist = " << temp_shapes[0].mz_position + shape * dist << std::endl; #endif shape++; } } }
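The Lorentz and sech branches of the area computation above have a closed-form reading. The following sketch is an editorial illustration, not taken from the OpenMS sources; it assumes the Lorentz shape is $h/(1+\lambda^2(x-m)^2)$ and the sech shape is $h\,\mathrm{sech}^2(\lambda(x-m))$ with per-side width $\lambda$, which is consistent with the endpoint expressions and their cutoffs (1 and 0.001): $$\int \frac{h}{1+\lambda^{2}(x-m)^{2}}\,dx=\frac{h}{\lambda}\arctan\bigl(\lambda(x-m)\bigr)+C, \qquad \int h\,\mathrm{sech}^{2}\bigl(\lambda(x-m)\bigr)\,dx=\frac{h}{\lambda}\tanh\bigl(\lambda(x-m)\bigr)+C,$$ $$x_{c}=m+\tfrac{1}{\lambda}\sqrt{h/c-1}\ \ \text{(Lorentz)},\qquad x_{c}=m+\tfrac{1}{\lambda}\,\mathrm{arcosh}\bigl(\sqrt{h/c}\bigr)\ \ \text{(sech}^{2}\text{)},$$ where $x_c$ is the abscissa at which the shape has decayed to the cutoff $c$; evaluating the antiderivatives between the peak position and $x_c$ on each side is the structure behind the area_left/area_right terms summed into peaks[current_peak].area.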
module Js.BrowserNetwork {- import Js.BrowserForeigns import public Js.ServiceTypes export httpGet : String -> ASync String httpGet url = MkASync $ httpGet_raw url export httpPost : String -> String -> ASync String httpPost url body = MkASync $ httpPost_raw url body export callRPC : ServiceGroup ts -> (name:String) -> Elem (name, RPCServiceTy a b) ts -> a -> ASync b callRPC group name p val = let (RPCService _ e1 e2) = getService name group p in do res <- httpPost name (encode e1 val) case decode e2 res of Right x => pure x Left err => debugError err -}
(* * Copyright 2014, NICTA * * This software may be distributed and modified according to the terms of * the BSD 2-Clause license. Note that NO WARRANTY is provided. * See "LICENSE_BSD2.txt" for details. * * @TAG(NICTA_BSD) *) theory int_promotion imports "../CTranslation" begin install_C_file "int_promotion.c" context int_promotion begin thm f_body_def lemma "\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== CALL f() \<lbrace> \<acute>ret__int = 1 \<rbrace>" apply vcg apply simp done end end
(* Author: Tobias Nipkow, 2007 *) theory QEdlo_fr imports DLO begin subsection "Interior Point Method" text{* This section formalizes a new quantifier elimination procedure based on the idea of Ferrante and Rackoff~\cite{FerranteR-SIAM75} (see also \S\ref{sec:FRE}) of taking a point between each lower and upper bound as a test point. For dense linear orders it is not obvious how to realize this because we cannot name any intermediate point directly. *} fun asubst\<^sub>2 :: "nat \<Rightarrow> nat \<Rightarrow> atom \<Rightarrow> atom fm" where "asubst\<^sub>2 l u (Less 0 0) = FalseF" | "asubst\<^sub>2 l u (Less 0 (Suc j)) = Or (Atom(Less u j)) (Atom(Eq u j))" | "asubst\<^sub>2 l u (Less (Suc i) 0) = Or (Atom(Less i l)) (Atom(Eq i l))" | "asubst\<^sub>2 l u (Less (Suc i) (Suc j)) = Atom(Less i j)" | "asubst\<^sub>2 l u (Eq 0 0) = TrueF" | "asubst\<^sub>2 l u (Eq 0 _) = FalseF" | "asubst\<^sub>2 l u (Eq _ 0) = FalseF" | "asubst\<^sub>2 l u (Eq (Suc i) (Suc j)) = Atom(Eq i j)" abbreviation "subst\<^sub>2 l u \<equiv> amap\<^bsub>fm\<^esub> (asubst\<^sub>2 l u)" lemma I_subst\<^sub>21: "nqfree f \<Longrightarrow> xs!l < xs!u \<Longrightarrow> DLO.I (subst\<^sub>2 l u f) xs \<Longrightarrow> xs!l < x \<Longrightarrow> x < xs!u \<Longrightarrow> DLO.I f (x#xs)" proof(induct f arbitrary: x) case (Atom a) thus ?case by (cases "(l,u,a)" rule: asubst\<^sub>2.cases) auto qed auto definition "nolub f xs l x u \<longleftrightarrow> (\<forall>y\<in>{l<..<x}. y \<notin> LB f xs) \<and> (\<forall>y\<in>{x<..<u}. y \<notin> UB f xs)" lemma nolub_And[simp]: "nolub (And f g) xs l x u = (nolub f xs l x u \<and> nolub g xs l x u)" by(auto simp:nolub_def) lemma nolub_Or[simp]: "nolub (Or f g) xs l x u = (nolub f xs l x u \<and> nolub g xs l x u)" by(auto simp:nolub_def) declare[[simp_depth_limit=3]] lemma I_subst\<^sub>22: "nqfree f \<Longrightarrow> xs!l < x \<and> x < xs!u \<Longrightarrow> nolub f xs (xs!l) x (xs!u) \<Longrightarrow> \<forall>x\<in>{xs!l <..< xs!u}. DLO.I f (x#xs) \<and> x \<notin> EQ f xs \<Longrightarrow> DLO.I (subst\<^sub>2 l u f) xs" proof (induct f) case (Atom a) show ?case apply (cases "(l,u,a)" rule: asubst\<^sub>2.cases) apply(insert Atom, auto simp: EQ_def nolub_def split:split_if_asm) done next case Or thus ?case by (simp add: Ball_def)(metis innermost_intvl) qed auto declare[[simp_depth_limit=50]] definition "qe_interior\<^sub>1 \<phi> = (let as = DLO.atoms\<^sub>0 \<phi>; lbs = lbounds as; ubs = ubounds as; ebs = ebounds as; intrs = [And (Atom(Less l u)) (subst\<^sub>2 l u \<phi>). l\<leftarrow>lbs, u\<leftarrow>ubs] in list_disj (inf\<^sub>- \<phi> # inf\<^sub>+ \<phi> # intrs @ map (subst \<phi>) ebs))" theorem I_interior1: assumes "nqfree \<phi>" shows "DLO.I (qe_interior\<^sub>1 \<phi>) xs = (EX x. DLO.I \<phi> (x#xs))" (is "?QE = ?EX") proof assume ?QE { assume "DLO.I (inf\<^sub>- \<phi>) xs" hence ?EX using `?QE` min_inf[of \<phi> xs] `nqfree \<phi>` by(auto simp add:qe_interior\<^sub>1_def amap_fm_list_disj) } moreover { assume "DLO.I (inf\<^sub>+ \<phi>) xs" hence ?EX using `?QE` plus_inf[of \<phi> xs] `nqfree \<phi>` by(auto simp add:qe_interior\<^sub>1_def amap_fm_list_disj) } moreover { assume "\<not>DLO.I (inf\<^sub>- \<phi>) xs \<and> \<not>DLO.I (inf\<^sub>+ \<phi>) xs \<and> (\<forall>x\<in>EQ \<phi> xs. 
\<not>DLO.I \<phi> (x#xs))" with `?QE` `nqfree \<phi>` obtain l u where "DLO.I (subst\<^sub>2 l u \<phi>) xs" and "xs!l < xs!u" by(fastforce simp: qe_interior\<^sub>1_def set_lbounds set_ubounds I_subst EQ_conv_set_ebounds) moreover then obtain x where "xs!l < x \<and> x < xs!u" by(metis dense) ultimately have "DLO.I \<phi> (x # xs)" using `nqfree \<phi>` I_subst\<^sub>21[OF `nqfree \<phi>` `xs!l < xs!u`] by simp hence ?EX .. } ultimately show ?EX by blast next let ?as = "DLO.atoms\<^sub>0 \<phi>" let ?E = "set(ebounds ?as)" assume ?EX then obtain x where x: "DLO.I \<phi> (x#xs)" .. { assume "DLO.I (inf\<^sub>- \<phi>) xs \<or> DLO.I (inf\<^sub>+ \<phi>) xs" hence ?QE using `nqfree \<phi>` by(auto simp:qe_interior\<^sub>1_def) } moreover { assume "EX k : ?E. DLO.I (subst \<phi> k) xs" hence ?QE by(force simp:qe_interior\<^sub>1_def) } moreover { assume "\<not> DLO.I (inf\<^sub>- \<phi>) xs" and "\<not> DLO.I (inf\<^sub>+ \<phi>) xs" and "\<forall>k \<in> ?E. \<not> DLO.I (subst \<phi> k) xs" hence noE: "\<forall>e \<in> EQ \<phi> xs. \<not> DLO.I \<phi> (e#xs)" using `nqfree \<phi>` by (force simp:set_ebounds EQ_def I_subst) hence "x \<notin> EQ \<phi> xs" using x by fastforce obtain l where "l : LB \<phi> xs" "l < x" using LBex[OF `nqfree \<phi>` x `\<not> DLO.I(inf\<^sub>- \<phi>) xs` `x \<notin> EQ \<phi> xs`] .. obtain u where "u : UB \<phi> xs" "x < u" using UBex[OF `nqfree \<phi>` x `\<not> DLO.I(inf\<^sub>+ \<phi>) xs` `x \<notin> EQ \<phi> xs`] .. have "\<exists>l\<in>LB \<phi> xs. \<exists>u\<in>UB \<phi> xs. l<x \<and> x<u \<and> nolub \<phi> xs l x u \<and> (\<forall>y. l < y \<and> y < u \<longrightarrow> DLO.I \<phi> (y#xs))" using dense_interval[where P = "\<lambda>x. DLO.I \<phi> (x#xs)", OF finite_LB finite_UB `l:LB \<phi> xs` `u:UB \<phi> xs` `l<x` `x<u` x] x innermost_intvl[OF `nqfree \<phi>` _ _ _ `x \<notin> EQ \<phi> xs`] by (simp add:nolub_def) then obtain m n where "Less (Suc m) 0 : set ?as" "Less 0 (Suc n) : set ?as" "xs!m < x \<and> x < xs!n" "nolub \<phi> xs (xs!m) x (xs!n)" "\<forall>y. xs!m < y \<and> y < xs!n \<longrightarrow> DLO.I \<phi> (y#xs)" by blast moreover hence "DLO.I (subst\<^sub>2 m n \<phi>) xs" using noE by(force intro!: I_subst\<^sub>22[OF `nqfree \<phi>`]) ultimately have ?QE by(fastforce simp add:qe_interior\<^sub>1_def bex_Un set_lbounds set_ubounds) } ultimately show ?QE by blast qed lemma qfree_asubst\<^sub>2: "qfree (asubst\<^sub>2 l u a)" by(cases "(l,u,a)" rule:asubst\<^sub>2.cases) simp_all lemma qfree_subst\<^sub>2: "nqfree \<phi> \<Longrightarrow> qfree (subst\<^sub>2 l u \<phi>)" by(induct \<phi>) (simp_all add:qfree_asubst\<^sub>2) lemma qfree_interior1: "nqfree \<phi> \<Longrightarrow> qfree(qe_interior\<^sub>1 \<phi>)" apply(simp add:qe_interior\<^sub>1_def) apply(rule qfree_list_disj) apply (auto simp:qfree_min_inf qfree_plus_inf qfree_subst\<^sub>2 qfree_map_fm) done definition "qe_interior = DLO.lift_nnf_qe qe_interior\<^sub>1" lemma qfree_qe_interior: "qfree(qe_interior \<phi>)" by(simp add: qe_interior_def DLO.qfree_lift_nnf_qe qfree_interior1) end
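For orientation, an editorial note that is not part of the theory: over a dense linear order without endpoints, and ignoring equality atoms (which the procedure handles separately through the e-bound substitutions), the elimination performed by the interior-point method reduces to the classical equivalence $$\exists x.\ \Bigl(\bigwedge_{i}\ell_i < x\ \wedge\ \bigwedge_{j} x < u_j\Bigr)\ \longleftrightarrow\ \bigwedge_{i,j}\ell_i < u_j,$$ where density supplies a witness strictly between the largest lower bound and the smallest upper bound, and the cases with no lower or no upper bounds correspond to the minus- and plus-infinity disjuncts of the definition.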
module _ where id : {A : Set} → A → A id x = x const : {A : Set₁} {B : Set} → A → (B → A) const x = λ _ → x {-# DISPLAY const x y = x #-} infixr 4 _,_ infixr 2 _×_ record Σ (A : Set) (B : A → Set) : Set where constructor _,_ field proj₁ : A proj₂ : B proj₁ open Σ public _×_ : (A B : Set) → Set A × B = Σ A (const B) Σ-map : ∀ {A B : Set} {P : A → Set} {Q : B → Set} → (f : A → B) → (∀ {x} → P x → Q (f x)) → Σ A P → Σ B Q Σ-map f g = λ p → (f (proj₁ p) , g (proj₂ p)) foo : {A B : Set} → A × B → A × B foo = Σ-map id {!!}
{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE Strict #-} module FourierMethod.FourierSeries2D ( module FourierMethod.FourierSeries2D , module FourierMethod.BlockCudaMatrix , module FourierMethod.BlockMatrixAcc ) where import Control.DeepSeq import Control.Monad as M import Control.Monad.IO.Class import Control.Monad.Trans.Resource import qualified Data.Array.Accelerate as A import Data.Array.Accelerate.LLVM.PTX as A import Data.Array.Accelerate.Numeric.LinearAlgebra as A import Data.Array.Repa as R import Data.Complex import Data.Conduit as C import Data.Conduit.List as CL import Data.List as L import Data.Vector.Storable as VS import Data.Vector.Unboxed as VU import Foreign.CUDA.Driver as CUDA import FourierMethod.BlockCudaMatrix import FourierMethod.BlockMatrixAcc import FourierMethod.FourierSeries2DAcc import Utils.BLAS import Utils.Distribution import Utils.List import Utils.Parallel hiding ((.|)) import Utils.Time {-# INLINE harmonicMatPTX #-} harmonicMatPTX :: ( Storable e , A.Elt (Complex e) , A.Elt e , Floating (A.Exp e) , A.FromIntegral Int e ) => Int -> e -> e -> e -> PTX -> [(Int, Int)] -> CuMat (Complex e) harmonicMatPTX numPoints period delta deltaFreq ptx r2Freqs = let rows = L.length r2Freqs in CuMat rows (numPoints ^ 2) . CuVecHost . VS.fromList . A.toList . run1With ptx (harmonicAcc numPoints period delta deltaFreq) . A.fromList (A.Z A.:. (L.length r2Freqs)) $ r2Freqs -- create column-major inverse harmonics {-# INLINE inverseHarmonicMatPTX #-} inverseHarmonicMatPTX :: ( Storable e , A.Elt (Complex e) , A.Elt e , Floating (A.Exp e) , A.FromIntegral Int e ) => Int -> e -> e -> PTX -> [(Int, Int)] -> CuMat (Complex e) inverseHarmonicMatPTX numFreqs period delta ptx r2Positions = let cols = L.length r2Positions in CuMat (numFreqs ^ 2) cols . CuVecHost . VS.fromList . A.toList . run1With ptx (inverseHarmonicAcc numFreqs period delta) . A.fromList (A.Z A.:. (L.length r2Positions)) $ r2Positions {-# INLINE inverseHarmonicMatPTX1 #-} inverseHarmonicMatPTX1 :: ( Storable e , A.Elt (Complex e) , A.Elt e , Floating (A.Exp e) , A.FromIntegral Int e ) => Int -> e -> e -> PTX -> [(Int, Int)] -> CuMat (Complex e) inverseHarmonicMatPTX1 numFreqs period delta ptx r2Positions = let rows = L.length r2Positions in CuMat rows (numFreqs ^ 2) . CuVecHost . VS.fromList . A.toList . run1With ptx (inverseHarmonicAcc1 numFreqs period delta) . A.fromList (A.Z A.:. (L.length r2Positions)) $ r2Positions createHarmonicMatriesGPU :: ( Storable e , A.Elt (Complex e) , A.Elt e , Floating (A.Exp e) , A.FromIntegral Int e ) => [PTX] -> Int -> Int -> Int -> e -> e -> e -> IO [[CuMat (Complex e)]] createHarmonicMatriesGPU ptxs numBatch numPoints numFreqs period delta deltaFreq = do let idxs = L.map (divideListN numBatch) . divideListN (L.length ptxs) $ [ (xFreq, yFreq) | xFreq <- getListFromNumber numFreqs , yFreq <- getListFromNumber numFreqs ] let output = parZipWith rdeepseq (\ptx -> L.map (harmonicMatPTX numPoints period delta deltaFreq ptx)) ptxs $ idxs return output createInverseHarmonicMatriesGPU :: ( Storable e , A.Elt (Complex e) , A.Elt e , Floating (A.Exp e) , A.FromIntegral Int e ) => [PTX] -> Int -> Int -> Int -> e -> e -> IO [[CuMat (Complex e)]] createInverseHarmonicMatriesGPU ptxs numBatch numPoints numFreqs period delta = do let idxs = L.map (divideListN numBatch) . divideListN (L.length ptxs) $ [ (x, y) | x <- getListFromNumber numPoints , y <- getListFromNumber numPoints ] return . 
parZipWith rdeepseq (\ptx -> L.map (inverseHarmonicMatPTX numFreqs period delta ptx)) ptxs $ idxs computeFourierCoefficientsR2 :: ( Storable e , CUBLAS (Complex e) , Unbox e , RealFloat e , A.Elt e , A.Elt (Complex e) , Floating (A.Exp e) , A.FromIntegral Int e ) => [Int] -> [PTX] -> Int -> Int -> e -> e -> e -> Int -> [CuMat (Complex e)] -> IO (R.Array U DIM3 (Complex e)) computeFourierCoefficientsR2 deviceIDs ptxs numFreqs numPoints period delta deltaFreq numBatchR2Freqs xs = do let simpsonNorm = (delta / 3) ^ 2 :+ 0 cols = L.foldl' (\s mat -> s + getColsCuMat mat) 0 xs harmonics <- createHarmonicMatriesGPU ptxs numBatchR2Freqs numPoints numFreqs period delta deltaFreq coefs <- blockMatrixMultiply2 True deviceIDs harmonics xs return . fromUnboxed (Z :. cols :. numFreqs :. numFreqs) . VU.map (* simpsonNorm) . VS.convert . getHostCuMat $ coefs computeFourierSeriesR2 :: ( Storable e , CUBLAS (Complex e) , Unbox e , RealFloat e , A.Elt e , A.Elt (Complex e) , Floating (A.Exp e) , A.FromIntegral Int e ) => [Int] -> Int -> Int -> e -> [[CuMat (Complex e)]] -> [CuMat (Complex e)] -> IO (R.Array U DIM3 (Complex e)) computeFourierSeriesR2 deviceIDs numFreqs numPoints period inverseHarmonics xs = do let rows = L.foldl' (\s mat -> s + getRowsCuMat mat) 0 xs series <- blockMatrixMultiply1 False deviceIDs xs inverseHarmonics return . fromUnboxed (Z :. rows :. numPoints :. numPoints) . VS.convert . getHostCuMat $ series {-# INLINE applyGaussian #-} applyGaussian :: (R.Source s (Complex e), Unbox e, RealFloat e) => e -> e -> R.Array s DIM3 (Complex e) -> R.Array D DIM3 (Complex e) applyGaussian deltaFreq std arr = let (Z :. _ :. numFreq :. _) = extent arr freq = div numFreq 2 gaussianCoefficients = computeUnboxedS . R.fromFunction (Z :. numFreq :. numFreq) $ \(Z :. xFreq :. yFreq) -> gaussian2D (deltaFreq * (fromIntegral $ xFreq - freq)) (deltaFreq * (fromIntegral $ yFreq - freq)) std :+ 0 in R.traverse2 arr gaussianCoefficients const $ \fC fG idx@(Z :. _ :. i :. j) -> fC idx * fG (Z :. i :. j) -- Stream {-# INLINE indexSource #-} indexSource :: Int -> Int -> ConduitT () [(Int, Int)] (ResourceT IO) () indexSource numIndex numBatch = let xs = divideListN numBatch $ [ (xFreq, yFreq) | xFreq <- getListFromNumber numIndex , yFreq <- getListFromNumber numIndex ] in CL.sourceList xs {-# INLINE fourierCoefficientsConduit #-} fourierCoefficientsConduit :: ( Storable e , CUBLAS (Complex e) , Unbox e , RealFloat e , A.Elt e , A.Elt (Complex e) , Floating (A.Exp e) , A.FromIntegral Int e , Show e ) => [Int] -> [PTX] -> Int -> e -> e -> e -> [CuMat (Complex e)] -> ConduitT [(Int, Int)] (CuMat (Complex e)) (ResourceT IO) () fourierCoefficientsConduit deviceIDs ptxs numPoints period delta deltaFreq xs = awaitForever $ \idx' -> do let idxs = divideListN (L.length deviceIDs) idx' harmonics = parMap rdeepseq (uncurry (harmonicMatPTX numPoints period delta deltaFreq)) $ L.zip ptxs idxs liftIO $ printCurrentTime "" coefs <- liftIO $ blockMatrixMultiply3 False deviceIDs harmonics xs yield $!! coefs {-# INLINE fourierCoefficientsSink #-} fourierCoefficientsSink :: (Storable e, Unbox e, RealFloat e, Show e) => Int -> e -> ConduitT (CuMat (Complex e)) Void (ResourceT IO) ((R.Array U DIM3 (Complex e))) fourierCoefficientsSink numR2Freqs delta = do xs <- CL.consume let cols = getColsCuMat . L.head $ xs simpsonNorm = (delta / 3) ^ 2 :+ 0 return . fromUnboxed (Z :. cols :. numR2Freqs :. numR2Freqs) . VU.map (* simpsonNorm) . VS.convert . getHostCuMat . transposeCuMat . 
concatCuMat $ xs computeFourierCoefficientsR2Stream :: ( Storable e , CUBLAS (Complex e) , Unbox e , RealFloat e , A.Elt e , A.Elt (Complex e) , Floating (A.Exp e) , A.FromIntegral Int e ) => [Int] -> [PTX] -> Int -> Int -> e -> e -> e -> Int -> [CuMat (Complex e)] -> IO (R.Array U DIM3 (Complex e)) computeFourierCoefficientsR2Stream deviceIDs ptxs numFreqs numPoints period delta deltaFreq numBatch xs = runConduitRes $ indexSource numFreqs numBatch .| fourierCoefficientsConduit deviceIDs ptxs numPoints period delta deltaFreq xs .| fourierCoefficientsSink numFreqs delta {-# INLINE fourierSeriesConduit #-} fourierSeriesConduit :: ( Storable e , CUBLAS (Complex e) , Unbox e , RealFloat e , A.Elt e , A.Elt (Complex e) , Floating (A.Exp e) , A.FromIntegral Int e , Show e ) => [Int] -> [PTX] -> Int -> e -> e -> [CuMat (Complex e)] -> ConduitT [(Int, Int)] (CuMat (Complex e)) (ResourceT IO) () fourierSeriesConduit deviceIDs ptxs numFreqs period delta xs = awaitForever $ \idx' -> do let idxs = divideListN (L.length deviceIDs) idx' harmonics = parMap rdeepseq (uncurry (inverseHarmonicMatPTX1 numFreqs period delta)) $ L.zip ptxs idxs liftIO $ printCurrentTime "fourierSeriesConduit" coefs <- liftIO $ blockMatrixMultiply3 False deviceIDs harmonics xs yield $!! coefs {-# INLINE fourierSeriesSink #-} fourierSeriesSink :: (Storable e, Unbox e, RealFloat e, Show e) => Int -> ConduitT (CuMat (Complex e)) Void (ResourceT IO) ((R.Array U DIM3 (Complex e))) fourierSeriesSink numPoints = do xs <- CL.consume let cols = getColsCuMat . L.head $ xs return . fromUnboxed (Z :. cols :. numPoints :. numPoints) . VS.convert . getHostCuMat . transposeCuMat . concatCuMat $ xs computeFourierSeriesR2Stream :: ( Storable e , CUBLAS (Complex e) , Unbox e , RealFloat e , A.Elt e , A.Elt (Complex e) , Floating (A.Exp e) , A.FromIntegral Int e ) => [Int] -> [PTX] -> Int -> Int -> e -> e -> Int -> [CuMat (Complex e)] -> IO (R.Array U DIM3 (Complex e)) computeFourierSeriesR2Stream deviceIDs ptxs numFreqs numPoints period delta numBatch xs = runConduitRes $ indexSource numPoints numBatch .| fourierSeriesConduit deviceIDs ptxs numFreqs period delta xs .| fourierSeriesSink numPoints {-# INLINE fourierSeriesConduitAcc #-} fourierSeriesConduitAcc :: ( A.Floating e , A.Elt (Complex e) , A.FromIntegral Int e , Numeric (Complex e) , Unbox e ) => [PTX] -> Int -> e -> e -> A.Acc (Matrix (Complex e)) -> ConduitT [(Int, Int)] (VU.Vector (Complex e)) (ResourceT IO) () fourierSeriesConduitAcc ptxs numFreqs period delta x = awaitForever $ \idx' -> do let idxs = divideListN (L.length ptxs) idx' harmonics = L.map (\r2Positions -> inverseHarmonicAcc2 numFreqs period delta . A.fromList (A.Z A.:. L.length r2Positions) $ r2Positions) $ idxs liftIO $ printCurrentTime "fourierSeriesConduit" yield $!! blockMatrixMultiply ptxs harmonics x {-# INLINE fourierSeriesSinkAcc #-} fourierSeriesSinkAcc :: (Unbox e) => Int -> Int -> ConduitT (VU.Vector (Complex e)) Void (ResourceT IO) (R.Array U DIM3 (Complex e)) fourierSeriesSinkAcc numPoints cols = do xs <- CL.consume computeP . R.backpermute (Z :. cols :. numPoints :. numPoints) (\(Z :. a :. b :. c) -> Z :. b :. c :. a) . fromUnboxed (Z :. numPoints :. numPoints :. cols) . 
VU.concat $ xs computeFourierSeriesR2StreamAcc :: ( A.Floating e , A.Elt (Complex e) , A.FromIntegral Int e , Numeric (Complex e) , Unbox e ) => [PTX] -> Int -> Int -> Int -> e -> e -> Int -> Acc (Matrix (Complex e)) -> IO (R.Array U DIM3 (Complex e)) computeFourierSeriesR2StreamAcc ptxs numFreqs numPoints cols period delta numBatch x = runConduitRes $ indexSource numPoints numBatch .| fourierSeriesConduitAcc ptxs numFreqs period delta (A.compute x) .| fourierSeriesSinkAcc numPoints cols {-# INLINE fourierSeriesConduitAcc' #-} fourierSeriesConduitAcc' :: ( A.Floating e , A.Elt (Complex e) , A.FromIntegral Int e , Numeric (Complex e) , Unbox e , Prelude.Num e ) => [PTX] -> Int -> e -> e -> A.Acc (Matrix (Complex e)) -> ConduitT [(Int, Int)] (VU.Vector (Complex e)) (ResourceT IO) () fourierSeriesConduitAcc' ptxs numFreqs period delta x = awaitForever $ \idx' -> do let idxs = divideListN (L.length ptxs) idx' harmonics = L.map (\r2Positions -> inverseHarmonicAcc2' numFreqs period delta . A.fromList (A.Z A.:. L.length r2Positions) $ r2Positions) $ idxs liftIO $ printCurrentTime "fourierSeriesConduit" yield $ blockMatrixMultiply ptxs harmonics x computeFourierSeriesR2StreamAcc' :: ( A.Floating e , A.Elt (Complex e) , A.FromIntegral Int e , Numeric (Complex e) , Unbox e , Prelude.Num e ) => [PTX] -> Int -> Int -> Int -> e -> e -> Int -> Acc (Matrix (Complex e)) -> IO (R.Array U DIM3 (Complex e)) computeFourierSeriesR2StreamAcc' ptxs numFreqs numPoints cols period delta numBatch x = runConduitRes $ indexSource numPoints numBatch .| fourierSeriesConduitAcc' ptxs numFreqs period delta (A.compute x) .| fourierSeriesSinkAcc numPoints cols
// (C) Copyright Christopher Kormanyos 1999 - 2021. // (C) Copyright Matt Borland 2021. // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_MATH_CCMATH_FREXP_HPP #define BOOST_MATH_CCMATH_FREXP_HPP #include <cmath> #include <limits> #include <type_traits> #include <boost/math/ccmath/isinf.hpp> #include <boost/math/ccmath/isnan.hpp> #include <boost/math/ccmath/isfinite.hpp> namespace boost::math::ccmath { namespace detail { template <typename Real> inline constexpr Real frexp_zero_impl(Real arg, int* exp) { *exp = 0; return arg; } template <typename Real> inline constexpr Real frexp_impl(Real arg, int* exp) { const bool negative_arg = (arg < Real(0)); Real f = negative_arg ? -arg : arg; int e2 = 0; constexpr Real two_pow_32 = Real(4294967296); while (f >= two_pow_32) { f = f / two_pow_32; e2 += 32; } while(f >= Real(1)) { f = f / Real(2); ++e2; } if(exp != nullptr) { *exp = e2; } return !negative_arg ? f : -f; } } // namespace detail template <typename Real, std::enable_if_t<!std::is_integral_v<Real>, bool> = true> inline constexpr Real frexp(Real arg, int* exp) { if(BOOST_MATH_IS_CONSTANT_EVALUATED(arg)) { return arg == Real(0) ? detail::frexp_zero_impl(arg, exp) : arg == Real(-0) ? detail::frexp_zero_impl(arg, exp) : boost::math::ccmath::isinf(arg) ? detail::frexp_zero_impl(arg, exp) : boost::math::ccmath::isnan(arg) ? detail::frexp_zero_impl(arg, exp) : boost::math::ccmath::detail::frexp_impl(arg, exp); } else { using std::frexp; return frexp(arg, exp); } } template <typename Z, std::enable_if_t<std::is_integral_v<Z>, bool> = true> inline constexpr double frexp(Z arg, int* exp) { return boost::math::ccmath::frexp(static_cast<double>(arg), exp); } inline constexpr float frexpf(float arg, int* exp) { return boost::math::ccmath::frexp(arg, exp); } #ifndef BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS inline constexpr long double frexpl(long double arg, int* exp) { return boost::math::ccmath::frexp(arg, exp); } #endif } #endif // BOOST_MATH_CCMATH_FREXP_HPP
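As a reminder of the contract this constexpr path mirrors, stated here as an editorial note about standard frexp semantics: a finite non-zero argument is decomposed as $\mathrm{arg}=f\cdot 2^{e}$ with $|f|\in[1/2,1)$, for example $$12.0 = 0.75\times 2^{4},\qquad \texttt{frexp(12.0, \&e)} = 0.75\ \text{with}\ e=4,$$ and the two loops in frexp_impl count the powers of two (in chunks of 32, then one at a time) that are divided out of $|\mathrm{arg}|$ to bring the stored fraction below 1.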
module Crypto.Curve.Weierstrass import Crypto.Curve import Data.Bits import Data.List import Data.Vect import Utils.Bytes import Utils.Misc -- Weiserstrass curve y^2 = x^3 + ax + b in Jacobian coordinate public export interface WeierstrassPoint p where a_coefficent : Integer b_coefficent : Integer prime : Integer -- If this isn't prime the skinwalker will devour you to_jacobian : p -> (Integer, Integer, Integer) from_jacobian : (Integer, Integer, Integer) -> p g : p to_from_jacobian : (x : (Integer, Integer, Integer)) -> to_jacobian (from_jacobian x) = x bits' : Nat curve_n : Integer j_point_double : WeierstrassPoint p => (Integer, Integer, Integer) -> (Integer, Integer, Integer) j_point_double (x, y, z) = let modulus = prime {p=p} s = (4 * x * y * y) `mod'` modulus z4 = pow_mod z 4 modulus y4 = pow_mod y 4 modulus m = (3 * x * x + a_coefficent {p} * z4) `mod'` modulus x' = (m * m - 2 * s) `mod'` modulus y' = (m * (s - x') - 8 * y4) `mod'` modulus z' = (2 * y * z) `mod'` modulus in (x', y', z') j_point_add : WeierstrassPoint p => (Integer, Integer, Integer) -> (Integer, Integer, Integer) -> (Integer, Integer, Integer) j_point_add (x, y, 0) b = b j_point_add a (x, y, 0) = a j_point_add a@(xp, yp, zp) b@(xq, yq, zq) = let m = prime {p=p} zq2 = pow_mod zq 2 m zq3 = pow_mod zq 3 m zp2 = pow_mod zp 2 m zp3 = pow_mod zp 3 m u1 = mul_mod xp zq2 m u2 = mul_mod xq zp2 m s1 = mul_mod yp zq3 m s2 = mul_mod yq zp3 m h = u2 - u1 r = s2 - s1 h2 = pow_mod h 2 m h3 = mul_mod h h2 m u1h2 = mul_mod u1 h2 m nx = ((r * r) - h3 - 2 * u1h2) `mod'` m ny = (r * (u1h2 - nx) - s1 * h3) `mod'` m nz = (h * zp * zq) `mod'` m in if h == 0 then (if r == 0 then j_point_double {p=p} a else (0, 1, 0)) else (nx, ny, nz) point_double : (Point p, WeierstrassPoint p) => p -> p point_double b = from_jacobian {p=p} (j_point_double {p=p} (to_jacobian b)) mul' : (Point p, WeierstrassPoint p) => p -> p -> Nat -> Integer -> p mul' r0 r1 m d = let (r0', r1') = if testBit d m then (point_add r0 r1, point_double r1) else (point_double r0, point_add r0 r1) in case m of S m' => mul' r0' r1' m' d Z => r0' public export WeierstrassPoint p => Point p where infinity = from_jacobian (0, 1, 0) generator = g bits = bits' {p=p} to_affine point = let (x, y, z) = to_jacobian point m = prime {p=p} z' = inv_mul_mod z m z2 = z' * z' z3 = z2 * z' in (mul_mod x z2 m, mul_mod y z3 m) modulus = prime {p=p} order = curve_n {p=p} point_add a b = from_jacobian {p=p} (j_point_add {p=p} (to_jacobian a) (to_jacobian b)) mul s pt = mul' infinity pt (bits {p=p}) s encode point = let bytes = (7 + bits {p=p}) `div` 8 (x', y') = to_affine point x = toList $ integer_to_be bytes x' y = toList $ integer_to_be bytes y' in 4 :: (x <+> y) decode (4 :: body) = do let bytes = (7 + bits {p=p}) `div` 8 let (x', y') = splitAt bytes body x <- map be_to_integer $ exactLength bytes $ fromList x' y <- map be_to_integer $ exactLength bytes $ fromList y' -- infinity check guard $ not $ (x == 0) && (y == 0) -- check on curve let pri = modulus {p=p} let lhs = pow_mod y 2 pri let a = a_coefficent {p=p} let b = b_coefficent {p=p} let rhs = ((pow_mod x 3 pri) + (mul_mod x a pri) + b) `mod'` pri guard $ lhs == rhs pure $ from_jacobian (x, y, 1) decode _ = Nothing public export data P256 : Type where MkP256 : (Integer, Integer, Integer) -> P256 public export WeierstrassPoint P256 where prime = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff a_coefficent = 0xffffffff00000001000000000000000000000000fffffffffffffffffffffffc b_coefficent = 
0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b from_jacobian = MkP256 to_jacobian (MkP256 p) = p g = MkP256 ( 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296 , 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5 , 1 ) to_from_jacobian x = Refl bits' = 256 curve_n = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551 public export data P384 : Type where MkP384 : (Integer, Integer, Integer) -> P384 public export WeierstrassPoint P384 where prime = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff a_coefficent = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffffc b_coefficent = 0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef from_jacobian = MkP384 to_jacobian (MkP384 p) = p g = MkP384 ( 0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7 , 0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f , 1 ) to_from_jacobian x = Refl bits' = 384 curve_n = 0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973 public export data P521 : Type where MkP521 : (Integer, Integer, Integer) -> P521 public export WeierstrassPoint P521 where prime = 0x01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff a_coefficent = 0x01fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc b_coefficent = 0x0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00 from_jacobian = MkP521 to_jacobian (MkP521 p) = p g = MkP521 ( 0x00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66 , 0x011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650 , 1 ) to_from_jacobian x = Refl bits' = 521 curve_n = 0x01fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409
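A note on the coordinate convention, added editorially but consistent with what to_jacobian, to_affine, j_point_add and j_point_double compute: a Jacobian triple $(X, Y, Z)$ with $Z \neq 0$ represents the affine point $$(x,y)=\Bigl(\frac{X}{Z^{2}},\ \frac{Y}{Z^{3}}\Bigr),\qquad y^{2}=x^{3}+ax+b\ \Longleftrightarrow\ Y^{2}=X^{3}+aXZ^{4}+bZ^{6},$$ while triples with $Z=0$, such as the $(0,1,0)$ used for infinity, stand for the point at infinity; this is why to_affine multiplies $X$ and $Y$ by the modular inverses of $Z^{2}$ and $Z^{3}$.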
Formal statement is: lemma closed_Nats [simp]: "closed (\<nat> :: 'a :: real_normed_algebra_1 set)" Informal statement is: The set of natural numbers is a closed subset of a real normed algebra with identity.
theory Boolean_functions imports Main "Jordan_Normal_Form.Matrix" begin section\<open>Boolean functions\<close> text\<open>Definition of monotonicity\<close> text\<open>We consider (monotone) Boolean functions over vectors of length $n$, so that we can later prove that those are isomorphic to simplicial complexes of dimension $n$ (in $n$ vertexes).\<close> locale boolean_functions = fixes n::"nat" begin definition bool_fun_dim_n :: "(bool vec => bool) set" where "bool_fun_dim_n = {f. f \<in> carrier_vec n \<rightarrow> (UNIV::bool set)}" definition monotone_bool_fun :: "(bool vec => bool) => bool" where "monotone_bool_fun \<equiv> (mono_on (carrier_vec n))" definition monotone_bool_fun_set :: "(bool vec => bool) set" where "monotone_bool_fun_set = (Collect monotone_bool_fun)" text\<open>Some examples of Boolean functions\<close> definition bool_fun_top :: "bool vec => bool" where "bool_fun_top f = True" definition bool_fun_bot :: "bool vec => bool" where "bool_fun_bot f = False" end section\<open>Threshold function\<close> definition count_true :: "bool vec => nat" where "count_true v = sum (\<lambda>i. if vec_index v i then 1 else 0::nat) {0..<dim_vec v}" lemma "vec_index (vec (5::nat) (\<lambda>i. False)) 2 = False" by simp lemma "vec_index (vec (5::nat) (\<lambda>i. True)) 3 = True" by simp lemma "count_true (vec (1::nat) (\<lambda>i. True)) = 1" unfolding count_true_def by simp lemma "count_true (vec (2::nat) (\<lambda>i. True)) = 2" unfolding count_true_def by simp lemma "count_true (vec (5::nat) (\<lambda>i. True)) = 5" unfolding count_true_def by simp text\<open>The threshold function is a Boolean function which also satisfies the condition of being \emph{evasive}. We follow the definition by Scoville~\<^cite>\<open>\<open>Problem 6.5\<close> in "SC19"\<close>.\<close> definition bool_fun_threshold :: "nat => (bool vec => bool)" where "bool_fun_threshold i = (\<lambda>v. if i \<le> count_true v then True else False)" context boolean_functions begin lemma "mono_on UNIV bool_fun_top" by (simp add: bool_fun_top_def mono_onI monotone_bool_fun_def) lemma "monotone_bool_fun bool_fun_top" by (simp add: bool_fun_top_def mono_onI monotone_bool_fun_def) lemma "mono_on UNIV bool_fun_bot" by (simp add: bool_fun_bot_def mono_onI monotone_bool_fun_def) lemma "monotone_bool_fun bool_fun_bot" by (simp add: bool_fun_bot_def mono_onI monotone_bool_fun_def) lemma monotone_count_true: assumes ulev: "(u::bool vec) \<le> v" shows "count_true u \<le> count_true v" unfolding count_true_def using Groups_Big.ordered_comm_monoid_add_class.sum_mono [of "{0..<dim_vec u}" "(\<lambda>i. if vec_index u i then 1 else 0)" "(\<lambda>i. if vec_index v i then 1 else 0)"] using ulev unfolding Matrix.less_eq_vec_def by fastforce text\<open>The threshold function is monotone.\<close> lemma monotone_threshold: assumes ulev: "(u::bool vec) \<le> v" shows "bool_fun_threshold n u \<le> bool_fun_threshold n v" unfolding bool_fun_threshold_def using monotone_count_true [OF ulev] by simp lemma assumes "(u::bool vec) \<le> v" and "n < dim_vec u" shows "bool_fun_threshold n u \<le> bool_fun_threshold n v" using monotone_threshold [OF assms(1)] . lemma "mono_on UNIV (bool_fun_threshold n)" by (meson mono_onI monotone_bool_fun_def monotone_threshold) lemma "monotone_bool_fun (bool_fun_threshold n)" unfolding monotone_bool_fun_def by (meson boolean_functions.monotone_threshold mono_onI) end end
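A tiny worked instance of the threshold function above, given as an illustration rather than as part of the theory: for the length-3 vector $v=(\mathrm{True},\mathrm{False},\mathrm{True})$, $$\mathrm{count\_true}\ v=2,\qquad \mathrm{bool\_fun\_threshold}\ 2\ v=\mathrm{True},\qquad \mathrm{bool\_fun\_threshold}\ 3\ v=\mathrm{False},$$ and since turning any False component to True can only increase count_true, the threshold function is monotone, which is exactly the content of monotone_count_true and monotone_threshold.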
State Before: x : ℂ hx : ↑abs x ≤ 1 ⊢ ↑abs (exp x - 1) = ↑abs (exp x - ∑ m in range 1, x ^ m / ↑(Nat.factorial m)) State After: no goals Tactic: simp [sum_range_succ] State Before: x : ℂ hx : ↑abs x ≤ 1 ⊢ 0 < 1 State After: no goals Tactic: decide State Before: x : ℂ hx : ↑abs x ≤ 1 ⊢ ↑abs x ^ 1 * (↑(Nat.succ 1) * (↑(Nat.factorial 1) * ↑1)⁻¹) = 2 * ↑abs x State After: no goals Tactic: simp [two_mul, mul_two, mul_add, mul_comm, add_mul]
Formal statement is: lemma coeff_pcompose_semiring_closed: assumes "\<And>i. coeff p i \<in> R" "\<And>i. coeff q i \<in> R" shows "coeff (pcompose p q) i \<in> R" Informal statement is: If the coefficients of two polynomials are in a semiring, then the coefficients of their composition are also in the semiring.
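A degree-one case makes the statement concrete (an added illustration): take $p=a_0+a_1x$ and $q=b_0+b_1x$ with all coefficients in $R$; then $$p\circ q = a_0 + a_1(b_0+b_1x) = (a_0+a_1b_0) + (a_1b_1)\,x,$$ so every coefficient of the composition is a sum of products of coefficients of $p$ and $q$ and therefore stays in $R$ whenever $R$ is closed under addition and multiplication.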
""" Layer Defines a new layer within the video. # Fields - `frames::Frames`: A range of frames for which the `Layer` exists - `width::Int`: Width of the layer - `height::Int`: hegiht of the layer - `position::Point`: initial positon of the center of the layer on the main canvas - `layer_objects::Vector{AbstractObject}`: Objects defined under the layer - `actions::Vector{AbstractAction}`: a list of actions applied to the entire layer - `current_setting::LayerSetting`: The current state of the layer see [`LayerSetting`](@ref) - `opts::Dict{Symbol,Any}`: can hold any options defined by the user - `image_matrix::Vector`: Hold the Drwaing of the layer as a Luxor image matrix """ mutable struct Layer <: AbstractObject frames::Frames width::Int height::Int position::Point layer_objects::Vector{AbstractObject} actions::Vector{AbstractAction} current_setting::LayerSetting opts::Dict{Symbol,Any} image_matrix::Union{Array,Nothing} layer_cache::LayerCache end """ CURRENT_LAYER holds the current layer in an array to be declared as a constant The current layer can be accessed using CURRENT_LAYER[1] """ const CURRENT_LAYER = Array{Layer,1}() # for width, height and position defaults are defined in the to_layer_m function function Layer( frames, width, height, position::Point; layer_objects::Vector{AbstractObject} = AbstractObject[], actions::Vector{AbstractAction} = AbstractAction[], setting::LayerSetting = LayerSetting(), misc::Dict{Symbol,Any} = Dict{Symbol,Any}(), mat = nothing, layer_cache::LayerCache = LayerCache(), ) if width === nothing width = CURRENT_VIDEO[1].width end if height === nothing height = CURRENT_VIDEO[1].height end layer = Layer( frames, width, height, position, layer_objects, actions, setting, misc, mat, layer_cache, ) if isempty(CURRENT_LAYER) push!(CURRENT_LAYER, layer) else CURRENT_LAYER[1] = layer end push!(CURRENT_VIDEO[1].layers, layer) return layer end
||| The \"Main\" module of 'algebra'. module Main -- The interface to the 'algebra' project. import Algebra ||| The \"main\" function of 'algebra'. main : IO () main = putStrLn $ show Answer
State Before: a b c p q : ℚ ⊢ p = q ↔ p.num * ↑q.den = q.num * ↑p.den State After: a b c p q : ℚ ⊢ p.num /. ↑p.den = q.num /. ↑q.den ↔ p.num * ↑q.den = q.num * ↑p.den Tactic: conv => lhs rw [← @num_den p, ← @num_den q] State Before: case z₂ a b c p q : ℚ ⊢ ↑q.den ≠ 0 State After: case z₂ a b c p q : ℚ ⊢ ¬q.den = 0 Tactic: rw [← Nat.cast_zero, Ne, Int.ofNat_inj] State Before: case z₂ a b c p q : ℚ ⊢ ¬q.den = 0 State After: no goals Tactic: apply den_nz
section propositional variables P Q R : Prop ------------------------------------------------ -- Proposições de dupla negaço: ------------------------------------------------ theorem doubleneg_intro : P → ¬¬P := begin intro p, by_contra, have i := h p, exact i, end theorem doubleneg_elim : ¬¬P → P := begin intro p, by_contra, have i := p h, exact i, end theorem doubleneg_law : ¬¬P ↔ P := begin split, intro p, by_contra, exact p h, intro p, by_contra, exact h p, end ------------------------------------------------ -- Comutatividade dos ∨,∧: ------------------------------------------------ theorem disj_comm : (P ∨ Q) → (Q ∨ P) := begin intro p, cases p, right, exact p, left, exact p, end theorem conj_comm : (P ∧ Q) → (Q ∧ P) := begin intro p, cases p with q r, split, exact r, exact q, end ------------------------------------------------ -- Proposições de interdefinabilidade dos →,∨: ------------------------------------------------ theorem impl_as_disj_converse : (¬P ∨ Q) → (P → Q) := begin intros p q, cases p, by_contra, exact p q, exact p, end theorem disj_as_impl : (P ∨ Q) → (¬P → Q) := begin intros p q, cases p, by_contra, exact q p, exact p, end ------------------------------------------------ -- Proposições de contraposição: ------------------------------------------------ theorem impl_as_contrapositive : (P → Q) → (¬Q → ¬P) := begin intros p q r, have i := p r, exact q i, end theorem impl_as_contrapositive_converse : (¬Q → ¬P) → (P → Q) := begin intros p q, by_contra, have i := p h, exact i q, end theorem contrapositive_law : (P → Q) ↔ (¬Q → ¬P) := begin split, intros p q r, have i := p r, exact q i, intros p q, by_contra, have i := p h, exact i q, end ------------------------------------------------ -- A irrefutabilidade do LEM: ------------------------------------------------ theorem lem_irrefutable : ¬¬(P∨¬P) := begin intro p, apply p, right, by_contra, have q : P ∨ ¬P, left, exact h, have i := p q, exact i, end ------------------------------------------------ -- A lei de Peirce ------------------------------------------------ theorem peirce_law_weak : ((P → Q) → P) → ¬¬P := begin intros p q, have r : P, apply p, intro s, exfalso, exact q s, exact q r, end ------------------------------------------------ -- Proposições de interdefinabilidade dos ∨,∧: ------------------------------------------------ theorem disj_as_negconj : P∨Q → ¬(¬P∧¬Q) := begin intros p q, cases q with r s, cases p, exact r p, exact s p, end theorem conj_as_negdisj : P∧Q → ¬(¬P∨¬Q) := begin intros p q, cases p with r s, cases q with t u, exact t r, exact u s, end ------------------------------------------------ -- As leis de De Morgan para ∨,∧: ------------------------------------------------ theorem demorgan_ndisj : ¬(P∨Q) → (¬P ∧ ¬Q) := begin intro p, split, by_contra, have q : P ∨ Q, left, exact h, exact p q, by_contra, have q : P ∨ Q, right, exact h, exact p q, end theorem demorgan_ndisj_converse : (¬P ∧ ¬Q) → ¬(P∨Q) := begin intros p q, cases p with r s, cases q with j k, exact r j, exact s k, end theorem demorgan_nconj_converse : (¬Q ∨ ¬P) → ¬(P∧Q) := begin intros p q, cases p with r s, cases q with j k, exact r k, cases q with x z, exact s x, end ------------------------------------------------ -- Proposições de distributividade dos ∨,∧: ------------------------------------------------ theorem distr_conj_disj : P∧(Q∨R) → (P∧Q)∨(P∧R) := begin intro p, cases p with q r, cases r with s t, left, split, exact q, exact s, right, split, exact q, exact t, end theorem distr_conj_disj_converse : (P∧Q)∨(P∧R) → 
P∧(Q∨R) := begin intro p, cases p with q r, split, cases q with s t, exact s, cases q with u v, left, exact v, cases r with w x, split, exact w, right, exact x, end theorem distr_disj_conj : P∨(Q∧R) → (P∨Q)∧(P∨R) := begin intro p, split, cases p with q r s, left, exact q, cases r with t u, right, exact t, cases p with v w, left, exact v, cases w with x y, right, exact y, end theorem distr_disj_conj_converse : (P∨Q)∧(P∨R) → P∨(Q∧R) := begin intro p, cases p with q r, cases q, left, exact q, cases r, left, exact r, right, split, exact q, exact r, end ------------------------------------------------ -- Currificação ------------------------------------------------ theorem curry_prop : ((P∧Q)→R) → (P→(Q→R)) := begin intros p q r, apply p, split, exact q, exact r, end theorem uncurry_prop : (P→(Q→R)) → ((P∧Q)→R) := begin intros p q, apply p, cases q with r s, exact r, cases q with r s, exact s, end ------------------------------------------------ -- Reflexividade da →: ------------------------------------------------ theorem impl_refl : P → P := begin intro p, exact p, end ------------------------------------------------ -- Weakening and contraction: ------------------------------------------------ theorem weaken_disj_right : P → (P∨Q) := begin intro p, left, exact p, end theorem weaken_disj_left : Q → (P∨Q) := begin intro p, right, exact p, end theorem weaken_conj_right : (P∧Q) → P := begin intro p, cases p with q r, exact q, end theorem weaken_conj_left : (P∧Q) → Q := begin intro p, cases p with q r, exact r, end theorem conj_idempot : (P∧P) ↔ P := begin split, intro p, cases p with q r, exact q, intro p, split, exact p, exact p, end theorem disj_idemp : (P∨P) ↔ P := begin split, intro p, cases p, exact p, exact p, intro p, left, exact p, end end propositional ---------------------------------------------------------------- section predicate variable U : Type variables P Q : U -> Prop ------------------------------------------------ -- As leis de De Morgan para ∃,∀: ------------------------------------------------ theorem demorgan_forall_converse : (∃x, ¬P x) → ¬(∀x, P x) := begin intros p, cases p with q r, intro s, have i := s q, exact r i, end theorem demorgan_exists : ¬(∃x, P x) → (∀x, ¬P x) := begin intros p q r, apply p, existsi q, exact r, end theorem demorgan_exists_converse : (∀x, ¬P x) → ¬(∃x, P x) := begin intros p q, cases q with r s, have i := p r, exact i s, end theorem demorgan_forall : ¬(∀x, P x) → (∃x, ¬P x) := begin intro p, by_contra r, apply p, intro q, by_contra, apply r, existsi q, exact h, end theorem demorgan_forall_law : ¬(∀x, P x) ↔ (∃x, ¬P x) := begin split, apply demorgan_forall, apply demorgan_forall_converse, end theorem demorgan_exists_law : ¬(∃x, P x) ↔ (∀x, ¬P x) := begin split, apply demorgan_exists, apply demorgan_exists_converse, end ------------------------------------------------ -- Proposições de interdefinabilidade dos ∃,∀: ------------------------------------------------ theorem exists_as_neg_forall : (∃x, P x) → ¬(∀x, ¬P x) := begin intros p q, cases p with r s, have i := q r, exact i s, end theorem forall_as_neg_exists : (∀x, P x) → ¬(∃x, ¬P x) := begin intros p q, cases q with r s, have i := p r, exact s i, end theorem forall_as_neg_exists_converse : ¬(∃x, ¬P x) → (∀x, P x) := begin intros p q, by_contra, apply p, existsi q, exact h, end theorem exists_as_neg_forall_converse : ¬(∀x, ¬P x) → (∃x, P x) := begin intro p, by_contra, apply p, intros q r, apply h, existsi q, exact r, end theorem forall_as_neg_exists_law : (∀x, P x) ↔ ¬(∃x, ¬P x) := begin 
split, apply forall_as_neg_exists, apply forall_as_neg_exists_converse, end theorem exists_as_neg_forall_law : (∃x, P x) ↔ ¬(∀x, ¬P x) := begin split, apply exists_as_neg_forall, apply exists_as_neg_forall_converse, end ------------------------------------------------ -- Proposições de distributividade de quantificadores: ------------------------------------------------ theorem exists_conj_as_conj_exists : (∃x, P x ∧ Q x) → (∃x, P x) ∧ (∃x, Q x) := begin intro p, cases p with q r, cases r with s t, split, existsi q, exact s, existsi q, exact t, end theorem exists_disj_as_disj_exists : (∃x, P x ∨ Q x) → (∃x, P x) ∨ (∃x, Q x) := begin intro p, cases p with q r, cases r with s t, left, existsi q, exact s, right, existsi q, exact t, end theorem exists_disj_as_disj_exists_converse : (∃x, P x) ∨ (∃x, Q x) → (∃x, P x ∨ Q x) := begin intro p, cases p with q r, cases q with s t, existsi s, left, exact t, cases r with s t, existsi s, right, exact t, end theorem forall_conj_as_conj_forall : (∀x, P x ∧ Q x) → (∀x, P x) ∧ (∀x, Q x) := begin intro p, split, intro q, by_contra, have i := p q, cases i with j k, exact h j, intro s, have i := p s, cases i with j k, by_contra, exact h k, end theorem forall_conj_as_conj_forall_converse : (∀x, P x) ∧ (∀x, Q x) → (∀x, P x ∧ Q x) := begin intros p q, split, cases p with r s, have i := r q, by_contra, exact h i, cases p with r s, by_contra, have i := s q, exact h i, end theorem forall_disj_as_disj_forall_converse : (∀x, P x) ∨ (∀x, Q x) → (∀x, P x ∨ Q x) := begin intros p q, cases p with r s, have i := r q, left, by_contra, exact h i, have i := s q, right, by_contra, exact h i, end /--NOT THEOREMS -------------------------------- theorem forall_disj_as_disj_forall : (∀x, P x ∨ Q x) → (∀x, P x) ∨ (∀x, Q x) := begin intro p, right, intro q, have i := p q, cases i, --teorema inválido end theorem exists_conj_as_conj_exists_converse : (∃x, P x) ∧ (∃x, Q x) → (∃x, P x ∧ Q x) := begin intro p, cases p with q r, cases q with s t, cases r with u v, existsi u, split, --teorema inválido end ---------------------------------------------- -/ end predicate
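The two statements quarantined in the NOT THEOREMS block really are unprovable; a standard countermodel, added here informally rather than in Lean: take $U=\mathbb{N}$, with $P\,x$ meaning "$x$ is even" and $Q\,x$ meaning "$x$ is odd". Then $$\forall x,\ (P\,x\lor Q\,x)\ \text{holds, yet neither}\ \forall x,\ P\,x\ \text{nor}\ \forall x,\ Q\,x,$$ $$(\exists x,\ P\,x)\land(\exists x,\ Q\,x)\ \text{holds, yet}\ \neg\exists x,\ (P\,x\land Q\,x),$$ which refutes forall_disj_as_disj_forall and exists_conj_as_conj_exists_converse respectively.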
$1$ is an $n$th power.
example : False := have : False := _ example : 5 = 3 := have t : True := _ have f : 5 = 6 := _ f example : True := have := True.intro this
Require Export Fiat.Common.Coq__8_4__8_5__Compat. Require Import Coq.Logic.Eqdep_dec Fiat.Computation Fiat.Narcissus.Common.Specs Fiat.Narcissus.Common.ComposeOpt Fiat.Narcissus.Formats.WordOpt Fiat.Narcissus.Formats.EnumOpt Fiat.Narcissus.Formats.Sequence Fiat.Narcissus.BaseFormats Fiat.Narcissus.BinLib.AlignedEncodeMonad. Require Import Coq.Vectors.Vector Coq.ZArith.ZArith Bedrock.Word. Section AlignWord. Context {B : Type}. Context {cache : Cache}. Context {cacheAddNat : CacheAdd cache nat}. Context {monoid : Monoid B}. Context {monoidUnit : QueueMonoidOpt monoid bool}. Variable addD_addD_plus : forall cd n m, addD (addD cd n) m = addD cd (n + m). Lemma If_Opt_Then_Else_DecodeBindOpt {A ResultT ResultT'} : forall (a_opt : option A) (t : A -> Hopefully (ResultT * B * CacheDecode)) (e : Hopefully (ResultT * B * CacheDecode)) (k : _ -> _ -> _ -> Hopefully (ResultT' * B * CacheDecode)), (`(w, b', cd') <- Ifopt a_opt as a Then t a Else e; k w b' cd') = (Ifopt a_opt as a Then (`(w, b', cd') <- t a; k w b' cd') Else (`(w, b', cd') <- e; k w b' cd')). Proof. destruct a_opt; simpl; intros; reflexivity. Qed. Lemma HBind_DecodeBindOpt {A ResultT ResultT'} : forall (a_opt : Hopefully A) (t : A -> Hopefully (ResultT * B * CacheDecode)) (k : _ -> _ -> _ -> Hopefully (ResultT' * B * CacheDecode)), (`(w, b', cd') <- HBind a_opt as a With t a; k w b' cd') = (HBind a_opt as a With (`(w, b', cd') <- t a; k w b' cd')). Proof. destruct a_opt; simpl; intros; reflexivity. Qed. Fixpoint split1' (sz sz' : nat) : word (sz + sz') -> word sz := match sz return word (sz + sz') -> word sz with | 0 => fun _ => WO | S n' => fun w => SW_word (word_split_hd w) (split1' n' sz' (word_split_tl w)) end. Fixpoint split2' (sz sz' : nat) : word (sz + sz') -> word sz' := match sz return word (sz + sz') -> word sz' with | 0 => fun w => w | S n' => fun w => split2' n' sz' (word_split_tl w) end. Definition trans_S_comm : forall n m : nat, S (n + m) = n + S m. Proof. fix trans_S_comm 1. destruct n. - intro; reflexivity. - simpl; intro; destruct (trans_S_comm n m); reflexivity. Defined. Lemma trans_plus_comm : forall n m, n + m = m + n. Proof. fix rec_n 1. destruct n. - fix rec_m 1. destruct m. + reflexivity. + simpl. destruct (rec_m m); reflexivity. - simpl; intro; rewrite (rec_n n m). apply trans_S_comm. Defined. Lemma wtl_eq_rect_S : forall sz sz' w eq_comm eq_comm', wtl (eq_rect (S sz) word w (S sz') eq_comm) = eq_rect sz word (wtl w) sz' eq_comm'. Proof. intros. destruct (shatter_word_S w) as (?, (w', H)); rewrite H in *; clear. revert w' eq_comm. rewrite eq_comm'; clear eq_comm'; intros. unfold eq_rect; simpl. revert w'. pattern eq_comm. apply K_dec_set; eauto; decide equality. Qed. Lemma wtl_eq_rect_comm : forall sz sz' w eq_comm eq_comm', wtl (eq_rect (S (sz + sz')) word w (S (sz' + sz)) eq_comm) = eq_rect (sz + sz') word (wtl w) (sz' + sz) eq_comm'. Proof. intros. eapply wtl_eq_rect_S. Qed. Lemma whd_eq_rect_comm : forall sz sz' w eq_comm, whd (eq_rect (S (sz + sz')) word w (S (sz' + sz)) eq_comm) = whd w. Proof. intros ? ?; rewrite trans_plus_comm; intros. destruct (shatter_word_S w) as (?, (w', H)); rewrite H in *; clear. unfold eq_rect; simpl. pattern eq_comm. apply K_dec_set; eauto; decide equality. Qed. Lemma eq_rect_WS : forall b sz sz' w e e', eq_rect (S sz) _ (WS b w) (S sz') e = WS b (eq_rect sz _ w sz' e'). Proof. simpl; intros. revert e w. rewrite e'; intro; pattern e. apply K_dec_set; eauto; decide equality. Qed. 
Lemma eq_rect_split_tl : forall (sz sz' : nat) (x0 : bool) (w'' : word (sz' + sz)) (e : sz' + sz = sz + sz') (e1 : sz + sz' = sz' + sz), word_split_tl (WS x0 w'') = eq_rect (sz + sz') word (word_split_tl (WS x0 (eq_rect (sz' + sz) word w'' (sz + sz') e))) (sz' + sz) e1. Proof. induction sz; simpl. - intro; rewrite <- plus_n_O; intros. pattern e1. apply K_dec_set; eauto; try decide equality; clear e1. pattern e. apply K_dec_set; eauto; try decide equality; clear e. - intro; rewrite <- (trans_S_comm sz' sz); intros. intros; destruct (shatter_word_S w'') as (?, (w', H)); rewrite H in *; clear H. simpl. rewrite (IHsz sz' x w' (trans_plus_comm _ _) (trans_plus_comm _ _)); repeat f_equal. erewrite !eq_rect_WS; reflexivity. Qed. Lemma split1'_eq : forall sz sz' w, split1' sz sz' w = split2 sz' sz (eq_rect _ _ w _ (trans_plus_comm _ _)). Proof. induction sz; simpl; intros. - induction sz'; simpl. + shatter_word w; reflexivity. + destruct (shatter_word_S w) as (?, (w', H)); rewrite H in *; clear H. erewrite wtl_eq_rect_comm. eapply IHsz'. - intros; destruct (shatter_word_S w) as (?, (w', H)); rewrite H in *; clear H. simpl; rewrite IHsz; clear. generalize (eq_ind_r (fun n : nat => S n = sz' + S sz) (trans_S_comm sz' sz) (trans_plus_comm sz sz')). generalize (trans_plus_comm sz sz'). fold (plus sz sz'). revert x sz w'; induction sz'; simpl. + intros ? ?; rewrite <- (plus_n_O sz); simpl. intros. pattern e. apply K_dec_set; eauto; try decide equality. pattern e0. apply K_dec_set; eauto; try decide equality. unfold eq_rect; simpl. rewrite <- word_split_SW; reflexivity. + intros ? ?. rewrite <- !trans_S_comm. rewrite trans_plus_comm. intros; pattern e. apply K_dec_set; eauto; try decide equality; clear e. intros; destruct (shatter_word_S w') as (?, (w'', H)); rewrite H in *; clear H. rewrite (wtl_eq_rect_S _ _ _ e0 (trans_S_comm _ _)); simpl. assert (S (sz + sz') = sz' + S sz) by lia. replace (eq_rect (S (sz' + sz)) word (WS x0 w'') (sz' + S sz) (trans_S_comm sz' sz)) with (eq_rect (S (sz + sz')) word (WS x0 (eq_rect _ _ w'' _ (trans_plus_comm _ _))) (sz' + S sz) H). erewrite <- (IHsz' x0 sz _ (trans_plus_comm _ _)). repeat f_equal. { generalize (trans_plus_comm sz' sz); intros; clear. revert sz' x0 w'' e; induction sz; simpl. - intro; rewrite <- plus_n_O; intros. destruct e; reflexivity. - intro; rewrite <- (trans_S_comm sz' sz); intros. intros; destruct (shatter_word_S w'') as (?, (w', H)); rewrite H in *; clear H. simpl. rewrite (IHsz sz' x w' (trans_plus_comm _ _)); repeat f_equal. erewrite eq_rect_WS; eauto. } { eapply eq_rect_split_tl. } revert H; clear. revert w''; generalize (trans_S_comm sz' sz). rewrite (trans_plus_comm sz' sz); intros; simpl. destruct e; simpl. rewrite eq_rect_WS with (e' := eq_refl _); reflexivity. Qed. Lemma split2'_eq : forall sz sz' w, split2' sz sz' w = split1 sz' sz (eq_rect _ _ w _ (trans_plus_comm _ _)). Proof. induction sz; simpl; intros. - induction sz'; simpl. + shatter_word w; reflexivity. + destruct (shatter_word_S w) as (?, (w', H)); rewrite H in *; clear H. rewrite whd_eq_rect_comm; simpl. erewrite wtl_eq_rect_comm. rewrite (IHsz' w') at 1; reflexivity. - intros; destruct (shatter_word_S w) as (?, (w', H)); rewrite H in *; clear H. simpl; rewrite IHsz; clear. generalize (eq_ind_r (fun n : nat => S n = sz' + S sz) (trans_S_comm sz' sz) (trans_plus_comm sz sz')). generalize (trans_plus_comm sz sz'). fold (plus sz sz'). revert x sz w'; induction sz'; simpl; eauto. intros. assert (sz + S sz' = sz' + S sz) by lia. rewrite eq_rect_WS with (e' := H); simpl. 
revert w' e0 H; rewrite e; intros. destruct (shatter_word_S w') as (?, (w'', H')); rewrite H' in *; clear H'; simpl. f_equal. assert (S (sz + sz') = sz' + S sz) by lia. replace (eq_rect (S (sz' + sz)) word (WS x0 w'') (sz' + S sz) H) with (eq_rect (S (sz + sz')) word (WS x0 (eq_rect _ _ w'' _ (trans_plus_comm _ _))) (sz' + S sz) H0) by (revert w'' H H0; clear; rewrite (trans_plus_comm sz' sz); intros; simpl; destruct H; simpl; erewrite eq_rect_WS with (e' := eq_refl _); reflexivity). rewrite <- IHsz' with (e := trans_plus_comm _ _); f_equal; simpl. eapply eq_rect_split_tl. Qed. Lemma CollapseWord {ResultT} : forall sz sz' (b : B) (cd : CacheDecode) (k : _ -> _ -> _ -> _ -> Hopefully (ResultT * B * CacheDecode)), (`(w, b', cd') <- decode_word (sz:=sz) b cd; `(w', b', cd') <- decode_word (sz:=sz') b' cd'; k w w' b' cd') = (`(w , b', cd') <- decode_word (sz:=sz + sz') b cd; k (split1' sz sz' w) (split2' sz sz' w) b' cd'). Proof. unfold decode_word; repeat setoid_rewrite If_Opt_Then_Else_DecodeBindOpt; simpl. induction sz; simpl; intros. - rewrite !HBind_DecodeBindOpt; simpl. rewrite addD_addD_plus; reflexivity. - destruct (dequeue_opt b) as [ [? ?] | ]; simpl; eauto. pose proof (IHsz sz' b1 (addD cd 1) (fun w => k (SW_word b0 w))). destruct (decode_word' sz b1) as [ [? ?] | ]; simpl in *. + rewrite !HBind_DecodeBindOpt; rewrite !HBind_DecodeBindOpt in H; simpl in *; rewrite !addD_addD_plus in H; rewrite !addD_addD_plus; simpl in *. rewrite H. destruct (decode_word' (sz + sz') b1) as [ [? ?] | ]; simpl; eauto. repeat f_equal; clear. * induction sz; simpl in *. induction sz'; simpl in *; try shatter_word w0; simpl; eauto. pose proof (shatter_word_S w0); destruct_ex; subst; simpl; eauto. pose proof (shatter_word_S w0); destruct_ex; subst; simpl; eauto. * induction sz; simpl in *. induction sz'; simpl in *; try shatter_word w0; simpl; eauto. pose proof (shatter_word_S w0); destruct_ex; subst; simpl; f_equal; eauto. pose proof (shatter_word_S w0); destruct_ex; subst; simpl; f_equal; eauto. * induction sz; simpl in *. induction sz'; simpl in *; try shatter_word w0; simpl; eauto. pose proof (shatter_word_S w0); destruct_ex; subst; simpl; f_equal; eauto. pose proof (shatter_word_S w0); destruct_ex; subst; simpl; f_equal; eauto. + destruct (decode_word' (sz + sz') b1) as [ [? ?] | ]; simpl in *; eauto. rewrite addD_addD_plus in H; simpl in *. rewrite H; repeat f_equal; clear. * induction sz; simpl in *. induction sz'; simpl in *; try shatter_word w; simpl; eauto. pose proof (shatter_word_S w); destruct_ex; subst; simpl; eauto. pose proof (shatter_word_S w); destruct_ex; subst; simpl; eauto. * induction sz; simpl in *. induction sz'; simpl in *; try shatter_word w; simpl; eauto. pose proof (shatter_word_S w); destruct_ex; subst; simpl; f_equal; eauto. pose proof (shatter_word_S w); destruct_ex; subst; simpl; f_equal; eauto. * induction sz; simpl in *. induction sz'; simpl in *; try shatter_word w; simpl; eauto. pose proof (shatter_word_S w); destruct_ex; subst; simpl; f_equal; eauto. pose proof (shatter_word_S w); destruct_ex; subst; simpl; f_equal; eauto. Qed. Lemma CollapseWord' {ResultT} : forall sz' sz (b : B) (cd : CacheDecode) (k : _ -> _ -> _ -> _ -> Hopefully (ResultT * B * CacheDecode)), (`(w, b', cd') <- decode_word (sz:=sz) b cd; `(w', b', cd') <- decode_word (sz:=sz') b' cd'; k w w' b' cd') = (`(w , b', cd') <- decode_word (sz:=sz + sz') b cd; k (split2 sz' sz (eq_rect _ _ w _ (trans_plus_comm sz sz'))) (split1 sz' sz (eq_rect _ _ w _ (trans_plus_comm sz sz'))) b' cd'). Proof. 
intros; rewrite CollapseWord. destruct (decode_word b cd) as [ [ [? ?] ?] | ]; simpl. rewrite split2'_eq, split1'_eq; eauto. reflexivity. Qed. Lemma CollapseWord'' {ResultT} : forall sz' sz (b : B) (cd : CacheDecode) (k : _ -> _ -> _ -> _ -> Hopefully (ResultT * B * CacheDecode)), (`(w, b', cd') <- decode_word (sz:=sz) b cd; `(w', b', cd') <- decode_word (sz:=sz') b' cd'; k w w' b' cd') = (`(w , b', cd') <- decode_word (sz:=sz' + sz) b cd; k (split2 sz' sz w) (split1 sz' sz w) b' cd'). Proof. intros; rewrite CollapseWord'. replace (decode_word (sz:=sz' + sz) b cd) with (eq_rect _ (fun n => Hopefully (word n * B * _)) (decode_word (sz:=sz + sz') b cd) _ (trans_plus_comm _ _)). - destruct (decode_word b cd) as [ [ [? ?] ?] | ]; simpl. + revert w; rewrite (trans_plus_comm sz sz'); simpl; eauto. + rewrite (trans_plus_comm sz sz'); simpl; eauto. - rewrite (trans_plus_comm sz sz'); simpl; reflexivity. Qed. Lemma CollapseEnumWord {ResultT} : forall sz' sz n (b : B) (tb : Vector.t (word sz) (S n)) (cd : CacheDecode) (k : _ -> _ -> _ -> _ -> Hopefully (ResultT * B * CacheDecode)), (`(w, b', cd') <- decode_enum (sz:=sz) tb b cd; `(w', b', cd') <- decode_word (sz:=sz') b' cd'; k w w' b' cd') ~= (`(w , b', cd') <- decode_word (sz:=sz' + sz) b cd; HBind (word_indexed (split2 sz' sz w) tb) as idx With k idx (split1 sz' sz w) b' cd' ). Proof. intros. unfold decode_enum. rewrite <- CollapseWord'' with (b0 := b) (cd0 := cd) (k0 := fun w1 w2 b cd => HBind (word_indexed w1 tb) as idx With k idx w2 b cd). unfold decode_word; repeat setoid_rewrite HBind_DecodeBindOpt; simpl. destruct (decode_word' sz b) as [ [? ?] | ] eqn: ?; simpl; try reflexivity. destruct (word_indexed w tb) as [ ? | ] eqn: ?; simpl; try reflexivity. destruct (decode_word' sz' b0) as [ [? ?] | ] eqn: ?; simpl; try reflexivity. apply Error_eq. Qed. Variable addE_addE_plus : forall (ce : CacheFormat) (n m : nat), addE (addE ce n) m = addE ce (n + m). Lemma format_word_S {n} : forall (w : word (S n)) (bs : B), encode_word' (S n) w bs = encode_word' n (word_split_tl w) (enqueue_opt (word_split_hd w) bs). Proof. intros; pose proof (shatter_word_S w); destruct_ex; subst. simpl. clear; revert x; induction x0; simpl; intros. - simpl; reflexivity. - rewrite IHx0. reflexivity. Qed. Lemma word_split_hd_SW_word {n} : forall b (w : word n), word_split_hd (SW_word b w) = b. Proof. induction w; simpl; intros; eauto. Qed. Lemma word_split_tl_SW_word {n} : forall b (w : word n), word_split_tl (SW_word b w) = w. Proof. induction w; simpl; intros; eauto. f_equal; eauto. Qed. Lemma CollapseFormatWord : forall {sz sz'} (w : word sz) (w' : word sz') k ce, refine (((format_word w) ThenC (format_word w') ThenC k) ce) (((format_word (combine w' w)) ThenC k) ce). Proof. intros; unfold compose, format_word, Bind2. autorewrite with monad laws. simpl; rewrite addE_addE_plus. rewrite Plus.plus_comm; f_equiv; intro. rewrite mappend_assoc. destruct a; simpl. f_equiv; f_equiv; f_equiv. revert sz' w'; induction w; simpl; intros. - rewrite mempty_left. generalize mempty; clear; induction w'; intros. + reflexivity. + simpl; rewrite IHw'; reflexivity. - rewrite !enqueue_opt_format_word. replace (encode_word' (sz' + S n) (combine w' (WS b0 w)) mempty) with (encode_word' (S sz' + n) (combine (SW_word b0 w') w) mempty). + rewrite <- IHw. simpl; rewrite format_word_S. rewrite <- mappend_assoc, word_split_tl_SW_word, word_split_hd_SW_word. f_equal. clear; induction w'. * simpl; rewrite mempty_right; reflexivity. * simpl; rewrite !enqueue_opt_format_word. rewrite <- IHw'. 
rewrite mappend_assoc; reflexivity. + clear; revert n w; induction w'; intros. * simpl; eauto. * simpl; rewrite <- IHw'; reflexivity. Qed. Lemma CollapseFormatWord' : forall {sz sz'} (w : word sz) (w' : word sz') k ce, refine (((format_word (combine w' w)) ThenC k) ce) (((format_word w) ThenC (format_word w') ThenC k) ce). Proof. intros; unfold compose, format_word, Bind2. autorewrite with monad laws. simpl; rewrite addE_addE_plus. f_equiv. clear; rewrite Plus.plus_comm; reflexivity. intro; rewrite mappend_assoc. destruct a; simpl. f_equiv; f_equiv; f_equiv. revert sz' w'; induction w; simpl; intros. - rewrite mempty_left. generalize mempty; clear; induction w'; intros. + reflexivity. + simpl; rewrite IHw'; reflexivity. - rewrite !enqueue_opt_format_word. replace (encode_word' (sz' + S n) (combine w' (WS b0 w)) mempty) with (encode_word' (S sz' + n) (combine (SW_word b0 w') w) mempty). + rewrite IHw. simpl; rewrite format_word_S. rewrite <- mappend_assoc, word_split_tl_SW_word, word_split_hd_SW_word. f_equal. clear; induction w'. * simpl; rewrite mempty_right; reflexivity. * simpl; rewrite !enqueue_opt_format_word. rewrite IHw'. rewrite mappend_assoc; reflexivity. + clear; revert n w; induction w'; intros. * simpl; eauto. * simpl; rewrite <- IHw'; reflexivity. Qed. Lemma format_SW_word {n} : forall b (w : word n) ce, refine (format_word (SW_word b w) ce) (`(bs, ce') <- format_word w (addE ce 1); ret (mappend (enqueue_opt b mempty) bs, ce')). Proof. induction n; simpl; intros. - shatter_word w; simpl. unfold format_word; simpl. autorewrite with monad laws. simpl; rewrite addE_addE_plus; rewrite mempty_right; reflexivity. - pose proof (shatter_word_S w); destruct_ex; subst. simpl. unfold format_word; simpl. autorewrite with monad laws; simpl. assert (computes_to (`(bs, ce') <- ret (encode_word' n x0 mempty, addE (addE ce 1) n); ret (mappend (enqueue_opt b mempty) bs, ce')) (mappend (enqueue_opt b mempty) (encode_word' n x0 mempty), addE (addE ce 1) n)) by repeat computes_to_econstructor. pose proof (IHn b x0 ce _ H). unfold format_word in H0. computes_to_inv; inversion H0; subst. rewrite H2. rewrite enqueue_mappend_opt. rewrite addE_addE_plus. reflexivity. Qed. (* Lemma If_Opt_Then_Else_DecodeBindOpt_swap {A C ResultT : Type} *) (* : forall (a_opt : option A) *) (* (b : B) *) (* (cd : CacheDecode) *) (* (dec_c : B -> CacheDecode -> Hopefully (C * B * CacheDecode)) *) (* (k : A -> C -> B -> CacheDecode -> Hopefully (ResultT * B * CacheDecode)), *) (* (`(a, b', cd') <- Ifopt a_opt as a Then Ok (a, b, cd) Else None; *) (* `(c, b', cd') <- dec_c b' cd'; *) (* k a c b' cd') = *) (* (`(c, b', cd') <- dec_c b cd; *) (* `(a, b', cd') <- Ifopt a_opt as a Then Ok (a, b', cd') Else None; *) (* k a c b' cd'). *) (* Proof. *) (* destruct a_opt; simpl; intros; eauto. *) (* destruct (dec_c b cd) as [ [ [? ?] ? ] | ]; reflexivity. *) (* Qed. *) Lemma If_Then_Else_Bind {sz} {C ResultT : Type} : forall (w w' : word sz) (b : B) (cd : CacheDecode) (dec_c : B -> CacheDecode -> Hopefully (C * B * CacheDecode)) (k : C -> B -> CacheDecode -> Hopefully (ResultT * B * CacheDecode)) (e e': CoderError), (if weq w w' then `(c, b', cd') <- dec_c b cd; k c b' cd' else Error e) ~= (`(c, b', cd') <- dec_c b cd; if weq w w' then k c b' cd' else Error e'). Proof. intros; find_if_inside; eauto; destruct (dec_c b cd) as [ [ [? ?] ? ] | ]; simpl; try reflexivity; apply Error_eq. Qed. End AlignWord. Require Import Fiat.Narcissus.BinLib.AlignedByteString Fiat.Narcissus.BinLib.AlignedDecodeMonad. Section AlignEncodeWord. 
Context {cache : Cache}. Context {cacheAddNat : CacheAdd cache nat}. Variable addD_addD_plus : forall cd n m, addD (addD cd n) m = addD cd (n + m). Lemma aligned_format_char_eq : forall (w : word 8) cd, refine (format_word (monoidUnit := ByteString_QueueMonoidOpt) w cd) (ret (build_aligned_ByteString (Vector.cons _ w _ (Vector.nil _)), addE cd 8)). Proof. intros; shatter_word w; simpl. unfold format_word; simpl. compute. intros. computes_to_inv; subst. match goal with |- computes_to (ret ?c) ?v => replace c with v end. computes_to_econstructor. f_equal. eapply ByteString_f_equal; simpl. instantiate (1 := eq_refl _). rewrite <- !Eqdep_dec.eq_rect_eq_dec; eauto using Peano_dec.eq_nat_dec. unfold ByteBuffer.t; erewrite eq_rect_Vector_cons; repeat f_equal. instantiate (1 := eq_refl _); reflexivity. Unshelve. reflexivity. Qed. Local Open Scope AlignedDecodeM_scope. Lemma AlignedDecodeChar {C} {numBytes} : forall (v : ByteBuffer.t (S numBytes)) (t : (word 8 * ByteString * CacheDecode) -> Hopefully C) cd, HBind (decode_word (monoidUnit := ByteString_QueueMonoidOpt) (sz := 8) (build_aligned_ByteString v) cd) as w With t w = LetIn (Vector.nth v Fin.F1) (fun w => t (w, build_aligned_ByteString (snd (Vector_split 1 _ v)), addD cd 8)). Proof. unfold LetIn; intros. unfold decode_word, WordOpt.decode_word. rewrite aligned_decode_char_eq; simpl. f_equal. pattern numBytes, v; apply Vector.caseS; simpl; intros. reflexivity. Qed. Lemma AlignedDecodeCharM : DecodeMEquivAlignedDecodeM (decode_word (monoidUnit := ByteString_QueueMonoidOpt) (sz := 8)) (fun numBytes => GetCurrentByte). Proof. unfold DecodeMEquivAlignedDecodeM, BindAlignedDecodeM, DecodeBindOpt2, BindOpt; intros; unfold decode_word, WordOpt.decode_word. split; [ | split ]; intros. - pattern numBytes_hd, v; eapply Vector.caseS; simpl; intros. unfold GetCurrentByte, nth_opt; simpl. destruct (Vector_nth_opt t n); simpl; eauto. - destruct (decode_word' 8 b) as [ [? ?] | ] eqn: ?; simpl in H; try discriminate. eapply decode_word'_lt in Heqh; unfold le_B, bin_measure in Heqh; simpl in Heqh. unfold lt_B in Heqh; simpl in Heqh. injections; lia. - destruct v. + simpl; intuition; discriminate. + rewrite aligned_decode_char_eq; simpl; intuition. * unfold GetCurrentByte; injections; simpl. clear; induction n; simpl; eauto. * injections. replace (match numBytes (build_aligned_ByteString v) with | 0 => S n | S l => n - l end) with 1 by (unfold numBytes; simpl; clear; induction n; lia). setoid_rewrite <- build_aligned_ByteString_append. eexists (Vector.cons _ c _ (@Vector.nil _)); reflexivity. Qed. Lemma SW_word_append : forall b sz (w : word sz) sz' (w' : word sz'), SW_word b (Core.append_word w w') = eq_rect _ word (Core.append_word w (SW_word b w')) _ (sym_eq (plus_n_Sm _ _)). Proof. induction w; simpl; intros. - apply Eqdep_dec.eq_rect_eq_dec; auto with arith. - erewrite <- !WS_eq_rect_eq. rewrite IHw; reflexivity. Qed. Lemma decode_word_plus': forall (n m : nat) (v : ByteString), decode_word' (n + m) v = (`(w, v') <- decode_word' n v; `(w', v'') <- decode_word' m v'; Ok (eq_rect _ _ (Core.append_word w' w) _ (plus_comm_transparent _ _), v'')). Proof. induction n. - simpl; intros. destruct (decode_word' m v) as [ [? ?] | ]; simpl; repeat f_equal. revert w; clear. induction w; simpl; eauto. rewrite IHw at 1. rewrite Core.succ_eq_rect; f_equal. apply Eqdep_dec.UIP_dec; auto with arith. - simpl; intros. simpl; rewrite !DecodeBindOpt_assoc; destruct (ByteString_dequeue v) as [ [? ?] | ]; try reflexivity. simpl; rewrite !DecodeBindOpt_assoc. rewrite IHn. 
simpl; rewrite !DecodeBindOpt_assoc. destruct (decode_word' n b0) as [ [? ?] | ]; try reflexivity. simpl; rewrite !DecodeBindOpt_assoc. destruct (decode_word' m b1) as [ [? ?] | ]; try reflexivity. simpl; f_equal; f_equal; clear. revert b n w; induction w0; simpl; intros. + apply SW_word_eq_rect_eq. + erewrite !SW_word_eq_rect_eq; simpl. erewrite <- !WS_eq_rect_eq. f_equal. rewrite SW_word_append. rewrite <- Equality.transport_pp. f_equal. Unshelve. lia. lia. Qed. Lemma AlignedDecodeBindCharM {C : Type} (t : word 8 -> DecodeM (C * ByteString) ByteString) (t' : word 8 -> forall {numBytes}, AlignedDecodeM C numBytes) : (forall b, DecodeMEquivAlignedDecodeM (t b) (@t' b)) -> DecodeMEquivAlignedDecodeM (fun v cd => `(a, b0, cd') <- decode_word (monoidUnit := ByteString_QueueMonoidOpt) (sz := 8) v cd; t a b0 cd') (fun numBytes => b <- GetCurrentByte; t' b). Proof. intro; eapply Bind_DecodeMEquivAlignedDecodeM. apply AlignedDecodeCharM. intros; eapply H. Qed. Lemma AlignedDecodeNCharM (addD_O : forall cd, addD cd 0 = cd) {m} : DecodeMEquivAlignedDecodeM (decode_word (sz := m * 8)) (fun numBytes => GetCurrentBytes m). Proof. induction m. - unfold decode_word; simpl; pose proof (Return_DecodeMEquivAlignedDecodeM WO). eapply DecodeMEquivAlignedDecodeM_trans; intros; try rewrite addD_O; try higher_order_reflexivity. eapply H. - Local Arguments decode_word' : simpl never. Local Arguments plus : simpl never. unfold decode_word; simpl. eapply DecodeMEquivAlignedDecodeM_trans; intros; try eapply AlignedDecodeMEquiv_refl. + eapply AlignedDecodeBindCharM; intros. eapply Bind_DecodeMEquivAlignedDecodeM. eassumption. intros. pose proof (@Return_DecodeMEquivAlignedDecodeM); eapply H. + intros; unfold mult; simpl; rewrite decode_word_plus'; simpl; fold mult; simpl. unfold decode_word. destruct (decode_word' 8 b) as [ [? ?] | ]; simpl; try reflexivity. destruct (decode_word' (m * 8) b0) as [ [? ?] | ]; simpl; try reflexivity. rewrite addD_addD_plus; eauto; reflexivity. Qed. Lemma AlignedDecodeBindCharM' {A C : Type} (t : word 8 -> DecodeM (C * ByteString) ByteString) (t' : word 8 -> forall {numBytes}, AlignedDecodeM C numBytes) decode_w : (forall v cd, decode_word (monoidUnit := ByteString_QueueMonoidOpt) (sz := 8) v cd = decode_w v cd) -> (forall b, DecodeMEquivAlignedDecodeM (t b) (@t' b)) -> DecodeMEquivAlignedDecodeM (fun v cd => `(a, b0, cd') <- decode_w v cd; t a b0 cd') (fun numBytes => b <- GetCurrentByte; t' b)%AlignedDecodeM. Proof. intros; eapply Bind_DecodeMEquivAlignedDecodeM; eauto. eapply DecodeMEquivAlignedDecodeM_trans; eauto. eapply AlignedDecodeCharM; reflexivity. simpl. intros; eapply AlignedDecodeMEquiv_refl. Qed. Lemma decode_unused_word_plus': forall (n m : nat) (v : ByteString), decode_unused_word' (n + m) v = (`(w, v') <- decode_unused_word' n v; `(w', v'') <- decode_unused_word' m v'; Ok ((), v'')). Proof. induction n. - unfold plus; simpl; intros. destruct (decode_unused_word' m v) as [ [? ?] | ]; simpl; repeat f_equal. destruct u; eauto. - simpl; intros. unfold decode_unused_word' in *; simpl. fold plus. destruct (ByteString_dequeue v) as [ [? ?] | ]; try reflexivity. simpl. pose proof (IHn m b0). destruct (WordOpt.monoid_dequeue_word (n + m) b0) as [ [? ?] | ]; simpl in *; try congruence. simpl in *. destruct (WordOpt.monoid_dequeue_word n b0) as [ [? ?] | ]; simpl in *; try congruence. destruct (WordOpt.monoid_dequeue_word n b0) as [ [? ?] | ]; simpl in *; try congruence. Qed. 
Lemma aligned_decode_unused_char_eq {numBytes} : forall (v : Vector.t _ (S numBytes)), WordOpt.decode_unused_word' (monoidUnit := ByteString_QueueMonoidOpt) 8 (build_aligned_ByteString v) = Ok ((), build_aligned_ByteString (Vector.tl v)). Proof. unfold decode_unused_word'; simpl; intros. etransitivity. apply f_equal with (f := fun z => hbind z _ ). eapply DecodeBindOpt_under_bind; intros; set_evars; rewrite !DecodeBindOpt_assoc. repeat (unfold H; apply DecodeBindOpt_under_bind; intros; set_evars; rewrite !DecodeBindOpt_assoc). unfold H5; higher_order_reflexivity. simpl. pattern numBytes, v; eapply Vector.caseS; intros; simpl; clear v numBytes. replace (build_aligned_ByteString t) with (ByteString_enqueue_ByteString ByteString_id (build_aligned_ByteString t)). unfold Core.char in h. shatter_word h. pose proof (@dequeue_mappend_opt _ _ _ ByteString_QueueMonoidOpt). rewrite build_aligned_ByteString_cons; simpl. simpl in H7. erewrite H7 with (t := x6) (b' := {| front := WS x (WS x0 (WS x1 (WS x2 (WS x3 (WS x4 (WS x5 WO)))))); byteString := Vector.nil _ |}); simpl. erewrite H7 with (t := x5) (b' := {| front := WS x (WS x0 (WS x1 (WS x2 (WS x3 (WS x4 WO))))); byteString := Vector.nil _ |}); simpl. erewrite H7 with (t := x4) (b' := {| front := WS x (WS x0 (WS x1 (WS x2 (WS x3 WO)))); byteString := Vector.nil _ |}); simpl. erewrite H7 with (t := x3) (b' := {| front := WS x (WS x0 (WS x1 (WS x2 WO))); byteString := Vector.nil _ |}); simpl. erewrite H7 with (t := x2) (b' := {| front := WS x (WS x0 (WS x1 WO)); byteString := Vector.nil _ |}); simpl. erewrite H7 with (t := x1) (b' := {| front := WS x (WS x0 WO); byteString := Vector.nil _ |}); simpl. erewrite H7 with (t := x0) (b' := {| front := WS x WO; byteString := Vector.nil _ |}); simpl. erewrite H7 with (t := x) (b' := {| front := WO; byteString := Vector.nil _ |}); simpl. reflexivity. unfold dequeue_opt. simpl. compute; repeat f_equal; apply Core.le_uniqueness_proof. compute; repeat f_equal; apply Core.le_uniqueness_proof. compute; repeat f_equal; apply Core.le_uniqueness_proof. compute; repeat f_equal; apply Core.le_uniqueness_proof. compute; repeat f_equal; apply Core.le_uniqueness_proof. compute; repeat f_equal; apply Core.le_uniqueness_proof. compute; repeat f_equal; apply Core.le_uniqueness_proof. unfold build_aligned_ByteString. unfold ByteString_dequeue; simpl. repeat f_equal; apply Core.le_uniqueness_proof. apply (@mempty_left _ ByteStringQueueMonoid). Qed. Lemma aligned_decode_unused_char_eq' {numBytes} : forall (v : Vector.t _ (S numBytes)) env, WordOpt.decode_unused_word (sz := 8) (monoidUnit := ByteString_QueueMonoidOpt) (build_aligned_ByteString v) env = Ok ((), build_aligned_ByteString (Vector.tl v), addD env 8). Proof. unfold decode_unused_word; simpl; intros. etransitivity. unfold Compose_Decode, DecodeBindOpt. unfold BindOpt. eapply AlignedDecodeChar. pattern numBytes, v. eapply Vector.caseS; simpl; intros. reflexivity. Qed. Lemma AlignedDecodeUnusedCharM : DecodeMEquivAlignedDecodeM (decode_unused_word (sz := 8)) (fun numBytes => SkipCurrentByte). Proof. unfold DecodeMEquivAlignedDecodeM, BindAlignedDecodeM, DecodeBindOpt2, BindOpt, Compose_Decode; intros; unfold WordOpt.decode_word, Compose_Decode. split; [ | split ]; intros. - pattern numBytes_hd, v; eapply Vector.caseS; simpl; intros. unfold SkipCurrentByte, nth_opt; simpl. destruct (Vector_nth_opt t n); simpl; eauto. - unfold decode_unused_word, Compose_Decode, decode_word in H. destruct (decode_word' 8 b) as [ [? ?] | ] eqn: ?; simpl in H; try discriminate. injections. 
eapply decode_word'_lt in Heqh; unfold le_B, bin_measure in Heqh; simpl in Heqh. unfold lt_B in Heqh; simpl in Heqh. injections; lia. - destruct v. + simpl; intuition; discriminate. + rewrite aligned_decode_unused_char_eq'; simpl; intuition. * unfold SkipCurrentByte; injections; simpl. clear; induction n; simpl; eauto. * injections. replace (match numBytes (build_aligned_ByteString v) with | 0 => S n | S l => n - l end) with 1 by (unfold numBytes; simpl; clear; induction n; lia). setoid_rewrite <- build_aligned_ByteString_append. eexists (Vector.cons _ h _ (@Vector.nil _)); reflexivity. Qed. Lemma AlignedDecodeNUnusedCharM (addD_O : forall cd, addD cd 0 = cd) {m} : DecodeMEquivAlignedDecodeM (decode_unused_word (sz := m * 8)) (fun numBytes => SkipCurrentBytes m). Proof. induction m. - unfold decode_unused_word; simpl; pose proof (@Return_DecodeMEquivAlignedDecodeM). eapply DecodeMEquivAlignedDecodeM_trans; intros; try rewrite addD_O. eapply H. unfold Compose_Decode, DecodeBindOpt, BindOpt. simpl; rewrite addD_O; reflexivity. simpl; reflexivity. - Local Arguments decode_word' : simpl never. Local Arguments plus : simpl never. unfold decode_unused_word; simpl. eapply DecodeMEquivAlignedDecodeM_trans; intros; try eapply AlignedDecodeMEquiv_refl. (*intros; unfold mult; simpl; rewrite decode_unused_word_plus'; simpl; fold mult. *) 2: { constructor; left. instantiate (1 := fun b cd => `(w, v', cd') <- decode_unused_word (sz := 8) b cd; `(w', v'', cd') <- decode_unused_word (sz := m * 8) v' cd'; Ok ((), v'', cd')); simpl. unfold decode_unused_word, Compose_Decode, DecodeBindOpt, BindOpt. unfold decode_word, decode_word'; simpl in *. destruct (ByteString_dequeue b) as [ [? ?] | ]; simpl in *; try discriminate; eauto. destruct (ByteString_dequeue b1) as [ [? ?] | ]; simpl in *; try discriminate; eauto. destruct (ByteString_dequeue b3) as [ [? ?] | ]; simpl in *; try discriminate; eauto. destruct (ByteString_dequeue b5) as [ [? ?] | ]; simpl in *; try discriminate; eauto. destruct (ByteString_dequeue b7) as [ [? ?] | ]; simpl in *; try discriminate; eauto. destruct (ByteString_dequeue b9) as [ [? ?] | ]; simpl in *; try discriminate; eauto. rewrite !DecodeBindOpt_assoc. destruct (ByteString_dequeue b11) as [ [? ?] | ]; simpl in *; try discriminate; eauto. destruct (ByteString_dequeue b13) as [ [? ?] | ]; simpl in *; try discriminate; eauto. destruct (ByteString_dequeue b15) as [ [? ?] | ]; simpl in *; try discriminate; eauto; intros; rewrite !DecodeBindOpt_assoc. simpl; match goal with |- context [DecodeBindOpt ?z] => destruct z as [ [? ?] | ] eqn: ? ; simpl in *; try discriminate end. rewrite addD_addD_plus; reflexivity. eauto. simpl. match goal with |- context [DecodeBindOpt ?z] => idtac z; destruct z as [ [? ?] | ] eqn: ? ; simpl in *; try discriminate end. rewrite addD_addD_plus; reflexivity. eauto. } repeat (intros; eapply Bind_DecodeMEquivAlignedDecodeM); eauto using @Return_DecodeMEquivAlignedDecodeM. eapply AlignedDecodeUnusedCharM. Qed. Lemma AlignedDecodeBindUnusedCharM {C : Type} (t : unit -> DecodeM (C * ByteString) ByteString) (t' : unit -> forall {numBytes}, AlignedDecodeM C numBytes) : (DecodeMEquivAlignedDecodeM (t ()) (@t' ())) -> DecodeMEquivAlignedDecodeM (fun v cd => `(a, b0, cd') <- decode_unused_word (sz := 8) (monoidUnit := ByteString_QueueMonoidOpt) v cd; t a b0 cd') (fun numBytes => b <- SkipCurrentByte; @t' b numBytes)%AlignedDecodeM. Proof. intro; eapply Bind_DecodeMEquivAlignedDecodeM; eauto using AlignedDecodeUnusedCharM. intro; destruct a; eauto. Qed. 
Lemma AlignedFormatChar {numBytes} : forall (w : word 8) ce ce' (c : _ -> Comp _) (v : Vector.t _ numBytes), refine (c (addE ce 8)) (ret (build_aligned_ByteString v, ce')) -> refine (((format_word (monoidUnit := ByteString_QueueMonoidOpt) w) ThenC c) ce) (ret (build_aligned_ByteString (Vector.cons _ w _ v), ce')). Proof. unfold compose; intros. unfold Bind2. setoid_rewrite aligned_format_char_eq; simplify with monad laws. simpl; rewrite H; simplify with monad laws. simpl. rewrite <- build_aligned_ByteString_append. reflexivity. Qed. Lemma AlignedDecode2Char {C} {numBytes} : forall (v : ByteBuffer.t (S (S numBytes))) (t : (word 16 * ByteString * CacheDecode) -> Hopefully C) cd, (HBind (decode_word (monoidUnit := ByteString_QueueMonoidOpt) (sz := 16) (build_aligned_ByteString v) cd) as w With t w ) = Let n := Core.append_word (Vector.nth v (Fin.FS Fin.F1)) (Vector.nth v Fin.F1) in t (n, build_aligned_ByteString (snd (Vector_split 2 _ v)), addD cd 16). Proof. unfold LetIn; intros. unfold decode_word, WordOpt.decode_word. match goal with |- context[HBind ?Z as _ With _] => replace Z with (let (v', v'') := Vector_split 2 numBytes v in Ok (VectorByteToWord v', build_aligned_ByteString v'')) by (symmetry; apply (@aligned_decode_char_eq' _ 1 v)) end. unfold Vector_split, If_Opt_Then_Else, If_Opt_Then_Else, hbind. f_equal. rewrite !Vector_nth_tl, !Vector_nth_hd. erewrite VectorByteToWord_cons. rewrite <- !Eqdep_dec.eq_rect_eq_dec; eauto using Peano_dec.eq_nat_dec. f_equal. erewrite VectorByteToWord_cons. rewrite <- !Eqdep_dec.eq_rect_eq_dec; eauto using Peano_dec.eq_nat_dec. Unshelve. lia. lia. Qed. Lemma decode_word_aligned_ByteString_overflow {sz'} : forall (b : t (word 8) sz') {sz : nat} (cd : CacheDecode), lt sz' sz -> is_error (decode_word (sz := 8 * sz) (build_aligned_ByteString b) cd). Proof. induction b; intros. - unfold build_aligned_ByteString; simpl. inversion H; subst; reflexivity. - destruct sz; try lia. apply lt_S_n in H. pose proof (IHb _ cd H). unfold decode_word, WordOpt.decode_word. rewrite <- mult_n_Sm, plus_comm. rewrite decode_word_plus'. rewrite (@aligned_decode_char_eq' _ 0). simpl. unfold build_aligned_ByteString, decode_word in *. simpl in H0. first [destruct (decode_word' (sz + (sz + (sz + (sz + (sz + (sz + (sz + (sz + 0)))))))) {| padding := 0; front := WO; paddingOK := build_aligned_ByteString_subproof (*n b *); numBytes := n; byteString := b |}) as [ [? ?] | ] | destruct (decode_word' (sz + (sz + (sz + (sz + (sz + (sz + (sz + (sz + 0)))))))) {| padding := 0; front := WO; paddingOK := build_aligned_ByteString_subproof n b; numBytes := n; byteString := b |}) as [ [? ?] | ]] ; simpl in *; try congruence. Qed. Lemma AlignedDecodeBind2CharM {C : Type} (t : word 16 -> DecodeM (C * ByteString) ByteString) (t' : word 16 -> forall {numBytes}, AlignedDecodeM C numBytes) : (forall b, DecodeMEquivAlignedDecodeM (t b) (@t' b)) -> DecodeMEquivAlignedDecodeM (fun v cd => `(a, b0, cd') <- decode_word (monoidUnit := ByteString_QueueMonoidOpt) (sz := 16) v cd; t a b0 cd') (fun numBytes => b <- GetCurrentByte; b' <- GetCurrentByte; w <- return (Core.append_word b' b); t' w). Proof. intros; eapply DecodeMEquivAlignedDecodeM_trans with (bit_decoder1 := (fun (v : ByteString) (cd : CacheDecode) => `(w1, bs, cd') <- decode_word (sz := 8) v cd; `(w2, bs, cd') <- decode_word (sz := 8) bs cd'; t (Core.append_word w2 w1) bs cd')). eapply AlignedDecodeBindCharM; intros. eapply AlignedDecodeBindCharM; intros. eapply DecodeMEquivAlignedDecodeM_trans. eapply (H _). intros; reflexivity. 
intros; higher_order_reflexivity. intros. unfold decode_word. rewrite (decode_word_plus' 8 8). unfold DecodeBindOpt2, DecodeBindOpt, BindOpt, If_Opt_Then_Else, hbind. destruct (decode_word' 8 b) as [ [? ?] | ]; try reflexivity. destruct (decode_word' 8 b0) as [ [? ?] | ]; try reflexivity. rewrite <- eq_rect_eq_dec; eauto using eq_nat_dec. rewrite addD_addD_plus; reflexivity. intros; reflexivity. Qed. Lemma CorrectAlignedEncoderForFormatChar_f {S} (proj : S -> word 8) : CorrectAlignedEncoder (Projection_Format format_word proj) (fun sz v idx s => SetCurrentByte v idx (proj s)). Proof. intros. unfold CorrectAlignedEncoder. eexists (Compose_Encode (fun c env => Ok ((build_aligned_ByteString (cons (word 8) c 0 (nil (word 8))), addE env 8))) (fun s => Ok (proj s))); split; [ | split]. - unfold Compose_Encode, Projection_Format, Compose_Format; intros. split; intros. + setoid_rewrite aligned_format_char_eq. injections. intros ? ?; apply unfold_computes; eexists; intuition eauto. + inversion H. - unfold Compose_Encode; simpl; intros. injections; reflexivity. - unfold Compose_Encode, EncodeMEquivAlignedEncodeM; intros; injections; intuition; simpl. + injections; simpl; unfold SetCurrentByte. unfold plus; fold plus. destruct (Nat.ltb idx (idx + Datatypes.S m)) eqn: ? ; try lia. * eexists (Vector.append v1 (Vector.cons _ (proj s) _ v2)); split. { repeat f_equal; try lia. clear; simpl in v. revert v v2; induction v1; intros. - replace v with (Vector.cons _ (Vector.hd v) _ (Vector.tl v)). + generalize (Vector.tl v); apply Vector.case0; reflexivity. + revert v; generalize 0; apply Vector.caseS; simpl; intros; reflexivity. - simpl; rewrite IHv1; reflexivity. } { rewrite !ByteString_enqueue_ByteString_assoc. rewrite <- !build_aligned_ByteString_append. assert (idx + 1 + m = idx + Datatypes.S m) by lia. pose proof (Vector_append_assoc _ _ _ H v1 (Vector.cons (word 8) (proj s) 0 (Vector.nil (word 8))) v2). simpl in H1; unfold Core.char in *; unfold plus in *; fold plus in *; rewrite H1. generalize (append (append v1 (Vector.cons (word 8) (proj s) 0 (Vector.nil (word 8)))) v2). rewrite H; reflexivity. } * destruct (le_lt_dec (idx + Datatypes.S m) idx); try lia. apply Nat.ltb_lt in l; congruence. + injections; simpl; unfold SetCurrentByte. destruct (Nat.ltb idx numBytes') eqn: ?; try constructor. apply Nat.ltb_lt in Heqb. unfold build_aligned_ByteString in H0. unfold length_ByteString in H0; simpl padding in H0; simpl numBytes in H0. lia. + injections; simpl in *; lia. Defined. Lemma CorrectAlignedEncoderForFormatChar : CorrectAlignedEncoder (format_word (monoidUnit := ByteString_QueueMonoidOpt)) (@SetCurrentByte _ _). Proof. replace (@SetCurrentByte _ _) with (fun (sz : nat) v idx s => SetCurrentByte (n := sz) v idx (id s)). eapply refine_CorrectAlignedEncoder. 2: eapply (CorrectAlignedEncoderForFormatChar_f id). split; intros. + unfold Projection_Format, Compose_Format. intros v Comp_v; rewrite unfold_computes in Comp_v; destruct_ex; intuition. subst; eauto. + intro; apply (H v). unfold Projection_Format, Compose_Format in *. rewrite unfold_computes; eexists. subst; eauto. + eapply functional_extensionality_dep; intros. repeat (eapply functional_extensionality; intros). reflexivity. Defined. Lemma CorrectAlignedEncoderForFormatUnusedWord {S} : CorrectAlignedEncoder (format_unused_word 8 (monoidUnit := ByteString_QueueMonoidOpt)) (fun sz v idx (s : S) => SetCurrentByte v idx (wzero 8)). Proof. intros; eapply refine_CorrectAlignedEncoder; eauto using (CorrectAlignedEncoderForFormatChar_f (fun _ => wzero 8)). 
simpl; split; intros. + unfold format_unused_word, Projection_Format, Compose_Format; simpl. intros ? ?. rewrite unfold_computes in *. destruct_ex; split_and; subst. eexists; split; eauto. rewrite unfold_computes; eauto. + unfold format_unused_word, Projection_Format, Compose_Format; simpl. intros ?. eapply (H _). unfold format_unused_word, Projection_Format, Compose_Format; simpl. destruct_ex; split_and; subst. rewrite unfold_computes; eauto. eexists _; split; eauto. unfold format_word; eauto. Defined. Lemma CorrectAlignedEncoderForProjection_Format {S S'} (f : S -> S') (format : FormatM S' ByteString) (encoder : forall n, AlignedEncodeM n) : CorrectAlignedEncoder format encoder -> CorrectAlignedEncoder (Projection_Format format f) (fun sz v idx (s : S) => encoder sz v idx (f s)). Proof. intros; eapply refine_CorrectAlignedEncoder. split; intros. - rewrite refine_Projection_Format at 1. higher_order_reflexivity. - intro. eapply H. apply refine_Projection_Format in H0. eauto. - destruct X; intuition. eexists (fun s env => x (f s) env); intuition eauto. eapply H; eauto. eapply H; eauto. unfold EncodeMEquivAlignedEncodeM in *; intros. specialize (H2 env (f s) idx); intuition eauto. Defined. Lemma CollapseCorrectAlignedEncoderFormatWord {S : Type} (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) : forall {sz sz'} (f : S -> word sz) (f' : S -> word sz') k encoder, CorrectAlignedEncoder (Projection_Format format_word (fun s => combine (f' s) (f s)) ++ k) encoder -> CorrectAlignedEncoder (Projection_Format format_word f ++ Projection_Format format_word f' ++ k) encoder. Proof. intros; eapply refine_CorrectAlignedEncoder; eauto. intros. rewrite !refine_sequence_Format. unfold compose, Bind2. rewrite !refine_Projection_Format. pose proof CollapseFormatWord. unfold compose, Bind2 in H. rewrite <- H; eauto. split. - f_equiv; intro. rewrite !refine_sequence_Format. simpl. unfold compose, Bind2. simplify with monad laws. rewrite !refine_Projection_Format. setoid_rewrite refineEquiv_bind_bind. f_equiv; intro. setoid_rewrite refineEquiv_bind_bind. f_equiv; intro. setoid_rewrite refineEquiv_bind_unit. reflexivity. - intros. intro. simpl. apply refine_sequence_Format in H1. unfold compose, Bind2 in H1. computes_to_inv. apply refine_Projection_Format in H1. apply refine_sequence_Format in H1'. unfold compose, Bind2 in H1'. unfold format_word in *. computes_to_inv; subst. apply refine_Projection_Format in H1'. computes_to_inv; subst. simpl in *. eapply H0. unfold sequence_Format, compose, Bind2. computes_to_econstructor. apply refine_Projection_Format. eauto. computes_to_econstructor; eauto. simpl. rewrite addE_addE_plus in H1''0; rewrite plus_comm; eauto. Defined. Lemma CollapseCorrectAlignedEncoderFormatWord' {S : Type} (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) : forall {sz sz'} (f : S -> word sz) (f' : S -> word sz') k encoder, CorrectAlignedEncoder (Projection_Format format_word f ++ Projection_Format format_word f' ++ k) encoder -> CorrectAlignedEncoder (Projection_Format format_word (fun s => combine (f' s) (f s)) ++ k) encoder. Proof. intros; eapply refine_CorrectAlignedEncoder; eauto. intros. rewrite !refine_sequence_Format. unfold compose, Bind2. rewrite !refine_Projection_Format. pose proof CollapseFormatWord'. unfold compose, Bind2 in H. rewrite H; eauto. split. - f_equiv; intro. rewrite !refine_sequence_Format. simpl. unfold compose, Bind2. simplify with monad laws. rewrite !refine_Projection_Format. setoid_rewrite refineEquiv_bind_bind. 
f_equiv; intro. setoid_rewrite refineEquiv_bind_bind. f_equiv; intro. setoid_rewrite refineEquiv_bind_unit. reflexivity. - intros. intro. simpl. apply refine_sequence_Format in H1. unfold compose, Bind2 in H1. computes_to_inv. apply refine_Projection_Format in H1. unfold format_word in *. computes_to_inv; subst. simpl in *. eapply H0. unfold sequence_Format, compose, Bind2. computes_to_econstructor. apply refine_Projection_Format. eauto. computes_to_econstructor; eauto. simpl. computes_to_econstructor; eauto. apply refine_Projection_Format. eauto. computes_to_econstructor; eauto. simpl. rewrite addE_addE_plus; rewrite plus_comm; eauto. Defined. Lemma refine_CollapseFormatWord (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) : forall {sz sz'} (w : word sz) (w' : word sz') format_1 format_2 ce, refine (format_1 ce) (format_word w ce) -> (forall ce, refine (format_2 ce) (format_word w' ce)) -> refine ((format_1 ThenC format_2) ce) ((format_word (combine w' w)) ce). Proof. intros. etransitivity. instantiate (1 := ((format_word (combine w' w)) ThenC (fun ce => ret (ByteString_id, ce))) ce). rewrite <- CollapseFormatWord; eauto. unfold compose, Bind2; intros. rewrite H; setoid_rewrite H0; setoid_rewrite refineEquiv_bind_bind; repeat setoid_rewrite refineEquiv_bind_unit. simpl. pose proof mempty_right; simpl in *; rewrite H1; reflexivity. unfold compose, Bind2; intros; eauto. repeat setoid_rewrite refineEquiv_bind_unit; simpl. pose proof mempty_right; simpl in *; rewrite H1; reflexivity. Qed. Lemma refine_CollapseFormatWord' (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) {S} : forall {sz sz'} (f : S -> word sz) (f' : S -> word sz') (format_1 format_2 : FormatM S _), (forall s env, refine (format_1 s env) (Projection_Format format_word f s env)) -> (forall s env, refine (format_2 s env) (Projection_Format format_word f' s env)) -> (forall s env, refine ((format_1 ++ format_2) s env) (Projection_Format format_word (fun s => combine (f' s) (f s)) s env)). Proof. intros. unfold sequence_Format, compose, Projection_Format, Compose_Format, Bind2. rewrite H; setoid_rewrite H0. intros ? ?. rewrite unfold_computes in H1. destruct_ex; intuition; subst. pose proof (CollapseFormatWord (sz' := sz') (sz := sz) addE_addE_plus (f s) (f' s) (fun ce => ret (ByteString_id, ce)) env); eauto. unfold compose in H1. unfold Bind2 in H1. repeat setoid_rewrite refineEquiv_bind_unit in H1. simpl in H1. unfold format_word in H2. pose proof mempty_right. simpl in H3. rewrite !H3 in H1. eapply H1 in H2. computes_to_inv; subst. computes_to_econstructor. unfold Projection_Format, Compose_Format; apply unfold_computes; eexists; intuition eauto. unfold format_word; computes_to_econstructor. computes_to_econstructor. unfold Projection_Format, Compose_Format; apply unfold_computes; eexists; intuition eauto. unfold format_word; computes_to_econstructor. simpl. eauto. Qed. Lemma format_words' {n m} (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) : forall (w : word (n + m)) ce, refine (format_word (monoidUnit := ByteString_QueueMonoidOpt) w ce) ((format_word (monoidUnit := ByteString_QueueMonoidOpt) (split1' _ _ w) ThenC (format_word (monoidUnit := ByteString_QueueMonoidOpt) (split2' _ _ w))) ce). Proof. induction n. - unfold compose; simpl; intros. unfold format_word at 2; simpl. autorewrite with monad laws. simpl; rewrite addE_addE_plus. pose proof mempty_left as H'; simpl in H'; rewrite H'. reflexivity. - unfold plus; fold plus; simpl; intros. 
rewrite (word_split_SW w) at 1. rewrite format_SW_word. unfold compose, Bind2. rewrite (IHn (word_split_tl w) (addE ce 1)). unfold compose, Bind2. unfold format_word; autorewrite with monad laws. simpl. rewrite format_word_S. pose proof mappend_assoc as H'; simpl in H'. rewrite !H'. rewrite !addE_addE_plus; simpl. f_equiv. f_equiv. f_equiv. rewrite !word_split_hd_SW_word, !word_split_tl_SW_word. fold plus. clear; generalize (split1' n m (word_split_tl w)) (ByteString_enqueue (word_split_hd w) ByteString_id). induction w0; simpl in *. + intros; pose proof (mempty_right b) as H; simpl in H; rewrite H; eauto. + intros. rewrite <- (IHw0 (wtl w) b0). pose proof enqueue_mappend_opt as H'''; simpl in H'''. rewrite <- H'''; eauto. + eauto. Qed. Lemma format_words {n m} (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) : forall (w : word (n + m)) ce, refine (format_word (monoidUnit := ByteString_QueueMonoidOpt) w ce) ((format_word (monoidUnit := ByteString_QueueMonoidOpt) (split2 m n (eq_rect _ _ w _ (trans_plus_comm _ _))) ThenC (format_word (monoidUnit := ByteString_QueueMonoidOpt) (split1 m n (eq_rect _ _ w _ (trans_plus_comm _ _))))) ce). Proof. intros; rewrite format_words'. rewrite split1'_eq, split2'_eq; reflexivity. eauto. Qed. Lemma CorrectAlignedEncoderForFormatNChar' (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) {sz} : forall encoder, (CorrectAlignedEncoder (format_word (monoidUnit := ByteString_QueueMonoidOpt)) (fun sz => encoder sz)) -> CorrectAlignedEncoder (format_word (monoidUnit := ByteString_QueueMonoidOpt)) (fun sz' => AppendAlignedEncodeM (fun v idx w => @SetCurrentByte _ _ sz' v idx (split1' 8 sz w)) (fun v idx w => encoder sz' v idx (split2' 8 sz w))). Proof. intros; pose proof (format_words addE_addE_plus (n := 8) (m := sz)) as H'; eapply refine_CorrectAlignedEncoder. split. - unfold flip, pointwise_relation; eapply H'. - intros; intro. eapply H. unfold compose, format_word; computes_to_econstructor; eauto. unfold compose, format_word; computes_to_econstructor; eauto. - eapply refine_CorrectAlignedEncoder. split; intros. rewrite <- split2'_eq, <- split1'_eq. 3: eapply CorrectAlignedEncoderForThenC. (*3: intros; eapply (@CorrectAlignedEncoderForFormatChar_f (word (8 + sz)) (split1' 8 sz)).*) instantiate (1 := Projection_Format format_word (split2' 8 sz)). rewrite refine_sequence_Format. instantiate (1 := Projection_Format format_word (split1' 8 sz)). unfold compose, Bind2; rewrite refine_Projection_Format; f_equiv. intro; rewrite refine_Projection_Format; f_equiv. 2: eapply CorrectAlignedEncoderForProjection_Format; eauto. + intro. eapply H. rewrite <- split2'_eq, <- split1'_eq in H0. unfold sequence_Format. unfold compose, Bind2 in *; computes_to_inv; computes_to_econstructor. apply refine_Projection_Format; eauto. computes_to_econstructor. apply refine_Projection_Format; eauto. subst; eauto. + intros. instantiate (1 := (@CorrectAlignedEncoderForFormatChar_f (word (8 + sz)) (split1' 8 sz))). destruct (projT1 (CorrectAlignedEncoderForFormatChar_f (split1' 8 sz)) s env) eqn: ?. * eexists _, _; split; eauto. apply refine_Projection_Format; eauto. unfold format_word; eauto. * generalize (proj2 (proj1 (projT2 (CorrectAlignedEncoderForFormatChar_f (split1' 8 sz))) s env)); intro. eapply H1 in H. 2: eapply isError; eassumption. intuition eauto. Defined. Fixpoint SetCurrentBytes' (* Sets the bytes at the current index and increments the current index. 
*) {n sz : nat} : @AlignedEncodeM _ (word (sz * 8)) n := match sz return @AlignedEncodeM _ (word (sz * 8)) _ with | 0 => AlignedEncode_Nil n | S sz' => AppendAlignedEncodeM (fun v idx w => SetCurrentByte v idx (split1' 8 (sz' * 8) w)) (fun v idx w => SetCurrentBytes' v idx (split2' 8 (sz' * 8) w)) end. Fixpoint SetCurrentBytes (* This version produces better code. *) {n sz : nat} {struct sz} : @AlignedEncodeM _ (word (sz * 8)) n := match sz as n0 return (AlignedEncodeM n) with | 0 => AlignedEncode_Nil n | S sz0 => fun v idx w => match sz0 return word (S sz0 * 8) -> _ with | 0 => fun (w' : word 8) => SetCurrentByte v idx w' | S sz1 => fun _ => (* ignored to get proper recursive call *) AppendAlignedEncodeM (fun (v : t Core.char n) (idx : nat) (w : word (S sz0 * 8)) => SetCurrentByte v idx (split1' 8 (sz0 * 8) w)) (fun (v : t Core.char n) (idx : nat) (w : word (S sz0 * 8)) => SetCurrentBytes v idx (split2' 8 (sz0 * 8) w)) v idx w end w end. Local Arguments split1' : simpl never. Local Arguments split2' : simpl never. Lemma split1'_8_0 : forall w, (split1' 8 0 w) = w. Proof. intros; compute in (type of w); shatter_word w; reflexivity. Qed. Lemma SetCurrentBytes_SetCurrentBytes' : forall n sz v idx w c, @SetCurrentBytes n sz v idx w c = @SetCurrentBytes' n sz v idx w c. Proof. induction sz; simpl; intros. - reflexivity. - destruct sz; unfold AppendAlignedEncodeM, SetCurrentByte; destruct (_ <? _) eqn:?; unfold If_Opt_Then_Else. + unfold SetCurrentBytes', AlignedEncode_Nil, ReturnAlignedEncodeM; simpl. destruct (S _ <? S _) eqn:?; rewrite ?Nat.ltb_lt, ?Nat.ltb_ge, ?split1'_8_0 in *; (reflexivity || lia). + reflexivity. + f_equal. eapply functional_extensionality; intros. rewrite IHsz. reflexivity. + reflexivity. Qed. Corollary CorrectAlignedEncoderForFormatNChar (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) (addE_0 : forall ce, addE ce 0 = ce) {sz} : CorrectAlignedEncoder (format_word (monoidUnit := ByteString_QueueMonoidOpt)) (fun n => @SetCurrentBytes n sz). Proof. eapply CorrectAlignedEncoder_morphism with (encode := (fun n => @SetCurrentBytes' n sz)). apply EquivFormat_reflexive. auto using SetCurrentBytes_SetCurrentBytes'. unfold CorrectAlignedEncoder. induction sz; simpl; intros. - eapply refine_CorrectAlignedEncoder; intros. shatter_word s; unfold format_word; simpl. split. unfold format_word; rewrite addE_0; higher_order_reflexivity. intros; intro. eapply H. eauto. + eapply CorrectAlignedEncoderForDoneC. - eapply (CorrectAlignedEncoderForFormatNChar' addE_addE_plus (fun sz' => @SetCurrentBytes' sz' sz)); eauto. Defined. Lemma CorrectAlignedEncoderForFormatMChar_f n {S} (addE_addE_plus : forall ce n m, addE (addE ce n) m = addE ce (n + m)) (addE_0 : forall ce, addE ce 0 = ce) (proj : S -> word (n * 8)) : CorrectAlignedEncoder (Projection_Format format_word proj) (fun sz v idx s => SetCurrentBytes v idx (proj s)). Proof. eapply CorrectAlignedEncoderForProjection_Format with (format := format_word) (encoder := fun sz => @SetCurrentBytes sz n) (f := proj). eapply CorrectAlignedEncoderForFormatNChar; eauto. Defined. End AlignEncodeWord. Ltac collapse_word addD_addD_plus := match goal with | |- DecodeBindOpt2 (decode_word (sz := ?sz) ?b ?cd) (fun w b' cd' => DecodeBindOpt2 (decode_word (sz := ?sz') b' cd') (fun w' b'' cd'' => @?k w w' b'' cd'')) = _ => etransitivity; [let H := fresh in pose proof (@CollapseWord'' _ _ _ _ _ addD_addD_plus _ sz' sz b cd k); apply H | ] end.
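The CollapseWord / CollapseFormatWord lemmas above all reduce to the fact that splitting a word and recombining the pieces is the identity. Below is a minimal standalone Coq sketch of that fact over N with byte-sized pieces; the names split_lo16, split_hi16 and combine16 are local to this sketch and are not part of the Narcissus development.

Require Import Coq.NArith.NArith.

Definition split_lo16 (w : N) : N := (w mod 256)%N.
Definition split_hi16 (w : N) : N := (w / 256)%N.
Definition combine16 (hi lo : N) : N := (lo + 256 * hi)%N.

(* Recombining the two pieces of a value gives the value back; this is the
   arithmetic shadow of split1'/split2' versus combine on word. *)
Lemma combine16_split : forall w : N,
  combine16 (split_hi16 w) (split_lo16 w) = w.
Proof.
  intros w; unfold combine16, split_hi16, split_lo16.
  rewrite N.add_comm; symmetry; apply N.div_mod; discriminate.
Qed.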
import data.polynomial.erase_lead import analysis.calculus.deriv import analysis.asymptotic_equivalent import equivalent open polynomial asymptotics filter finset function open_locale big_operators topological_space asymptotics section variables {α β : Type*} [linear_ordered_field α] variables {l : filter β} {f g : β → α} lemma tendsto_at_bot_mul_left' {r : α} (hr : r < 0) (hf : tendsto f l at_top) : tendsto (λx, r * f x) l at_bot := begin apply tendsto_at_bot.2 (λb, _), filter_upwards [tendsto_at_top.1 hf (b/r)], assume x hx, simpa [div_le_iff_of_neg' hr] using hx end end variables {α : Type*} lemma tendsto_abs_at_bot_at_top [linear_ordered_add_comm_group α] : tendsto (abs : α → α) at_bot at_top := begin convert show tendsto (abs ∘ has_neg.neg : α → α) at_bot at_top, from tendsto_abs_at_top_at_top.comp tendsto_neg_at_bot_at_top, ext, simp end lemma is_o_pow_pow_at_top_of_lt' {α : Type*} [normed_linear_ordered_field α] [order_topology α] {p q : ℕ} (hpq : p < q) : is_o (λ (x : α), x^p) (λ (x : α), x^q) at_top := begin refine (is_o_iff_tendsto' _).mpr (tendsto_pow_div_pow_at_top_of_lt hpq), rw eventually_iff_exists_mem, exact ⟨set.Ioi 0, Ioi_mem_at_top 0, λ x (hx : 0 < x) hxq, (pow_ne_zero q hx.ne.symm hxq).elim⟩, end lemma polynomial.eval_eq_range_sum [semiring α] (P : polynomial α) (x : α) : eval x P = ∑ i in range (P.nat_degree + 1), P.coeff i * x ^ i := begin rw eval_eq_sum, refine P.sum_of_support_subset _ _ _, { intros a, rw [mem_range, nat.lt_add_one_iff], exact le_nat_degree_of_mem_supp a }, { intros, exact zero_mul _ } end lemma polynomial.eval_eq_range_sum' [semiring α] (P : polynomial α) : (λ x, eval x P) = (λ x, ∑ i in range (P.nat_degree + 1), P.coeff i * x ^ i) := begin ext, exact P.eval_eq_range_sum x end lemma polynomial.eval_is_equivalent_at_top_eval_lead [normed_linear_ordered_field α] [order_topology α] (P : polynomial α) : (λ x, eval x P) ~[at_top] (λ x, P.leading_coeff * x ^ P.nat_degree) := begin by_cases h : P = 0, { simp [h] }, { conv { congr, funext, rw [polynomial.eval_eq_range_sum, sum_range_succ, add_comm] }, exact is_equivalent.refl.add_is_o (is_o.sum $ λ i hi, is_o.const_mul_left (is_o.const_mul_right (λ hz, h $ leading_coeff_eq_zero.mp hz) $ is_o_pow_pow_at_top_of_lt' (mem_range.mp hi)) _) } end noncomputable instance : normed_linear_ordered_field ℝ := ⟨dist_eq_norm, normed_field.norm_mul⟩ lemma polynomial.eval_div_tendsto_zero_of_degree_lt [normed_linear_ordered_field α] [order_topology α] (P Q : polynomial α) (hdeg : P.degree < Q.degree) : tendsto (λ x, (eval x P)/(eval x Q)) at_top (𝓝 0) := begin refine (P.eval_is_equivalent_at_top_eval_lead.symm.div Q.eval_is_equivalent_at_top_eval_lead.symm).tendsto_nhds _, conv { congr, funext, rw ← div_mul_div }, by_cases hPQ : P = 0 ∨ Q = 0, { rcases hPQ with hP | hQ, { simp [hP, tendsto_const_nhds] }, { simp [hQ, tendsto_const_nhds] } }, { push_neg at hPQ, rw ← mul_zero _, rw [degree_eq_nat_degree hPQ.1, degree_eq_nat_degree hPQ.2] at hdeg, refine tendsto.const_mul _ (tendsto_pow_div_pow_at_top_of_lt _), exact_mod_cast hdeg } end lemma polynomial.eval_div_tendsto_leading_coeff_div_of_degree_eq [normed_linear_ordered_field α] [order_topology α] (P Q : polynomial α) (hdeg : P.degree = Q.degree) : tendsto (λ x, (eval x P)/(eval x Q)) at_top (𝓝 $ P.leading_coeff / Q.leading_coeff) := begin refine (P.eval_is_equivalent_at_top_eval_lead.symm.div Q.eval_is_equivalent_at_top_eval_lead.symm).tendsto_nhds _, conv { congr, funext, rw ← div_mul_div, rw nat_degree_eq_of_degree_eq hdeg, skip, skip, rw ← mul_one (P.leading_coeff / 
Q.leading_coeff) }, exact tendsto.const_mul _ (tendsto_const_nhds.congr' $ (eventually_gt_at_top 0).mono $ λ x (hx : 0 < x), (div_self (pow_pos hx Q.nat_degree).ne.symm).symm), end lemma polynomial.eval_div_tendsto_at_top_of_degree_gt [normed_linear_ordered_field α] [order_topology α] (P Q : polynomial α) (hdeg : Q.degree < P.degree) (hQ : Q ≠ 0) (hnng : 0 ≤ P.leading_coeff/Q.leading_coeff) : tendsto (λ x, (eval x P)/(eval x Q)) at_top at_top := begin refine (P.eval_is_equivalent_at_top_eval_lead.symm.div Q.eval_is_equivalent_at_top_eval_lead.symm).tendsto_at_top _, conv { congr, funext, rw ← div_mul_div }, have hP : P ≠ 0, { rw ← degree_nonneg_iff_ne_zero at ⊢ hQ, exact hQ.trans hdeg.le }, have ratio_pos : 0 < P.leading_coeff/Q.leading_coeff := lt_of_le_of_ne hnng (div_ne_zero (λ h, hP $ leading_coeff_eq_zero.mp h) (λ h, hQ $ leading_coeff_eq_zero.mp h)).symm, rw [degree_eq_nat_degree hP, degree_eq_nat_degree hQ] at hdeg, norm_cast at hdeg, have one_le_nat_degree_sub : 1 ≤ P.nat_degree - Q.nat_degree := (nat.le_sub_left_iff_add_le hdeg.le).mpr (nat.lt_iff_add_one_le.mp hdeg), exact tendsto_at_top_mul_left' ratio_pos ((tendsto_pow_at_top one_le_nat_degree_sub).congr' $ (eventually_gt_at_top 0).mono $ λ x hx, pow_sub' x hx.ne.symm hdeg.le) end lemma polynomial.eval_div_tendsto_at_bot_of_degree_gt [normed_linear_ordered_field α] [order_topology α] (P Q : polynomial α) (hdeg : Q.degree < P.degree) (hQ : Q ≠ 0) (hnng : P.leading_coeff/Q.leading_coeff ≤ 0) : tendsto (λ x, (eval x P)/(eval x Q)) at_top at_bot := begin refine (P.eval_is_equivalent_at_top_eval_lead.symm.div Q.eval_is_equivalent_at_top_eval_lead.symm).tendsto_at_bot _, conv { congr, funext, rw ← div_mul_div }, have hP : P ≠ 0, { rw ← degree_nonneg_iff_ne_zero at ⊢ hQ, exact hQ.trans hdeg.le }, have ratio_neg : P.leading_coeff/Q.leading_coeff < 0 := lt_of_le_of_ne hnng (div_ne_zero (λ h, hP $ leading_coeff_eq_zero.mp h) (λ h, hQ $ leading_coeff_eq_zero.mp h)), rw [degree_eq_nat_degree hP, degree_eq_nat_degree hQ] at hdeg, norm_cast at hdeg, have one_le_nat_degree_sub : 1 ≤ P.nat_degree - Q.nat_degree := (nat.le_sub_left_iff_add_le hdeg.le).mpr (nat.lt_iff_add_one_le.mp hdeg), exact tendsto_at_bot_mul_left' ratio_neg ((tendsto_pow_at_top one_le_nat_degree_sub).congr' $ (eventually_gt_at_top 0).mono $ λ x hx, pow_sub' x hx.ne.symm hdeg.le) end lemma polynomial.abs_eval_div_tendsto_at_top_of_degree_gt [normed_linear_ordered_field α] [order_topology α] (P Q : polynomial α) (hdeg : Q.degree < P.degree) (hQ : Q ≠ 0) : tendsto (λ x, abs ((eval x P)/(eval x Q))) at_top at_top := begin by_cases h : 0 ≤ P.leading_coeff/Q.leading_coeff, { exact tendsto_abs_at_top_at_top.comp (P.eval_div_tendsto_at_top_of_degree_gt Q hdeg hQ h) }, { push_neg at h, exact tendsto_abs_at_bot_at_top.comp (P.eval_div_tendsto_at_bot_of_degree_gt Q hdeg hQ h.le) } end example : tendsto (λ x : ℝ, (3*x^2 - 6*x + 7)/(12*x^2 + 4)) at_top (𝓝 (1/4)) := begin have key1 : ∀ x:ℝ, 3*x^2-6*x+7 = eval x (monomial 2 3 - monomial 1 6 + monomial 0 7) := by simp, have key2 : ∀ x:ℝ, 12*x^2+4 = eval x (monomial 2 12 + monomial 0 4) := by simp, simp_rw [key1, key2], set A : polynomial ℝ := monomial 2 3 - monomial 1 6 + monomial 0 7, set B : polynomial ℝ := monomial 2 12 + monomial 0 4, have degA1 : (monomial 2 3 : polynomial ℝ).degree = (2:ℕ) := degree_monomial _ (by norm_num), have degA2 : (monomial 1 6 : polynomial ℝ).degree = (1:ℕ) := degree_monomial _ (by norm_num), have degA3 : (monomial 0 7 : polynomial ℝ).degree = (0:ℕ) := degree_monomial _ (by norm_num), have degA4 : (monomial 2 
3 - monomial 1 6 : polynomial ℝ).degree = (2:ℕ) := degA1 ▸ degree_sub_eq_left_of_degree_lt (by rw [degA1, degA2] ; dec_trivial), have degA : A.degree = (2:ℕ) := degA4 ▸ degree_add_eq_left_of_degree_lt (by rw [degA3, degA4] ; dec_trivial), have degB1 : (monomial 2 12 : polynomial ℝ).degree = (2:ℕ) := degree_monomial _ (by norm_num), have degB2 : (monomial 0 4 : polynomial ℝ).degree = (0:ℕ) := degree_monomial _ (by norm_num), have degB : B.degree = (2:ℕ) := degB1 ▸ degree_add_eq_left_of_degree_lt (by rw [degB1, degB2] ; dec_trivial), have leadA : A.leading_coeff = 3, { unfold leading_coeff nat_degree, rw degA, simp only [coeff_add,coeff_sub,coeff_monomial,if_true,option.get_or_else_coe,eq_self_iff_true], norm_num }, have leadB : B.leading_coeff = 12, { unfold leading_coeff nat_degree, rw degB, simp only [coeff_add,coeff_sub,coeff_monomial,if_true,option.get_or_else_coe,eq_self_iff_true], norm_num }, convert A.eval_div_tendsto_leading_coeff_div_of_degree_eq B (degA.trans degB.symm) using 2, rw [leadA, leadB], norm_num end /-variables [ordered_ring α] set_option profiler true lemma polynomial.eval_is_equivalent_at_top_eval_lead [normed_linear_ordered_field α] [order_topology α] (P : polynomial α) : (λ x, eval x P) ~[at_top] (λ x, eval x ((C P.leading_coeff) * X ^ P.nat_degree)) := begin rw is_equivalent, have : (λ x, eval x P) - (λ x, eval x ((C P.leading_coeff) * X ^ P.nat_degree)) = λ x, eval x P.erase_lead, { simp_rw [← self_sub_C_mul_X_pow, eval_sub], refl }, rw [this, C_mul_X_pow_eq_monomial, is_o_iff_tendsto'], { conv { congr, funext, rw [eval_monomial, polynomial.eval_eq_range_sum, sum_div, sum_congr rfl (λ n hn, (div_mul_div (P.erase_lead.coeff n) (P.leading_coeff) (x^n) (x^P.nat_degree)).symm)], skip, skip, congr, rw ← sum_eq_zero (λ n (hn : n ∈ range (P.erase_lead.nat_degree + 1)), rfl) }, refine tendsto_finset_sum _ (λ c hc, _), rcases P.erase_lead_nat_degree_lt_or_erase_lead_eq_zero with h | h, { rw ← mul_zero (P.erase_lead.coeff c / P.leading_coeff), refine tendsto_const_nhds.mul _, rw [mem_range, nat.lt_add_one_iff] at hc, have : c < P.nat_degree := by linarith, suffices h : tendsto (λ (x : α), x ^ ((c : ℤ) - P.nat_degree)) at_top (𝓝 0), { refine (tendsto_congr' ((eventually_gt_at_top (0 : α)).mono (λ x hx, _))).mp h, simp [fpow_sub hx.ne.symm] }, rw [← neg_sub, ← int.coe_nat_sub this.le], have : 1 ≤ P.nat_degree - c := nat.sub_pos_of_lt this, exact @tendsto_pow_neg_at_top α _ _ (by apply_instance) _ this, }, { simp [h, tendsto_const_nhds] } }, { sorry }, end #check int.coe_nat_sub lemma polynomial.eval_is_equivalent_at_top_eval_lead [normed_field α] (P : polynomial α) : (λ x, eval x P) ~[at_top] (λ x, eval x ((C P.leading_coeff) * X ^ P.nat_degree)) := begin rw is_equivalent, have : (λ x, eval x P) - (λ x, eval x ((C P.leading_coeff) * X ^ P.nat_degree)) = λ x, eval x P.erase_lead, { simp_rw [← self_sub_C_mul_X_pow, eval_sub], refl }, rw [this, is_o_iff_exists_eq_mul, C_mul_X_pow_eq_monomial], use (λ x, (eval x P.erase_lead) / (P.leading_coeff * x ^ P.nat_degree)), split, { conv { congr, funext, rw [polynomial.eval_eq_range_sum, sum_div, sum_congr rfl (λ n hn, (div_mul_div (P.erase_lead.coeff n) (P.leading_coeff) (x^n) (x^P.nat_degree)).symm)], skip, skip, congr, rw ← sum_eq_zero (λ n (hn : n ∈ range (P.erase_lead.nat_degree + 1)), rfl) }, refine tendsto_finset_sum _ (λ c hc, _), rw ← mul_zero (P.erase_lead.coeff c / P.leading_coeff), refine tendsto_const_nhds.mul _, rw mem_range at hc, rw nat.lt_add_one_iff at hc, have := P.erase_lead_nat_degree_lt_or_erase_lead_eq_zero, rcases 
this with h | h, { have : c + 1 ≤ P.nat_degree := by linarith, simp [div_eq_mul_inv, ← pow_sub'], sorry }, { simp only [h, le_zero_iff_eq, nat_degree_zero] at hc, simp only [hc, one_div, pow_zero], refine tendsto_inv_at_top_zero.comp tendsto_pow_at_t, } }, { simp, symmetry, }, end lemma polynomial.eval_is_o_at_top_eval_of_degree_lt [normed_ring α] {P Q : polynomial α} (h : P.degree < Q.degree) : is_o (λ x, eval x P) (λ x, eval x Q) at_top := begin sorry end lemma polynomial.eval_is_equivalent_at_top_eval_lead [normed_ring α] (P : polynomial α) : (λ x, eval x P) ~[at_top] (λ x, eval x ((C P.leading_coeff) * X ^ P.nat_degree)) := begin rw is_equivalent, have : (λ x, eval x P) - (λ x, eval x ((C P.leading_coeff) * X ^ P.nat_degree)) = λ x, eval x P.erase_lead, { simp_rw [← self_sub_C_mul_X_pow, eval_sub], refl }, rw this, sorry end-/ #lint
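For reference, the informal computation behind the final example above, which eval_div_tendsto_leading_coeff_div_of_degree_eq packages: dividing numerator and denominator by x^2 gives

  (3x^2 - 6x + 7) / (12x^2 + 4) = (3 - 6/x + 7/x^2) / (12 + 4/x^2) → 3/12 = 1/4  as x → ∞,

i.e. exactly the ratio of leading coefficients A.leading_coeff / B.leading_coeff = 3/12 established by leadA and leadB.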
def casesTFOn {motive : Prop → Sort _} (P) [inst : Decidable P] :
    (T : motive True) → (F : motive False) → motive P :=
  λ ht hf =>
    match inst with
    | isTrue H => eqTrue H ▸ ht
    | isFalse H => eqFalse H ▸ hf

example (P) [Decidable P] : ¬¬P → P := by
  induction P using casesTFOn
  admit
  admit
From Coq Require Import NArith String.
From Ceres Require Import CeresS CeresParserUtils CeresParserInternal.

(** Parse a string into a list of S-expressions. *)
Definition parse_sexps (s : string) : error + list sexp :=
  match parse_sexps_ initial_state 0%N s with
  | (None, p, i) => eof i p
  | (Some e, _, _) => inl e
  end.

(** Parse a string into one S-expression.
    Subsequent expressions, if any, are ignored. *)
Definition parse_sexp (s : string) : error + sexp :=
  let '(e, p, i) := parse_sexps_ initial_state 0%N s in
  match List.rev' (parser_done i), e with
  | (r :: _)%list, _ => inr r
  | nil, Some e => inl e
  | nil, None =>
    match eof i p with
    | inl e => inl e
    | inr (r :: _)%list => inr r
    | inr nil => inl EmptyInput
    end
  end.
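A small usage sketch, not part of the Ceres source above: how a caller might consume parse_sexp using only this file's own definitions. The name parse_or_default and its default argument are hypothetical and introduced only for illustration.

Definition parse_or_default (default : sexp) (s : string) : sexp :=
  match parse_sexp s with
  | inl _ => default  (* parse error or empty input: fall back to the caller's value *)
  | inr e => e        (* first S-expression of the input *)
  end.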
(* vim: filetype=coq *)

(* Copyright (C) 2016-2018 Philip H. Smith

   This program is free software: you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 3 of the License, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
   more details. *)

Require Import Way.Tactics.
Require Import Way.List.
Require Import Way.Nat.

Lemma has_le_fold_max : forall (n : nat) (l : list nat),
  has n l -> n <= fold max 0 l.
Proof.
  intro n; induction l;
    [ infer
    | infer from eq_nat_dec le_max_l (le_trans n (fold max 0 l)) le_max_r ].
Defined.

Lemma fold_max_lt_not_has : forall (n : nat) (l : list nat),
  fold max 0 l < n -> ~ has n l.
Proof.
  intros n l;
    infer from has_le_fold_max (le_lt_trans n (fold max 0 l)) (lt_irrefl n).
Defined.

Lemma fresh_nat : forall (l : list nat), {n : nat | ~ has n l}.
Proof.
  intro l; exists (S (fold max 0 l)); infer from fold_max_lt_not_has.
Defined.
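The same "successor of the maximum is fresh" argument, restated against Coq's standard library only so it can be checked without the Way-specific infer tactic; fresh, in_le_fold_max and fresh_not_in below are names local to this sketch.

Require Import Coq.Lists.List Coq.Arith.PeanoNat.

Definition fresh (l : list nat) : nat := S (fold_right Nat.max 0 l).

(* Every member of the list is bounded by the running maximum. *)
Lemma in_le_fold_max : forall (n : nat) (l : list nat),
  In n l -> n <= fold_right Nat.max 0 l.
Proof.
  intros n l; induction l as [| a l IH]; simpl; intros H.
  - contradiction.
  - destruct H as [-> | H].
    + apply Nat.le_max_l.
    + eapply Nat.le_trans; [apply IH, H | apply Nat.le_max_r].
Qed.

(* Hence the successor of the maximum cannot occur in the list. *)
Lemma fresh_not_in : forall l : list nat, ~ In (fresh l) l.
Proof.
  intros l H; apply in_le_fold_max in H; unfold fresh in H.
  exact (Nat.nle_succ_diag_l _ H).
Qed.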
Require Import Coq.Classes.DecidableClass. Require Import Coq.Lists.List. Require Import Coq.Bool.Bool. Require Import SquiggleEq.export. Require Import SquiggleEq.UsefulTypes. Require Import SquiggleEq.list. Require Import SquiggleEq.LibTactics. Require Import SquiggleEq.tactics. Require Import SquiggleEq.AssociationList. Require Import ExtLib.Structures.Monads. Require Import common. Require Import Trecord. Fixpoint natElim (n : nat) : Type:= match n with | 0 => bool | S n => unit + (natElim n) end. Definition isZero (n : nat) : bool := negb( match n with | 0 => true | S _ => false end). (* Parametricity Recursive isZero. Print isZero_R. Parametricity Recursive unit. Parametricity Recursive sum. Print nat_R. *) (* Print nat_R Changed Set to Prop *) Inductive nat_R : nat -> nat -> (* Set *) Prop := nat_R_O_R : nat_R 0 0 | nat_R_S_R : forall H H0 : nat, nat_R H H0 -> nat_R (S H) (S H0). (* Parametricity Recursive natElim. Print natElim_R. (* copied below *) *) (* Fails because nat_R is now in Prop Definition natElim_R := let fix_natElim_1 := fix natElim (n : nat) : Type := match n with | 0 => bool | S n0 => (unit + natElim n0)%type end in let fix_natElim_2 := fix natElim (n : nat) : Type := match n with | 0 => bool | S n0 => (unit + natElim n0)%type end in fix natElim_R (n₁ n₂ : nat) (n_R : nat_R n₁ n₂) {struct n_R} : fix_natElim_1 n₁ -> fix_natElim_2 n₂ -> Type := match n_R in (nat_R n₁0 n₂0) return (fix_natElim_1 n₁0 -> fix_natElim_2 n₂0 -> Type) with | nat_R_O_R => bool_R | nat_R_S_R n₁0 n₂0 n_R0 => sum_R unit unit unit_R (fix_natElim_1 n₁0) (fix_natElim_2 n₂0) (natElim_R n₁0 n₂0 n_R0) end. *) Print list. Inductive list (A : Set (* Type fails *)) : Set := nil : list A | cons : A -> list A -> list A. Fixpoint listElim (A:Set )(l : list A) : Type:= match l with | nil _ => True | cons _ _ tl => @sum A (listElim A tl) end. (* Parametricity Recursive unit. Parametricity Recursive sum. *) (*Parametricity Recursive list. Parametricity Recursive listElim. Print listElim_R. (* copied below *) *) Inductive list_R (A₁ A₂ : Set) (A_R : A₁ -> A₂ -> Set) : list A₁ -> list A₂ -> Prop := list_R_nil_R : list_R A₁ A₂ A_R (nil A₁) (nil A₂) | list_R_cons_R : forall (H : A₁) (H0 : A₂), A_R H H0 -> forall (H1 : list A₁) (H2 : list A₂), list_R A₁ A₂ A_R H1 H2 -> list_R A₁ A₂ A_R (cons A₁ H H1) (cons A₂ H0 H2). (* Fails because list_R is now in Prop Definition listElim_R := let fix_listElim_1 := fix listElim (A : Set) (l : list A) {struct l} : Type := match l with | nil _ => unit | cons _ _ tl => (A + listElim A tl)%type end in let fix_listElim_2 := fix listElim (A : Set) (l : list A) {struct l} : Type := match l with | nil _ => unit | cons _ _ tl => (A + listElim A tl)%type end in fix listElim_R (A₁ A₂ : Set) (A_R : A₁ -> A₂ -> Set) (l₁ : list A₁) (l₂ : list A₂) (l_R : list_R A₁ A₂ A_R l₁ l₂) {struct l_R} : fix_listElim_1 A₁ l₁ -> fix_listElim_2 A₂ l₂ -> Type := match l_R in (list_R _ _ _ l₁0 l₂0) return (fix_listElim_1 A₁ l₁0 -> fix_listElim_2 A₂ l₂0 -> Type) with | list_R_nil_R _ _ _ => unit_R | list_R_cons_R _ _ _ _ _ _ tl₁ tl₂ tl_R => sum_R A₁ A₂ A_R (fix_listElim_1 A₁ tl₁) (fix_listElim_2 A₂ tl₂) (listElim_R A₁ A₂ A_R tl₁ tl₂ tl_R) end. *) Fixpoint list_RR (A₁ A₂ : Set) (A_R : A₁ -> A₂ -> Prop) (l1 : list A₁) (l2 : list A₂) {struct l1} : Prop := match (l1, l2) with | (nil _, nil _) => True | (cons _ h1 tl1, cons _ h2 tl2) => @sigT (A_R h1 h2) (fun _ => list_RR _ _ A_R tl1 tl2) | ( _, _) => False end. (* because of template polymorphism, * for /\ works *) (* Parametricity Recursive sum. 
*) Inductive True_R : True -> True -> Prop := True_R_I_R : True_R I I. Inductive sum_R (A₁ A₂ : Type) (A_R : A₁ -> A₂ -> Type) (B₁ B₂ : Type) (B_R : B₁ -> B₂ -> Type) : A₁ + B₁ -> A₂ + B₂ -> Type := inl_R : forall (H : A₁) (H0 : A₂), A_R H H0 -> sum_R A₁ A₂ A_R B₁ B₂ B_R (inl H) (inl H0) | inr_R : forall (H : B₁) (H0 : B₂), B_R H H0 -> sum_R A₁ A₂ A_R B₁ B₂ B_R (inr H) (inr H0). Fixpoint listElim_RR (A₁ A₂ : Set) (A_R : A₁ -> A₂ -> Prop) (l1 : list A₁) (l2 : list A₂) (l_R : list_RR A₁ A₂ A_R l1 l2) {struct l1} (* not l_R *) : (listElim A₁ l1) -> (listElim A₂ l2) -> Type := let reT := fun l1 l2 => list_RR A₁ A₂ A_R l1 l2 -> (listElim A₁ l1) -> (listElim A₂ l2) -> Type in (match l1 return reT l1 l2 with | nil _ => match l2 return reT (nil _) l2 with | nil _ => fun l_R => True_R | cons _ _ _ => fun l_R => False_rect _ l_R end | cons _ h1 tl1 => match l2 return reT (cons _ h1 tl1) l2 with | nil _ => fun l_R => False_rect _ l_R | cons _ h2 tl2 => fun l_R => let tl_R := projT2 l_R in @sum_R _ _ A_R _ _ (listElim_RR _ _ A_R _ _ tl_R) end end) l_R. Require Import templateCoqMisc. Require Import Template.Ast. Run TemplateProgram (printTerm "isZero"). (* Should we have a set version as well? *) (* The return type of eq is a Prop... So we can hust return fun _ _ .. => True *) Definition eq_RR (A₁ A₂ : Type) (A_R : A₁ -> A₂ -> Type) (x₁ : A₁) (x₂ : A₂) (x_R : A_R x₁ x₂) : forall (y₁ : A₁) (y₂ : A₂), A_R y₁ y₂ -> x₁ = y₁ -> x₂ = y₂ -> Prop. intros ? ?. (* rename H into y₁. rename H0 into y₂. *) intros ary H1eq H2eq. destruct H1eq. destruct H2eq. exact True. Defined. Inductive Vec (C:Set) : forall (m:nat), Set := | vnil : Vec C 0 | vcons : forall (n: nat) (c:C) (vc: Vec C n), Vec C (S n). Inductive Vec2 (C:Set) : nat -> Type := | vnil2 : Vec2 C 0 | vcons2 : forall (n: nat), C -> Vec2 C n -> Vec2 C (n+1) (* not struct on n*). Open Scope nat_scope. Fixpoint nat_RR (n1 n2: nat) {struct n1} : Prop := match (n1, n2) with | (0, 0) => True | (S h1, S h2) => nat_RR h1 h2 | ( _, _) => False end. (* Definition transportRev {T : Type} {a b : T} {P : T -> Type} (p : a = b) (pb : P b) : P a := transport (eq_sym p) pb. *) Fixpoint vAppend {C:Set} {n m : nat} (vl : Vec C n) (vr : Vec C m): Vec C (n+m) := match vl in Vec _ n return Vec C (n + m) with | vnil _ => vr | vcons _ n' hl tl => (vcons _ _ hl (vAppend tl vr)) end. Definition vAppend2 {C:Set} {m : nat} (cdef: C) (vr : Vec C m): C := match vAppend vr vr as vapx in Vec _ n return C with | vnil _ => cdef | vcons _ n' hl tl => hl end. (* Parametricity Recursive vAppend2. Print vAppend2_R. vAppend2_R = fun (C₁ C₂ : Set) (C_R : C₁ -> C₂ -> Set) (m₁ m₂ : nat) (m_R : common.nat_R m₁ m₂) (cdef₁ : C₁) (cdef₂ : C₂) (cdef_R : C_R cdef₁ cdef₂) (vr₁ : Vec C₁ m₁) (vr₂ : Vec C₂ m₂) (vr_R : Vec_R C₁ C₂ C_R m₁ m₂ m_R vr₁ vr₂) => match vAppend_R C₁ C₂ C_R m₁ m₂ m_R m₁ m₂ m_R vr₁ vr₂ vr_R vr₁ vr₂ vr_R in (Vec_R _ _ _ n₁ n₂ n_R x₁ x₂) return (C_R match x₁ with | vnil _ => cdef₁ | vcons _ _ hl _ => hl end match x₂ with | vnil _ => cdef₂ | vcons _ _ hl _ => hl end) with | Vec_R_vnil_R _ _ _ => cdef_R | Vec_R_vcons_R _ _ _ _ _ _ _ _ hl_R _ _ _ => hl_R end : forall (C₁ C₂ : Set) (C_R : C₁ -> C₂ -> Set) (m₁ m₂ : nat) (m_R : common.nat_R m₁ m₂) (cdef₁ : C₁) (cdef₂ : C₂), C_R cdef₁ cdef₂ -> forall (vr₁ : Vec C₁ m₁) (vr₂ : Vec C₂ m₂), Vec_R C₁ C₂ C_R m₁ m₂ m_R vr₁ vr₂ -> C_R (vAppend2 cdef₁ vr₁) (vAppend2 cdef₂ vr₂) *) (* Run TemplateProgram (printTerm "vAppend"). Run TemplateProgram (duplicateDefn "vAppend" "vAppendss"). Check (eq_refl: @vAppend=vAppendss). 
*) (* Parametricity Recursive vAppend. Print Vec_R. Check vAppend_R. *) Fixpoint Vec_RR (C1 C2 : Set) (C_R : C1 -> C2 -> Prop) (n1 n2 : nat) (n_R : nat_RR n1 n2) (v1 : Vec C1 n1) (v2: Vec C2 n2) {struct v1} : Prop:= let reT := fun n1 n2 => nat_RR n1 n2 -> (* only the indices change. so only they appear here*) Prop in (* for indexed inductives, in is needed before return to bring the index in scope *) (match v1 in (Vec _ n1) return reT n1 n2 with | vnil _ => match v2 in (Vec _ n2) return reT 0 n2 with | vnil _ => fun _ => True | vcons _ _ _ _ => fun _ => False end | vcons _ n1 h1 tl1 => match v2 in (Vec _ n2) return reT (S n1) n2 with | vnil _ => fun _ => False | vcons _ n2 h2 tl2 => fun n_R => let n_R := n_R (* no sig *) in (C_R h1 h2) /\ (Vec_RR _ _ C_R n1 n2 n_R tl1 tl2) end end) n_R. (* non n_R argument *) Fixpoint Vec_RR2 (C1 C2 : Set) (C_R : C1 -> C2 -> Prop) (n1 n2 : nat) (v1 : Vec C1 n1) (v2: Vec C2 n2) {struct v1} : Prop:= let reT := fun _ _ => Prop in (* for indexed inductives, in is needed before return to bring the index in scope *) (match v1 in (Vec _ n1) return reT n1 n2 with | vnil _ => match v2 in (Vec _ n2) return reT 0 n2 with | vnil _ => True | vcons _ _ _ _ => False end | vcons _ n1 h1 tl1 => match v2 in (Vec _ n2) return reT (S n1) n2 with | vnil _ => False | vcons _ n2 h2 tl2 => (C_R h1 h2) /\ (Vec_RR2 _ _ C_R n1 n2 tl1 tl2) end end). Definition Vec_RR3 := fix ReflParam_matchR_Vec_RR0 (C C₂ : Set) (C_R : (fun H H0 : Set => H -> H0 -> Prop) C C₂) (H H0 : nat) (_ (* unused *): nat_RR H H0) (H2 : Vec C H) (H3 : Vec C₂ H0) {struct H2} : Prop := match H2 with | vnil _ => match H3 with | vnil _ => True | vcons _ _ _ _ => False end | vcons _ n x x0 => match H3 with | vnil _ => False | vcons _ n₂ x1 x2 => {n_R : nat_RR n n₂ & {_ : C_R x x1 & {_ : ReflParam_matchR_Vec_RR0 C C₂ C_R n n₂ n_R x0 x2 & True}}} end end. (* Print Nat.add. Print Coq_o_Init_o_Nat_o_add_R. *) Definition S_RR (n1 n2 : nat) (n_R : nat_RR n1 n2) : nat_RR (S n1) (S n2) := n_R. Definition O_RR : nat_RR O O := I. (* Parametricity Recursive Vec. *) (* nat_R changed to nat_RR, Set changed to Prop *) Inductive Vec_R (C₁ C₂ : Set) (C_R : C₁ -> C₂ -> Prop) : forall H H0 : nat, nat_RR H H0 -> Vec C₁ H -> Vec C₂ H0 -> Prop := | Vec_R_vnil_R : Vec_R C₁ C₂ C_R 0 0 O_RR (vnil C₁) (vnil C₂) | Vec_R_vcons_R : forall (n₁ n₂ : nat) (n_R : nat_RR n₁ n₂) (H : C₁) (H0 : C₂), C_R H H0 -> forall (H1 : Vec C₁ n₁) (H2 : Vec C₂ n₂), Vec_R C₁ C₂ C_R n₁ n₂ n_R H1 H2 -> Vec_R C₁ C₂ C_R (S n₁) (S n₂) (S_RR n₁ n₂ n_R) (vcons C₁ n₁ H H1) (vcons C₂ n₂ H0 H2). Definition fromNewV (C C₂ : Set) (C_R : C -> C₂ -> Prop) (n1 n2: nat) (nr : nat_RR n1 n2) v1 v2 (vr : Vec_RR3 _ _ C_R n1 n2 nr v1 v2) : Vec_R _ _ C_R n1 n2 nr v1 v2. induction v1;induction v2; simpl in *. - (* nr is arbitrary because it is an unused argument in Vec_RR *) Abort. Fixpoint Vec2_RR (C1 C2 : Set) (C_R : C1 -> C2 -> Prop) (n1 n2 : nat) (_ : nat_RR n1 n2) (v1 : Vec2 C1 n1) (v2: Vec2 C2 n2) {struct v1} : Prop := (match v1 with | vnil2 _ => match v2 with | vnil2 _ => True | vcons2 _ _ _ _ => False end | vcons2 _ n1 h1 tl1 => match v2 with | vnil2 _ => False | vcons2 _ n2 h2 tl2 => (C_R h1 h2) /\ (sig (fun nr => Vec2_RR _ _ C_R n1 n2 nr tl1 tl2)) end end). 
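(* Added sanity checks (a sketch, not part of the original development; the
   names nat_RR_refl and nat_RR_not_2_3 are new, illustrative only): the
   fixpoint-defined relation nat_RR used above relates every numeral to
   itself, and the witnesses O_RR and S_RR defined just above are exactly
   what the two cases of the induction need. *)
Lemma nat_RR_refl : forall n : nat, nat_RR n n.
Proof.
  induction n; simpl.
  - exact O_RR.
  - exact (S_RR _ _ IHn).
Qed.

(* Mismatched numerals compute to False, so the identity map is a refutation. *)
Example nat_RR_not_2_3 : nat_RR 2 3 -> False := fun f => f.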
Fixpoint add_RR (n1 n2 : nat) (n_R : nat_RR n1 n2) (m1 m2 : nat) (m_R : nat_RR m1 m2): nat_RR (n1 + m1) (n2 + m2) := let reT := fun n1 n2 => nat_RR n1 n2 -> nat_RR (n1 + m1) (n2 + m2) in (match n1 return reT n1 n2 with | 0 => match n2 return reT 0 n2 with | 0 => fun _ => m_R | S _ => fun n_R => False_rect _ n_R end | S p1 => match n2 return reT (S p1) n2 with | 0 => fun n_R => False_rect _ n_R | S p2 => fun n_R => S_RR _ _ (add_RR p1 p2 n_R m1 m2 m_R) end end) n_R. (* Print Vec_R_vcons_R. *) Definition vcons_RR {C₁ C₂ : Set} {C_R : C₁ -> C₂ -> Prop} (n₁ n₂ : nat) (n_R : nat_RR n₁ n₂) (H : C₁) (H0 : C₂) (c_R: C_R H H0) (H1 : Vec C₁ n₁) (H2 : Vec C₂ n₂) (v_R : Vec_RR C₁ C₂ C_R n₁ n₂ n_R H1 H2): Vec_RR C₁ C₂ C_R (S n₁) (S n₂) (S_RR n₁ n₂ n_R) (vcons C₁ n₁ H H1) (vcons C₂ n₂ H0 H2). Proof. simpl. split; assumption. Defined. Fixpoint vAppend_RR {C₁ C₂ : Set} {C_R : C₁ -> C₂ -> Prop} (n₁ n₂ : nat) (n_R : nat_RR n₁ n₂) (m₁ m₂ : nat) (m_R : nat_RR m₁ m₂) (vl₁ : Vec C₁ n₁) (vl₂ : Vec C₂ n₂) (vl_R : Vec_RR C₁ C₂ C_R n₁ n₂ n_R vl₁ vl₂) (vr₁ : Vec C₁ m₁) (vr₂ : Vec C₂ m₂) (vr_R : Vec_RR C₁ C₂ C_R m₁ m₂ m_R vr₁ vr₂) {struct vl₁ }: Vec_RR C₁ C₂ C_R (n₁ + m₁) (n₂ + m₂) (add_RR n₁ n₂ n_R m₁ m₂ m_R) (vAppend vl₁ vr₁) (vAppend vl₂ vr₂) := let reT := fun n₁ vl₁ n₂ vl₂ => forall n_R: nat_RR n₁ n₂, Vec_RR C₁ C₂ C_R n₁ n₂ n_R vl₁ vl₂ -> Vec_RR C₁ C₂ C_R (n₁ + m₁) (n₂ + m₂) (add_RR n₁ n₂ n_R m₁ m₂ m_R) (vAppend vl₁ vr₁) (vAppend vl₂ vr₂) in (match vl₁ in Vec _ n₁ return reT n₁ vl₁ n₂ vl₂ with | vnil _ => match vl₂ in (Vec _ n₂) return reT 0 (vnil _) n₂ vl₂ with | vnil _ => fun _ _ => vr_R | vcons _ _ _ _ => fun _ v_R => False_rect _ v_R end | vcons _ n₁ hl₁ tl₁ => match vl₂ in (Vec _ n₂) return reT (S n₁) (vcons _ n₁ hl₁ tl₁) n₂ vl₂ with | vnil _ => fun _ v_R => False_rect _ v_R | vcons _ _ hl₂ tl₂ => fun _ v_R => let hl_R := proj1 v_R in let tl_R := proj2 v_R in (vcons_RR _ _ _ _ _ hl_R _ _ (vAppend_RR _ _ _ _ _ _ _ _ tl_R _ _ vr_R)) end end) n_R vl_R. (* summary : need indices of: 1) the type of the discriminee. put a cast around discriminee in template-coq? 2) for each constructor, a way to get the indices of the return type (or just the return type) from the arguments. take a list of types of constructors, in order of the constructors. convert the PIs to Lams in the type and then apply the arguments and maybe beta reduce. Beta reduction is not necessary though. *) Definition vAppend2_RR (C₁ C₂ : Set) (C_R : C₁ -> C₂ -> Prop) (m₁ m₂ : nat) (m_R : nat_RR m₁ m₂) (cdef₁ : C₁) (cdef₂ : C₂) (cdef_R : C_R cdef₁ cdef₂) (vr₁ : Vec C₁ m₁) (vr₂ : Vec C₂ m₂) (vr : Vec_RR C₁ C₂ C_R m₁ m₂ m_R vr₁ vr₂): C_R (vAppend2 cdef₁ vr₁) (vAppend2 cdef₂ vr₂) := ( let reT n1 n2 v1 v2 (* indices and values. move the _Rs to the body, as Pi args *) := forall (nr: nat_RR n1 n2) (vapr: Vec_RR C₁ C₂ C_R n1 n2 nr v1 v2), C_R match v1 with | vnil _ => cdef₁ | vcons _ _ hl _ => hl end match v2 with | vnil _ => cdef₂ | vcons _ _ hl _ => hl end in (* the "as vap1" part cannot be inlined. 
"vAppend vr₁ vr₁" has type "Vec C₁ (m₁ + m₁)" while vap1 has type "Vec C₁ n1" *) match vAppend vr₁ vr₁ as vap1 in Vec _ n1 return reT n1 (m₂+m₂)(*prime of index of discriminee *) vap1 (vAppend vr₂ vr₂) (* prime of discriminee*) with | vnil _ => match vAppend vr₂ vr₂ as vap2 in Vec _ n2 return reT O (*index of this constr:vnil*) n2 (* from in *) (vnil _) vap2 with | vnil _ => fun (nr : nat_RR 0 0) (_ : Vec_RR C₁ C₂ C_R 0 0 nr (vnil C₁) (vnil C₂)) => cdef_R | vcons _ n2 hl2 v2 => fun (nr : nat_RR 0 (S n2)) (vr0 : Vec_RR C₁ C₂ C_R 0 (S n2) nr (vnil C₁) (vcons C₂ n2 hl2 v2)) => False_rect (*reT 0 (S n2) (vnil C₁) (vcons C₂ n2 hl2 v2) nr vr0 -- then strip the 2 pis*) _ vr0 (* always the last lambda *) end | vcons _ n1 hl tl => match vAppend vr₂ vr₂ as vap2 in Vec _ n2 return reT (S n1) (*index of this constr*) n2 (* from in *) (vcons _ _ hl tl) vap2 with | vnil _ => fun _ vr => False_rect _ vr | vcons _ _ hl _ => fun _ vr => let hl_R := proj1 vr in hl_R end end (add_RR m₁ m₂ m_R m₁ m₂ m_R) (* below is the type of the discriminee. Because its type changes during the matches, we need to convoy it. Thus, in the branches, we need to lambda bind it. To lambda bind, we need its type, which is [D] d d', where D is the type of the discrimee d. (Here D is vAppend ..). Thus, we need D, the type of the discriminee, which is not stored in the kernel, but added by the reifier. Also, because of dependent types, [D] may depend on the proofs that the indices are related. Thus, we need to convoy them too. *) (vAppend_RR _ _ _ _ _ _ _ _ vr _ _ vr) ). Inductive IHaveUndecidalbeEq : Set := injFun : (nat->nat) -> IHaveUndecidalbeEq. (* Parametricity Recursive IHaveUndecidalbeEq. Print IHaveUndecidalbeEq_R. *) Inductive IHaveUndecidalbeEq_R : IHaveUndecidalbeEq -> IHaveUndecidalbeEq -> Prop := injFun_R : forall f1 f2 : nat -> nat, (forall n1 n2 : nat, n1 = n2 -> (f1 n1) = (f2 n2)) -> IHaveUndecidalbeEq_R (injFun f1) (injFun f2). (* even after assuming function extensionality, is this provable? *) Lemma UIP (f:IHaveUndecidalbeEq) (p1 p2 : IHaveUndecidalbeEq_R f f) : p1=p2. Proof using. Fail induction p1. Abort. Inductive IHaveUndecidalbeEq_R2 : IHaveUndecidalbeEq -> IHaveUndecidalbeEq -> Prop := injFun_R2 : forall f1 f2 : nat -> nat, (f1= f2) -> IHaveUndecidalbeEq_R2 (injFun f1) (injFun f2). Inductive IHaveUndecidalbeEq_R3 (f: IHaveUndecidalbeEq): IHaveUndecidalbeEq -> Prop := injFun_R3 : IHaveUndecidalbeEq_R3 f f. Require Import Contractible. Definition iso23 f1 f2 (p: IHaveUndecidalbeEq_R2 f1 f2) : IHaveUndecidalbeEq_R3 f1 f2. Proof using. destruct p. induction H. apply injFun_R3. Defined. Definition iso32 f1 f2 (p: IHaveUndecidalbeEq_R3 f1 f2) : IHaveUndecidalbeEq_R2 f1 f2. Proof using. destruct p. destruct f1. apply injFun_R2. reflexivity. Defined. Lemma iso232 f1 f2: forall a : IHaveUndecidalbeEq_R3 f1 f2, iso23 f1 f2 (iso32 f1 f2 a) = a. Proof using. intros ?. unfold iso32, iso23. simpl. destruct a. destruct f1. reflexivity. Qed. Lemma preserveContractible23 f (c1 : Contractible (IHaveUndecidalbeEq_R2 f f)): Contractible (IHaveUndecidalbeEq_R3 f f). Proof using. revert c1. apply UP_iso with (AtoB := iso23 f f) (BtoA := iso32 f f). apply iso232. Qed. Section Iso12Feq. Hypothesis feqNatNat : forall (f g : nat -> nat), (forall n, f n = g n) -> f=g. Lemma feqNatNat2 : forall (f g : nat -> nat), (forall n1 n2, n1=n2 -> f n1 = g n2) -> f=g. Proof. intros. apply feqNatNat. intros n. specialize (H n n eq_refl). assumption. Qed. Definition iso12 f1 f2 (p: IHaveUndecidalbeEq_R f1 f2) : IHaveUndecidalbeEq_R2 f1 f2. Proof. 
destruct p. apply feqNatNat2 in H. apply injFun_R2. assumption. Defined. Definition iso21 f1 f2 (p: IHaveUndecidalbeEq_R2 f1 f2) : IHaveUndecidalbeEq_R f1 f2. Proof using. destruct p. apply injFun_R. intros. subst. reflexivity. Defined. Lemma iso121 f1 f2: forall a : IHaveUndecidalbeEq_R2 f1 f2, iso12 f1 f2 (iso21 f1 f2 a) = a. Proof using. intros ?. unfold iso21, iso12. simpl. destruct a. simpl. (* in Hott, this would be compute because function extensionality is not an axiom. If Contractible (IHaveUndecidalbeEq_R2 f1 f2) is not provable in HoTT, a more powerful theory, it is not provable in Coq. *) Fail induction e. (* destruct f1. reflexivity. *) Abort. End Iso12Feq. Inductive tree (A : Set) : Set := | leaf : tree A | node : tlist A -> tree A with tlist (A:Set) : Set := | tnil : tlist A | tcons : tree A -> tlist A -> tlist A. Module test. Inductive slist (A : Set) : Set := snil : slist A | scons : A -> slist A -> slist A. Inductive tree (A : Set) : Set := | leaf : tree A | node : slist (tree A) -> tree A. End test. Module test2. Inductive slist (A : Set) : Set := | snil : slist A | scons : (A -> A) -> slist A -> slist A. (* Inductive tree (A : Set) : Set := | leaf : tree A | node : slist (tree A) -> tree A. *) Inductive tree (A : Set) : Set := | leaf : tree A | node : forall n, Vec (tree A) n -> tree A. End test2. Fixpoint zero (T:Type) (e:T=T) := let x := zero T e in 0. (* Fixpoint leee (n m :nat) (e: n + 1 =m ) (p:le n (n+1)) {struct p}: nat := (fix leee (n m : nat) (e : n + 1 = m) (p : n <= n + 1) {struct p} : nat := (fun p0 : n <= m => ?Goal@{p:=p0}) (eq_ind (n + 1) (fun n0 : nat => n <= n0) p m e)) rewrite e in p. Show Proof. destruct n. - exact 0. - specialize (leee n (n+1) eq_refl). apply leee. Show Proof. inversion p. destruct p. rewrite <- e in p. inversion p. admit. subst. destruct p. destruct p. subst m. Lemma leee0 n p : leee n p =0. unfold leee. destruct p. *)
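(* Added sketch (not in the original file; the names list_RR_refl and
   A_R_refl are new, illustrative only): the pattern-matching relation
   list_RR defined earlier for the local list type is reflexive whenever the
   parameter relation is.  Only definitions already in scope are used; the
   cons case supplies the sigT witness that list_RR asks for. *)
Lemma list_RR_refl (A : Set) (A_R : A -> A -> Prop)
      (A_R_refl : forall a : A, A_R a a) :
  forall l : list A, list_RR A A A_R l l.
Proof.
  induction l as [| h tl IH]; simpl.
  - exact I.
  - exists (A_R_refl h). exact IH.
Qed.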
Require Export euclidean__axioms. Require Export euclidean__defs. Require Export euclidean__tactics. Require Export lemma__NCdistinct. Require Export lemma__NChelper. Require Export lemma__NCorder. Require Export lemma__collinearorder. Require Export lemma__parallelNC. Require Export logic. Definition lemma__parallelbetween : forall B H K L M, (euclidean__axioms.BetS H B K) -> ((euclidean__defs.Par M B H L) -> ((euclidean__axioms.Col L M K) -> (euclidean__axioms.BetS L M K))). Proof. intro B. intro H. intro K. intro L. intro M. intro H0. intro H1. intro H2. assert (* Cut *) (~(euclidean__defs.Meet M B H L)) as H3. - assert (exists U V u v X, (euclidean__axioms.neq M B) /\ ((euclidean__axioms.neq H L) /\ ((euclidean__axioms.Col M B U) /\ ((euclidean__axioms.Col M B V) /\ ((euclidean__axioms.neq U V) /\ ((euclidean__axioms.Col H L u) /\ ((euclidean__axioms.Col H L v) /\ ((euclidean__axioms.neq u v) /\ ((~(euclidean__defs.Meet M B H L)) /\ ((euclidean__axioms.BetS U X v) /\ (euclidean__axioms.BetS u X V))))))))))) as H3 by exact H1. assert (exists U V u v X, (euclidean__axioms.neq M B) /\ ((euclidean__axioms.neq H L) /\ ((euclidean__axioms.Col M B U) /\ ((euclidean__axioms.Col M B V) /\ ((euclidean__axioms.neq U V) /\ ((euclidean__axioms.Col H L u) /\ ((euclidean__axioms.Col H L v) /\ ((euclidean__axioms.neq u v) /\ ((~(euclidean__defs.Meet M B H L)) /\ ((euclidean__axioms.BetS U X v) /\ (euclidean__axioms.BetS u X V))))))))))) as __TmpHyp by exact H3. destruct __TmpHyp as [x H4]. destruct H4 as [x0 H5]. destruct H5 as [x1 H6]. destruct H6 as [x2 H7]. destruct H7 as [x3 H8]. destruct H8 as [H9 H10]. destruct H10 as [H11 H12]. destruct H12 as [H13 H14]. destruct H14 as [H15 H16]. destruct H16 as [H17 H18]. destruct H18 as [H19 H20]. destruct H20 as [H21 H22]. destruct H22 as [H23 H24]. destruct H24 as [H25 H26]. destruct H26 as [H27 H28]. exact H25. - assert (* Cut *) (euclidean__axioms.nCol M B H) as H4. -- assert (* Cut *) ((euclidean__axioms.nCol M B H) /\ ((euclidean__axioms.nCol M H L) /\ ((euclidean__axioms.nCol B H L) /\ (euclidean__axioms.nCol M B L)))) as H4. --- apply (@lemma__parallelNC.lemma__parallelNC M B H L H1). --- destruct H4 as [H5 H6]. destruct H6 as [H7 H8]. destruct H8 as [H9 H10]. exact H5. -- assert (* Cut *) (euclidean__axioms.nCol M H L) as H5. --- assert (* Cut *) ((euclidean__axioms.nCol M B H) /\ ((euclidean__axioms.nCol M H L) /\ ((euclidean__axioms.nCol B H L) /\ (euclidean__axioms.nCol M B L)))) as H5. ---- apply (@lemma__parallelNC.lemma__parallelNC M B H L H1). ---- destruct H5 as [H6 H7]. destruct H7 as [H8 H9]. destruct H9 as [H10 H11]. exact H8. --- assert (* Cut *) (euclidean__axioms.neq M B) as H6. ---- assert (* Cut *) ((euclidean__axioms.neq M B) /\ ((euclidean__axioms.neq B H) /\ ((euclidean__axioms.neq M H) /\ ((euclidean__axioms.neq B M) /\ ((euclidean__axioms.neq H B) /\ (euclidean__axioms.neq H M)))))) as H6. ----- apply (@lemma__NCdistinct.lemma__NCdistinct M B H H4). ----- destruct H6 as [H7 H8]. destruct H8 as [H9 H10]. destruct H10 as [H11 H12]. destruct H12 as [H13 H14]. destruct H14 as [H15 H16]. exact H7. ---- assert (* Cut *) (euclidean__axioms.neq H L) as H7. ----- assert (* Cut *) ((euclidean__axioms.neq M H) /\ ((euclidean__axioms.neq H L) /\ ((euclidean__axioms.neq M L) /\ ((euclidean__axioms.neq H M) /\ ((euclidean__axioms.neq L H) /\ (euclidean__axioms.neq L M)))))) as H7. ------ apply (@lemma__NCdistinct.lemma__NCdistinct M H L H5). ------ destruct H7 as [H8 H9]. destruct H9 as [H10 H11]. destruct H11 as [H12 H13]. 
destruct H13 as [H14 H15]. destruct H15 as [H16 H17]. exact H10. ----- assert (* Cut *) (euclidean__axioms.nCol M L H) as H8. ------ assert (* Cut *) ((euclidean__axioms.nCol H M L) /\ ((euclidean__axioms.nCol H L M) /\ ((euclidean__axioms.nCol L M H) /\ ((euclidean__axioms.nCol M L H) /\ (euclidean__axioms.nCol L H M))))) as H8. ------- apply (@lemma__NCorder.lemma__NCorder M H L H5). ------- destruct H8 as [H9 H10]. destruct H10 as [H11 H12]. destruct H12 as [H13 H14]. destruct H14 as [H15 H16]. exact H15. ------ assert (* Cut *) (euclidean__axioms.Col M L K) as H9. ------- assert (* Cut *) ((euclidean__axioms.Col M L K) /\ ((euclidean__axioms.Col M K L) /\ ((euclidean__axioms.Col K L M) /\ ((euclidean__axioms.Col L K M) /\ (euclidean__axioms.Col K M L))))) as H9. -------- apply (@lemma__collinearorder.lemma__collinearorder L M K H2). -------- destruct H9 as [H10 H11]. destruct H11 as [H12 H13]. destruct H13 as [H14 H15]. destruct H15 as [H16 H17]. exact H10. ------- assert (* Cut *) (euclidean__axioms.Col H B K) as H10. -------- right. right. right. right. left. exact H0. -------- assert (* Cut *) (M = M) as H11. --------- apply (@logic.eq__refl Point M). --------- assert (* Cut *) (L = L) as H12. ---------- apply (@logic.eq__refl Point L). ---------- assert (* Cut *) (B = B) as H13. ----------- apply (@logic.eq__refl Point B). ----------- assert (* Cut *) (H = H) as H14. ------------ apply (@logic.eq__refl Point H). ------------ assert (* Cut *) (~(M = K)) as H15. ------------- intro H15. assert (* Cut *) (euclidean__axioms.Col H B M) as H16. -------------- apply (@eq__ind__r euclidean__axioms.Point K (fun M0 => (euclidean__defs.Par M0 B H L) -> ((euclidean__axioms.Col L M0 K) -> ((~(euclidean__defs.Meet M0 B H L)) -> ((euclidean__axioms.nCol M0 B H) -> ((euclidean__axioms.nCol M0 H L) -> ((euclidean__axioms.neq M0 B) -> ((euclidean__axioms.nCol M0 L H) -> ((euclidean__axioms.Col M0 L K) -> ((M0 = M0) -> (euclidean__axioms.Col H B M0))))))))))) with (x := M). ---------------intro H16. intro H17. intro H18. intro H19. intro H20. intro H21. intro H22. intro H23. intro H24. exact H10. --------------- exact H15. --------------- exact H1. --------------- exact H2. --------------- exact H3. --------------- exact H4. --------------- exact H5. --------------- exact H6. --------------- exact H8. --------------- exact H9. --------------- exact H11. -------------- assert (* Cut *) (euclidean__axioms.Col M B H) as H17. --------------- assert (* Cut *) ((euclidean__axioms.Col B H M) /\ ((euclidean__axioms.Col B M H) /\ ((euclidean__axioms.Col M H B) /\ ((euclidean__axioms.Col H M B) /\ (euclidean__axioms.Col M B H))))) as H17. ---------------- apply (@lemma__collinearorder.lemma__collinearorder H B M H16). ---------------- destruct H17 as [H18 H19]. destruct H19 as [H20 H21]. destruct H21 as [H22 H23]. destruct H23 as [H24 H25]. exact H25. --------------- assert (* Cut *) (euclidean__axioms.Col H L H) as H18. ---------------- right. left. exact H14. ---------------- assert (* Cut *) (euclidean__defs.Meet M B H L) as H19. ----------------- exists H. split. ------------------ exact H6. ------------------ split. ------------------- exact H7. ------------------- split. -------------------- exact H17. -------------------- exact H18. ----------------- apply (@H3 H19). ------------- assert (* Cut *) (euclidean__axioms.nCol M L H) as H16. 
-------------- assert (* Cut *) ((euclidean__axioms.nCol H M L) /\ ((euclidean__axioms.nCol H L M) /\ ((euclidean__axioms.nCol L M H) /\ ((euclidean__axioms.nCol M L H) /\ (euclidean__axioms.nCol L H M))))) as H16. --------------- apply (@lemma__NCorder.lemma__NCorder M H L H5). --------------- destruct H16 as [H17 H18]. destruct H18 as [H19 H20]. destruct H20 as [H21 H22]. destruct H22 as [H23 H24]. exact H8. -------------- assert (* Cut *) (euclidean__axioms.Col M L M) as H17. --------------- right. left. exact H11. --------------- assert (* Cut *) (euclidean__axioms.nCol M K H) as H18. ---------------- apply (@euclidean__tactics.nCol__notCol M K H). -----------------apply (@euclidean__tactics.nCol__not__Col M K H). ------------------apply (@lemma__NChelper.lemma__NChelper M L H M K H16 H17 H9 H15). ---------------- assert (* Cut *) (euclidean__axioms.nCol H M K) as H19. ----------------- assert (* Cut *) ((euclidean__axioms.nCol K M H) /\ ((euclidean__axioms.nCol K H M) /\ ((euclidean__axioms.nCol H M K) /\ ((euclidean__axioms.nCol M H K) /\ (euclidean__axioms.nCol H K M))))) as H19. ------------------ apply (@lemma__NCorder.lemma__NCorder M K H H18). ------------------ destruct H19 as [H20 H21]. destruct H21 as [H22 H23]. destruct H23 as [H24 H25]. destruct H25 as [H26 H27]. exact H24. ----------------- assert ((L = M) \/ ((L = K) \/ ((M = K) \/ ((euclidean__axioms.BetS M L K) \/ ((euclidean__axioms.BetS L M K) \/ (euclidean__axioms.BetS L K M)))))) as H20 by exact H2. assert (* Cut *) (euclidean__axioms.BetS L M K) as H21. ------------------ assert ((L = M) \/ ((L = K) \/ ((M = K) \/ ((euclidean__axioms.BetS M L K) \/ ((euclidean__axioms.BetS L M K) \/ (euclidean__axioms.BetS L K M)))))) as H21 by exact H20. assert ((L = M) \/ ((L = K) \/ ((M = K) \/ ((euclidean__axioms.BetS M L K) \/ ((euclidean__axioms.BetS L M K) \/ (euclidean__axioms.BetS L K M)))))) as __TmpHyp by exact H21. destruct __TmpHyp as [H22|H22]. ------------------- assert (* Cut *) (~(~(euclidean__axioms.BetS L M K))) as H23. -------------------- intro H23. assert (* Cut *) (euclidean__axioms.Col M B M) as H24. --------------------- right. left. exact H11. --------------------- assert (* Cut *) (euclidean__axioms.Col H L L) as H25. ---------------------- right. right. left. exact H12. ---------------------- assert (* Cut *) (euclidean__axioms.Col H L M) as H26. ----------------------- apply (@eq__ind__r euclidean__axioms.Point M (fun L0 => (euclidean__defs.Par M B H L0) -> ((euclidean__axioms.Col L0 M K) -> ((~(euclidean__defs.Meet M B H L0)) -> ((euclidean__axioms.nCol M H L0) -> ((euclidean__axioms.neq H L0) -> ((euclidean__axioms.nCol M L0 H) -> ((euclidean__axioms.Col M L0 K) -> ((L0 = L0) -> ((euclidean__axioms.nCol M L0 H) -> ((euclidean__axioms.Col M L0 M) -> ((~(euclidean__axioms.BetS L0 M K)) -> ((euclidean__axioms.Col H L0 L0) -> (euclidean__axioms.Col H L0 M)))))))))))))) with (x := L). ------------------------intro H26. intro H27. intro H28. intro H29. intro H30. intro H31. intro H32. intro H33. intro H34. intro H35. intro H36. intro H37. exact H37. ------------------------ exact H22. ------------------------ exact H1. ------------------------ exact H2. ------------------------ exact H3. ------------------------ exact H5. ------------------------ exact H7. ------------------------ exact H8. ------------------------ exact H9. ------------------------ exact H12. ------------------------ exact H16. ------------------------ exact H17. ------------------------ exact H23. ------------------------ exact H25. 
----------------------- assert (* Cut *) (euclidean__defs.Meet M B H L) as H27. ------------------------ exists M. split. ------------------------- exact H6. ------------------------- split. -------------------------- exact H7. -------------------------- split. --------------------------- exact H24. --------------------------- exact H26. ------------------------ apply (@H3 H27). -------------------- apply (@euclidean__tactics.NNPP (euclidean__axioms.BetS L M K)). ---------------------intro H24. destruct H4 as [H25 H26]. destruct H5 as [H27 H28]. destruct H8 as [H29 H30]. destruct H16 as [H31 H32]. destruct H18 as [H33 H34]. destruct H19 as [H35 H36]. destruct H26 as [H37 H38]. destruct H28 as [H39 H40]. destruct H30 as [H41 H42]. destruct H32 as [H43 H44]. destruct H34 as [H45 H46]. destruct H36 as [H47 H48]. destruct H38 as [H49 H50]. destruct H40 as [H51 H52]. destruct H42 as [H53 H54]. destruct H44 as [H55 H56]. destruct H46 as [H57 H58]. destruct H48 as [H59 H60]. destruct H50 as [H61 H62]. destruct H52 as [H63 H64]. destruct H54 as [H65 H66]. destruct H56 as [H67 H68]. destruct H58 as [H69 H70]. destruct H60 as [H71 H72]. destruct H62 as [H73 H74]. destruct H64 as [H75 H76]. destruct H66 as [H77 H78]. destruct H68 as [H79 H80]. destruct H70 as [H81 H82]. destruct H72 as [H83 H84]. assert (* Cut *) (False) as H85. ---------------------- apply (@H23 H24). ---------------------- contradiction H85. ------------------- destruct H22 as [H23|H23]. -------------------- assert (* Cut *) (~(~(euclidean__axioms.BetS L M K))) as H24. --------------------- intro H24. assert (* Cut *) (euclidean__axioms.Col H B L) as H25. ---------------------- apply (@eq__ind__r euclidean__axioms.Point K (fun L0 => (euclidean__defs.Par M B H L0) -> ((euclidean__axioms.Col L0 M K) -> ((~(euclidean__defs.Meet M B H L0)) -> ((euclidean__axioms.nCol M H L0) -> ((euclidean__axioms.neq H L0) -> ((euclidean__axioms.nCol M L0 H) -> ((euclidean__axioms.Col M L0 K) -> ((L0 = L0) -> ((euclidean__axioms.nCol M L0 H) -> ((euclidean__axioms.Col M L0 M) -> ((~(euclidean__axioms.BetS L0 M K)) -> (euclidean__axioms.Col H B L0))))))))))))) with (x := L). -----------------------intro H25. intro H26. intro H27. intro H28. intro H29. intro H30. intro H31. intro H32. intro H33. intro H34. intro H35. exact H10. ----------------------- exact H23. ----------------------- exact H1. ----------------------- exact H2. ----------------------- exact H3. ----------------------- exact H5. ----------------------- exact H7. ----------------------- exact H8. ----------------------- exact H9. ----------------------- exact H12. ----------------------- exact H16. ----------------------- exact H17. ----------------------- exact H24. ---------------------- assert (* Cut *) (euclidean__axioms.Col H L B) as H26. ----------------------- assert (* Cut *) ((euclidean__axioms.Col B H L) /\ ((euclidean__axioms.Col B L H) /\ ((euclidean__axioms.Col L H B) /\ ((euclidean__axioms.Col H L B) /\ (euclidean__axioms.Col L B H))))) as H26. ------------------------ apply (@lemma__collinearorder.lemma__collinearorder H B L H25). ------------------------ destruct H26 as [H27 H28]. destruct H28 as [H29 H30]. destruct H30 as [H31 H32]. destruct H32 as [H33 H34]. exact H33. ----------------------- assert (* Cut *) (euclidean__axioms.Col M B B) as H27. ------------------------ right. right. left. exact H13. ------------------------ assert (* Cut *) (euclidean__defs.Meet M B H L) as H28. ------------------------- exists B. split. -------------------------- exact H6. 
-------------------------- split. --------------------------- exact H7. --------------------------- split. ---------------------------- exact H27. ---------------------------- exact H26. ------------------------- apply (@H3 H28). --------------------- apply (@euclidean__tactics.NNPP (euclidean__axioms.BetS L M K)). ----------------------intro H25. destruct H4 as [H26 H27]. destruct H5 as [H28 H29]. destruct H8 as [H30 H31]. destruct H16 as [H32 H33]. destruct H18 as [H34 H35]. destruct H19 as [H36 H37]. destruct H27 as [H38 H39]. destruct H29 as [H40 H41]. destruct H31 as [H42 H43]. destruct H33 as [H44 H45]. destruct H35 as [H46 H47]. destruct H37 as [H48 H49]. destruct H39 as [H50 H51]. destruct H41 as [H52 H53]. destruct H43 as [H54 H55]. destruct H45 as [H56 H57]. destruct H47 as [H58 H59]. destruct H49 as [H60 H61]. destruct H51 as [H62 H63]. destruct H53 as [H64 H65]. destruct H55 as [H66 H67]. destruct H57 as [H68 H69]. destruct H59 as [H70 H71]. destruct H61 as [H72 H73]. destruct H63 as [H74 H75]. destruct H65 as [H76 H77]. destruct H67 as [H78 H79]. destruct H69 as [H80 H81]. destruct H71 as [H82 H83]. destruct H73 as [H84 H85]. assert (* Cut *) (False) as H86. ----------------------- apply (@H24 H25). ----------------------- contradiction H86. -------------------- destruct H23 as [H24|H24]. --------------------- assert (* Cut *) (~(~(euclidean__axioms.BetS L M K))) as H25. ---------------------- intro H25. apply (@H15 H24). ---------------------- apply (@euclidean__tactics.NNPP (euclidean__axioms.BetS L M K)). -----------------------intro H26. destruct H4 as [H27 H28]. destruct H5 as [H29 H30]. destruct H8 as [H31 H32]. destruct H16 as [H33 H34]. destruct H18 as [H35 H36]. destruct H19 as [H37 H38]. destruct H28 as [H39 H40]. destruct H30 as [H41 H42]. destruct H32 as [H43 H44]. destruct H34 as [H45 H46]. destruct H36 as [H47 H48]. destruct H38 as [H49 H50]. destruct H40 as [H51 H52]. destruct H42 as [H53 H54]. destruct H44 as [H55 H56]. destruct H46 as [H57 H58]. destruct H48 as [H59 H60]. destruct H50 as [H61 H62]. destruct H52 as [H63 H64]. destruct H54 as [H65 H66]. destruct H56 as [H67 H68]. destruct H58 as [H69 H70]. destruct H60 as [H71 H72]. destruct H62 as [H73 H74]. destruct H64 as [H75 H76]. destruct H66 as [H77 H78]. destruct H68 as [H79 H80]. destruct H70 as [H81 H82]. destruct H72 as [H83 H84]. destruct H74 as [H85 H86]. assert (* Cut *) (False) as H87. ------------------------ apply (@H15 H24). ------------------------ assert (* Cut *) (False) as H88. ------------------------- apply (@H25 H26). ------------------------- contradiction H88. --------------------- destruct H24 as [H25|H25]. ---------------------- assert (* Cut *) (~(~(euclidean__axioms.BetS L M K))) as H26. ----------------------- intro H26. assert (* Cut *) (euclidean__axioms.nCol H K M) as H27. ------------------------ assert (* Cut *) ((euclidean__axioms.nCol M H K) /\ ((euclidean__axioms.nCol M K H) /\ ((euclidean__axioms.nCol K H M) /\ ((euclidean__axioms.nCol H K M) /\ (euclidean__axioms.nCol K M H))))) as H27. ------------------------- apply (@lemma__NCorder.lemma__NCorder H M K H19). ------------------------- destruct H27 as [H28 H29]. destruct H29 as [H30 H31]. destruct H31 as [H32 H33]. destruct H33 as [H34 H35]. exact H34. ------------------------ assert (* Cut *) (exists E, (euclidean__axioms.BetS H E L) /\ (euclidean__axioms.BetS M E B)) as H28. ------------------------- apply (@euclidean__axioms.postulate__Pasch__inner H M K B L H0 H25 H27). 
------------------------- destruct H28 as [E H29]. destruct H29 as [H30 H31]. assert (* Cut *) (euclidean__axioms.Col H E L) as H32. -------------------------- right. right. right. right. left. exact H30. -------------------------- assert (* Cut *) (euclidean__axioms.Col M E B) as H33. --------------------------- right. right. right. right. left. exact H31. --------------------------- assert (* Cut *) (euclidean__axioms.Col H L E) as H34. ---------------------------- assert (* Cut *) ((euclidean__axioms.Col E H L) /\ ((euclidean__axioms.Col E L H) /\ ((euclidean__axioms.Col L H E) /\ ((euclidean__axioms.Col H L E) /\ (euclidean__axioms.Col L E H))))) as H34. ----------------------------- apply (@lemma__collinearorder.lemma__collinearorder H E L H32). ----------------------------- destruct H34 as [H35 H36]. destruct H36 as [H37 H38]. destruct H38 as [H39 H40]. destruct H40 as [H41 H42]. exact H41. ---------------------------- assert (* Cut *) (euclidean__axioms.Col M B E) as H35. ----------------------------- assert (* Cut *) ((euclidean__axioms.Col E M B) /\ ((euclidean__axioms.Col E B M) /\ ((euclidean__axioms.Col B M E) /\ ((euclidean__axioms.Col M B E) /\ (euclidean__axioms.Col B E M))))) as H35. ------------------------------ apply (@lemma__collinearorder.lemma__collinearorder M E B H33). ------------------------------ destruct H35 as [H36 H37]. destruct H37 as [H38 H39]. destruct H39 as [H40 H41]. destruct H41 as [H42 H43]. exact H42. ----------------------------- assert (* Cut *) (euclidean__defs.Meet M B H L) as H36. ------------------------------ exists E. split. ------------------------------- exact H6. ------------------------------- split. -------------------------------- exact H7. -------------------------------- split. --------------------------------- exact H35. --------------------------------- exact H34. ------------------------------ apply (@H3 H36). ----------------------- apply (@euclidean__tactics.NNPP (euclidean__axioms.BetS L M K)). ------------------------intro H27. destruct H4 as [H28 H29]. destruct H5 as [H30 H31]. destruct H8 as [H32 H33]. destruct H16 as [H34 H35]. destruct H18 as [H36 H37]. destruct H19 as [H38 H39]. destruct H29 as [H40 H41]. destruct H31 as [H42 H43]. destruct H33 as [H44 H45]. destruct H35 as [H46 H47]. destruct H37 as [H48 H49]. destruct H39 as [H50 H51]. destruct H41 as [H52 H53]. destruct H43 as [H54 H55]. destruct H45 as [H56 H57]. destruct H47 as [H58 H59]. destruct H49 as [H60 H61]. destruct H51 as [H62 H63]. destruct H53 as [H64 H65]. destruct H55 as [H66 H67]. destruct H57 as [H68 H69]. destruct H59 as [H70 H71]. destruct H61 as [H72 H73]. destruct H63 as [H74 H75]. destruct H65 as [H76 H77]. destruct H67 as [H78 H79]. destruct H69 as [H80 H81]. destruct H71 as [H82 H83]. destruct H73 as [H84 H85]. destruct H75 as [H86 H87]. assert (* Cut *) (False) as H88. ------------------------- apply (@H26 H27). ------------------------- contradiction H88. ---------------------- destruct H25 as [H26|H26]. ----------------------- exact H26. ----------------------- assert (* Cut *) (~(~(euclidean__axioms.BetS L M K))) as H27. ------------------------ intro H27. assert (* Cut *) (euclidean__axioms.BetS M K L) as H28. ------------------------- apply (@euclidean__axioms.axiom__betweennesssymmetry L K M H26). ------------------------- assert (* Cut *) (exists E, (euclidean__axioms.BetS H E L) /\ (euclidean__axioms.BetS M B E)) as H29. -------------------------- apply (@euclidean__axioms.postulate__Pasch__outer H M K B L H0 H28 H16). 
-------------------------- destruct H29 as [E H30]. destruct H30 as [H31 H32]. assert (* Cut *) (euclidean__axioms.Col H E L) as H33. --------------------------- right. right. right. right. left. exact H31. --------------------------- assert (* Cut *) (euclidean__axioms.Col M B E) as H34. ---------------------------- right. right. right. right. left. exact H32. ---------------------------- assert (* Cut *) (euclidean__axioms.Col H L E) as H35. ----------------------------- assert (* Cut *) ((euclidean__axioms.Col E H L) /\ ((euclidean__axioms.Col E L H) /\ ((euclidean__axioms.Col L H E) /\ ((euclidean__axioms.Col H L E) /\ (euclidean__axioms.Col L E H))))) as H35. ------------------------------ apply (@lemma__collinearorder.lemma__collinearorder H E L H33). ------------------------------ destruct H35 as [H36 H37]. destruct H37 as [H38 H39]. destruct H39 as [H40 H41]. destruct H41 as [H42 H43]. exact H42. ----------------------------- assert (* Cut *) (euclidean__defs.Meet M B H L) as H36. ------------------------------ exists E. split. ------------------------------- exact H6. ------------------------------- split. -------------------------------- exact H7. -------------------------------- split. --------------------------------- exact H34. --------------------------------- exact H35. ------------------------------ apply (@H3 H36). ------------------------ apply (@euclidean__tactics.NNPP (euclidean__axioms.BetS L M K)). -------------------------intro H28. destruct H4 as [H29 H30]. destruct H5 as [H31 H32]. destruct H8 as [H33 H34]. destruct H16 as [H35 H36]. destruct H18 as [H37 H38]. destruct H19 as [H39 H40]. destruct H30 as [H41 H42]. destruct H32 as [H43 H44]. destruct H34 as [H45 H46]. destruct H36 as [H47 H48]. destruct H38 as [H49 H50]. destruct H40 as [H51 H52]. destruct H42 as [H53 H54]. destruct H44 as [H55 H56]. destruct H46 as [H57 H58]. destruct H48 as [H59 H60]. destruct H50 as [H61 H62]. destruct H52 as [H63 H64]. destruct H54 as [H65 H66]. destruct H56 as [H67 H68]. destruct H58 as [H69 H70]. destruct H60 as [H71 H72]. destruct H62 as [H73 H74]. destruct H64 as [H75 H76]. destruct H66 as [H77 H78]. destruct H68 as [H79 H80]. destruct H70 as [H81 H82]. destruct H72 as [H83 H84]. destruct H74 as [H85 H86]. destruct H76 as [H87 H88]. assert (* Cut *) (False) as H89. -------------------------- apply (@H27 H28). -------------------------- contradiction H89. ------------------ exact H21. Qed.
/- Copyright (c) 2022 Yaël Dillies. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies, Yury Kudryashov -/ import analysis.convex.strict import analysis.convex.topology import analysis.normed_space.ordered import analysis.normed_space.pointwise /-! # Strictly convex spaces This file defines strictly convex spaces. A normed space is strictly convex if all closed balls are strictly convex. This does **not** mean that the norm is strictly convex (in fact, it never is). ## Main definitions `strict_convex_space`: a typeclass saying that a given normed space over a normed linear ordered field (e.g., `ℝ` or `ℚ`) is strictly convex. The definition requires strict convexity of a closed ball of positive radius with center at the origin; strict convexity of any other closed ball follows from this assumption. ## Main results In a strictly convex space, we prove - `strict_convex_closed_ball`: a closed ball is strictly convex. - `combo_mem_ball_of_ne`, `open_segment_subset_ball_of_ne`, `norm_combo_lt_of_ne`: a nontrivial convex combination of two points in a closed ball belong to the corresponding open ball; - `norm_add_lt_of_not_same_ray`, `same_ray_iff_norm_add`, `dist_add_dist_eq_iff`: the triangle inequality `dist x y + dist y z ≤ dist x z` is a strict inequality unless `y` belongs to the segment `[x -[ℝ] z]`. - `isometry.affine_isometry_of_strict_convex_space`: an isometry of `normed_add_torsor`s for real normed spaces, strictly convex in the case of the codomain, is an affine isometry. We also provide several lemmas that can be used as alternative constructors for `strict_convex ℝ E`: - `strict_convex_space.of_strict_convex_closed_unit_ball`: if `closed_ball (0 : E) 1` is strictly convex, then `E` is a strictly convex space; - `strict_convex_space.of_norm_add`: if `∥x + y∥ = ∥x∥ + ∥y∥` implies `same_ray ℝ x y` for all `x y : E`, then `E` is a strictly convex space. ## Implementation notes While the definition is formulated for any normed linear ordered field, most of the lemmas are formulated only for the case `𝕜 = ℝ`. ## Tags convex, strictly convex -/ open set metric open_locale convex pointwise /-- A *strictly convex space* is a normed space where the closed balls are strictly convex. We only require balls of positive radius with center at the origin to be strictly convex in the definition, then prove that any closed ball is strictly convex in `strict_convex_closed_ball` below. See also `strict_convex_space.of_strict_convex_closed_unit_ball`. -/ class strict_convex_space (𝕜 E : Type*) [normed_linear_ordered_field 𝕜] [normed_group E] [normed_space 𝕜 E] : Prop := (strict_convex_closed_ball : ∀ r : ℝ, 0 < r → strict_convex 𝕜 (closed_ball (0 : E) r)) variables (𝕜 : Type*) {E : Type*} [normed_linear_ordered_field 𝕜] [normed_group E] [normed_space 𝕜 E] /-- A closed ball in a strictly convex space is strictly convex. -/ lemma strict_convex_closed_ball [strict_convex_space 𝕜 E] (x : E) (r : ℝ) : strict_convex 𝕜 (closed_ball x r) := begin cases le_or_lt r 0 with hr hr, { exact (subsingleton_closed_ball x hr).strict_convex }, rw ← vadd_closed_ball_zero, exact (strict_convex_space.strict_convex_closed_ball r hr).vadd _, end variables [normed_space ℝ E] /-- A real normed vector space is strictly convex provided that the unit ball is strictly convex. 
-/ lemma strict_convex_space.of_strict_convex_closed_unit_ball [linear_map.compatible_smul E E 𝕜 ℝ] (h : strict_convex 𝕜 (closed_ball (0 : E) 1)) : strict_convex_space 𝕜 E := ⟨λ r hr, by simpa only [smul_closed_unit_ball_of_nonneg hr.le] using h.smul r⟩ /-- If `∥x + y∥ = ∥x∥ + ∥y∥` implies that `x y : E` are in the same ray, then `E` is a strictly convex space. -/ lemma strict_convex_space.of_norm_add (h : ∀ x y : E, ∥x + y∥ = ∥x∥ + ∥y∥ → same_ray ℝ x y) : strict_convex_space ℝ E := begin refine strict_convex_space.of_strict_convex_closed_unit_ball ℝ (λ x hx y hy hne a b ha hb hab, _), have hx' := hx, have hy' := hy, rw [← closure_closed_ball, closure_eq_interior_union_frontier, frontier_closed_ball (0 : E) one_ne_zero] at hx hy, cases hx, { exact (convex_closed_ball _ _).combo_interior_self_mem_interior hx hy' ha hb.le hab }, cases hy, { exact (convex_closed_ball _ _).combo_self_interior_mem_interior hx' hy ha.le hb hab }, rw [interior_closed_ball (0 : E) one_ne_zero, mem_ball_zero_iff], have hx₁ : ∥x∥ = 1, from mem_sphere_zero_iff_norm.1 hx, have hy₁ : ∥y∥ = 1, from mem_sphere_zero_iff_norm.1 hy, have ha' : ∥a∥ = a, from real.norm_of_nonneg ha.le, have hb' : ∥b∥ = b, from real.norm_of_nonneg hb.le, calc ∥a • x + b • y∥ < ∥a • x∥ + ∥b • y∥ : (norm_add_le _ _).lt_of_ne (λ H, hne _) ... = 1 : by simpa only [norm_smul, hx₁, hy₁, mul_one, ha', hb'], simpa only [norm_smul, hx₁, hy₁, ha', hb', mul_one, smul_comm a, smul_right_inj ha.ne', smul_right_inj hb.ne'] using (h _ _ H).norm_smul_eq.symm end lemma strict_convex_space.of_norm_add_lt_aux {a b c d : ℝ} (ha : 0 < a) (hab : a + b = 1) (hc : 0 < c) (hd : 0 < d) (hcd : c + d = 1) (hca : c ≤ a) {x y : E} (hy : ∥y∥ ≤ 1) (hxy : ∥a • x + b • y∥ < 1) : ∥c • x + d • y∥ < 1 := begin have hbd : b ≤ d, { refine le_of_add_le_add_left (hab.trans_le _), rw ←hcd, exact add_le_add_right hca _ }, have h₁ : 0 < c / a := div_pos hc ha, have h₂ : 0 ≤ d - c / a * b, { rw [sub_nonneg, mul_comm_div, ←le_div_iff' hc], exact div_le_div hd.le hbd hc hca }, calc ∥c • x + d • y∥ = ∥(c / a) • (a • x + b • y) + (d - c / a * b) • y∥ : by rw [smul_add, ←mul_smul, ←mul_smul, div_mul_cancel _ ha.ne', sub_smul, add_add_sub_cancel] ... ≤ ∥(c / a) • (a • x + b • y)∥ + ∥(d - c / a * b) • y∥ : norm_add_le _ _ ... = c / a * ∥a • x + b • y∥ + (d - c / a * b) * ∥y∥ : by rw [norm_smul_of_nonneg h₁.le, norm_smul_of_nonneg h₂] ... < c / a * 1 + (d - c / a * b) * 1 : add_lt_add_of_lt_of_le (mul_lt_mul_of_pos_left hxy h₁) (mul_le_mul_of_nonneg_left hy h₂) ... = 1 : begin nth_rewrite 0 ←hab, rw [mul_add, div_mul_cancel _ ha.ne', mul_one, add_add_sub_cancel, hcd], end, end /-- Strict convexity is equivalent to `∥a • x + b • y∥ < 1` for all `x` and `y` of norm at most `1` and all strictly positive `a` and `b` such that `a + b = 1`. This shows that we only need to check it for fixed `a` and `b`. 
-/ lemma strict_convex_space.of_norm_add_lt {a b : ℝ} (ha : 0 < a) (hb : 0 < b) (hab : a + b = 1) (h : ∀ x y : E, ∥x∥ ≤ 1 → ∥y∥ ≤ 1 → x ≠ y → ∥a • x + b • y∥ < 1) : strict_convex_space ℝ E := begin refine strict_convex_space.of_strict_convex_closed_unit_ball _ (λ x hx y hy hxy c d hc hd hcd, _), rw [interior_closed_ball (0 : E) one_ne_zero, mem_ball_zero_iff], rw mem_closed_ball_zero_iff at hx hy, obtain hca | hac := le_total c a, { exact strict_convex_space.of_norm_add_lt_aux ha hab hc hd hcd hca hy (h _ _ hx hy hxy) }, rw add_comm at ⊢ hab hcd, refine strict_convex_space.of_norm_add_lt_aux hb hab hd hc hcd _ hx _, { refine le_of_add_le_add_right (hcd.trans_le _), rw ←hab, exact add_le_add_left hac _ }, { rw add_comm, exact h _ _ hx hy hxy } end variables [strict_convex_space ℝ E] {x y z : E} {a b r : ℝ} /-- If `x ≠ y` belong to the same closed ball, then a convex combination of `x` and `y` with positive coefficients belongs to the corresponding open ball. -/ lemma combo_mem_ball_of_ne (hx : x ∈ closed_ball z r) (hy : y ∈ closed_ball z r) (hne : x ≠ y) (ha : 0 < a) (hb : 0 < b) (hab : a + b = 1) : a • x + b • y ∈ ball z r := begin rcases eq_or_ne r 0 with rfl|hr, { rw [closed_ball_zero, mem_singleton_iff] at hx hy, exact (hne (hx.trans hy.symm)).elim }, { simp only [← interior_closed_ball _ hr] at hx hy ⊢, exact strict_convex_closed_ball ℝ z r hx hy hne ha hb hab } end /-- If `x ≠ y` belong to the same closed ball, then the open segment with endpoints `x` and `y` is included in the corresponding open ball. -/ lemma open_segment_subset_ball_of_ne (hx : x ∈ closed_ball z r) (hy : y ∈ closed_ball z r) (hne : x ≠ y) : open_segment ℝ x y ⊆ ball z r := (open_segment_subset_iff _).2 $ λ a b, combo_mem_ball_of_ne hx hy hne /-- If `x` and `y` are two distinct vectors of norm at most `r`, then a convex combination of `x` and `y` with positive coefficients has norm strictly less than `r`. -/ lemma norm_combo_lt_of_ne (hx : ∥x∥ ≤ r) (hy : ∥y∥ ≤ r) (hne : x ≠ y) (ha : 0 < a) (hb : 0 < b) (hab : a + b = 1) : ∥a • x + b • y∥ < r := begin simp only [← mem_ball_zero_iff, ← mem_closed_ball_zero_iff] at hx hy ⊢, exact combo_mem_ball_of_ne hx hy hne ha hb hab end /-- In a strictly convex space, if `x` and `y` are not in the same ray, then `∥x + y∥ < ∥x∥ + ∥y∥`. 
-/ lemma norm_add_lt_of_not_same_ray (h : ¬same_ray ℝ x y) : ∥x + y∥ < ∥x∥ + ∥y∥ := begin simp only [same_ray_iff_inv_norm_smul_eq, not_or_distrib, ← ne.def] at h, rcases h with ⟨hx, hy, hne⟩, rw ← norm_pos_iff at hx hy, have hxy : 0 < ∥x∥ + ∥y∥ := add_pos hx hy, have := combo_mem_ball_of_ne (inv_norm_smul_mem_closed_unit_ball x) (inv_norm_smul_mem_closed_unit_ball y) hne (div_pos hx hxy) (div_pos hy hxy) (by rw [← add_div, div_self hxy.ne']), rwa [mem_ball_zero_iff, div_eq_inv_mul, div_eq_inv_mul, mul_smul, mul_smul, smul_inv_smul₀ hx.ne', smul_inv_smul₀ hy.ne', ← smul_add, norm_smul, real.norm_of_nonneg (inv_pos.2 hxy).le, ← div_eq_inv_mul, div_lt_one hxy] at this end lemma lt_norm_sub_of_not_same_ray (h : ¬same_ray ℝ x y) : ∥x∥ - ∥y∥ < ∥x - y∥ := begin nth_rewrite 0 ←sub_add_cancel x y at ⊢ h, exact sub_lt_iff_lt_add.2 (norm_add_lt_of_not_same_ray $ λ H', h $ H'.add_left same_ray.rfl), end lemma abs_lt_norm_sub_of_not_same_ray (h : ¬same_ray ℝ x y) : |∥x∥ - ∥y∥| < ∥x - y∥ := begin refine abs_sub_lt_iff.2 ⟨lt_norm_sub_of_not_same_ray h, _⟩, rw norm_sub_rev, exact lt_norm_sub_of_not_same_ray (mt same_ray.symm h), end /-- In a strictly convex space, two vectors `x`, `y` are in the same ray if and only if the triangle inequality for `x` and `y` becomes an equality. -/ lemma same_ray_iff_norm_add : same_ray ℝ x y ↔ ∥x + y∥ = ∥x∥ + ∥y∥ := ⟨same_ray.norm_add, λ h, not_not.1 $ λ h', (norm_add_lt_of_not_same_ray h').ne h⟩ /-- In a strictly convex space, two vectors `x`, `y` are not in the same ray if and only if the triangle inequality for `x` and `y` is strict. -/ lemma not_same_ray_iff_norm_add_lt : ¬ same_ray ℝ x y ↔ ∥x + y∥ < ∥x∥ + ∥y∥ := same_ray_iff_norm_add.not.trans (norm_add_le _ _).lt_iff_ne.symm lemma same_ray_iff_norm_sub : same_ray ℝ x y ↔ ∥x - y∥ = |∥x∥ - ∥y∥| := ⟨same_ray.norm_sub, λ h, not_not.1 $ λ h', (abs_lt_norm_sub_of_not_same_ray h').ne' h⟩ lemma not_same_ray_iff_abs_lt_norm_sub : ¬ same_ray ℝ x y ↔ |∥x∥ - ∥y∥| < ∥x - y∥ := same_ray_iff_norm_sub.not.trans $ ne_comm.trans (abs_norm_sub_norm_le _ _).lt_iff_ne.symm /-- In a strictly convex space, the triangle inequality turns into an equality if and only if the middle point belongs to the segment joining two other points. 
-/ lemma dist_add_dist_eq_iff : dist x y + dist y z = dist x z ↔ y ∈ [x -[ℝ] z] := by simp only [mem_segment_iff_same_ray, same_ray_iff_norm_add, dist_eq_norm', sub_add_sub_cancel', eq_comm] lemma norm_midpoint_lt_iff (h : ∥x∥ = ∥y∥) : ∥(1/2 : ℝ) • (x + y)∥ < ∥x∥ ↔ x ≠ y := by rw [norm_smul, real.norm_of_nonneg (one_div_nonneg.2 zero_le_two), ←inv_eq_one_div, ←div_eq_inv_mul, div_lt_iff (@zero_lt_two ℝ _ _), mul_two, ←not_same_ray_iff_of_norm_eq h, not_same_ray_iff_norm_add_lt, h] variables {F : Type*} [normed_group F] [normed_space ℝ F] variables {PF : Type*} {PE : Type*} [metric_space PF] [metric_space PE] variables [normed_add_torsor F PF] [normed_add_torsor E PE] include E lemma eq_line_map_of_dist_eq_mul_of_dist_eq_mul {x y z : PE} (hxy : dist x y = r * dist x z) (hyz : dist y z = (1 - r) * dist x z) : y = affine_map.line_map x z r := begin have : y -ᵥ x ∈ [(0 : E) -[ℝ] z -ᵥ x], { rw [← dist_add_dist_eq_iff, dist_zero_left, dist_vsub_cancel_right, ← dist_eq_norm_vsub', ← dist_eq_norm_vsub', hxy, hyz, ← add_mul, add_sub_cancel'_right, one_mul] }, rcases eq_or_ne x z with rfl|hne, { obtain rfl : y = x, by simpa, simp }, { rw [← dist_ne_zero] at hne, rcases this with ⟨a, b, ha, hb, hab, H⟩, rw [smul_zero, zero_add] at H, have H' := congr_arg norm H, rw [norm_smul, real.norm_of_nonneg hb, ← dist_eq_norm_vsub', ← dist_eq_norm_vsub', hxy, mul_left_inj' hne] at H', rw [affine_map.line_map_apply, ← H', H, vsub_vadd] }, end lemma eq_midpoint_of_dist_eq_half {x y z : PE} (hx : dist x y = dist x z / 2) (hy : dist y z = dist x z / 2) : y = midpoint ℝ x z := begin apply eq_line_map_of_dist_eq_mul_of_dist_eq_mul, { rwa [inv_of_eq_inv, ← div_eq_inv_mul] }, { rwa [inv_of_eq_inv, ← one_div, sub_half, one_div, ← div_eq_inv_mul] } end namespace isometry include F /-- An isometry of `normed_add_torsor`s for real normed spaces, strictly convex in the case of the codomain, is an affine isometry. Unlike Mazur-Ulam, this does not require the isometry to be surjective. -/ noncomputable def affine_isometry_of_strict_convex_space {f : PF → PE} (hi : isometry f) : PF →ᵃⁱ[ℝ] PE := { norm_map := λ x, by simp [affine_map.of_map_midpoint, ←dist_eq_norm_vsub E, hi.dist_eq], ..affine_map.of_map_midpoint f (λ x y, begin apply eq_midpoint_of_dist_eq_half, { rw [hi.dist_eq, hi.dist_eq, dist_left_midpoint, real.norm_of_nonneg zero_le_two, div_eq_inv_mul] }, { rw [hi.dist_eq, hi.dist_eq, dist_midpoint_right, real.norm_of_nonneg zero_le_two, div_eq_inv_mul] }, end) hi.continuous } @[simp] lemma coe_affine_isometry_of_strict_convex_space {f : PF → PE} (hi : isometry f) : ⇑(hi.affine_isometry_of_strict_convex_space) = f := rfl @[simp] lemma affine_isometry_of_strict_convex_space_apply {f : PF → PE} (hi : isometry f) (p : PF) : hi.affine_isometry_of_strict_convex_space p = f p := rfl end isometry
#ifndef GSL_WRAPPERS_H #define GSL_WRAPPERS_H // #include <gsl/gsl_check_range.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_permutation.h> #include <gsl/gsl_linalg.h> #include <gsl/gsl_eigen.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_multimin.h> #include <gsl/gsl_math.h> #include <gsl/gsl_blas.h> #include <math.h> #include <assert.h> #include <time.h> #include <sys/stat.h> #include <sys/types.h> #define outlog(format, args...) \ fprintf(stderr, format, args); \ fprintf(stderr, "\n"); double safe_log(double); double log_sum(double, double); static inline double vget(const gsl_vector* v, int i) { return(gsl_vector_get(v, i)); }; static inline void vset(gsl_vector* v, int i, double x) { gsl_vector_set(v, i, x); }; // Increment a vector element by a double. void vinc(gsl_vector*, int, double); static inline double mget(const gsl_matrix* m, int i, int j) { return(gsl_matrix_get(m, i, j)); }; static inline void mset(gsl_matrix* m, int i, int j, double x) { gsl_matrix_set(m, i, j, x); }; void msetcol(gsl_matrix* m, int r, const gsl_vector* val); // Increment a matrix element by a double. void minc(gsl_matrix*, int, int, double); void msetrow(gsl_matrix*, int, const gsl_vector*); void col_sum(gsl_matrix*, gsl_vector*); void vct_printf(const gsl_vector* v); void mtx_printf(const gsl_matrix* m); void vct_fscanf(const char*, gsl_vector* v); void mtx_fscanf(const char*, gsl_matrix* m); void vct_fprintf(const char* filename, gsl_vector* v); void mtx_fprintf(const char* filename, const gsl_matrix* m); double log_det(gsl_matrix*); void matrix_inverse(gsl_matrix*, gsl_matrix*); void sym_eigen(gsl_matrix*, gsl_vector*, gsl_matrix*); double sum(const gsl_vector* v); double norm(gsl_vector * v); void vct_log(gsl_vector* v); void vct_exp(gsl_vector* x); void choose_k_from_n(int k, int n, int* result); void log_normalize(gsl_vector* x); void normalize(gsl_vector* x); void optimize(int dim, gsl_vector* x, void* params, void (*fdf)(const gsl_vector*, void*, double*, gsl_vector*), void (*df)(const gsl_vector*, void*, gsl_vector*), double (*f)(const gsl_vector*, void*)); void optimize_fdf(int dim, gsl_vector* x, void* params, void (*fdf)(const gsl_vector*, void*, double*, gsl_vector*), void (*df)(const gsl_vector*, void*, gsl_vector*), double (*f)(const gsl_vector*, void*), double* f_val, double* conv_val, int* niter); void log_write(FILE* f, char* string); int directory_exist(const char *dname); void make_directory(char* name); gsl_rng* new_random_number_generator(); #endif
import algebra.order.field.basic tactic.linarith /-! # IMO 2011 A6 (P3) -/ namespace IMOSL namespace IMO2011A6 /-- Final solution -/ theorem final_solution {R : Type*} [linear_ordered_comm_ring R] {f : R → R} (h : ∀ x y : R, f (x + y) ≤ y * f x + f (f x)) : ∀ x : R, x ≤ 0 → f x = 0 := begin ---- A sequence of results have h0 : ∀ t x : R, f (f t) ≤ (f t - x) * f x + f (f x) := λ t x, by replace h := h x (f t - x); rwa add_sub_cancel'_right at h, replace h0 : ∀ x : R, x * f x ≤ 0 := λ x, by linarith [h0 (2 * f x) x, h0 x (2 * f x)], have h1 : ∀ x : R, f x ≤ f (f x) := λ x, by replace h := h x 0; rwa [add_zero, zero_mul, zero_add] at h, have h2 : ∀ x : R, f x ≤ 0 := λ x, by contrapose! h1; exact ⟨x, lt_of_le_of_lt (nonpos_of_mul_nonpos_right (h0 (f x)) h1) h1⟩, replace h0 : ∀ x : R, x < 0 → f x = 0 := λ x hx, le_antisymm (h2 x) (nonneg_of_mul_nonpos_right (h0 x) hx), ---- Finishing intros x h, rw le_iff_lt_or_eq at h; rcases h with h | rfl, exact h0 x h, refine le_antisymm (h2 0) _, rw ← h0 (-1) neg_one_lt_zero; exact h1 (-1) end end IMO2011A6 end IMOSL
State Before: R : Type u_2 A : Type ?u.1320972 K : Type ?u.1320975 inst✝⁵ : CommRing R inst✝⁴ : CommRing A inst✝³ : Field K inst✝² : IsDomain A inst✝¹ : IsDomain R inst✝ : IsDedekindDomain R ι : Type u_1 s : Finset ι P : ι → Ideal R e : ι → ℕ prime : ∀ (i : ι), i ∈ s → Prime (P i) coprime : ∀ (i : ι), i ∈ s → ∀ (j : ι), j ∈ s → i ≠ j → P i ≠ P j x : { x // x ∈ s } → R ⊢ ∃ y, ∀ (i : ι) (hi : i ∈ s), y - x { val := i, property := hi } ∈ P i ^ e i State After: case intro R : Type u_2 A : Type ?u.1320972 K : Type ?u.1320975 inst✝⁵ : CommRing R inst✝⁴ : CommRing A inst✝³ : Field K inst✝² : IsDomain A inst✝¹ : IsDomain R inst✝ : IsDedekindDomain R ι : Type u_1 s : Finset ι P : ι → Ideal R e : ι → ℕ prime : ∀ (i : ι), i ∈ s → Prime (P i) coprime : ∀ (i : ι), i ∈ s → ∀ (j : ι), j ∈ s → i ≠ j → P i ≠ P j x : { x // x ∈ s } → R y : R hy : ∀ (i : ι) (hi : i ∈ s), ↑(Ideal.Quotient.mk (P i ^ e i)) y = ↑(Ideal.Quotient.mk (P ↑{ val := i, property := hi } ^ e ↑{ val := i, property := hi })) (x { val := i, property := hi }) ⊢ ∃ y, ∀ (i : ι) (hi : i ∈ s), y - x { val := i, property := hi } ∈ P i ^ e i Tactic: obtain ⟨y, hy⟩ := IsDedekindDomain.exists_representative_mod_finset P e prime coprime fun i => Ideal.Quotient.mk _ (x i) State Before: case intro R : Type u_2 A : Type ?u.1320972 K : Type ?u.1320975 inst✝⁵ : CommRing R inst✝⁴ : CommRing A inst✝³ : Field K inst✝² : IsDomain A inst✝¹ : IsDomain R inst✝ : IsDedekindDomain R ι : Type u_1 s : Finset ι P : ι → Ideal R e : ι → ℕ prime : ∀ (i : ι), i ∈ s → Prime (P i) coprime : ∀ (i : ι), i ∈ s → ∀ (j : ι), j ∈ s → i ≠ j → P i ≠ P j x : { x // x ∈ s } → R y : R hy : ∀ (i : ι) (hi : i ∈ s), ↑(Ideal.Quotient.mk (P i ^ e i)) y = ↑(Ideal.Quotient.mk (P ↑{ val := i, property := hi } ^ e ↑{ val := i, property := hi })) (x { val := i, property := hi }) ⊢ ∃ y, ∀ (i : ι) (hi : i ∈ s), y - x { val := i, property := hi } ∈ P i ^ e i State After: no goals Tactic: exact ⟨y, fun i hi => Ideal.Quotient.eq.mp (hy i hi)⟩
[STATEMENT] lemma ocondition_K_false [simp]: "ocondition (\<lambda>_. False) T F = F" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ocondition (\<lambda>_. False) T F = F [PROOF STEP] by (simp add: ocondition_def)
postulate P : (A : Set) → A → Set X : Set x : X P' : (A : Set) → _ R : Set R = P' _ x P' = P
datos <- c(32, 42, 53, 21, 23)
[STATEMENT] lemma coeff_gt_degree_eq_0: assumes "Poly_Mapping.lookup m i > MPoly_Type.degree p i" shows "MPoly_Type.coeff p m = 0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. MPoly_Type.coeff p m = (0::'a) [PROOF STEP] using assms degree_geI leD [PROOF STATE] proof (prove) using this: MPoly_Type.degree p i < lookup m i MPoly_Type.coeff ?p ?m \<noteq> (0::?'a) \<Longrightarrow> lookup ?m ?i \<le> MPoly_Type.degree ?p ?i ?y \<le> ?x \<Longrightarrow> \<not> ?x < ?y goal (1 subgoal): 1. MPoly_Type.coeff p m = (0::'a) [PROOF STEP] by blast
module Yaffle.Main import Parser.Source import Core.Binary import Core.Context import Core.Directory import Core.Env import Core.FC import Core.InitPrimitives import Core.Metadata import Core.Normalise import Core.Options import Core.TT import Core.UnifyState import Libraries.Utils.Path import TTImp.Parser import TTImp.ProcessDecls import TTImp.TTImp import Yaffle.REPL import Data.List import Data.So import Data.String import System %default covering usage : String usage = "Usage: yaffle <input file> [--timing]" processArgs : List String -> Core Bool processArgs [] = pure False processArgs ["--timing"] = pure True processArgs _ = coreLift $ do ignore $ putStrLn usage exitWith (ExitFailure 1) HasNames () where full _ _ = pure () resolved _ _ = pure () export yaffleMain : String -> List String -> Core () yaffleMain sourceFileName args = do defs <- initDefs c <- newRef Ctxt defs t <- processArgs args modIdent <- ctxtPathToNS sourceFileName m <- newRef MD (initMetadata (PhysicalIdrSrc modIdent)) u <- newRef UST initUState setLogTimings t addPrimitives case extension sourceFileName of Just "ttc" => do coreLift_ $ putStrLn "Processing as TTC" ignore $ readFromTTC {extra = ()} True emptyFC True sourceFileName (nsAsModuleIdent emptyNS) emptyNS coreLift_ $ putStrLn "Read TTC" _ => do coreLift_ $ putStrLn "Processing as TTImp" ok <- processTTImpFile sourceFileName when ok $ do makeBuildDirectory modIdent ttcFileName <- getTTCFileName sourceFileName "ttc" writeToTTC () sourceFileName ttcFileName coreLift_ $ putStrLn "Written TTC" ust <- get UST repl {c} {u} ymain : IO () ymain = do (_ :: fname :: rest) <- getArgs | _ => do putStrLn usage exitWith (ExitFailure 1) coreRun (yaffleMain fname rest) (\err : Error => putStrLn ("Uncaught error: " ++ show err)) (\res => pure ())