Dataset: AI4M (a single "text" column of strings, 0 to 3.34M characters)
State Before:
  α : Type u_1
  a : α
  m n : Nat
  l : List α
  ⊢ get? (set l m a) n = if m = n then (fun x => a) <$> get? l n else get? l n
State After: no goals
Tactic: by_cases m = n <;> simp [*, get?_set_eq, get?_set_ne]
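Assembled into a standalone declaration, the goal/tactic pair above corresponds to a Lean 4 lemma of roughly this shape (a sketch: get?_set_eq and get?_set_ne are assumed to be the Std/Mathlib List simp lemmas the tactic refers to, and the name get?_set' is invented here):

open List in
theorem get?_set' {α : Type _} (a : α) (m n : Nat) (l : List α) :
    get? (set l m a) n = if m = n then (fun x => a) <$> get? l n else get? l n := by
  -- exactly the tactic recorded in the sample
  by_cases m = n <;> simp [*, get?_set_eq, get?_set_ne]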
module Main

palindrome : String -> Bool
palindrome str = let strLower = toLower str in
                     strLower == reverse strLower

main : IO ()
main = repl "Enter a string: " show_palindrome
  where
    show_palindrome : String -> String
    show_palindrome x = show (palindrome x) ++ "\n"
import Data.Primitives.Views
import System

%default total

data InfIO : Type where
  Do : IO a -> (a -> Inf InfIO) -> InfIO

(>>=) : IO a -> (a -> Inf InfIO) -> InfIO
(>>=) = Do

loopPrint : String -> InfIO
loopPrint msg = do putStrLn msg
                   loopPrint msg

data Fuel = Dry | More (Lazy Fuel)

run : Fuel -> InfIO -> IO ()
run (More fuel) (Do action cont) = do res <- action
                                      run fuel (cont res)
run Dry p = putStrLn "Out of fuel"
module Node.FS.Stream

import Node.FS

data ReadStream : Type where [external]

%foreign "node:lambda: (fs,path)=>fs.createReadStream(path)"
ffi_createReadStream : FS -> String -> PrimIO ReadStream

export
createReadStream : HasIO io => { auto fs : FS } -> String -> io ReadStream
createReadStream path = primIO $ ffi_createReadStream fs path

%foreign "node:lambda: (ty, req, data) => { req.on('data', a => data(a)()) }"
ffi_onData : ReadStream -> (a -> PrimIO ()) -> PrimIO ()

export
(.onData) : HasIO io => ReadStream -> (a -> IO ()) -> io ()
(.onData) req cb = primIO $ ffi_onData req $ \a => toPrim $ cb a

%foreign "node:lambda: (req, end) => { req.on('end', () => end()()) }"
ffi_onEnd : ReadStream -> (() -> PrimIO ()) -> PrimIO ()

export
(.onEnd) : HasIO io => ReadStream -> (() -> IO ()) -> io ()
(.onEnd) req cb = primIO $ ffi_onEnd req $ \_ => toPrim $ cb ()

%foreign "node:lambda: (ty, req, error) => { req.on('error', e => error(e)()) }"
ffi_onError : ReadStream -> (e -> PrimIO ()) -> PrimIO ()

export
(.onError) : HasIO io => ReadStream -> (e -> IO ()) -> io ()
(.onError) req cb = primIO $ ffi_onError req $ \e => toPrim $ cb e
/* * KENLMBatch.cpp * * Created on: 4 Nov 2015 * Author: hieu */ #include <boost/foreach.hpp> #include <sstream> #include <vector> #ifdef _linux #include <pthread.h> #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <errno.h> #include "KENLMBatch.h" #include "../Phrase.h" #include "../Scores.h" #include "../System.h" #include "../PhraseBased/Hypothesis.h" #include "../PhraseBased/Manager.h" #include "../PhraseBased/TargetPhraseImpl.h" #include "lm/state.hh" #include "lm/left.hh" #include "util/exception.hh" #include "util/tokenize_piece.hh" #include "util/string_stream.hh" #include "../legacy/FactorCollection.h" using namespace std; namespace Moses2 { struct KenLMState: public FFState { lm::ngram::State state; virtual size_t hash() const { size_t ret = hash_value(state); return ret; } virtual bool operator==(const FFState& o) const { const KenLMState &other = static_cast<const KenLMState &>(o); bool ret = state == other.state; return ret; } virtual std::string ToString() const { stringstream ss; for (size_t i = 0; i < state.Length(); ++i) { ss << state.words[i] << " "; } return ss.str(); } }; ///////////////////////////////////////////////////////////////// class MappingBuilder: public lm::EnumerateVocab { public: MappingBuilder(FactorCollection &factorCollection, System &system, std::vector<lm::WordIndex> &mapping) : m_factorCollection(factorCollection), m_system(system), m_mapping(mapping) { } void Add(lm::WordIndex index, const StringPiece &str) { std::size_t factorId = m_factorCollection.AddFactor(str, m_system, false)->GetId(); if (m_mapping.size() <= factorId) { // 0 is <unk> :-) m_mapping.resize(factorId + 1); } m_mapping[factorId] = index; } private: FactorCollection &m_factorCollection; std::vector<lm::WordIndex> &m_mapping; System &m_system; }; ///////////////////////////////////////////////////////////////// KENLMBatch::KENLMBatch(size_t startInd, const std::string &line) :StatefulFeatureFunction(startInd, line) ,m_numHypos(0) { cerr << "KENLMBatch::KENLMBatch" << endl; ReadParameters(); } KENLMBatch::~KENLMBatch() { // TODO Auto-generated destructor stub } void KENLMBatch::Load(System &system) { cerr << "KENLMBatch::Load" << endl; FactorCollection &fc = system.GetVocab(); m_bos = fc.AddFactor(BOS_, system, false); m_eos = fc.AddFactor(EOS_, system, false); lm::ngram::Config config; config.messages = NULL; FactorCollection &collection = system.GetVocab(); MappingBuilder builder(collection, system, m_lmIdLookup); config.enumerate_vocab = &builder; config.load_method = m_load_method; m_ngram.reset(new Model(m_path.c_str(), config)); } FFState* KENLMBatch::BlankState(MemPool &pool, const System &sys) const { KenLMState *ret = new (pool.Allocate<KenLMState>()) KenLMState(); return ret; } //! 
return the state associated with the empty hypothesis for a given sentence void KENLMBatch::EmptyHypothesisState(FFState &state, const ManagerBase &mgr, const InputType &input, const Hypothesis &hypo) const { KenLMState &stateCast = static_cast<KenLMState&>(state); stateCast.state = m_ngram->BeginSentenceState(); } void KENLMBatch::EvaluateInIsolation(MemPool &pool, const System &system, const Phrase<Moses2::Word> &source, const TargetPhraseImpl &targetPhrase, Scores &scores, SCORE &estimatedScore) const { // contains factors used by this LM float fullScore, nGramScore; size_t oovCount; CalcScore(targetPhrase, fullScore, nGramScore, oovCount); float estimateScore = fullScore - nGramScore; bool GetLMEnableOOVFeature = false; if (GetLMEnableOOVFeature) { float scoresVec[2], estimateScoresVec[2]; scoresVec[0] = nGramScore; scoresVec[1] = oovCount; scores.PlusEquals(system, *this, scoresVec); estimateScoresVec[0] = estimateScore; estimateScoresVec[1] = 0; SCORE weightedScore = Scores::CalcWeightedScore(system, *this, estimateScoresVec); estimatedScore += weightedScore; } else { scores.PlusEquals(system, *this, nGramScore); SCORE weightedScore = Scores::CalcWeightedScore(system, *this, estimateScore); estimatedScore += weightedScore; } } void KENLMBatch::EvaluateInIsolation(MemPool &pool, const System &system, const Phrase<SCFG::Word> &source, const TargetPhrase<SCFG::Word> &targetPhrase, Scores &scores, SCORE &estimatedScore) const { } void KENLMBatch::EvaluateWhenApplied(const ManagerBase &mgr, const Hypothesis &hypo, const FFState &prevState, Scores &scores, FFState &state) const { KenLMState &stateCast = static_cast<KenLMState&>(state); const System &system = mgr.system; const lm::ngram::State &in_state = static_cast<const KenLMState&>(prevState).state; if (!hypo.GetTargetPhrase().GetSize()) { stateCast.state = in_state; return; } const std::size_t begin = hypo.GetCurrTargetWordsRange().GetStartPos(); //[begin, end) in STL-like fashion. const std::size_t end = hypo.GetCurrTargetWordsRange().GetEndPos() + 1; const std::size_t adjust_end = std::min(end, begin + m_ngram->Order() - 1); std::size_t position = begin; Model::State aux_state; Model::State *state0 = &stateCast.state, *state1 = &aux_state; float score = m_ngram->Score(in_state, TranslateID(hypo.GetWord(position)), *state0); ++position; for (; position < adjust_end; ++position) { score += m_ngram->Score(*state0, TranslateID(hypo.GetWord(position)), *state1); std::swap(state0, state1); } if (hypo.GetBitmap().IsComplete()) { // Score end of sentence. std::vector<lm::WordIndex> indices(m_ngram->Order() - 1); const lm::WordIndex *last = LastIDs(hypo, &indices.front()); score += m_ngram->FullScoreForgotState(&indices.front(), last, m_ngram->GetVocabulary().EndSentence(), stateCast.state).prob; } else if (adjust_end < end) { // Get state after adding a long phrase. std::vector<lm::WordIndex> indices(m_ngram->Order() - 1); const lm::WordIndex *last = LastIDs(hypo, &indices.front()); m_ngram->GetState(&indices.front(), last, stateCast.state); } else if (state0 != &stateCast.state) { // Short enough phrase that we can just reuse the state. 
stateCast.state = *state0; } score = TransformLMScore(score); bool OOVFeatureEnabled = false; if (OOVFeatureEnabled) { std::vector<float> scoresVec(2); scoresVec[0] = score; scoresVec[1] = 0.0; scores.PlusEquals(system, *this, scoresVec); } else { scores.PlusEquals(system, *this, score); } } void KENLMBatch::CalcScore(const Phrase<Moses2::Word> &phrase, float &fullScore, float &ngramScore, std::size_t &oovCount) const { fullScore = 0; ngramScore = 0; oovCount = 0; if (!phrase.GetSize()) return; lm::ngram::ChartState discarded_sadly; lm::ngram::RuleScore<Model> scorer(*m_ngram, discarded_sadly); size_t position; if (m_bos == phrase[0][m_factorType]) { scorer.BeginSentence(); position = 1; } else { position = 0; } size_t ngramBoundary = m_ngram->Order() - 1; size_t end_loop = std::min(ngramBoundary, phrase.GetSize()); for (; position < end_loop; ++position) { const Word &word = phrase[position]; lm::WordIndex index = TranslateID(word); scorer.Terminal(index); if (!index) ++oovCount; } float before_boundary = fullScore + scorer.Finish(); for (; position < phrase.GetSize(); ++position) { const Word &word = phrase[position]; lm::WordIndex index = TranslateID(word); scorer.Terminal(index); if (!index) ++oovCount; } fullScore += scorer.Finish(); ngramScore = TransformLMScore(fullScore - before_boundary); fullScore = TransformLMScore(fullScore); } // Convert last words of hypothesis into vocab ids, returning an end pointer. lm::WordIndex *KENLMBatch::LastIDs(const Hypothesis &hypo, lm::WordIndex *indices) const { lm::WordIndex *index = indices; lm::WordIndex *end = indices + m_ngram->Order() - 1; int position = hypo.GetCurrTargetWordsRange().GetEndPos(); for (;; ++index, --position) { if (index == end) return index; if (position == -1) { *index = m_ngram->GetVocabulary().BeginSentence(); return index + 1; } *index = TranslateID(hypo.GetWord(position)); } } void KENLMBatch::SetParameter(const std::string& key, const std::string& value) { //cerr << "key=" << key << " " << value << endl; if (key == "path") { m_path = value; } else if (key == "order") { // ignore } else if (key == "factor") { m_factorType = Scan<FactorType>(value); } else if (key == "lazyken") { m_load_method = boost::lexical_cast<bool>(value) ? 
util::LAZY : util::POPULATE_OR_READ; } else if (key == "load") { if (value == "lazy") { m_load_method = util::LAZY; } else if (value == "populate_or_lazy") { m_load_method = util::POPULATE_OR_LAZY; } else if (value == "populate_or_read" || value == "populate") { m_load_method = util::POPULATE_OR_READ; } else if (value == "read") { m_load_method = util::READ; } else if (value == "parallel_read") { m_load_method = util::PARALLEL_READ; } else { UTIL_THROW2("Unknown KenLM load method " << value); } } else { StatefulFeatureFunction::SetParameter(key, value); } //cerr << "SetParameter done" << endl; } void KENLMBatch::EvaluateWhenAppliedBatch( const Batch &batch) const { { // write lock boost::unique_lock<boost::shared_mutex> lock(m_accessLock); m_batches.push_back(&batch); m_numHypos += batch.size(); } //cerr << "m_numHypos=" << m_numHypos << endl; if (m_numHypos > 0) { // process batch EvaluateWhenAppliedBatch(); m_batches.clear(); m_numHypos = 0; m_threadNeeded.notify_all(); } else { boost::mutex::scoped_lock lock(m_mutex); m_threadNeeded.wait(lock); } } void KENLMBatch::EvaluateWhenAppliedBatch() const { BOOST_FOREACH(const Batch *batch, m_batches) { //cerr << "batch=" << batch->size() << endl; BOOST_FOREACH(Hypothesis *hypo, *batch) { hypo->EvaluateWhenApplied(*this); } } } void KENLMBatch::EvaluateWhenApplied(const SCFG::Manager &mgr, const SCFG::Hypothesis &hypo, int featureID, Scores &scores, FFState &state) const { UTIL_THROW2("Not implemented"); } }
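An aside, not part of the original file: up to the log-base conversion applied by TransformLMScore, the quantity that CalcScore and EvaluateWhenApplied accumulate above is the usual n-gram language-model log-probability of the target words given their histories,
\[ \mathrm{score}(w_1 \dots w_k \mid h) = \sum_{i=1}^{k} \log P(w_i \mid w_{i-n+1}, \dots, w_{i-1}), \]
with n = m_ngram->Order(), sentence boundaries handled via BeginSentenceState/EndSentence, and words mapping to vocabulary index 0 (<unk>) additionally counted in oovCount.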
State Before:
  α : Type u_1
  l : Ordnode α
  x : α
  r : Ordnode α
  ⊢ dual (balanceR l x r) = balanceL (dual r) x (dual l)
State After: no goals
Tactic: rw [← dual_dual (balanceL _ _ _), dual_balanceL, dual_dual, dual_dual]
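Written out as a standalone declaration, this is roughly the following Lean 4 lemma (a sketch: Ordnode, dual, balanceL/balanceR and the lemmas dual_dual and dual_balanceL are assumed to come from Mathlib's Ordnode development, and the name dual_balanceR' is invented here):

open Ordnode in
theorem dual_balanceR' {α : Type _} (l : Ordnode α) (x : α) (r : Ordnode α) :
    dual (balanceR l x r) = balanceL (dual r) x (dual l) := by
  -- exactly the rewrite chain recorded in the sample
  rw [← dual_dual (balanceL _ _ _), dual_balanceL, dual_dual, dual_dual]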
(* Copyright (C) 2017 M.A.L. Marques This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) (* type: mgga_exc *) $define lda_c_pw_params $define lda_c_pw_modified_params $include "lda_c_pw.mpl" rmggac_gamma1 := 0.08: rmggac_gamma2 := 0.3: rmggac_g := (alpha, s) -> (1 + rmggac_gamma1)*alpha/(rmggac_gamma1 + alpha + rmggac_gamma2*s^2): rmggac_f2 := (alpha, s) -> 3*rmggac_g(alpha, s)^3/(1 + rmggac_g(alpha, s)^3 + rmggac_g(alpha, s)^6): rmggac_f1 := (alpha, s) -> 1 - rmggac_f2(alpha, s): rmggac_gamma := 0.031091: (* from gga_c_regtpss *) beta_a := 0.066724550603149220: beta_b := 0.1: beta_c := 0.1778: mbeta := (rs, t) -> beta_a*(1 + beta_b*rs)/(1 + beta_c*rs): (* from mgga_c_r2scan *) rmggac_w1 := (rs, z) -> exp(-f_pw(rs, z)/(rmggac_gamma*mphi(z)^3)) - 1: (* from gga_c_pbe *) A := (rs, z, t) -> mbeta(rs, t)/(rmggac_gamma * rmggac_w1(rs, z)): (* from gga_c_scan_e0 *) scan_e0_g := (rs, z, t) -> (1 + 4*A(rs, z, t)*t^2)^(-1/4): (* from mgga_c_r2scan *) rmggac_H1 := (rs, z, t) -> rmggac_gamma*mphi(z)^3*log(1 + rmggac_w1(rs, z) * (1 - scan_e0_g(rs, z, t))): (* from mgga_c_scan *) scan_alpha := (z, xt, ts0, ts1) -> (t_total(z, ts0, ts1) - xt^2/8)/(K_FACTOR_C*t_total(z, 1, 1)): scan_b1c := 0.0285764: scan_b2c := 0.0889: scan_b3c := 0.125541: scan_eclda0 := rs -> -scan_b1c/(1 + scan_b2c*sqrt(rs) + scan_b3c*rs): scan_chi_infty := 0.12802585262625815: scan_g_infty := s -> 1/(1 + 4*scan_chi_infty*s^2)^(1/4): scan_G_cnst := 2.3631: scan_Gc := z -> (1 - scan_G_cnst*(2^(1/3) - 1)*f_zeta(z))*(1 - z^12): scan_H0 := (rs, s) -> scan_b1c*log(1 + (exp(-scan_eclda0(rs)/scan_b1c) - 1)*(1 - scan_g_infty(s))): scan_e0 := (rs, z, s) -> (scan_eclda0(rs) + scan_H0(rs, s))*scan_Gc(z): (* define the functional *) rmggac_eps1 := (rs, z, t) -> (f_pw(rs, z) + rmggac_H1(rs, z, t)): rmggac_f := (rs, z, xt, xs0, xs1, ts0, ts1) -> + scan_e0(rs, z, X2S*2^(1/3)*xt) * rmggac_f1(scan_alpha(z, xt, ts0, ts1), X2S*2^(1/3)*xt) + rmggac_eps1(rs, z, tt(rs, z, xt)) * rmggac_f2(scan_alpha(z, xt, ts0, ts1), X2S*2^(1/3)*xt): f := (rs, z, xt, xs0, xs1, us0, us1, ts0, ts1) -> rmggac_f(rs, z, xt, xs0, xs1, ts0, ts1):
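In conventional notation (a transcription of the Maple definitions above, not part of the original file), the interpolation used by this functional is
\[ g(\alpha, s) = \frac{(1+\gamma_1)\,\alpha}{\gamma_1 + \alpha + \gamma_2 s^2}, \qquad f_2(\alpha, s) = \frac{3\,g^3}{1 + g^3 + g^6}, \qquad f_1 = 1 - f_2, \]
with $\gamma_1 = 0.08$ and $\gamma_2 = 0.3$, and the correlation energy per particle is assembled as $\varepsilon_c = \varepsilon_c^{0} f_1 + \varepsilon_c^{1} f_2$, which is what rmggac_f computes.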
If $c \neq 0$, then the distribution of $cX$ is the same as the distribution of $X/|c|$.
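One precise reading of this statement (taking $X$ to be Lebesgue measure $\lambda$ on $\mathbb{R}$ and "distribution" to mean the pushforward under multiplication by $c$): for $c \neq 0$ and any Borel set $A$,
\[ \big((c\,\cdot)_{*}\lambda\big)(A) = \lambda(\{x : cx \in A\}) = \lambda(A/c) = \tfrac{1}{|c|}\,\lambda(A), \]
so the pushforward of $\lambda$ under $x \mapsto cx$ is $\lambda$ scaled by $1/|c|$.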
module Main

import Chez

display : String -> SIO ()
display s = foreign FFI_S "display" (String -> SIO ()) s

displayBool : Bool -> SIO ()
displayBool b = foreign FFI_S "display" (Bool -> SIO ()) b

schemeEqual : Int -> Int -> SIO Bool
schemeEqual x y = foreign FFI_S "=" (Int -> Int -> SIO Bool) x y

main : SIO ()
main = do display "O RLY?"
          displayBool True
          a <- schemeEqual 1 2
          displayBool a
! ! -------------------------------------------------------------- ! D L S A 4 ! -------------------------------------------------------------- ! * ! Written by G. Gaigalas, * ! Vanderbilt University, Nashville February 1994 * ! SUBROUTINE DLSA4(K, JA1, JA2, K1, K2, KA, IRE, IAT, REC) !----------------------------------------------- ! M o d u l e s !----------------------------------------------- USE vast_kind_param, ONLY: DOUBLE USE CONSTS_C USE MEDEFN_C !...Translated by Pacific-Sierra Research 77to90 4.3E 11:15:35 11/16/01 !...Switches: !----------------------------------------------- ! I n t e r f a c e B l o c k s !----------------------------------------------- USE ninels_I IMPLICIT NONE !----------------------------------------------- ! D u m m y A r g u m e n t s !----------------------------------------------- INTEGER , INTENT(IN) :: K INTEGER , INTENT(IN) :: JA1 INTEGER , INTENT(IN) :: JA2 INTEGER :: K1 INTEGER :: K2 INTEGER :: KA INTEGER , INTENT(IN) :: IRE INTEGER :: IAT REAL(DOUBLE) , INTENT(OUT) :: REC !----------------------------------------------- ! L o c a l V a r i a b l e s !----------------------------------------------- INTEGER :: IA1, IB1, IA2, IB2, IT2, IT2S, N1, J2, J2S, N2 REAL(DOUBLE) :: A2 !----------------------------------------------- REC = ZERO IA1 = J1QN1(JA1,K) - 1 IB1 = J1QN2(JA1,K) - 1 IA2 = J1QN1(JA2,K) - 1 IB2 = J1QN2(JA2,K) - 1 IF (JA1==1 .AND. JA2==2) THEN IT2 = IA1 IT2S = IB1 N1 = IHSH + 1 J2 = J1QN1(N1,K) - 1 J2S = J1QN2(N1,K) - 1 ELSE N1 = IHSH + JA2 - 1 J2 = J1QN1(N1,K) - 1 J2S = J1QN2(N1,K) - 1 N2 = IHSH + JA2 - 2 IT2 = J1QN1(N2,K) - 1 IT2S = J1QN2(N2,K) - 1 ENDIF IF (IRE == 0) THEN ! CALL NINE(IT2S,K1,IT2,IB2,K2,IA2,J2S,KA,J2,1,IAT,A2) CALL NINELS (IT2, IT2S, K1, IA2, IB2, K2, J2, J2S, KA, 1, IAT, A2) ELSE ! CALL NINE(IT2S,K1,IT2,IB2,K2,IA2,J2S,KA,J2,0,IAT,A2) CALL NINELS (IT2, IT2S, K1, IA2, IB2, K2, J2, J2S, KA, 0, IAT, A2) REC = A2*DSQRT(DBLE((IT2 + 1)*(KA + 1)*(IA2 + 1)*(J2S + 1))) ENDIF RETURN END SUBROUTINE DLSA4
If $z_i$ and $w_i$ are vectors with norm at most 1, then the norm of the difference of their products is at most the sum of the norms of their differences.
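Reading the $z_i$ and $w_i$ as elements of a normed ring (for instance, complex numbers), the bound follows from the telescoping identity (a standard argument, spelled out here for clarity):
\[ \prod_{i=1}^{n} z_i - \prod_{i=1}^{n} w_i = \sum_{k=1}^{n} \Big(\prod_{i<k} z_i\Big)(z_k - w_k)\Big(\prod_{i>k} w_i\Big), \]
so the triangle inequality and submultiplicativity, together with all factors having norm at most 1, give $\big\|\prod_i z_i - \prod_i w_i\big\| \le \sum_{k=1}^{n} \|z_k - w_k\|$.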
@inline colnames(table)::Vector{Symbol} = collect(propertynames(columns(table)))
@inline getcol(table, x) = getproperty(columns(table), x)

function subset_cols(table, cols::Tuple{Vararg{Symbol}})
    nt = columntable(table)
    newcols = NamedTuple{cols}(nt)
    materializer(table)(newcols)
end

function subset_cols(table::NamedTuple, cols::Tuple{Vararg{Symbol}})
    NamedTuple{cols}(table)
end

subset_cols(table, cols::Symbol...) = subset_cols(table, cols)
subset_cols(table, cols::Tuple{Vararg{Symbol,0}}) = table

ensure_tuple(x) = tuple(x)
ensure_tuple(x::Tuple) = x
ensure_tuple(x::AbstractArray) = Tuple(x)
If $g$ has an isolated singularity at $z$ and $g$ and $f$ agree on a neighborhood of $z$, then $f$ has an isolated singularity at $z$.
[STATEMENT] lemma num_reachable_le_nodes: "\<lbrakk> wf_graph G \<rbrakk> \<Longrightarrow> num_reachable G v \<le> card (nodes G)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. wf_graph G \<Longrightarrow> num_reachable G v \<le> card (nodes G) [PROOF STEP] unfolding num_reachable_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. wf_graph G \<Longrightarrow> card (succ_tran G v) \<le> card (nodes G) [PROOF STEP] using succ_tran_subseteq_nodes card_seteq nat_le_linear wf_graph.finiteV [PROOF STATE] proof (prove) using this: wf_graph ?G \<Longrightarrow> succ_tran ?G ?v \<subseteq> nodes ?G \<lbrakk>finite ?B; ?A \<subseteq> ?B; card ?B \<le> card ?A\<rbrakk> \<Longrightarrow> ?A = ?B ?m \<le> ?n \<or> ?n \<le> ?m wf_graph ?G \<Longrightarrow> finite (nodes ?G) goal (1 subgoal): 1. wf_graph G \<Longrightarrow> card (succ_tran G v) \<le> card (nodes G) [PROOF STEP] by metis
State Before:
  R : Type u
  L : Type v
  L' : Type w₂
  M : Type w
  M' : Type w₁
  inst✝¹² : CommRing R
  inst✝¹¹ : LieRing L
  inst✝¹⁰ : LieAlgebra R L
  inst✝⁹ : LieRing L'
  inst✝⁸ : LieAlgebra R L'
  inst✝⁷ : AddCommGroup M
  inst✝⁶ : Module R M
  inst✝⁵ : LieRingModule L M
  inst✝⁴ : LieModule R L M
  inst✝³ : AddCommGroup M'
  inst✝² : Module R M'
  inst✝¹ : LieRingModule L M'
  inst✝ : LieModule R L M'
  f : L →ₗ⁅R⁆ L'
  I I₂ : LieIdeal R L
  J : LieIdeal R L'
  ⊢ map f (comap f J) ≤ J
State After: no goals
Tactic: rw [map_le_iff_le_comap]
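As a standalone Mathlib-style lemma this is roughly (a sketch: LieIdeal.map, LieIdeal.comap and map_le_iff_le_comap are assumed to be the Mathlib declarations the goal refers to, and the name map_comap_le' is invented here):

theorem map_comap_le' {R : Type _} {L : Type _} {L' : Type _}
    [CommRing R] [LieRing L] [LieAlgebra R L] [LieRing L'] [LieAlgebra R L']
    (f : L →ₗ⁅R⁆ L') (J : LieIdeal R L') :
    LieIdeal.map f (LieIdeal.comap f J) ≤ J := by
  -- exactly the rewrite recorded in the sample
  rw [LieIdeal.map_le_iff_le_comap]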
(* Author: Norbert Schirmer Maintainer: Norbert Schirmer, norbert.schirmer at web de License: LGPL *) (* Title: AlternativeSmallStep.thy Author: Norbert Schirmer, TU Muenchen Copyright (C) 2006-2008 Norbert Schirmer Some rights reserved, TU Muenchen This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *) section \<open>Alternative Small Step Semantics\<close> theory AlternativeSmallStep imports HoareTotalDef begin text \<open> This is the small-step semantics, which is described and used in my PhD-thesis \cite{Schirmer-PhD}. It decomposes the statement into a list of statements and finally executes the head. So the redex is always the head of the list. The equivalence between termination (based on the big-step semantics) and the absence of infinite computations in this small-step semantics follows the same lines of reasoning as for the new small-step semantics. However, it is technically more involved since the configurations are more complicated. Thats why I switched to the new small-step semantics in the "main trunk". I keep this alternative version and the important proofs in this theory, so that one can compare both approaches. \<close> subsection \<open>Small-Step Computation: \<open>\<Gamma>\<turnstile>(cs, css, s) \<rightarrow> (cs', css', s')\<close>\<close> type_synonym ('s,'p,'f) continuation = "('s,'p,'f) com list \<times> ('s,'p,'f) com list" type_synonym ('s,'p,'f) config = "('s,'p,'f)com list \<times> ('s,'p,'f)continuation list \<times> ('s,'f) xstate" inductive "step"::"[('s,'p,'f) body,('s,'p,'f) config,('s,'p,'f) config] \<Rightarrow> bool" ("_\<turnstile> (_ \<rightarrow>/ _)" [81,81,81] 100) for \<Gamma>::"('s,'p,'f) body" where Skip: "\<Gamma>\<turnstile>(Skip#cs,css,Normal s) \<rightarrow> (cs,css,Normal s)" | Guard: "s\<in>g \<Longrightarrow> \<Gamma>\<turnstile>(Guard f g c#cs,css,Normal s) \<rightarrow> (c#cs,css,Normal s)" | GuardFault: "s\<notin>g \<Longrightarrow> \<Gamma>\<turnstile>(Guard f g c#cs,css,Normal s) \<rightarrow> (cs,css,Fault f)" | FaultProp: "\<Gamma>\<turnstile>(c#cs,css,Fault f) \<rightarrow> (cs,css,Fault f)" | FaultPropBlock: "\<Gamma>\<turnstile>([],(nrms,abrs)#css,Fault f) \<rightarrow> (nrms,css,Fault f)" (* FaultPropBlock: "\<Gamma>\<turnstile>([],cs#css,Fault) \<rightarrow> ([],css,Fault)"*) | AbruptProp: "\<Gamma>\<turnstile>(c#cs,css,Abrupt s) \<rightarrow> (cs,css,Abrupt s)" | ExitBlockNormal: "\<Gamma>\<turnstile>([],(nrms,abrs)#css,Normal s) \<rightarrow> (nrms,css,Normal s)" | ExitBlockAbrupt: "\<Gamma>\<turnstile>([],(nrms,abrs)#css,Abrupt s) \<rightarrow> (abrs,css,Normal s)" | Basic: "\<Gamma>\<turnstile>(Basic f#cs,css,Normal s) \<rightarrow> (cs,css,Normal (f s))" | Spec: "(s,t) \<in> r \<Longrightarrow> \<Gamma>\<turnstile>(Spec r#cs,css,Normal s) \<rightarrow> (cs,css,Normal t)" | SpecStuck: "\<forall>t. 
(s,t) \<notin> r \<Longrightarrow> \<Gamma>\<turnstile>(Spec r#cs,css,Normal s) \<rightarrow> (cs,css,Stuck)" | Seq: "\<Gamma>\<turnstile>(Seq c\<^sub>1 c\<^sub>2#cs,css,Normal s) \<rightarrow> (c\<^sub>1#c\<^sub>2#cs,css,Normal s)" | CondTrue: "s\<in>b \<Longrightarrow> \<Gamma>\<turnstile>(Cond b c\<^sub>1 c\<^sub>2#cs,css,Normal s) \<rightarrow> (c\<^sub>1#cs,css,Normal s)" | CondFalse: "s\<notin>b \<Longrightarrow> \<Gamma>\<turnstile>(Cond b c\<^sub>1 c\<^sub>2#cs,css,Normal s) \<rightarrow> (c\<^sub>2#cs,css,Normal s)" | WhileTrue: "\<lbrakk>s\<in>b\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>(While b c#cs,css,Normal s) \<rightarrow> (c#While b c#cs,css,Normal s)" | WhileFalse: "\<lbrakk>s\<notin>b\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>(While b c#cs,css,Normal s) \<rightarrow> (cs,css,Normal s)" | Call: "\<Gamma> p=Some bdy \<Longrightarrow> \<Gamma>\<turnstile>(Call p#cs,css,Normal s) \<rightarrow> ([bdy],(cs,Throw#cs)#css,Normal s)" | CallUndefined: "\<Gamma> p=None \<Longrightarrow> \<Gamma>\<turnstile>(Call p#cs,css,Normal s) \<rightarrow> (cs,css,Stuck)" | StuckProp: "\<Gamma>\<turnstile>(c#cs,css,Stuck) \<rightarrow> (cs,css,Stuck)" | StuckPropBlock: "\<Gamma>\<turnstile>([],(nrms,abrs)#css,Stuck) \<rightarrow> (nrms,css,Stuck)" | DynCom: "\<Gamma>\<turnstile>(DynCom c#cs,css,Normal s) \<rightarrow> (c s#cs,css,Normal s)" | Throw: "\<Gamma>\<turnstile>(Throw#cs,css,Normal s) \<rightarrow> (cs,css,Abrupt s)" | Catch: "\<Gamma>\<turnstile>(Catch c\<^sub>1 c\<^sub>2#cs,css,Normal s) \<rightarrow> ([c\<^sub>1],(cs,c\<^sub>2#cs)#css,Normal s)" lemmas step_induct = step.induct [of _ "(c,css,s)" "(c',css',s')", split_format (complete), case_names Skip Guard GuardFault FaultProp FaultPropBlock AbruptProp ExitBlockNormal ExitBlockAbrupt Basic Spec SpecStuck Seq CondTrue CondFalse WhileTrue WhileFalse Call CallUndefined StuckProp StuckPropBlock DynCom Throw Catch, induct set] inductive_cases step_elim_cases [cases set]: "\<Gamma>\<turnstile>(c#cs,css,Fault f) \<rightarrow> u" "\<Gamma>\<turnstile>([],css,Fault f) \<rightarrow> u" "\<Gamma>\<turnstile>(c#cs,css,Stuck) \<rightarrow> u" "\<Gamma>\<turnstile>([],css,Stuck) \<rightarrow> u" "\<Gamma>\<turnstile>(c#cs,css,Abrupt s) \<rightarrow> u" "\<Gamma>\<turnstile>([],css,Abrupt s) \<rightarrow> u" "\<Gamma>\<turnstile>([],css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(Skip#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(Guard f g c#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(Basic f#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(Spec r#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(Seq c1 c2#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(Cond b c1 c2#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(While b c#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(Call p#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(DynCom c#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(Throw#cs,css,s) \<rightarrow> u" "\<Gamma>\<turnstile>(Catch c1 c2#cs,css,s) \<rightarrow> u" inductive_cases step_Normal_elim_cases [cases set]: "\<Gamma>\<turnstile>(c#cs,css,Fault f) \<rightarrow> u" "\<Gamma>\<turnstile>([],css,Fault f) \<rightarrow> u" "\<Gamma>\<turnstile>(c#cs,css,Stuck) \<rightarrow> u" "\<Gamma>\<turnstile>([],css,Stuck) \<rightarrow> u" "\<Gamma>\<turnstile>([],(nrms,abrs)#css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>([],(nrms,abrs)#css,Abrupt s) \<rightarrow> u" "\<Gamma>\<turnstile>(Skip#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(Guard f g c#cs,css,Normal s) \<rightarrow> u" 
"\<Gamma>\<turnstile>(Basic f#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(Spec r#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(Seq c1 c2#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(Cond b c1 c2#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(While b c#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(Call p#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(DynCom c#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(Throw#cs,css,Normal s) \<rightarrow> u" "\<Gamma>\<turnstile>(Catch c1 c2#cs,css,Normal s) \<rightarrow> u" abbreviation "step_rtrancl" :: "[('s,'p,'f) body,('s,'p,'f) config,('s,'p,'f) config] \<Rightarrow> bool" ("_\<turnstile> (_ \<rightarrow>\<^sup>*/ _)" [81,81,81] 100) where "\<Gamma>\<turnstile>cs0 \<rightarrow>\<^sup>* cs1 == (step \<Gamma>)\<^sup>*\<^sup>* cs0 cs1" abbreviation "step_trancl" :: "[('s,'p,'f) body,('s,'p,'f) config,('s,'p,'f) config] \<Rightarrow> bool" ("_\<turnstile> (_ \<rightarrow>\<^sup>+/ _)" [81,81,81] 100) where "\<Gamma>\<turnstile>cs0 \<rightarrow>\<^sup>+ cs1 == (step \<Gamma>)\<^sup>+\<^sup>+ cs0 cs1" subsubsection \<open>Structural Properties of Small Step Computations\<close> lemma Fault_app_steps: "\<Gamma>\<turnstile>(cs@xs,css,Fault f) \<rightarrow>\<^sup>* (xs,css,Fault f)" proof (induct cs) case Nil thus ?case by simp next case (Cons c cs) have "\<Gamma>\<turnstile>(c#cs@xs, css, Fault f) \<rightarrow>\<^sup>* (xs, css, Fault f)" proof - have "\<Gamma>\<turnstile>(c#cs@xs, css, Fault f) \<rightarrow> (cs@xs, css, Fault f)" by (rule step.FaultProp) also have "\<Gamma>\<turnstile>(cs@xs, css, Fault f) \<rightarrow>\<^sup>* (xs, css, Fault f)" by (rule Cons.hyps) finally show ?thesis . qed thus ?case by simp qed lemma Stuck_app_steps: "\<Gamma>\<turnstile>(cs@xs,css,Stuck) \<rightarrow>\<^sup>* (xs,css,Stuck)" proof (induct cs) case Nil thus ?case by simp next case (Cons c cs) have "\<Gamma>\<turnstile>(c#cs@xs, css, Stuck) \<rightarrow>\<^sup>* (xs, css, Stuck)" proof - have "\<Gamma>\<turnstile>(c#cs@xs, css, Stuck) \<rightarrow> (cs@xs, css, Stuck)" by (rule step.StuckProp) also have "\<Gamma>\<turnstile>(cs@xs, css, Stuck) \<rightarrow>\<^sup>* (xs, css, Stuck)" by (rule Cons.hyps) finally show ?thesis . qed thus ?case by simp qed text \<open>We can only append commands inside a block, if execution does not enter or exit a block. \<close> lemma app_step: assumes step: "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow> (cs',css',t)" shows "css=css' \<Longrightarrow> \<Gamma>\<turnstile>(cs@xs,css,s) \<rightarrow> (cs'@xs,css',t)" using step apply induct apply (simp_all del: fun_upd_apply,(blast intro: step.intros)+) done text \<open>We can append whole blocks, without interfering with the actual block. 
Outer blocks do not influence execution of inner blocks.\<close> lemma app_css_step: assumes step: "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow> (cs',css',t)" shows "\<Gamma>\<turnstile>(cs,css@xs,s) \<rightarrow> (cs',css'@xs,t)" using step apply induct apply (simp_all del: fun_upd_apply,(blast intro: step.intros)+) done ML \<open> ML_Thms.bind_thm ("trancl_induct3", Split_Rule.split_rule @{context} (Rule_Insts.read_instantiate @{context} [((("a", 0), Position.none), "(ax, ay, az)"), ((("b", 0), Position.none), "(bx, by, bz)")] [] @{thm tranclp_induct})); \<close> lemma app_css_steps: assumes step: "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow>\<^sup>+ (cs',css',t)" shows "\<Gamma>\<turnstile>(cs,css@xs,s) \<rightarrow>\<^sup>+ (cs',css'@xs,t)" apply(rule trancl_induct3 [OF step]) apply (rule app_css_step [THEN tranclp.r_into_trancl [of "step \<Gamma>"]],assumption) apply(blast intro:app_css_step tranclp_trans) done lemma step_Cons': assumes step: "\<Gamma>\<turnstile>(ccs,css,s) \<rightarrow> (cs',css',t)" shows "\<And>c cs. ccs=c#cs \<Longrightarrow> \<exists>css''. css'=css''@css \<and> (if css''=[] then \<exists>p. cs'=p@cs else (\<exists>pnorm pabr. css''=[(pnorm@cs,pabr@cs)]))" using step by induct force+ lemma step_Cons: assumes step: "\<Gamma>\<turnstile>(c#cs,css,s) \<rightarrow> (cs',css',t)" shows "\<exists>pcss. css'=pcss@css \<and> (if pcss=[] then \<exists>ps. cs'=ps@cs else (\<exists>pcs_normal pcs_abrupt. pcss=[(pcs_normal@cs,pcs_abrupt@cs)]))" using step_Cons' [OF step] by blast lemma step_Nil': assumes step: "\<Gamma>\<turnstile>(cs,asscss,s) \<rightarrow> (cs',css',t)" shows "\<And>ass. \<lbrakk>cs=[]; asscss=ass@css; ass\<noteq>Nil\<rbrakk> \<Longrightarrow> css'=tl ass@css \<and> (case s of Abrupt s' \<Rightarrow> cs'=snd (hd ass) \<and> t=Normal s' | _ \<Rightarrow> cs'=fst (hd ass) \<and> t=s)" using step by (induct) (fastforce simp add: neq_Nil_conv)+ lemma step_Nil: assumes step: "\<Gamma>\<turnstile>([],ass@css,s) \<rightarrow> (cs',css',t)" assumes ass_not_Nil: "ass\<noteq>[]" shows "css'=tl ass@css \<and> (case s of Abrupt s' \<Rightarrow> cs'=snd (hd ass) \<and> t=Normal s' | _ \<Rightarrow> cs'=fst (hd ass) \<and> t=s)" using step_Nil' [OF step _ _ ass_not_Nil] by simp lemma step_Nil'': assumes step: "\<Gamma>\<turnstile>([],(pcs_normal,pcs_abrupt)#pcss@css,s) \<rightarrow> (cs',pcss@css,t)" shows "(case s of Abrupt s' \<Rightarrow> cs'=pcs_abrupt \<and> t=Normal s' | _ \<Rightarrow> cs'=pcs_normal \<and> t=s)" using step_Nil' [OF step, where ass ="(pcs_normal,pcs_abrupt)#pcss" and css="css"] by (auto split: xstate.splits) lemma drop_suffix_css_step': assumes step: "\<Gamma>\<turnstile>(cs,cssxs,s) \<rightarrow> (cs',css'xs,t)" shows "\<And>css css' xs. \<lbrakk>cssxs = css@xs; css'xs=css'@xs\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>(cs,css,s) \<rightarrow> (cs',css',t)" using step apply induct apply (fastforce intro: step.intros)+ done lemma drop_suffix_css_step: assumes step: "\<Gamma>\<turnstile>(cs,pcss@css,s) \<rightarrow> (cs',pcss'@css,t)" shows "\<Gamma>\<turnstile>(cs,pcss,s) \<rightarrow> (cs',pcss',t)" using step by (blast intro: drop_suffix_css_step') lemma drop_suffix_hd_css_step': assumes step: "\<Gamma>\<turnstile> (pcs,css,s) \<rightarrow> (cs',css'css,t)" shows "\<And>p ps cs pnorm pabr. 
\<lbrakk>pcs=p#ps@cs; css'css=(pnorm@cs,pabr@cs)#css\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile> (p#ps,css,s) \<rightarrow> (cs',(pnorm,pabr)#css,t)" using step by induct (force intro: step.intros)+ lemma drop_suffix_hd_css_step'': assumes step: "\<Gamma>\<turnstile> (p#ps@cs,css,s) \<rightarrow> (cs',(pnorm@cs,pabr@cs)#css,t)" shows "\<Gamma>\<turnstile> (p#ps,css,s) \<rightarrow> (cs',(pnorm,pabr)#css,t)" using drop_suffix_hd_css_step' [OF step] by auto lemma drop_suffix_hd_css_step: assumes step: "\<Gamma>\<turnstile> (p#ps@cs,css,s) \<rightarrow> (cs',[(pnorm@ps@cs,pabr@ps@cs)]@css,t)" shows "\<Gamma>\<turnstile> (p#ps,css,s) \<rightarrow> (cs',[(pnorm@ps,pabr@ps)]@css,t)" proof - from step drop_suffix_hd_css_step'' [of _ p ps cs css s cs' "pnorm@ps" "pabr@ps" t] show ?thesis by auto qed lemma drop_suffix': assumes step: "\<Gamma>\<turnstile>(csxs,css,s) \<rightarrow> (cs'xs,css',t)" shows "\<And>xs cs cs'. \<lbrakk>css=css'; csxs=cs@xs; cs'xs = cs'@xs; cs\<noteq>[] \<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>(cs,css,s) \<rightarrow> (cs',css,t)" using step apply induct apply (fastforce intro: step.intros simp add: neq_Nil_conv)+ done lemma drop_suffix: assumes step: "\<Gamma>\<turnstile>(c#cs@xs,css,s) \<rightarrow> (cs'@xs,css,t)" shows "\<Gamma>\<turnstile>(c#cs,css,s) \<rightarrow> (cs',css,t)" by(rule drop_suffix' [OF step _ _ _]) auto lemma drop_suffix_same_css_step: assumes step: "\<Gamma>\<turnstile>(cs@xs,css,s) \<rightarrow> (cs'@xs,css,t)" assumes not_Nil: "cs\<noteq>[]" shows "\<Gamma>\<turnstile>(cs,xss,s) \<rightarrow> (cs',xss,t)" proof- from drop_suffix' [OF step _ _ _ not_Nil] have "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow> (cs',css,t)" by auto with drop_suffix_css_step [of _ cs "[]" css s cs' "[]" t] have "\<Gamma>\<turnstile> (cs, [], s) \<rightarrow> (cs', [], t)" by auto from app_css_step [OF this] show ?thesis by auto qed lemma Cons_change_css_step: assumes step: "\<Gamma>\<turnstile> (cs,css,s) \<rightarrow> (cs',css'@css,t)" shows "\<Gamma>\<turnstile> (cs,xss,s) \<rightarrow> (cs',css'@xss,t)" proof - from step drop_suffix_css_step [where cs=cs and pcss="[]" and css=css and s=s and cs'=cs' and pcss'=css' and t=t] have "\<Gamma>\<turnstile> (cs, [], s) \<rightarrow> (cs', css', t)" by auto from app_css_step [where xs=xss, OF this] show ?thesis by auto qed lemma Nil_change_css_step: assumes step: "\<Gamma>\<turnstile>([],ass@css,s) \<rightarrow> (cs',ass'@css,t)" assumes ass_not_Nil: "ass\<noteq>[]" shows "\<Gamma>\<turnstile>([],ass@xss,s) \<rightarrow> (cs',ass'@xss,t)" proof - from step drop_suffix_css_step [of _ "[]" ass css s cs' ass' t] have "\<Gamma>\<turnstile> ([], ass, s) \<rightarrow> (cs', ass', t)" by auto from app_css_step [where xs=xss, OF this] show ?thesis by auto qed subsubsection \<open>Equivalence between Big and Small-Step Semantics\<close> lemma exec_impl_steps: assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" shows "\<And>cs css. 
\<Gamma>\<turnstile>(c#cs,css,s) \<rightarrow>\<^sup>* (cs,css,t)" using exec proof (induct) case Skip thus ?case by (blast intro: step.Skip) next case Guard thus ?case by (blast intro: step.Guard rtranclp_trans) next case GuardFault thus ?case by (blast intro: step.GuardFault) next case FaultProp thus ?case by (blast intro: step.FaultProp) next case Basic thus ?case by (blast intro: step.Basic) next case Spec thus ?case by (blast intro: step.Spec) next case SpecStuck thus ?case by (blast intro: step.SpecStuck) next case Seq thus ?case by (blast intro: step.Seq rtranclp_trans) next case CondTrue thus ?case by (blast intro: step.CondTrue rtranclp_trans) next case CondFalse thus ?case by (blast intro: step.CondFalse rtranclp_trans) next case WhileTrue thus ?case by (blast intro: step.WhileTrue rtranclp_trans) next case WhileFalse thus ?case by (blast intro: step.WhileFalse) next case (Call p bdy s s' cs css) have bdy: "\<Gamma> p = Some bdy" by fact have steps_body: "\<Gamma>\<turnstile>([bdy],(cs,Throw#cs)#css,Normal s) \<rightarrow>\<^sup>* ([],(cs,Throw#cs)#css, s')" by fact show ?case proof (cases s') case (Normal s'') note steps_body also from Normal have "\<Gamma>\<turnstile>([],(cs,Throw#cs)#css, s') \<rightarrow> (cs,css,s')" by (auto intro: step.intros) finally show ?thesis using bdy by (blast intro: step.Call rtranclp_trans) next case (Abrupt s'') with steps_body have "\<Gamma>\<turnstile>([bdy],(cs,Throw#cs)#css,Normal s) \<rightarrow>\<^sup>* ([],(cs,Throw#cs)#css, Abrupt s'')" by simp also have "\<Gamma>\<turnstile>([],(cs,Throw#cs)#css, Abrupt s'') \<rightarrow> (Throw#cs,css,Normal s'')" by (rule ExitBlockAbrupt) also have "\<Gamma>\<turnstile>(Throw#cs,css,Normal s'') \<rightarrow> (cs,css,Abrupt s'')" by (rule Throw) finally show ?thesis using bdy Abrupt by (auto intro: step.Call rtranclp_trans) next case Fault note steps_body also from Fault have "\<Gamma>\<turnstile>([],(cs,Throw#cs)#css, s') \<rightarrow> (cs,css,s')" by (auto intro: step.intros) finally show ?thesis using bdy by (blast intro: step.Call rtranclp_trans) next case Stuck note steps_body also from Stuck have "\<Gamma>\<turnstile>([],(cs,Throw#cs)#css, s') \<rightarrow> (cs,css,s')" by (auto intro: step.intros) finally show ?thesis using bdy by (blast intro: step.Call rtranclp_trans) qed next case (CallUndefined p s cs css) have undef: "\<Gamma> p = None" by fact hence "\<Gamma>\<turnstile>(Call p # cs, css, Normal s) \<rightarrow> (cs, css, Stuck)" by (rule step.CallUndefined) thus ?case .. 
next case StuckProp thus ?case by (blast intro: step.StuckProp rtrancl_trans) next case DynCom thus ?case by (blast intro: step.DynCom rtranclp_trans) next case Throw thus ?case by (blast intro: step.Throw) next case AbruptProp thus ?case by (blast intro: step.AbruptProp) next case (CatchMatch c\<^sub>1 s s' c\<^sub>2 s'' cs css) have steps_c1: "\<Gamma>\<turnstile>([c\<^sub>1],(cs,c\<^sub>2#cs)#css,Normal s) \<rightarrow>\<^sup>* ([],(cs,c\<^sub>2#cs)#css,Abrupt s')" by fact also have "\<Gamma>\<turnstile>([],(cs,c\<^sub>2#cs)#css,Abrupt s') \<rightarrow> (c\<^sub>2#cs,css,Normal s')" by (rule ExitBlockAbrupt) also have steps_c2: "\<Gamma>\<turnstile>(c\<^sub>2#cs,css,Normal s') \<rightarrow>\<^sup>* (cs,css,s'')" by fact finally show "\<Gamma>\<turnstile>(Catch c\<^sub>1 c\<^sub>2 # cs, css, Normal s) \<rightarrow>\<^sup>* (cs, css, s'')" by (blast intro: step.Catch rtranclp_trans) next case (CatchMiss c\<^sub>1 s s' c\<^sub>2 cs css) assume notAbr: "\<not> isAbr s'" have steps_c1: "\<Gamma>\<turnstile>([c\<^sub>1],(cs,c\<^sub>2#cs)#css,Normal s) \<rightarrow>\<^sup>* ([],(cs,c\<^sub>2#cs)#css,s')" by fact show "\<Gamma>\<turnstile>(Catch c\<^sub>1 c\<^sub>2 # cs, css, Normal s) \<rightarrow>\<^sup>* (cs, css, s')" proof (cases s') case (Normal w) with steps_c1 have "\<Gamma>\<turnstile>([c\<^sub>1],(cs,c\<^sub>2#cs)#css,Normal s) \<rightarrow>\<^sup>* ([],(cs,c\<^sub>2#cs)#css,Normal w)" by simp also have "\<Gamma>\<turnstile>([],(cs,c\<^sub>2#cs)#css,Normal w) \<rightarrow> (cs,css,Normal w)" by (rule ExitBlockNormal) finally show ?thesis using Normal by (auto intro: step.Catch rtranclp_trans) next case Abrupt with notAbr show ?thesis by simp next case (Fault f) with steps_c1 have "\<Gamma>\<turnstile>([c\<^sub>1],(cs,c\<^sub>2#cs)#css,Normal s) \<rightarrow>\<^sup>* ([],(cs,c\<^sub>2#cs)#css,Fault f)" by simp also have "\<Gamma>\<turnstile>([],(cs,c\<^sub>2#cs)#css,Fault f) \<rightarrow> (cs,css,Fault f)" by (rule FaultPropBlock) finally show ?thesis using Fault by (auto intro: step.Catch rtranclp_trans) next case Stuck with steps_c1 have "\<Gamma>\<turnstile>([c\<^sub>1],(cs,c\<^sub>2#cs)#css,Normal s) \<rightarrow>\<^sup>* ([],(cs,c\<^sub>2#cs)#css,Stuck)" by simp also have "\<Gamma>\<turnstile>([],(cs,c\<^sub>2#cs)#css,Stuck) \<rightarrow> (cs,css,Stuck)" by (rule StuckPropBlock) finally show ?thesis using Stuck by (auto intro: step.Catch rtranclp_trans) qed qed inductive "execs"::"[('s,'p,'f) body,('s,'p,'f) com list, ('s,'p,'f) continuation list, ('s,'f) xstate,('s,'f) xstate] \<Rightarrow> bool" ("_\<turnstile> \<langle>_,_,_\<rangle> \<Rightarrow> _" [50,50,50,50,50] 50) for \<Gamma>:: "('s,'p,'f) body" where Nil: "\<Gamma>\<turnstile>\<langle>[],[],s\<rangle> \<Rightarrow> s" | ExitBlockNormal: "\<Gamma>\<turnstile>\<langle>nrms,css,Normal s\<rangle> \<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>[],(nrms,abrs)#css,Normal s\<rangle> \<Rightarrow> t" | ExitBlockAbrupt: "\<Gamma>\<turnstile>\<langle>abrs,css,Normal s\<rangle> \<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>[],(nrms,abrs)#css,Abrupt s\<rangle> \<Rightarrow> t" | ExitBlockFault: "\<Gamma>\<turnstile>\<langle>nrms,css,Fault f\<rangle> \<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>[],(nrms,abrs)#css,Fault f\<rangle> \<Rightarrow> t" | ExitBlockStuck: "\<Gamma>\<turnstile>\<langle>nrms,css,Stuck\<rangle> \<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>[],(nrms,abrs)#css,Stuck\<rangle> \<Rightarrow> t" | Cons: 
"\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t; \<Gamma>\<turnstile>\<langle>cs,css,t\<rangle> \<Rightarrow> u\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c#cs,css,s\<rangle> \<Rightarrow> u" inductive_cases execs_elim_cases [cases set]: "\<Gamma>\<turnstile>\<langle>[],css,s\<rangle> \<Rightarrow> t" "\<Gamma>\<turnstile>\<langle>c#cs,css,s\<rangle> \<Rightarrow> t" ML \<open> ML_Thms.bind_thm ("converse_rtrancl_induct3", Split_Rule.split_rule @{context} (Rule_Insts.read_instantiate @{context} [((("a", 0), Position.none), "(cs, css, s)"), ((("b", 0), Position.none), "(cs', css', t)")] [] @{thm converse_rtranclp_induct})); \<close> lemma execs_Fault_end: assumes execs: "\<Gamma>\<turnstile>\<langle>cs,css,s\<rangle> \<Rightarrow> t" shows "s=Fault f\<Longrightarrow> t=Fault f" using execs by (induct) (auto dest: Fault_end) lemma execs_Stuck_end: assumes execs: "\<Gamma>\<turnstile>\<langle>cs,css,s\<rangle> \<Rightarrow> t" shows "s=Stuck \<Longrightarrow> t=Stuck" using execs by (induct) (auto dest: Stuck_end) theorem steps_impl_execs: assumes steps: "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow>\<^sup>* ([],[],t)" shows "\<Gamma>\<turnstile>\<langle>cs,css,s\<rangle> \<Rightarrow> t" using steps proof (induct rule: converse_rtrancl_induct3 [consumes 1]) show "\<Gamma>\<turnstile>\<langle>[],[],t\<rangle> \<Rightarrow> t" by (rule execs.Nil) next fix cs css s cs' css' w assume step: "\<Gamma>\<turnstile>(cs,css, s) \<rightarrow> (cs',css', w)" assume execs: "\<Gamma>\<turnstile>\<langle>cs',css',w\<rangle> \<Rightarrow> t" from step show "\<Gamma>\<turnstile>\<langle>cs,css,s\<rangle> \<Rightarrow> t" proof (cases) case (Catch c1 c2 cs s) with execs obtain t' where exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow> t'" and execs_rest: "\<Gamma>\<turnstile>\<langle>[],(cs, c2 # cs) # css,t'\<rangle> \<Rightarrow> t" by (clarsimp elim!: execs_elim_cases) have "\<Gamma>\<turnstile>\<langle>Catch c1 c2 # cs,css,Normal s\<rangle> \<Rightarrow> t" proof (cases t') case (Normal t'') with exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow> t'" by (auto intro: exec.CatchMiss) moreover from execs_rest Normal have "\<Gamma>\<turnstile>\<langle>cs,css,t'\<rangle> \<Rightarrow> t" by (cases) auto ultimately show ?thesis by (rule execs.Cons) next case (Abrupt t'') from execs_rest Abrupt have "\<Gamma>\<turnstile>\<langle>c2#cs,css,Normal t''\<rangle> \<Rightarrow> t" by (cases) auto then obtain v where exec_c2: "\<Gamma>\<turnstile>\<langle>c2,Normal t''\<rangle> \<Rightarrow> v" and rest: "\<Gamma>\<turnstile>\<langle>cs,css,v\<rangle> \<Rightarrow> t" by cases from exec_c1 Abrupt exec_c2 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow> v" by - (rule exec.CatchMatch, auto) from this rest show ?thesis by (rule execs.Cons) next case (Fault f) with exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow> Fault f" by (auto intro: exec.intros) moreover from execs_rest Fault have "\<Gamma>\<turnstile>\<langle>cs,css,Fault f\<rangle> \<Rightarrow> t" by (cases) auto ultimately show ?thesis by (rule execs.Cons) next case Stuck with exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow> Stuck" by (auto intro: exec.intros) moreover from execs_rest Stuck have "\<Gamma>\<turnstile>\<langle>cs,css,Stuck\<rangle> \<Rightarrow> t" by (cases) auto ultimately show ?thesis by (rule execs.Cons) qed with Catch show ?thesis by simp next case 
(Call p bdy cs s) have bdy: "\<Gamma> p = Some bdy" by fact from Call execs obtain t' where exec_body: "\<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> \<Rightarrow> t'" and execs_rest: "\<Gamma>\<turnstile>\<langle>[],(cs,Throw#cs)#css ,t'\<rangle> \<Rightarrow> t" by (clarsimp elim!: execs_elim_cases) have "\<Gamma>\<turnstile>\<langle>Call p # cs,css,Normal s\<rangle> \<Rightarrow> t" proof (cases t') case (Normal t'') with exec_body bdy have "\<Gamma>\<turnstile>\<langle>Call p ,Normal s\<rangle> \<Rightarrow> Normal t''" by (auto intro: exec.intros) moreover from execs_rest Normal have "\<Gamma>\<turnstile>\<langle>cs,css ,Normal t''\<rangle> \<Rightarrow> t" by cases auto ultimately show ?thesis by (rule execs.Cons) next case (Abrupt t'') with exec_body bdy have "\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> Abrupt t''" by (auto intro: exec.intros) moreover from execs_rest Abrupt have "\<Gamma>\<turnstile>\<langle>Throw # cs,css,Normal t''\<rangle> \<Rightarrow> t" by (cases) auto then obtain v where v: "\<Gamma>\<turnstile>\<langle>Throw,Normal t''\<rangle> \<Rightarrow> v" "\<Gamma>\<turnstile>\<langle>cs,css,v\<rangle> \<Rightarrow> t" by (clarsimp elim!: execs_elim_cases) moreover from v have "v=Abrupt t''" by (auto elim: exec_Normal_elim_cases) ultimately show ?thesis by (auto intro: execs.Cons) next case (Fault f) with exec_body bdy have "\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> Fault f" by (auto intro: exec.intros) moreover from execs_rest Fault have "\<Gamma>\<turnstile>\<langle>cs,css,Fault f\<rangle> \<Rightarrow> t" by (cases) (auto elim: execs_elim_cases dest: Fault_end) ultimately show ?thesis by (rule execs.Cons) next case Stuck with exec_body bdy have "\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> Stuck" by (auto intro: exec.intros) moreover from execs_rest Stuck have "\<Gamma>\<turnstile>\<langle>cs,css,Stuck\<rangle> \<Rightarrow> t" by (cases) (auto elim: execs_elim_cases dest: Stuck_end) ultimately show ?thesis by (rule execs.Cons) qed with Call show ?thesis by simp qed (insert execs, (blast intro:execs.intros exec.intros elim!: execs_elim_cases)+) qed theorem steps_impl_exec: assumes steps: "\<Gamma>\<turnstile>([c],[],s) \<rightarrow>\<^sup>* ([],[],t)" shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" using steps_impl_execs [OF steps] by (blast elim: execs_elim_cases) corollary steps_eq_exec: "\<Gamma>\<turnstile>([c],[],s) \<rightarrow>\<^sup>* ([],[],t) = \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" by (blast intro: steps_impl_exec exec_impl_steps) subsection \<open>Infinite Computations: \<open>inf \<Gamma> cs css s\<close>\<close> definition inf :: "[('s,'p,'f) body,('s,'p,'f) com list,('s,'p,'f) continuation list,('s,'f) xstate] \<Rightarrow> bool" where "inf \<Gamma> cs css s = (\<exists>f. f 0 = (cs,css,s) \<and> (\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f(Suc i)))" lemma not_infI: "\<lbrakk>\<And>f. \<lbrakk>f 0 = (cs,css,s); \<And>i. 
\<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)\<rbrakk> \<Longrightarrow> False\<rbrakk> \<Longrightarrow> \<not>inf \<Gamma> cs css s" by (auto simp add: inf_def) subsection \<open>Equivalence of Termination and Absence of Infinite Computations\<close> inductive "terminatess":: "[('s,'p,'f) body,('s,'p,'f) com list, ('s,'p,'f) continuation list,('s,'f) xstate] \<Rightarrow> bool" ("_\<turnstile>_,_ \<Down> _" [60,20,60] 89) for \<Gamma>::"('s,'p,'f) body" where Nil: "\<Gamma>\<turnstile>[],[]\<Down>s" | ExitBlockNormal: "\<Gamma>\<turnstile>nrms,css\<Down>Normal s \<Longrightarrow> \<Gamma>\<turnstile>[],(nrms,abrs)#css\<Down>Normal s" | ExitBlockAbrupt: "\<Gamma>\<turnstile>abrs,css\<Down>Normal s \<Longrightarrow> \<Gamma>\<turnstile>[],(nrms,abrs)#css\<Down>Abrupt s" | ExitBlockFault: "\<Gamma>\<turnstile>nrms,css\<Down>Fault f \<Longrightarrow> \<Gamma>\<turnstile>[],(nrms,abrs)#css\<Down>Fault f" | ExitBlockStuck: "\<Gamma>\<turnstile>nrms,css\<Down>Stuck \<Longrightarrow> \<Gamma>\<turnstile>[],(nrms,abrs)#css\<Down>Stuck" | Cons: "\<lbrakk>\<Gamma>\<turnstile>c\<down>s; (\<forall>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t \<longrightarrow> \<Gamma>\<turnstile>cs,css\<Down>t)\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>c#cs,css\<Down>s" inductive_cases terminatess_elim_cases [cases set]: "\<Gamma>\<turnstile>[],css\<Down>t" "\<Gamma>\<turnstile>c#cs,css\<Down>t" lemma terminatess_Fault: "\<And>cs. \<Gamma>\<turnstile>cs,css\<Down>Fault f" proof (induct css) case Nil show "\<Gamma>\<turnstile>cs,[]\<Down>Fault f" proof (induct cs) case Nil show "\<Gamma>\<turnstile>[],[]\<Down>Fault f" by (rule terminatess.Nil) next case (Cons c cs) thus ?case by (auto intro: terminatess.intros terminates.intros dest: Fault_end) qed next case (Cons d css) have hyp: "\<And>cs. \<Gamma>\<turnstile>cs,css\<Down>Fault f" by fact obtain nrms abrs where d: "d=(nrms,abrs)" by (cases d) auto have "\<Gamma>\<turnstile>cs,(nrms,abrs)#css\<Down>Fault f" proof (induct cs) case Nil show "\<Gamma>\<turnstile>[],(nrms, abrs) # css\<Down>Fault f" by (rule terminatess.ExitBlockFault) (rule hyp) next case (Cons c cs) have hyp1: "\<Gamma>\<turnstile>cs,(nrms, abrs) # css\<Down>Fault f" by fact show "\<Gamma>\<turnstile>c#cs,(nrms, abrs)#css\<Down>Fault f" by (auto intro: hyp1 terminatess.Cons terminates.intros dest: Fault_end) qed with d show ?case by simp qed lemma terminatess_Stuck: "\<And>cs. \<Gamma>\<turnstile>cs,css\<Down>Stuck" proof (induct css) case Nil show "\<Gamma>\<turnstile>cs,[]\<Down>Stuck" proof (induct cs) case Nil show "\<Gamma>\<turnstile>[],[]\<Down>Stuck" by (rule terminatess.Nil) next case (Cons c cs) thus ?case by (auto intro: terminatess.intros terminates.intros dest: Stuck_end) qed next case (Cons d css) have hyp: "\<And>cs. 
\<Gamma>\<turnstile>cs,css\<Down>Stuck" by fact obtain nrms abrs where d: "d=(nrms,abrs)" by (cases d) auto have "\<Gamma>\<turnstile>cs,(nrms,abrs)#css\<Down>Stuck" proof (induct cs) case Nil show "\<Gamma>\<turnstile>[],(nrms, abrs) # css\<Down>Stuck" by (rule terminatess.ExitBlockStuck) (rule hyp) next case (Cons c cs) have hyp1: "\<Gamma>\<turnstile>cs,(nrms, abrs) # css\<Down>Stuck" by fact show "\<Gamma>\<turnstile>c#cs,(nrms, abrs)#css\<Down>Stuck" by (auto intro: hyp1 terminatess.Cons terminates.intros dest: Stuck_end) qed with d show ?case by simp qed lemma Basic_terminates: "\<Gamma>\<turnstile>Basic f \<down> t" by (cases t) (auto intro: terminates.intros) lemma step_preserves_terminations: assumes step: "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow> (cs',css',t)" shows "\<Gamma>\<turnstile>cs,css\<Down>s \<Longrightarrow> \<Gamma>\<turnstile>cs',css'\<Down>t" using step proof (induct) case Skip thus ?case by (auto elim: terminates_Normal_elim_cases terminatess_elim_cases intro: exec.intros) next case Guard thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case GuardFault thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case FaultProp show ?case by (rule terminatess_Fault) next case FaultPropBlock show ?case by (rule terminatess_Fault) next case AbruptProp thus ?case by (blast elim: terminatess_elim_cases intro: terminatess.intros) next case ExitBlockNormal thus ?case by (blast elim: terminatess_elim_cases intro: terminatess.intros ) next case ExitBlockAbrupt thus ?case by (blast elim: terminatess_elim_cases intro: terminatess.intros ) next case Basic thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case Spec thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case SpecStuck thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case Seq thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case CondTrue thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case CondFalse thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case WhileTrue thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case WhileFalse thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case (Call p bdy cs css s) have bdy: "\<Gamma> p = Some bdy" by fact from Call obtain term_body: "\<Gamma>\<turnstile>bdy \<down> Normal s" and term_rest: "\<forall>t. 
\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> t \<longrightarrow> \<Gamma>\<turnstile>cs,css\<Down>t" by (fastforce elim!: terminatess_elim_cases terminates_Normal_elim_cases) show "\<Gamma>\<turnstile>[bdy],(cs,Throw # cs)#css\<Down>Normal s" proof (rule terminatess.Cons [OF term_body],clarsimp) fix t assume exec_body: "\<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> \<Rightarrow> t" show "\<Gamma>\<turnstile>[],(cs,Throw # cs) # css\<Down>t" proof (cases t) case (Normal t') with exec_body bdy have "\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> Normal t'" by (auto intro: exec.intros) with term_rest have "\<Gamma>\<turnstile>cs,css\<Down>Normal t'" by iprover with Normal show ?thesis by (auto intro: terminatess.intros terminates.intros elim: exec_Normal_elim_cases) next case (Abrupt t') with exec_body bdy have "\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> Abrupt t'" by (auto intro: exec.intros) with term_rest have "\<Gamma>\<turnstile>cs,css\<Down>Abrupt t'" by iprover with Abrupt show ?thesis by (fastforce intro: terminatess.intros terminates.intros elim: exec_Normal_elim_cases) next case Fault thus ?thesis by (iprover intro: terminatess_Fault) next case Stuck thus ?thesis by (iprover intro: terminatess_Stuck) qed qed next case CallUndefined thus ?case by (iprover intro: terminatess_Stuck) next case StuckProp show ?case by (rule terminatess_Stuck) next case StuckPropBlock show ?case by (rule terminatess_Stuck) next case DynCom thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case Throw thus ?case by (blast elim: terminatess_elim_cases terminates_Normal_elim_cases intro: terminatess.intros terminates.intros exec.intros) next case (Catch c1 c2 cs css s) then obtain term_c1: "\<Gamma>\<turnstile>c1 \<down> Normal s" and term_c2: "\<forall>s'. \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow> Abrupt s' \<longrightarrow> \<Gamma>\<turnstile>c2 \<down> Normal s'"and term_rest: "\<forall>t. 
\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow> t \<longrightarrow> \<Gamma>\<turnstile>cs,css\<Down>t" by (clarsimp elim!: terminatess_elim_cases terminates_Normal_elim_cases) show "\<Gamma>\<turnstile>[c1],(cs, c2 # cs) # css\<Down>Normal s" proof (rule terminatess.Cons [OF term_c1],clarsimp) fix t assume exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow> t" show "\<Gamma>\<turnstile>[],(cs, c2 # cs) # css\<Down>t" proof (cases t) case (Normal t') with exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow> t" by (auto intro: exec.intros) with term_rest have "\<Gamma>\<turnstile>cs,css\<Down>t" by iprover with Normal show ?thesis by (iprover intro: terminatess.intros) next case (Abrupt t') with exec_c1 term_c2 have "\<Gamma>\<turnstile>c2 \<down> Normal t'" by auto moreover { fix w assume exec_c2: "\<Gamma>\<turnstile>\<langle>c2,Normal t'\<rangle> \<Rightarrow> w" have "\<Gamma>\<turnstile>cs,css\<Down>w" proof - from exec_c1 Abrupt exec_c2 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow> w" by (auto intro: exec.intros) with term_rest show ?thesis by simp qed } ultimately show ?thesis using Abrupt by (auto intro: terminatess.intros) next case Fault thus ?thesis by (iprover intro: terminatess_Fault) next case Stuck thus ?thesis by (iprover intro: terminatess_Stuck) qed qed qed ML \<open> ML_Thms.bind_thm ("rtrancl_induct3", Split_Rule.split_rule @{context} (Rule_Insts.read_instantiate @{context} [((("a", 0), Position.none), "(ax, ay, az)"), ((("b", 0), Position.none), "(bx, by, bz)")] [] @{thm rtranclp_induct})); \<close> lemma steps_preserves_terminations: assumes steps: "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow>\<^sup>* (cs',css',t)" shows "\<Gamma>\<turnstile>cs,css\<Down>s \<Longrightarrow> \<Gamma>\<turnstile>cs',css'\<Down>t" using steps proof (induct rule: rtrancl_induct3 [consumes 1]) assume "\<Gamma>\<turnstile>cs,css\<Down>s" then show "\<Gamma>\<turnstile>cs,css\<Down>s". next fix cs'' css'' w cs' css' t assume "\<Gamma>\<turnstile>(cs'',css'', w) \<rightarrow> (cs',css', t)" "\<Gamma>\<turnstile>cs,css\<Down>s \<Longrightarrow> \<Gamma>\<turnstile>cs'',css''\<Down>w" "\<Gamma>\<turnstile>cs,css\<Down>s" then show "\<Gamma>\<turnstile>cs',css'\<Down>t" by (blast dest: step_preserves_terminations) qed theorem steps_preserves_termination: assumes steps: "\<Gamma>\<turnstile>([c],[],s) \<rightarrow>\<^sup>* (c'#cs',css',t)" assumes term_c: "\<Gamma>\<turnstile>c\<down>s" shows "\<Gamma>\<turnstile>c'\<down>t" proof - from term_c have "\<Gamma>\<turnstile>[c],[]\<Down>s" by (auto intro: terminatess.intros) from steps this have "\<Gamma>\<turnstile>c'#cs',css'\<Down>t" by (rule steps_preserves_terminations) thus "\<Gamma>\<turnstile>c'\<down>t" by (auto elim: terminatess_elim_cases) qed lemma renumber': assumes f: "\<forall>i. (a,f i) \<in> r\<^sup>* \<and> (f i,f(Suc i)) \<in> r" assumes a_b: "(a,b) \<in> r\<^sup>*" shows "b = f 0 \<Longrightarrow> (\<exists>f. f 0 = a \<and> (\<forall>i. (f i, f(Suc i)) \<in> r))" using a_b proof (induct rule: converse_rtrancl_induct [consumes 1]) assume "b = f 0" with f show "\<exists>f. f 0 = b \<and> (\<forall>i. (f i, f (Suc i)) \<in> r)" by blast next fix a z assume a_z: "(a, z) \<in> r" and "(z, b) \<in> r\<^sup>*" assume "b = f 0 \<Longrightarrow> \<exists>f. f 0 = z \<and> (\<forall>i. (f i, f (Suc i)) \<in> r)" "b = f 0" then obtain f where f0: "f 0 = z" and seq: "\<forall>i. 
(f i, f (Suc i)) \<in> r" by iprover { fix i have "((\<lambda>i. case i of 0 \<Rightarrow> a | Suc i \<Rightarrow> f i) i, f i) \<in> r" using seq a_z f0 by (cases i) auto } then show "\<exists>f. f 0 = a \<and> (\<forall>i. (f i, f (Suc i)) \<in> r)" by - (rule exI [where x="\<lambda>i. case i of 0 \<Rightarrow> a | Suc i \<Rightarrow> f i"],simp) qed lemma renumber: "\<forall>i. (a,f i) \<in> r\<^sup>* \<and> (f i,f(Suc i)) \<in> r \<Longrightarrow> \<exists>f. f 0 = a \<and> (\<forall>i. (f i, f(Suc i)) \<in> r)" by(blast dest:renumber') lemma not_inf_Fault': assumes enum_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" shows "\<And>k cs. f k = (cs,css,Fault m) \<Longrightarrow> False" proof (induct css) case Nil have f_k: "f k = (cs,[],Fault m)" by fact have "\<And>k. f k = (cs,[],Fault m) \<Longrightarrow> False" proof (induct cs) case Nil have "f k = ([], [], Fault m)" by fact moreover from enum_step have "\<Gamma>\<turnstile>f k \<rightarrow> f (Suc k)".. ultimately show "False" by (fastforce elim: step_elim_cases) next case (Cons c cs) have fk: "f k = (c # cs, [], Fault m)" by fact from enum_step have "\<Gamma>\<turnstile>f k \<rightarrow> f (Suc k)".. with fk have "f (Suc k) = (cs,[],Fault m)" by (fastforce elim: step_elim_cases) with enum_step Cons.hyps show False by blast qed from this f_k show False by blast next case (Cons ds css) then obtain nrms abrs where ds: "ds=(nrms,abrs)" by (cases ds) auto have hyp: "\<And>k cs. f k = (cs,css,Fault m) \<Longrightarrow> False" by fact have "\<And>k. f k = (cs,(nrms,abrs)#css,Fault m) \<Longrightarrow> False" proof (induct cs) case Nil have fk: "f k = ([], (nrms, abrs) # css, Fault m)" by fact from enum_step have "\<Gamma>\<turnstile>f k \<rightarrow> f (Suc k)".. with fk have "f (Suc k) = (nrms,css,Fault m)" by (fastforce elim: step_elim_cases) thus ?case by (rule hyp) next case (Cons c cs) have fk: "f k = (c#cs, (nrms, abrs) # css, Fault m)" by fact have hyp1: "\<And>k. f k = (cs, (nrms, abrs) # css, Fault m) \<Longrightarrow> False" by fact from enum_step have "\<Gamma>\<turnstile>f k \<rightarrow> f (Suc k)".. with fk have "f (Suc k) = (cs,(nrms,abrs)#css,Fault m)" by (fastforce elim: step_elim_cases) thus ?case by (rule hyp1) qed with ds Cons.prems show False by auto qed lemma not_inf_Fault: "\<not> inf \<Gamma> cs css (Fault m)" apply (rule not_infI) apply (rule_tac f=f in not_inf_Fault' ) by auto lemma not_inf_Stuck': assumes enum_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" shows "\<And>k cs. f k = (cs,css,Stuck) \<Longrightarrow> False" proof (induct css) case Nil have f_k: "f k = (cs,[],Stuck)" by fact have "\<And>k. f k = (cs,[],Stuck) \<Longrightarrow> False" proof (induct cs) case Nil have "f k = ([], [], Stuck)" by fact moreover from enum_step have "\<Gamma>\<turnstile>f k \<rightarrow> f (Suc k)".. ultimately show "False" by (fastforce elim: step_elim_cases) next case (Cons c cs) have fk: "f k = (c # cs, [], Stuck)" by fact from enum_step have "\<Gamma>\<turnstile>f k \<rightarrow> f (Suc k)".. with fk have "f (Suc k) = (cs,[],Stuck)" by (fastforce elim: step_elim_cases) with enum_step Cons.hyps show False by blast qed from this f_k show False . next case (Cons ds css) then obtain nrms abrs where ds: "ds=(nrms,abrs)" by (cases ds) auto have hyp: "\<And>k cs. f k = (cs,css,Stuck) \<Longrightarrow> False" by fact have "\<And>k. 
f k = (cs,(nrms,abrs)#css,Stuck) \<Longrightarrow> False" proof (induct cs) case Nil have fk: "f k = ([], (nrms, abrs) # css, Stuck)" by fact from enum_step have "\<Gamma>\<turnstile>f k \<rightarrow> f (Suc k)".. with fk have "f (Suc k) = (nrms,css,Stuck)" by (fastforce elim: step_elim_cases) thus ?case by (rule hyp) next case (Cons c cs) have fk: "f k = (c#cs, (nrms, abrs) # css, Stuck)" by fact have hyp1: "\<And>k. f k = (cs, (nrms, abrs) # css, Stuck) \<Longrightarrow> False" by fact from enum_step have "\<Gamma>\<turnstile>f k \<rightarrow> f (Suc k)".. with fk have "f (Suc k) = (cs,(nrms,abrs)#css,Stuck)" by (fastforce elim: step_elim_cases) thus ?case by (rule hyp1) qed with ds Cons.prems show False by auto qed lemma not_inf_Stuck: "\<not> inf \<Gamma> cs css Stuck" apply (rule not_infI) apply (rule_tac f=f in not_inf_Stuck') by auto lemma last_butlast_app: assumes butlast: "butlast as = xs @ butlast bs" assumes not_Nil: "bs \<noteq> []" "as \<noteq> []" assumes last: "fst (last as) = fst (last bs)" "snd (last as) = snd (last bs)" shows "as = xs @ bs" proof - from last have "last as = last bs" by (cases "last as",cases "last bs") simp moreover from not_Nil have "as = butlast as @ [last as]" "bs = butlast bs @ [last bs]" by auto ultimately show ?thesis using butlast by simp qed lemma last_butlast_tl: assumes butlast: "butlast bs = x # butlast as" assumes not_Nil: "bs \<noteq> []" "as \<noteq> []" assumes last: "fst (last as) = fst (last bs)" "snd (last as) = snd (last bs)" shows "as = tl bs" proof - from last have "last as = last bs" by (cases "last as",cases "last bs") simp moreover from not_Nil have "as = butlast as @ [last as]" "bs = butlast bs @ [last bs]" by auto ultimately show ?thesis using butlast by simp qed locale inf = fixes CS:: "('s,'p,'f) config \<Rightarrow> ('s, 'p,'f) com list" and CSS:: "('s,'p,'f) config \<Rightarrow> ('s, 'p,'f) continuation list" and S:: "('s,'p,'f) config \<Rightarrow> ('s,'f) xstate" defines CS_def : "CS \<equiv> fst" defines CSS_def : "CSS \<equiv> \<lambda>c. fst (snd c)" defines S_def: "S \<equiv> \<lambda>c. snd (snd c)" lemma (in inf) steps_hd_drop_suffix: assumes f_0: "f 0 = (c#cs,css,s)" assumes f_step: "\<forall>i. \<Gamma>\<turnstile> f(i) \<rightarrow> f(Suc i)" assumes not_finished: "\<forall>i < k. \<not> (CS (f i) = cs \<and> CSS (f i) = css)" assumes simul: "\<forall>i\<le>k. (if pcss i = [] then CSS (f i)=css \<and> CS (f i)=pcs i@cs else CS (f i)=pcs i \<and> CSS (f i)= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css)" defines "p\<equiv>\<lambda>i. (pcs i, pcss i, S (f i))" shows "\<forall>i<k. \<Gamma>\<turnstile> p i \<rightarrow> p (Suc i)" using not_finished simul proof (induct k) case 0 thus ?case by simp next case (Suc k) have simul: "\<forall>i\<le>Suc k. (if pcss i = [] then CSS (f i)=css \<and> CS (f i)=pcs i@cs else CS (f i)=pcs i \<and> CSS (f i)= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css)" by fact have not_finished': "\<forall>i < Suc k. \<not> (CS (f i) = cs \<and> CSS (f i) = css)" by fact with simul have not_finished: "\<forall>i<Suc k. 
\<not> (pcs i = [] \<and> pcss i = [])" by (auto simp add: CS_def CSS_def S_def split: if_split_asm) show ?case proof (clarify) fix i assume i_le_Suc_k: "i < Suc k" show "\<Gamma>\<turnstile> p i \<rightarrow> p (Suc i)" proof (cases "i < k") case True with not_finished' simul Suc.hyps show ?thesis by auto next case False with i_le_Suc_k have eq_i_k: "i=k" by simp show "\<Gamma>\<turnstile>p i \<rightarrow> p (Suc i)" proof - obtain cs' css' t' where f_Suc_i: "f (Suc i) = (cs', css', t')" by (cases "f (Suc i)") obtain cs'' css'' t'' where f_i: "f i = (cs'',css'',t'')" by (cases "f i") from not_finished eq_i_k have pcs_pcss_not_Nil: "\<not> (pcs i = [] \<and> pcss i = [])" by auto from simul [rule_format, of i] i_le_Suc_k f_i have pcs_pcss_i: "if pcss i = [] then css''=css \<and> cs''=pcs i@cs else cs''=pcs i \<and> css''= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css" by (simp add: CS_def CSS_def S_def cong: if_cong) from simul [rule_format, of "Suc i"] i_le_Suc_k f_Suc_i have pcs_pcss_Suc_i: "if pcss (Suc i) = [] then css' = css \<and> cs' = pcs (Suc i) @ cs else cs' = pcs (Suc i) \<and> css' = butlast (pcss (Suc i)) @ [(fst (last (pcss (Suc i))) @ cs, snd (last (pcss (Suc i))) @ cs)] @ css" by (simp add: CS_def CSS_def S_def cong: if_cong) show ?thesis proof (cases "pcss i = []") case True note pcss_Nil = this with pcs_pcss_i pcs_pcss_not_Nil obtain p ps where pcs_i: "pcs i = p#ps" and css'': "css''=css" and cs'': "cs''=(p#ps)@cs" by (auto simp add: neq_Nil_conv) with f_i have "f i = (p#(ps@cs),css,t'')" by simp with f_Suc_i f_step [rule_format, of i] have step_css: "\<Gamma>\<turnstile> (p#(ps@cs),css,t'') \<rightarrow> (cs',css',t')" by simp from step_Cons' [OF this, of p "ps@cs"] obtain css''' where css''': "css' = css''' @ css" "if css''' = [] then \<exists>p. cs' = p @ ps @ cs else (\<exists>pnorm pabr. 
css'''=[(pnorm @ ps @ cs,pabr @ ps @ cs)])" by auto show ?thesis proof (cases "css''' = []") case True with css''' obtain p' where css': "css' = css" and cs': "cs' = p' @ ps @ cs" by auto (*from cs' css' f_Suc_i f_i [rule_format, of "Suc k"] have p_ps_not_Nil: "p'@ps \<noteq> Nil" by auto*) from css' cs' step_css have step: "\<Gamma>\<turnstile> (p#(ps@cs),css,t'') \<rightarrow> (p'@ps@cs,css,t')" by simp hence "\<Gamma>\<turnstile> ((p#ps)@cs,css,t'') \<rightarrow> ((p'@ps)@cs,css,t')" by simp from drop_suffix_css_step' [OF drop_suffix_same_css_step [OF this], where xs="css" and css="[]" and css'="[]"] have "\<Gamma>\<turnstile> (p#ps,[],t'') \<rightarrow> (p'@ps,[],t')" by simp moreover from css' cs' pcs_pcss_Suc_i obtain "pcs (Suc i) = p'@ps" and "pcss (Suc i) = []" by (simp split: if_split_asm) ultimately show ?thesis using pcs_i pcss_Nil f_i f_Suc_i by (simp add: CS_def CSS_def S_def p_def) next case False with css''' obtain pnorm pabr where css': "css'=css'''@css" "css'''=[(pnorm @ ps @ cs,pabr @ ps @ cs)]" by auto with css''' step_css have "\<Gamma>\<turnstile> (p#ps@cs,css,t'') \<rightarrow> (cs',[(pnorm@ps@cs,pabr@ps@cs)]@css,t')" by simp then have "\<Gamma>\<turnstile>(p#ps, css, t'') \<rightarrow> (cs', [(pnorm@ps, pabr@ps)] @ css, t')" by (rule drop_suffix_hd_css_step) from drop_suffix_css_step' [OF this, where css="[]" and xs="css" and css'="[(pnorm@ps, pabr@ps)]"] have "\<Gamma>\<turnstile> (p#ps,[],t'') \<rightarrow> (cs',[(pnorm@ps, pabr@ps)],t')" by simp moreover from css' pcs_pcss_Suc_i obtain "pcs (Suc i) = cs'" "pcss (Suc i) = [(pnorm@ps, pabr@ps)]" apply (cases "pcss (Suc i)") apply (auto split: if_split_asm) done ultimately show ?thesis using pcs_i pcss_Nil f_i f_Suc_i by (simp add: p_def CS_def CSS_def S_def) qed next case False note pcss_i_not_Nil = this with pcs_pcss_i obtain cs'': "cs''=pcs i" and css'': "css''= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css" by auto from f_Suc_i f_i f_step [rule_format, of i] have step_i_full: "\<Gamma>\<turnstile> (cs'',css'',t'') \<rightarrow> (cs',css',t')" by simp show ?thesis proof (cases cs'') case (Cons c' cs) with step_Cons' [OF step_i_full] obtain css''' where css': "css' = css'''@css''" by auto with step_i_full have "\<Gamma>\<turnstile> (cs'',css'',t'') \<rightarrow> (cs',css'''@css'',t')" by simp from Cons_change_css_step [OF this, where xss="pcss i"] Cons cs'' have "\<Gamma>\<turnstile> (pcs i, pcss i,t'') \<rightarrow> (cs',css'''@pcss i,t')" by simp moreover from cs'' css'' css' False pcs_pcss_Suc_i obtain "pcs (Suc i) = cs'" "pcss (Suc i) = css'''@pcss i" apply (auto split: if_split_asm) apply (drule (4) last_butlast_app) by simp ultimately show ?thesis using f_i f_Suc_i by (simp add: p_def CS_def CSS_def S_def) next case Nil note cs''_Nil = this show ?thesis proof (cases "butlast (pcss i)") case (Cons bpcs bpcss) with cs''_Nil step_i_full css'' have *: "\<Gamma>\<turnstile> ([],[hd css'']@tl css'',t'') \<rightarrow> (cs',css',t')" by simp moreover from step_Nil [OF *] have css': "css'=tl css''" by simp ultimately have step_i_full: "\<Gamma>\<turnstile> ([],[hd css'']@tl css'',t'') \<rightarrow> (cs',tl css'',t')" by simp from css'' Cons pcss_i_not_Nil have "hd css'' = hd (pcss i)" by (auto simp add: neq_Nil_conv split: if_split_asm) with cs'' cs''_Nil Nil_change_css_step [where ass="[hd css'']" and css="tl css''" and ass'="[]" and xss="tl (pcss i)", simplified, OF step_i_full [simplified]] have "\<Gamma>\<turnstile> (pcs i,[hd (pcss i)]@tl (pcss i),t'') \<rightarrow> (cs',tl (pcss 
i),t')" by simp with pcss_i_not_Nil have "\<Gamma>\<turnstile> (pcs i,pcss i,t'') \<rightarrow> (cs',tl (pcss i),t')" by simp moreover from css' css'' cs''_Nil Cons pcss_i_not_Nil pcs_pcss_Suc_i obtain "pcs (Suc i) = cs'" "pcss (Suc i) = tl (pcss i)" apply (clarsimp split: if_split_asm) apply (drule (4) last_butlast_tl) by simp ultimately show ?thesis using f_i f_Suc_i by (simp add: p_def CS_def CSS_def S_def) next case Nil with css'' pcss_i_not_Nil obtain pnorm pabr where css'': "css''= [(pnorm@cs,pabr@cs)]@css" and pcss_i: "pcss i = [(pnorm,pabr)]" by (force simp add: neq_Nil_conv split: if_split_asm) with cs''_Nil step_i_full have "\<Gamma>\<turnstile>([],[(pnorm@cs,pabr@cs)]@css,t'') \<rightarrow> (cs',css',t')" by simp from step_Nil [OF this] obtain css': "css'=css" and cs': "(case t'' of Abrupt s' \<Rightarrow> cs' = pabr @ cs \<and> t' = Normal s' | _ \<Rightarrow> cs' = pnorm @ cs \<and> t' = t'')" by (simp cong: xstate.case_cong) let "?pcs_Suc_i " = "(case t'' of Abrupt s' \<Rightarrow> pabr | _ \<Rightarrow> pnorm)" from cs' have "\<Gamma>\<turnstile>([],[(pnorm,pabr)],t'') \<rightarrow> (?pcs_Suc_i,[],t')" by (auto intro: step.intros split: xstate.splits) moreover from css'' css' cs' pcss_i pcs_pcss_Suc_i obtain "pcs (Suc i) = ?pcs_Suc_i" "pcss (Suc i) = []" by (simp split: if_split_asm xstate.splits) ultimately show ?thesis using pcss_i cs'' cs''_Nil f_i f_Suc_i by (simp add: p_def CS_def CSS_def S_def) qed qed qed qed qed qed qed lemma k_steps_to_rtrancl: assumes steps: "\<forall>i<k. \<Gamma>\<turnstile> p i \<rightarrow> p (Suc i)" shows "\<Gamma>\<turnstile>p 0\<rightarrow>\<^sup>* p k" using steps proof (induct k) case 0 thus ?case by auto next case (Suc k) have "\<forall>i<Suc k. \<Gamma>\<turnstile> p i \<rightarrow> p (Suc i)" by fact then obtain step_le_k: "\<forall>i<k. \<Gamma>\<turnstile> p i \<rightarrow> p (Suc i)" and step_k: "\<Gamma>\<turnstile> p k \<rightarrow> p (Suc k)" by auto from Suc.hyps [OF step_le_k] have "\<Gamma>\<turnstile> p 0 \<rightarrow>\<^sup>* p k". also note step_k finally show ?case . qed lemma (in inf) steps_hd_drop_suffix_finite: assumes f_0: "f 0 = (c#cs,css,s)" assumes f_step: "\<forall>i. \<Gamma>\<turnstile> f(i) \<rightarrow> f(Suc i)" assumes not_finished: "\<forall>i < k. \<not> (CS (f i) = cs \<and> CSS (f i) = css)" assumes simul: "\<forall>i\<le>k. (if pcss i = [] then CSS (f i)=css \<and> CS (f i)=pcs i@cs else CS (f i)=pcs i \<and> CSS (f i)= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css)" shows "\<Gamma>\<turnstile>([c],[],s) \<rightarrow>\<^sup>* (pcs k, pcss k, S (f k))" proof - from steps_hd_drop_suffix [OF f_0 f_step not_finished simul] have "\<forall>i<k. \<Gamma>\<turnstile> (pcs i, pcss i, S (f i)) \<rightarrow> (pcs (Suc i), pcss (Suc i), S (f (Suc i)))". from k_steps_to_rtrancl [OF this] have "\<Gamma>\<turnstile> (pcs 0, pcss 0, S (f 0)) \<rightarrow>\<^sup>* (pcs k, pcss k, S (f k))". moreover from f_0 simul [rule_format, of 0] have "(pcs 0, pcss 0, S (f 0)) = ([c],[],s)" by (auto split: if_split_asm simp add: CS_def CSS_def S_def) ultimately show ?thesis by simp qed lemma (in inf) steps_hd_drop_suffix_infinite: assumes f_0: "f 0 = (c#cs,css,s)" assumes f_step: "\<forall>i. \<Gamma>\<turnstile> f(i) \<rightarrow> f(Suc i)" assumes not_finished: "\<forall>i. \<not> (CS (f i) = cs \<and> CSS (f i) = css)" (*assumes not_finished: "\<forall>i. \<not> (pcs i = [] \<and> pcss i = [])"*) assumes simul: "\<forall>i. 
(if pcss i = [] then CSS (f i)=css \<and> CS (f i)=pcs i@cs else CS (f i)=pcs i \<and> CSS (f i)= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css)" defines "p\<equiv>\<lambda>i. (pcs i, pcss i, S (f i))" shows "\<Gamma>\<turnstile> p i \<rightarrow> p (Suc i)" proof - from steps_hd_drop_suffix [OF f_0 f_step, of "Suc i" pcss pcs] not_finished simul show ?thesis by (auto simp add: p_def) qed lemma (in inf) steps_hd_progress: assumes f_0: "f 0 = (c#cs,css,s)" assumes f_step: "\<forall>i. \<Gamma>\<turnstile> f(i) \<rightarrow> f(Suc i)" assumes c_unfinished: "\<forall>i < k. \<not> (CS (f i) = cs \<and> CSS (f i) = css)" shows "\<forall>i \<le> k. (\<exists>pcs pcss. (if pcss = [] then CSS (f i)=css \<and> CS (f i)=pcs@cs else CS (f i)=pcs \<and> CSS (f i)= butlast pcss@ [(fst (last pcss)@cs,(snd (last pcss))@cs)]@ css))" using c_unfinished proof (induct k) case 0 with f_0 show ?case by (simp add: CSS_def CS_def) next case (Suc k) have c_unfinished: "\<forall>i<Suc k. \<not> (CS (f i) = cs \<and> CSS (f i) = css)" by fact hence c_unfinished': "\<forall>i< k. \<not> (CS (f i) = cs \<and> CSS (f i) = css)" by simp show ?case proof (clarify) fix i assume i_le_Suc_k: "i \<le> Suc k" show "\<exists>pcs pcss. (if pcss = [] then CSS (f i)=css \<and> CS (f i)=pcs@cs else CS (f i)=pcs \<and> CSS (f i)= butlast pcss@ [(fst (last pcss)@cs,(snd (last pcss))@cs)]@ css)" proof (cases "i < Suc k") case True with Suc.hyps [OF c_unfinished', rule_format, of i] c_unfinished show ?thesis by auto next case False with i_le_Suc_k have eq_i_Suc_k: "i=Suc k" by auto obtain cs' css' t' where f_Suc_k: "f (Suc k) = (cs', css', t')" by (cases "f (Suc k)") obtain cs'' css'' t'' where f_k: "f k = (cs'',css'',t'')" by (cases "f k") with Suc.hyps [OF c_unfinished',rule_format, of k] obtain pcs pcss where pcs_pcss_k: "if pcss = [] then css'' = css \<and> cs'' = pcs @ cs else cs'' = pcs \<and> css'' = butlast pcss @ [(fst (last pcss) @ cs, snd (last pcss) @ cs)] @ css" by (auto simp add: CSS_def CS_def cong: if_cong) from c_unfinished [rule_format, of k] f_k pcs_pcss_k have pcs_pcss_empty: "\<not> (pcs = [] \<and> pcss = [])" by (auto simp add: CS_def CSS_def S_def split: if_split_asm) show ?thesis proof (cases "pcss = []") case True note pcss_Nil = this with pcs_pcss_k pcs_pcss_empty obtain p ps where pcs_i: "pcs = p#ps" and css'': "css''=css" and cs'': "cs''=(p#ps)@cs" by (cases "pcs") auto with f_k have "f k = (p#(ps@cs),css,t'')" by simp with f_Suc_k f_step [rule_format, of k] have step_css: "\<Gamma>\<turnstile> (p#(ps@cs),css,t'') \<rightarrow> (cs',css',t')" by simp from step_Cons' [OF this, of p "ps@cs"] obtain css''' where css''': "css' = css''' @ css" "if css''' = [] then \<exists>p. cs' = p @ ps @ cs else (\<exists>pnorm pabr. 
css'''=[(pnorm @ ps @ cs,pabr @ ps @ cs)])" by auto show ?thesis proof (cases "css''' = []") case True with css''' obtain p' where css': "css' = css" and cs': "cs' = p' @ ps @ cs" by auto from css' cs' f_Suc_k show ?thesis apply (rule_tac x="p'@ps" in exI) apply (rule_tac x="[]" in exI) apply (simp add: CSS_def CS_def eq_i_Suc_k) done next case False with css''' obtain pnorm pabr where css': "css'=css'''@css" "css'''=[(pnorm @ ps @ cs,pabr @ ps @ cs)]" by auto with f_Suc_k eq_i_Suc_k show ?thesis apply (rule_tac x="cs'" in exI) apply (rule_tac x="[(pnorm@ps, pabr@ps)]" in exI) by (simp add: CSS_def CS_def) qed next case False note pcss_k_not_Nil = this with pcs_pcss_k obtain cs'': "cs''=pcs" and css'': "css''= butlast pcss@ [(fst (last pcss)@cs,(snd (last pcss))@cs)]@ css" by auto from f_Suc_k f_k f_step [rule_format, of k] have step_i_full: "\<Gamma>\<turnstile> (cs'',css'',t'') \<rightarrow> (cs',css',t')" by simp show ?thesis proof (cases cs'') case (Cons c' cs) with step_Cons' [OF step_i_full] obtain css''' where css': "css' = css'''@css''" by auto with cs'' css'' f_Suc_k eq_i_Suc_k pcss_k_not_Nil show ?thesis apply (rule_tac x="cs'" in exI) apply (rule_tac x="css'''@pcss" in exI) by (clarsimp simp add: CSS_def CS_def butlast_append) next case Nil note cs''_Nil = this show ?thesis proof (cases "butlast pcss") case (Cons bpcs bpcss) with cs''_Nil step_i_full css'' have *: "\<Gamma>\<turnstile> ([],[hd css'']@tl css'',t'') \<rightarrow> (cs',css',t')" by simp moreover from step_Nil [OF *] obtain css': "css'=tl css''" and cs': "cs' = (case t'' of Abrupt s' \<Rightarrow> snd (hd css'') | _ \<Rightarrow> fst (hd css''))" by (auto split: xstate.splits) from css'' Cons pcss_k_not_Nil have "hd css'' = hd pcss" by (auto simp add: neq_Nil_conv split: if_split_asm) with css' cs' css'' cs''_Nil Cons pcss_k_not_Nil f_Suc_k eq_i_Suc_k show ?thesis apply (rule_tac x="cs'" in exI) apply (rule_tac x="tl pcss" in exI) apply (clarsimp split: xstate.splits simp add: CS_def CSS_def neq_Nil_conv split: if_split_asm) done next case Nil with css'' pcss_k_not_Nil obtain pnorm pabr where css'': "css''= [(pnorm@cs,pabr@cs)]@css" and pcss_k: "pcss = [(pnorm,pabr)]" by (force simp add: neq_Nil_conv split: if_split_asm) with cs''_Nil step_i_full have "\<Gamma>\<turnstile>([],[(pnorm@cs,pabr@cs)]@css,t'') \<rightarrow> (cs',css',t')" by simp from step_Nil [OF this] obtain css': "css'=css" and cs': "(case t'' of Abrupt s' \<Rightarrow> cs' = pabr @ cs \<and> t' = Normal s' | _ \<Rightarrow> cs' = pnorm @ cs \<and> t' = t'')" by (simp cong: xstate.case_cong) let "?pcs_Suc_k " = "(case t'' of Abrupt s' \<Rightarrow> pabr | _ \<Rightarrow> pnorm)" from css'' css' cs' pcss_k f_Suc_k eq_i_Suc_k show ?thesis apply (rule_tac x="?pcs_Suc_k" in exI) apply (rule_tac x="[]" in exI) apply (simp split: xstate.splits add: CS_def CSS_def) done qed qed qed qed qed qed lemma (in inf) inf_progress: assumes f_0: "f 0 = (c#cs,css,s)" assumes f_step: "\<forall>i. \<Gamma>\<turnstile> f(i) \<rightarrow> f(Suc i)" assumes unfinished: "\<forall>i. \<not> ((CS (f i) = cs) \<and> (CSS (f i) = css))" shows "\<exists>pcs pcss. (if pcss = [] then CSS (f i)=css \<and> CS (f i)=pcs@cs else CS (f i)=pcs \<and> CSS (f i)= butlast pcss@ [(fst (last pcss)@cs,(snd (last pcss))@cs)]@ css)" proof - from steps_hd_progress [OF f_0 f_step, of "i"] unfinished show ?thesis by auto qed lemma skolemize1: "\<forall>x. P x \<longrightarrow> (\<exists>y. Q x y) \<Longrightarrow> \<exists>f.\<forall>x. 
P x \<longrightarrow> Q x (f x)" by (rule choice) blast lemma skolemize2: "\<forall>x. P x \<longrightarrow> (\<exists>y z. Q x y z) \<Longrightarrow> \<exists>f g.\<forall>x. P x \<longrightarrow> Q x (f x) (g x)" apply (drule skolemize1) apply (erule exE) apply (drule skolemize1) apply fast done lemma skolemize2': "\<forall>x.\<exists>y z. P x y z \<Longrightarrow> \<exists>f g.\<forall>x. P x (f x) (g x)" apply (drule choice) apply (erule exE) apply (drule choice) apply fast done theorem (in inf) inf_cases: fixes c::"('s,'p,'f) com" assumes inf: "inf \<Gamma> (c#cs) css s" shows "inf \<Gamma> [c] [] s \<or> (\<exists>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t \<and> inf \<Gamma> cs css t)" proof - from inf obtain f where f_0: "f 0 = (c#cs,css,s)" and f_step: "(\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i))" by (auto simp add: inf_def) show ?thesis proof (cases "\<exists>i. CS (f i) = cs \<and> CSS (f i) = css") case True define k where "k = (LEAST i. CS (f i) = cs \<and> CSS (f i) = css)" from True obtain CS_f_k: "CS (f k) = cs" and CSS_f_k: "CSS (f k) = css" apply - apply (erule exE) apply (drule LeastI) apply (simp add: k_def) done have less_k_prop: "\<forall>i<k. \<not> (CS (f i) = cs \<and> CSS (f i) = css)" apply (intro allI impI) apply (unfold k_def) apply (drule not_less_Least) apply simp done have "\<Gamma>\<turnstile>([c], [], s) \<rightarrow>\<^sup>* ([],[],S (f k))" proof - have "\<forall>i\<le>k. \<exists>pcs pcss. (if pcss = [] then CSS (f i)=css \<and> CS (f i)=pcs@cs else CS (f i)=pcs \<and> CSS (f i)= butlast pcss@ [(fst (last pcss)@cs,(snd (last pcss))@cs)]@ css)" by (rule steps_hd_progress [OF f_0 f_step, where k=k, OF less_k_prop]) from skolemize2 [OF this] obtain pcs pcss where pcs_pcss: "\<forall>i\<le>k. (if pcss i = [] then CSS (f i)=css \<and> CS (f i)=pcs i@cs else CS (f i)=pcs i \<and> CSS (f i)= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css)" by iprover from pcs_pcss [rule_format, of k] CS_f_k CSS_f_k have finished: "pcs k = []" "pcss k = []" by (auto simp add: CS_def CSS_def S_def split: if_split_asm) from pcs_pcss have simul: "\<forall>i\<le>k. (if pcss i = [] then CSS (f i)=css \<and> CS (f i)=pcs i@cs else CS (f i)=pcs i \<and> CSS (f i)= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css)" by auto from steps_hd_drop_suffix_finite [OF f_0 f_step less_k_prop simul] finished show ?thesis by simp qed hence "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> S (f k)" by (rule steps_impl_exec) moreover from CS_f_k CSS_f_k f_step have "inf \<Gamma> cs css (S (f k))" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (i + k)" in exI) apply simp apply (auto simp add: CS_def CSS_def S_def) done ultimately have "(\<exists>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t \<and> inf \<Gamma> cs css t)" by blast thus ?thesis by simp next case False hence unfinished: "\<forall>i. \<not> ((CS (f i) = cs) \<and> (CSS (f i) = css))" by simp from inf_progress [OF f_0 f_step this] have "\<forall>i. \<exists>pcs pcss. (if pcss = [] then CSS (f i)=css \<and> CS (f i)=pcs@cs else CS (f i)=pcs \<and> CSS (f i)= butlast pcss@ [(fst (last pcss)@cs,(snd (last pcss))@cs)]@ css)" by auto from skolemize2' [OF this] obtain pcs pcss where pcs_pcss: "\<forall>i. 
(if pcss i = [] then CSS (f i)=css \<and> CS (f i)=pcs i@cs else CS (f i)=pcs i \<and> CSS (f i)= butlast (pcss i)@ [(fst (last (pcss i))@cs,(snd (last (pcss i)))@cs)]@ css)" by iprover define g where "g i = (pcs i, pcss i, S (f i))" for i from pcs_pcss [rule_format, of 0] f_0 have "g 0 = ([c],[],s)" by (auto split: if_split_asm simp add: CS_def CSS_def S_def g_def) moreover from steps_hd_drop_suffix_infinite [OF f_0 f_step unfinished pcs_pcss] have "\<forall>i. \<Gamma>\<turnstile>g i \<rightarrow> g (Suc i)" by (simp add: g_def) ultimately have "inf \<Gamma> [c] [] s" by (auto simp add: inf_def) thus ?thesis by simp qed qed lemma infE [consumes 1]: assumes inf: "inf \<Gamma> (c#cs) css s" assumes cases: "inf \<Gamma> [c] [] s \<Longrightarrow> P" "\<And>t. \<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t; inf \<Gamma> cs css t\<rbrakk> \<Longrightarrow> P" shows P using inf cases apply - apply (drule inf.inf_cases) apply auto done lemma inf_Seq: "inf \<Gamma> (Seq c1 c2#cs) css (Normal s) = inf \<Gamma> (c1#c2#cs) css (Normal s)" proof assume "inf \<Gamma> (Seq c1 c2 # cs) css (Normal s)" then obtain f where f_0: "f 0 = (Seq c1 c2#cs,css,Normal s)" and f_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" by (auto simp add: inf_def) from f_step [rule_format, of 0] f_0 have "f 1 = (c1#c2#cs,css,Normal s)" by (auto elim: step_Normal_elim_cases) with f_step show "inf \<Gamma> (c1#c2#cs) css (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) apply simp done next assume "inf \<Gamma> (c1 # c2 # cs) css (Normal s)" then obtain f where f_0: "f 0 = (c1# c2#cs,css,Normal s)" and f_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" by (auto simp add: inf_def) define g where "g i = (case i of 0 \<Rightarrow> (Seq c1 c2#cs,css,Normal s) | Suc j \<Rightarrow> f j)" for i with f_0 have "\<Gamma>\<turnstile>g 0 \<rightarrow> g (Suc 0)" by (auto intro: step.intros) moreover from f_step have "\<forall>i. i\<noteq>0 \<longrightarrow> \<Gamma>\<turnstile>g i \<rightarrow> g (Suc i)" by (auto simp add: g_def split: nat.splits) ultimately show "inf \<Gamma> (Seq c1 c2 # cs) css (Normal s)" apply (simp add: inf_def) apply (rule_tac x=g in exI) apply (simp add: g_def split: nat.splits) done qed lemma inf_WhileTrue: assumes b: "s \<in> b" shows "inf \<Gamma> (While b c#cs) css (Normal s) = inf \<Gamma> (c#While b c#cs) css (Normal s)" proof assume "inf \<Gamma> (While b c # cs) css (Normal s)" then obtain f where f_0: "f 0 = (While b c#cs,css,Normal s)" and f_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" by (auto simp add: inf_def) from b f_step [rule_format, of 0] f_0 have "f 1 = (c#While b c#cs,css,Normal s)" by (auto elim: step_Normal_elim_cases) with f_step show "inf \<Gamma> (c # While b c # cs) css (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) apply simp done next assume "inf \<Gamma> (c # While b c # cs) css (Normal s)" then obtain f where f_0: "f 0 = (c # While b c #cs,css,Normal s)" and f_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" by (auto simp add: inf_def) define h where "h i = (case i of 0 \<Rightarrow> (While b c#cs,css,Normal s) | Suc j \<Rightarrow> f j)" for i with b f_0 have "\<Gamma>\<turnstile>h 0 \<rightarrow> h (Suc 0)" by (auto intro: step.intros) moreover from f_step have "\<forall>i. 
i\<noteq>0 \<longrightarrow> \<Gamma>\<turnstile>h i \<rightarrow> h (Suc i)" by (auto simp add: h_def split: nat.splits) ultimately show "inf \<Gamma> (While b c # cs) css (Normal s)" apply (simp add: inf_def) apply (rule_tac x=h in exI) apply (simp add: h_def split: nat.splits) done qed lemma inf_Catch: "inf \<Gamma> (Catch c1 c2#cs) css (Normal s) = inf \<Gamma> [c1] ((cs,c2#cs)#css) (Normal s)" proof assume "inf \<Gamma> (Catch c1 c2#cs) css (Normal s)" then obtain f where f_0: "f 0 = (Catch c1 c2#cs,css,Normal s)" and f_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" by (auto simp add: inf_def) from f_step [rule_format, of 0] f_0 have "f 1 = ([c1],(cs,c2#cs)#css,Normal s)" by (auto elim: step_Normal_elim_cases) with f_step show "inf \<Gamma> [c1] ((cs,c2#cs)#css) (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) apply simp done next assume "inf \<Gamma> [c1] ((cs,c2#cs)#css) (Normal s)" then obtain f where f_0: "f 0 = ([c1],(cs,c2#cs)#css,Normal s)" and f_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" by (auto simp add: inf_def) define h where "h i = (case i of 0 \<Rightarrow> (Catch c1 c2#cs,css,Normal s) | Suc j \<Rightarrow> f j)" for i with f_0 have "\<Gamma>\<turnstile>h 0 \<rightarrow> h (Suc 0)" by (auto intro: step.intros) moreover from f_step have "\<forall>i. i\<noteq>0 \<longrightarrow> \<Gamma>\<turnstile>h i \<rightarrow> h (Suc i)" by (auto simp add: h_def split: nat.splits) ultimately show "inf \<Gamma> (Catch c1 c2 # cs) css (Normal s)" apply (simp add: inf_def) apply (rule_tac x=h in exI) apply (simp add: h_def split: nat.splits) done qed theorem terminates_impl_not_inf: assumes termi: "\<Gamma>\<turnstile>c \<down> s" shows "\<not>inf \<Gamma> [c] [] s" using termi proof induct case (Skip s) thus ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Skip], [], Normal s)" from f_step [of 0] f_0 have "f (Suc 0) = ([],[],Normal s)" by (auto elim: step_Normal_elim_cases) with f_step [of 1] show False by (auto elim: step_elim_cases) qed next case (Basic g s) thus ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Basic g], [], Normal s)" from f_step [of 0] f_0 have "f (Suc 0) = ([],[],Normal (g s))" by (auto elim: step_Normal_elim_cases) with f_step [of 1] show False by (auto elim: step_elim_cases) qed next case (Spec r s) thus ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Spec r], [], Normal s)" with f_step [of 0] have "\<Gamma>\<turnstile>([Spec r], [], Normal s) \<rightarrow> f (Suc 0)" by simp then show False proof (cases) fix t assume "(s, t) \<in> r" "f (Suc 0) = ([], [], Normal t)" with f_step [of 1] show False by (auto elim: step_elim_cases) next assume "\<forall>t. (s, t) \<notin> r" "f (Suc 0) = ([], [], Stuck)" with f_step [of 1] show False by (auto elim: step_elim_cases) qed qed next case (Guard s g c m) have g: "s \<in> g" by fact have hyp: "\<not> inf \<Gamma> [c] [] (Normal s)" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. 
\<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Guard m g c], [], Normal s)" from g f_step [of 0] f_0 have "f (Suc 0) = ([c],[],Normal s)" by (auto elim: step_Normal_elim_cases) with f_step have "inf \<Gamma> [c] [] (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) by simp with hyp show False .. qed next case (GuardFault s g m c) have g: "s \<notin> g" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Guard m g c], [], Normal s)" from g f_step [of 0] f_0 have "f (Suc 0) = ([],[],Fault m)" by (auto elim: step_Normal_elim_cases) with f_step [of 1] show False by (auto elim: step_elim_cases) qed next case (Fault c m) thus ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([c], [], Fault m)" from f_step [of 0] f_0 have "f (Suc 0) = ([],[],Fault m)" by (auto elim: step_Normal_elim_cases) with f_step [of 1] show False by (auto elim: step_elim_cases) qed next case (Seq c1 s c2) have hyp_c1: "\<not> inf \<Gamma> [c1] [] (Normal s)" by fact have hyp_c2: "\<forall>s'. \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow> s' \<longrightarrow> \<Gamma>\<turnstile>c2 \<down> s' \<and> \<not> inf \<Gamma> [c2] [] s'" by fact have "\<not> inf \<Gamma> ([c1,c2]) [] (Normal s)" proof assume "inf \<Gamma> [c1, c2] [] (Normal s)" then show False proof (cases rule: infE) assume "inf \<Gamma> [c1] [] (Normal s)" with hyp_c1 show ?thesis by simp next fix t assume "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow> t" "inf \<Gamma> [c2] [] t" with hyp_c2 show ?thesis by simp qed qed thus ?case by (simp add: inf_Seq) next case (CondTrue s b c1 c2) have b: "s \<in> b" by fact have hyp_c1: "\<not> inf \<Gamma> [c1] [] (Normal s)" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Cond b c1 c2], [], Normal s)" from b f_step [of 0] f_0 have "f 1 = ([c1],[],Normal s)" by (auto elim: step_Normal_elim_cases) with f_step have "inf \<Gamma> [c1] [] (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) by simp with hyp_c1 show False by simp qed next case (CondFalse s b c2 c1) have b: "s \<notin> b" by fact have hyp_c2: "\<not> inf \<Gamma> [c2] [] (Normal s)" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Cond b c1 c2], [], Normal s)" from b f_step [of 0] f_0 have "f 1 = ([c2],[],Normal s)" by (auto elim: step_Normal_elim_cases) with f_step have "inf \<Gamma> [c2] [] (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) by simp with hyp_c2 show False by simp qed next case (WhileTrue s b c) have b: "s \<in> b" by fact have hyp_c: "\<not> inf \<Gamma> [c] [] (Normal s)" by fact have hyp_w: "\<forall>s'. \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> \<Rightarrow> s' \<longrightarrow> \<Gamma>\<turnstile>While b c \<down> s' \<and> \<not> inf \<Gamma> [While b c] [] s'" by fact have "\<not> inf \<Gamma> [c,While b c] [] (Normal s)" proof assume "inf \<Gamma> [c,While b c] [] (Normal s)" from this hyp_c hyp_w show False by (cases rule: infE) auto qed with b show ?case by (simp add: inf_WhileTrue) next case (WhileFalse s b c) have b: "s \<notin> b" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. 
\<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([While b c], [], Normal s)" from b f_step [of 0] f_0 have "f (Suc 0) = ([],[],Normal s)" by (auto elim: step_Normal_elim_cases) with f_step [of 1] show False by (auto elim: step_elim_cases) qed next case (Call p bdy s) have bdy: "\<Gamma> p = Some bdy" by fact have hyp: "\<not> inf \<Gamma> [bdy] [] (Normal s)" by fact have not_inf_bdy: "\<not> inf \<Gamma> [bdy] [([],[Throw])] (Normal s)" proof assume "inf \<Gamma> [bdy] [([],[Throw])] (Normal s)" then show False proof (rule infE) assume "inf \<Gamma> [bdy] [] (Normal s)" with hyp show False by simp next fix t assume "\<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> \<Rightarrow> t" assume inf: "inf \<Gamma> [] [([], [Throw])] t" then obtain f where f_0: "f 0 = ([],[([], [Throw])],t)" and f_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" by (auto simp add: inf_def) show False proof (cases t) case (Normal t') with f_0 f_step [rule_format, of 0] have "f (Suc 0) = ([],[],(Normal t'))" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of "Suc 0"] show False by (auto elim: step.cases) next case (Abrupt t') with f_0 f_step [rule_format, of 0] have "f (Suc 0) = ([Throw],[],(Normal t'))" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of "Suc 0"] have "f (Suc (Suc 0)) = ([],[],(Abrupt t'))" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of "Suc(Suc 0)"] show False by (auto elim: step.cases) next case (Fault m) with f_0 f_step [rule_format, of 0] have "f (Suc 0) = ([],[],Fault m)" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of 1] have "f (Suc (Suc 0)) = ([],[],Fault m)" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of "Suc (Suc 0)"] show False by (auto elim: step.cases) next case Stuck with f_0 f_step [rule_format, of 0] have "f (Suc 0) = ([],[],Stuck)" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of 1] have "f (Suc (Suc 0)) = ([],[],Stuck)" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of "Suc (Suc 0)"] show False by (auto elim: step.cases) qed qed qed show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Call p], [], Normal s)" from bdy f_step [of 0] f_0 have "f (Suc 0) = ([bdy],[([],[Throw])],Normal s)" by (auto elim: step_Normal_elim_cases) with f_step have "inf \<Gamma> [bdy] [([],[Throw])] (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) by simp with not_inf_bdy show False by simp qed next case (CallUndefined p s) have undef: "\<Gamma> p = None" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Call p], [], Normal s)" from undef f_step [of 0] f_0 have "f (Suc 0) = ([],[],Stuck)" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of "Suc 0"] show False by (auto elim: step_elim_cases) qed next case (Stuck c) show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([c], [], Stuck)" from f_step [of 0] f_0 have "f (Suc 0) = ([],[],Stuck)" by (auto elim: step_elim_cases) with f_step [rule_format, of "Suc 0"] show False by (auto elim: step_elim_cases) qed next case (DynCom c s) have hyp: "\<not> inf \<Gamma> [(c s)] [] (Normal s)" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. 
\<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([DynCom c], [], Normal s)" from f_step [of 0] f_0 have "f (Suc 0) = ([(c s)], [], Normal s)" by (auto elim: step_elim_cases) with f_step have "inf \<Gamma> [(c s)] [] (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) by simp with hyp show False by simp qed next case (Throw s) thus ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([Throw], [], Normal s)" from f_step [of 0] f_0 have "f (Suc 0) = ([],[],Abrupt s)" by (auto elim: step_Normal_elim_cases) with f_step [of 1] show False by (auto elim: step_elim_cases) qed next case (Abrupt c s) show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f_0: "f 0 = ([c], [], Abrupt s)" from f_step [of 0] f_0 have "f (Suc 0) = ([],[],Abrupt s)" by (auto elim: step_elim_cases) with f_step [rule_format, of "Suc 0"] show False by (auto elim: step_elim_cases) qed next case (Catch c1 s c2) have hyp_c1: "\<not> inf \<Gamma> [c1] [] (Normal s)" by fact have hyp_c2: "\<forall>s'. \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow> Abrupt s' \<longrightarrow> \<Gamma>\<turnstile>c2 \<down> Normal s' \<and> \<not> inf \<Gamma> [c2] [] (Normal s')" by fact have "\<not> inf \<Gamma> [c1] [([],[c2])] (Normal s)" proof assume "inf \<Gamma> [c1] [([],[c2])] (Normal s)" then show False proof (rule infE) assume "inf \<Gamma> [c1] [] (Normal s)" with hyp_c1 show False by simp next fix t assume eval: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow> t" assume inf: "inf \<Gamma> [] [([], [c2])] t" then obtain f where f_0: "f 0 = ([],[([], [c2] )],t)" and f_step: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" by (auto simp add: inf_def) show False proof (cases t) case (Normal t') with f_0 f_step [rule_format, of 0] have "f (Suc 0) = ([],[],Normal t')" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of 1] show False by (auto elim: step_elim_cases) next case (Abrupt t') with f_0 f_step [rule_format, of 0] have "f (Suc 0) = ([c2],[],Normal t')" by (auto elim: step_Normal_elim_cases) with f_step eval Abrupt have "inf \<Gamma> [c2] [] (Normal t')" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) by simp with eval hyp_c2 Abrupt show False by simp next case (Fault m) with f_0 f_step [rule_format, of 0] have "f (Suc 0) = ([],[],Fault m)" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of 1] show False by (auto elim: step_elim_cases) next case Stuck with f_0 f_step [rule_format, of 0] have "f (Suc 0) = ([],[],Stuck)" by (auto elim: step_Normal_elim_cases) with f_step [rule_format, of 1] show False by (auto elim: step_elim_cases) qed qed qed thus ?case by (simp add: inf_Catch) qed lemma terminatess_impl_not_inf: assumes termi: "\<Gamma>\<turnstile>cs,css\<Down>s" shows "\<not>inf \<Gamma> cs css s" using termi proof (induct) case (Nil s) show ?case proof (rule not_infI) fix f assume "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" hence "\<Gamma>\<turnstile>f 0 \<rightarrow> f (Suc 0)" by simp moreover assume "f 0 = ([], [], s)" ultimately show False by (fastforce elim: step.cases) qed next case (ExitBlockNormal nrms css s abrs) have hyp: "\<not> inf \<Gamma> nrms css (Normal s)" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. 
\<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f0: "f 0 = ([], (nrms, abrs) # css, Normal s)" with f_step [of 0] have "f (Suc 0) = (nrms,css,Normal s)" by (auto elim: step_Normal_elim_cases) with f_step have "inf \<Gamma> nrms css (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) by simp with hyp show False .. qed next case (ExitBlockAbrupt abrs css s nrms) have hyp: "\<not> inf \<Gamma> abrs css (Normal s)" by fact show ?case proof (rule not_infI) fix f assume f_step: "\<And>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" assume f0: "f 0 = ([], (nrms, abrs) # css, Abrupt s)" with f_step [of 0] have "f (Suc 0) = (abrs,css,Normal s)" by (auto elim: step_Normal_elim_cases) with f_step have "inf \<Gamma> abrs css (Normal s)" apply (simp add: inf_def) apply (rule_tac x="\<lambda>i. f (Suc i)" in exI) by simp with hyp show False .. qed next case (ExitBlockFault nrms css f abrs) show ?case by (rule not_inf_Fault) next case (ExitBlockStuck nrms css abrs) show ?case by (rule not_inf_Stuck) next case (Cons c s cs css) have termi_c: "\<Gamma>\<turnstile>c \<down> s" by fact have hyp: "\<forall>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t \<longrightarrow> \<Gamma>\<turnstile>cs,css\<Down>t \<and> \<not> inf \<Gamma> cs css t" by fact show "\<not> inf \<Gamma> (c # cs) css s" proof assume "inf \<Gamma> (c # cs) css s" thus False proof (rule infE) assume "inf \<Gamma> [c] [] s" with terminates_impl_not_inf [OF termi_c] show False .. next fix t assume "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" "inf \<Gamma> cs css t" with hyp show False by simp qed qed qed lemma lem: "\<forall>y. r\<^sup>+\<^sup>+ a y \<longrightarrow> P a \<longrightarrow> P y \<Longrightarrow> ((b,a) \<in> {(y,x). P x \<and> r x y}\<^sup>+) = ((b,a) \<in> {(y,x). P x \<and> r\<^sup>+\<^sup>+ x y})" apply(rule iffI) apply clarify apply(erule trancl_induct) apply blast apply(blast intro:tranclp_trans) apply clarify apply(erule tranclp_induct) apply blast apply(blast intro:trancl_trans) done corollary terminatess_impl_no_inf_chain: assumes terminatess: "\<Gamma>\<turnstile>cs,css\<Down>s" shows "\<not>(\<exists>f. f 0 = (cs,css,s) \<and> (\<forall>i::nat. \<Gamma>\<turnstile>f i \<rightarrow>\<^sup>+ f(Suc i)))" proof - have "wf({(y,x). \<Gamma>\<turnstile>(cs,css,s) \<rightarrow>\<^sup>* x \<and> \<Gamma>\<turnstile>x \<rightarrow> y}\<^sup>+)" proof (rule wf_trancl) show "wf {(y, x). \<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* x \<and> \<Gamma>\<turnstile>x \<rightarrow> y}" proof (simp only: wf_iff_no_infinite_down_chain,clarify,simp) fix f assume "\<forall>i. \<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* f i \<and> \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" hence "\<exists>f. f 0 = (cs, css, s) \<and> (\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i))" by (rule renumber [to_pred]) moreover from terminatess have "\<not> (\<exists>f. f 0 = (cs, css, s) \<and> (\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)))" by (rule terminatess_impl_not_inf [unfolded inf_def]) ultimately show False by simp qed qed hence "\<not> (\<exists>f. \<forall>i. (f (Suc i), f i) \<in> {(y, x). \<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* x \<and> \<Gamma>\<turnstile>x \<rightarrow> y}\<^sup>+)" by (simp add: wf_iff_no_infinite_down_chain) thus ?thesis proof (rule contrapos_nn) assume "\<exists>f. f 0 = (cs, css, s) \<and> (\<forall>i. 
\<Gamma>\<turnstile>f i \<rightarrow>\<^sup>+ f (Suc i))"
  then obtain f where
    f0: "f 0 = (cs, css, s)" and
    seq: "\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow>\<^sup>+ f (Suc i)"
    by iprover
  show "\<exists>f. \<forall>i. (f (Suc i), f i) \<in> {(y, x). \<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* x \<and> \<Gamma>\<turnstile>x \<rightarrow> y}\<^sup>+"
  proof (rule exI [where x=f],rule allI)
    fix i
    show "(f (Suc i), f i) \<in> {(y, x). \<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* x \<and> \<Gamma>\<turnstile>x \<rightarrow> y}\<^sup>+"
    proof -
      { fix i
        have "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow>\<^sup>* f i"
        proof (induct i)
          case 0
          show "\<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* f 0"
            by (simp add: f0)
        next
          case (Suc n)
          have "\<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* f n" by fact
          with seq show "\<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* f (Suc n)"
            by (blast intro: tranclp_into_rtranclp rtranclp_trans)
        qed
      }
      hence "\<Gamma>\<turnstile>(cs,css,s) \<rightarrow>\<^sup>* f i" by iprover
      with seq
      have "(f (Suc i), f i) \<in> {(y, x). \<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* x \<and> \<Gamma>\<turnstile>x \<rightarrow>\<^sup>+ y}"
        by clarsimp
      moreover
      have "\<forall>y. \<Gamma>\<turnstile>f i \<rightarrow>\<^sup>+ y\<longrightarrow>\<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* f i\<longrightarrow>\<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* y"
        by (blast intro: tranclp_into_rtranclp rtranclp_trans)
      ultimately show ?thesis
        by (subst lem)
    qed
  qed
qed
qed

corollary terminates_impl_no_inf_chain:
 "\<Gamma>\<turnstile>c\<down>s \<Longrightarrow> \<not>(\<exists>f. f 0 = ([c],[],s) \<and> (\<forall>i::nat. \<Gamma>\<turnstile>f i \<rightarrow>\<^sup>+ f(Suc i)))"
  by (rule terminatess_impl_no_inf_chain) (iprover intro: terminatess.intros)

definition termi_call_steps :: "('s,'p,'f) body \<Rightarrow> (('s \<times> 'p) \<times> ('s \<times> 'p))set"
where
  "termi_call_steps \<Gamma> =
    {((t,q),(s,p)). \<Gamma>\<turnstile>the (\<Gamma> p)\<down>Normal s \<and>
       (\<exists>css. \<Gamma>\<turnstile>([the (\<Gamma> p)],[],Normal s) \<rightarrow>\<^sup>+ ([the (\<Gamma> q)],css,Normal t))}"

text \<open>Sequencing computations, or more exactly continuation stacks\<close>
primrec seq:: "(nat \<Rightarrow> 'a list) \<Rightarrow> nat \<Rightarrow> 'a list"
where
  "seq css 0 = []" |
  "seq css (Suc i) = css i@seq css i"

theorem wf_termi_call_steps: "wf (termi_call_steps \<Gamma>)"
proof (simp only: termi_call_steps_def wf_iff_no_infinite_down_chain,
       clarify,simp)
  fix S
  assume inf: "\<forall>i. (\<lambda>(t,q) (s,p).
                 \<Gamma>\<turnstile>(the (\<Gamma> p)) \<down> Normal s \<and>
                 (\<exists>css. \<Gamma>\<turnstile>([the (\<Gamma> p)],[],Normal s) \<rightarrow>\<^sup>+ ([the (\<Gamma> q)],css,Normal t)))
                 (S (Suc i)) (S i)"
  obtain s p where "s = (\<lambda>i. fst (S i))" and "p = (\<lambda>i. snd (S i))"
    by auto
  with inf
  have inf': "\<forall>i. \<Gamma>\<turnstile>(the (\<Gamma> (p i))) \<down> Normal (s i) \<and>
                (\<exists>css. \<Gamma>\<turnstile>([the (\<Gamma> (p i))],[],Normal (s i)) \<rightarrow>\<^sup>+
                         ([the (\<Gamma> (p (Suc i)))],css,Normal (s (Suc i))))"
    apply -
    apply (rule allI)
    apply (erule_tac x=i in allE)
    apply auto
    done
  show False
  proof -
    from inf' \<comment> \<open>Skolemization of css with axiom of choice\<close>
    have "\<exists>css. \<forall>i.
\<Gamma>\<turnstile>(the (\<Gamma> (p i))) \<down> Normal (s i) \<and> \<Gamma>\<turnstile>([the (\<Gamma> (p i))],[],Normal (s i)) \<rightarrow>\<^sup>+ ([the (\<Gamma> (p (Suc i)))],css i,Normal (s (Suc i)))" apply - apply (rule choice) by blast then obtain css where termi_css: "\<forall>i. \<Gamma>\<turnstile>(the (\<Gamma> (p i))) \<down> Normal (s i)" and step_css: "\<forall>i. \<Gamma>\<turnstile>([the (\<Gamma> (p i))],[],Normal (s i)) \<rightarrow>\<^sup>+ ([the (\<Gamma> (p (Suc i)))],css i,Normal (s (Suc i)))" by blast define f where "f i = ([the (\<Gamma> (p i))], seq css i,Normal (s i)::('a,'c) xstate)" for i have "f 0 = ([the (\<Gamma> (p 0))],[],Normal (s 0))" by (simp add: f_def) moreover have "\<forall>i. \<Gamma>\<turnstile> (f i) \<rightarrow>\<^sup>+ (f (i+1))" proof fix i from step_css [rule_format, of i] have "\<Gamma>\<turnstile>([the (\<Gamma> (p i))], [], Normal (s i)) \<rightarrow>\<^sup>+ ([the (\<Gamma> (p (Suc i)))], css i, Normal (s (Suc i)))". from app_css_steps [OF this,simplified] have "\<Gamma>\<turnstile>([the (\<Gamma> (p i))], seq css i, Normal (s i)) \<rightarrow>\<^sup>+ ([the (\<Gamma> (p (Suc i)))], css i@seq css i, Normal (s (Suc i)))". thus "\<Gamma>\<turnstile> (f i) \<rightarrow>\<^sup>+ (f (i+1))" by (simp add: f_def) qed moreover from termi_css [rule_format, of 0] have "\<not> (\<exists>f. (f 0 = ([the (\<Gamma> (p 0))],[],Normal (s 0)) \<and> (\<forall>i. \<Gamma>\<turnstile>(f i) \<rightarrow>\<^sup>+ f(Suc i))))" by (rule terminates_impl_no_inf_chain) ultimately show False by auto qed qed text \<open>An alternative proof using Hilbert-choice instead of axiom of choice.\<close> theorem "wf (termi_call_steps \<Gamma>)" proof (simp only: termi_call_steps_def wf_iff_no_infinite_down_chain, clarify,simp) fix S assume inf: "\<forall>i. (\<lambda>(t,q) (s,p). \<Gamma>\<turnstile>(the (\<Gamma> p)) \<down> Normal s \<and> (\<exists>css. \<Gamma>\<turnstile>([the (\<Gamma> p)],[],Normal s) \<rightarrow>\<^sup>+ ([the (\<Gamma> q)],css,Normal t))) (S (Suc i)) (S i)" obtain s p where "s = (\<lambda>i. fst (S i))" and "p = (\<lambda>i. snd (S i))" by auto with inf have inf': "\<forall>i. \<Gamma>\<turnstile>(the (\<Gamma> (p i))) \<down> Normal (s i) \<and> (\<exists>css. \<Gamma>\<turnstile>([the (\<Gamma> (p i))],[],Normal (s i)) \<rightarrow>\<^sup>+ ([the (\<Gamma> (p (Suc i)))],css,Normal (s (Suc i))))" apply - apply (rule allI) apply (erule_tac x=i in allE) apply auto done show "False" proof - define CSS where "CSS i = (SOME css. \<Gamma>\<turnstile>([the (\<Gamma> (p i))],[], Normal (s i)) \<rightarrow>\<^sup>+ ([the (\<Gamma> (p (i+1)))],css,Normal (s (i+1))))" for i define f where "f i = ([the (\<Gamma> (p i))], seq CSS i,Normal (s i)::('a,'c) xstate)" for i have "f 0 = ([the (\<Gamma> (p 0))],[],Normal (s 0))" by (simp add: f_def) moreover have "\<forall>i. \<Gamma>\<turnstile> (f i) \<rightarrow>\<^sup>+ (f (i+1))" proof fix i from inf' [rule_format, of i] obtain css where css: "\<Gamma>\<turnstile>([the (\<Gamma> (p i))],[],Normal (s i)) \<rightarrow>\<^sup>+ ([the (\<Gamma> (p (i+1)))],css,Normal (s (i+1)))" by fastforce hence "\<Gamma>\<turnstile>([the (\<Gamma> (p i))], seq CSS i, Normal (s i)) \<rightarrow>\<^sup>+ ([the (\<Gamma> (p (i+1)))], CSS i @ seq CSS i, Normal (s (i+1)))" apply - apply (unfold CSS_def) apply (rule someI2 [where P="\<lambda>css. 
\<Gamma>\<turnstile>([the (\<Gamma> (p i))],[],Normal (s i))\<rightarrow>\<^sup>+ ([the (\<Gamma> (p (i+1)))],css, Normal (s (i+1)))"]) apply (rule css) apply (fastforce dest: app_css_steps) done thus "\<Gamma>\<turnstile> (f i) \<rightarrow>\<^sup>+ (f (i+1))" by (simp add: f_def) qed moreover from inf' [rule_format, of 0] have "\<Gamma>\<turnstile>the (\<Gamma> (p 0)) \<down> Normal (s 0)" by iprover then have "\<not> (\<exists>f. (f 0 = ([the (\<Gamma> (p 0))],[],Normal (s 0)) \<and> (\<forall>i. \<Gamma>\<turnstile>(f i) \<rightarrow>\<^sup>+ f(Suc i))))" by (rule terminates_impl_no_inf_chain) ultimately show False by auto qed qed lemma not_inf_implies_wf: assumes not_inf: "\<not> inf \<Gamma> cs css s" shows "wf {(c2,c1). \<Gamma> \<turnstile> (cs,css,s) \<rightarrow>\<^sup>* c1 \<and> \<Gamma> \<turnstile> c1 \<rightarrow> c2}" proof (simp only: wf_iff_no_infinite_down_chain,clarify, simp) fix f assume "\<forall>i. \<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* f i \<and> \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)" hence "\<exists>f. f 0 = (cs, css, s) \<and> (\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i))" by (rule renumber [to_pred]) moreover from not_inf have "\<not> (\<exists>f. f 0 = (cs, css, s) \<and> (\<forall>i. \<Gamma>\<turnstile>f i \<rightarrow> f (Suc i)))" by (unfold inf_def) ultimately show False by simp qed lemma wf_implies_termi_reach: assumes wf: "wf {(c2,c1). \<Gamma> \<turnstile> (cs,css,s) \<rightarrow>\<^sup>* c1 \<and> \<Gamma> \<turnstile> c1 \<rightarrow> c2}" shows "\<And>cs1 css1 s1. \<lbrakk>\<Gamma> \<turnstile> (cs,css,s) \<rightarrow>\<^sup>* c1; c1=(cs1,css1,s1)\<rbrakk>\<Longrightarrow> \<Gamma>\<turnstile>cs1,css1\<Down>s1" using wf proof (induct c1, simp) fix cs1 css1 s1 assume reach: "\<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* (cs1, css1, s1)" assume hyp_raw: "\<And>y cs2 css2 s2. \<lbrakk>\<Gamma> \<turnstile> (cs1,css1,s1) \<rightarrow> (cs2,css2,s2); \<Gamma> \<turnstile> (cs,css,s) \<rightarrow>\<^sup>* (cs2,css2,s2); y=(cs2,css2,s2)\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>cs2,css2\<Down>s2" have hyp: "\<And>cs2 css2 s2. \<lbrakk>\<Gamma> \<turnstile> (cs1,css1,s1) \<rightarrow> (cs2,css2,s2)\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>cs2,css2\<Down>s2" apply - apply (rule hyp_raw) apply assumption using reach apply simp apply (rule refl) done show "\<Gamma>\<turnstile>cs1,css1\<Down>s1" proof (cases s1) case (Normal s1') show ?thesis proof (cases cs1) case Nil note cs1_Nil = this show ?thesis proof (cases css1) case Nil with cs1_Nil show ?thesis by (auto intro: terminatess.intros) next case (Cons nrms_abrs css1') then obtain nrms abrs where nrms_abrs: "nrms_abrs=(nrms,abrs)" by (cases "nrms_abrs") have "\<Gamma> \<turnstile> ([],(nrms,abrs)#css1',Normal s1') \<rightarrow> (nrms,css1',Normal s1')" by (rule step.intros) from hyp [simplified cs1_Nil Cons nrms_abrs Normal, OF this] have "\<Gamma>\<turnstile>nrms,css1'\<Down>Normal s1'". from ExitBlockNormal [OF this] cs1_Nil Cons nrms_abrs Normal show ?thesis by auto qed next case (Cons c1 cs1') have "\<Gamma>\<turnstile>c1#cs1',css1\<Down>Normal s1'" proof (cases c1) case Skip have "\<Gamma> \<turnstile> (Skip#cs1',css1,Normal s1') \<rightarrow> (cs1',css1,Normal s1')" by (rule step.intros) from hyp [simplified Cons Skip Normal, OF this] have "\<Gamma>\<turnstile>cs1',css1\<Down>Normal s1'". 
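    (* Skip is consumed by a single small step; the well-founded induction
       hypothesis for the successor configuration (cs1', css1, Normal s1')
       already yields termination of the remaining continuation, and the
       terminatess.Cons rule rebuilds the claim for Skip # cs1'. *)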
with Normal Skip show ?thesis by (auto intro: terminatess.intros terminates.intros elim: exec_Normal_elim_cases) next case (Basic f) have "\<Gamma> \<turnstile> (Basic f#cs1',css1,Normal s1') \<rightarrow> (cs1',css1,Normal (f s1'))" by (rule step.intros) from hyp [simplified Cons Basic Normal, OF this] have "\<Gamma>\<turnstile>cs1',css1\<Down>Normal (f s1')". with Normal Basic show ?thesis by (auto intro: terminatess.intros terminates.intros elim: exec_Normal_elim_cases) next case (Spec r) with Normal show ?thesis apply simp apply (rule terminatess.Cons) apply (fastforce intro: terminates.intros) apply (clarify) apply (erule exec_Normal_elim_cases) apply clarsimp apply (rule hyp) apply (fastforce intro: step.intros simp add: Cons Spec Normal ) apply (fastforce intro: terminatess_Stuck) done next case (Seq c\<^sub>1 c\<^sub>2) have "\<Gamma> \<turnstile> (Seq c\<^sub>1 c\<^sub>2#cs1',css1,Normal s1') \<rightarrow> (c\<^sub>1#c\<^sub>2#cs1',css1,Normal s1')" by (rule step.intros) from hyp [simplified Cons Seq Normal, OF this] have "\<Gamma>\<turnstile>c\<^sub>1 # c\<^sub>2 # cs1',css1\<Down>Normal s1'". with Normal Seq show ?thesis by (fastforce intro: terminatess.intros terminates.intros elim: terminatess_elim_cases exec_Normal_elim_cases) next case (Cond b c\<^sub>1 c\<^sub>2) show ?thesis proof (cases "s1' \<in> b") case True hence "\<Gamma>\<turnstile>(Cond b c\<^sub>1 c\<^sub>2#cs1',css1,Normal s1') \<rightarrow> (c\<^sub>1#cs1',css1,Normal s1')" by (rule step.intros) from hyp [simplified Cons Cond Normal, OF this] have "\<Gamma>\<turnstile>c\<^sub>1 # cs1',css1\<Down>Normal s1'". with Normal Cond True show ?thesis by (fastforce intro: terminatess.intros terminates.intros elim: terminatess_elim_cases exec_Normal_elim_cases) next case False hence "\<Gamma>\<turnstile>(Cond b c\<^sub>1 c\<^sub>2#cs1',css1,Normal s1') \<rightarrow> (c\<^sub>2#cs1',css1,Normal s1')" by (rule step.intros) from hyp [simplified Cons Cond Normal, OF this] have "\<Gamma>\<turnstile>c\<^sub>2 # cs1',css1\<Down>Normal s1'". with Normal Cond False show ?thesis by (fastforce intro: terminatess.intros terminates.intros elim: terminatess_elim_cases exec_Normal_elim_cases) qed next case (While b c') show ?thesis proof (cases "s1' \<in> b") case True then have "\<Gamma>\<turnstile>(While b c' # cs1', css1, Normal s1') \<rightarrow> (c' # While b c' # cs1', css1, Normal s1')" by (rule step.intros) from hyp [simplified Cons While Normal, OF this] have "\<Gamma>\<turnstile>c' # While b c' # cs1',css1\<Down>Normal s1'". with Cons While True Normal show ?thesis by (fastforce intro: terminatess.intros terminates.intros exec.intros elim: terminatess_elim_cases exec_Normal_elim_cases) next case False then have "\<Gamma>\<turnstile>(While b c' # cs1', css1, Normal s1') \<rightarrow> (cs1', css1, Normal s1')" by (rule step.intros) from hyp [simplified Cons While Normal, OF this] have "\<Gamma>\<turnstile>cs1',css1\<Down>Normal s1'". 
with Cons While False Normal show ?thesis by (fastforce intro: terminatess.intros terminates.intros exec.intros elim: terminatess_elim_cases exec_Normal_elim_cases) qed next case (Call p) show ?thesis proof (cases "\<Gamma> p") case None with Call Normal show ?thesis by (fastforce intro: terminatess.intros terminates.intros terminatess_Stuck elim: exec_Normal_elim_cases) next case (Some bdy) then have "\<Gamma> \<turnstile> (Call p#cs1',css1,Normal s1') \<rightarrow> ([bdy],(cs1',Throw#cs1')#css1,Normal s1')" by (rule step.intros) from hyp [simplified Cons Call Normal Some, OF this] have "\<Gamma>\<turnstile>[bdy],(cs1', Throw # cs1') # css1\<Down>Normal s1'". with Some Call Normal show ?thesis apply simp apply (rule terminatess.intros) apply (blast elim: terminatess_elim_cases intro: terminates.intros) apply clarify apply (erule terminatess_elim_cases) apply (erule exec_Normal_elim_cases) prefer 2 apply simp apply (erule_tac x=t in allE) apply (case_tac t) apply (auto intro: terminatess_Stuck terminatess_Fault exec.intros elim: terminatess_elim_cases exec_Normal_elim_cases) done qed next case (DynCom c') have "\<Gamma> \<turnstile> (DynCom c'#cs1',css1,Normal s1') \<rightarrow> (c' s1'#cs1',css1,Normal s1')" by (rule step.intros) from hyp [simplified Cons DynCom Normal, OF this] have "\<Gamma>\<turnstile>c' s1'#cs1',css1\<Down>Normal s1'". with Normal DynCom show ?thesis by (fastforce intro: terminatess.intros terminates.intros exec.intros elim: terminatess_elim_cases exec_Normal_elim_cases) next case (Guard f g c') show ?thesis proof (cases "s1' \<in> g") case True then have "\<Gamma> \<turnstile> (Guard f g c'#cs1',css1,Normal s1') \<rightarrow> (c'#cs1',css1,Normal s1')" by (rule step.intros) from hyp [simplified Cons Guard Normal, OF this] have "\<Gamma>\<turnstile>c'#cs1',css1\<Down>Normal s1'". with Normal Guard True show ?thesis by (fastforce intro: terminatess.intros terminates.intros exec.intros elim: terminatess_elim_cases exec_Normal_elim_cases) next case False with Guard Normal show ?thesis by (fastforce intro: terminatess.intros terminatess_Fault terminates.intros elim: exec_Normal_elim_cases) qed next case Throw have "\<Gamma> \<turnstile> (Throw#cs1',css1,Normal s1') \<rightarrow> (cs1',css1,Abrupt s1')" by (rule step.intros) from hyp [simplified Cons Throw Normal, OF this] have "\<Gamma>\<turnstile>cs1',css1\<Down>Abrupt s1'". with Normal Throw show ?thesis by (auto intro: terminatess.intros terminates.intros elim: exec_Normal_elim_cases) next case (Catch c\<^sub>1 c\<^sub>2) have "\<Gamma> \<turnstile> (Catch c\<^sub>1 c\<^sub>2#cs1',css1,Normal s1') \<rightarrow> ([c\<^sub>1], (cs1',c\<^sub>2#cs1')# css1,Normal s1')" by (rule step.intros) from hyp [simplified Cons Catch Normal, OF this] have "\<Gamma>\<turnstile>[c\<^sub>1],(cs1', c\<^sub>2 # cs1') # css1\<Down>Normal s1'". 
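(* Catch case: the step opens a new block whose abrupt continuation is c\<^sub>2 # cs1'; hyp then yields termination of ([c\<^sub>1], (cs1', c\<^sub>2 # cs1') # css1, Normal s1'). *)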
with Normal Catch show ?thesis by (fastforce intro: terminatess.intros terminates.intros exec.intros elim: terminatess_elim_cases exec_Normal_elim_cases) qed with Cons Normal show ?thesis by simp qed next case (Abrupt s1') show ?thesis proof (cases cs1) case Nil note cs1_Nil = this show ?thesis proof (cases css1) case Nil with cs1_Nil show ?thesis by (auto intro: terminatess.intros) next case (Cons nrms_abrs css1') then obtain nrms abrs where nrms_abrs: "nrms_abrs=(nrms,abrs)" by (cases "nrms_abrs") have "\<Gamma> \<turnstile> ([],(nrms,abrs)#css1',Abrupt s1') \<rightarrow> (abrs,css1',Normal s1')" by (rule step.intros) from hyp [simplified cs1_Nil Cons nrms_abrs Abrupt, OF this] have "\<Gamma>\<turnstile>abrs,css1'\<Down>Normal s1'". from ExitBlockAbrupt [OF this] cs1_Nil Cons nrms_abrs Abrupt show ?thesis by auto qed next case (Cons c1 cs1') have "\<Gamma>\<turnstile>c1#cs1',css1\<Down>Abrupt s1'" proof - have "\<Gamma> \<turnstile> (c1#cs1',css1,Abrupt s1') \<rightarrow> (cs1',css1,Abrupt s1')" by (rule step.intros) from hyp [simplified Cons Abrupt, OF this] have "\<Gamma>\<turnstile>cs1',css1\<Down>Abrupt s1'". with Cons Abrupt show ?thesis by (fastforce intro: terminatess.intros terminates.intros exec.intros elim: terminatess_elim_cases exec_Normal_elim_cases) qed with Cons Abrupt show ?thesis by simp qed next case (Fault f) thus ?thesis by (auto intro: terminatess_Fault) next case Stuck thus ?thesis by (auto intro: terminatess_Stuck) qed qed lemma not_inf_impl_terminatess: assumes not_inf: "\<not> inf \<Gamma> cs css s" shows "\<Gamma>\<turnstile>cs,css\<Down>s" proof - from not_inf_implies_wf [OF not_inf] have wf: "wf {(c2, c1). \<Gamma>\<turnstile>(cs, css, s) \<rightarrow>\<^sup>* c1 \<and> \<Gamma>\<turnstile>c1 \<rightarrow> c2}". show ?thesis by (rule wf_implies_termi_reach [OF wf]) auto qed lemma not_inf_impl_terminates: assumes not_inf: "\<not> inf \<Gamma> [c] [] s" shows "\<Gamma>\<turnstile>c\<down>s" proof - from not_inf_impl_terminatess [OF not_inf] have "\<Gamma>\<turnstile>[c],[]\<Down>s". thus ?thesis by (auto elim: terminatess_elim_cases) qed theorem terminatess_iff_not_inf: "\<Gamma>\<turnstile>cs,css\<Down>s = (\<not> inf \<Gamma> cs css s)" apply rule apply (erule terminatess_impl_not_inf) apply (erule not_inf_impl_terminatess) done corollary terminates_iff_not_inf: "\<Gamma>\<turnstile>c\<down>s = (\<not> inf \<Gamma> [c] [] s)" apply (rule) apply (erule terminates_impl_not_inf) apply (erule not_inf_impl_terminates) done subsection \<open>Completeness of Total Correctness Hoare Logic\<close> lemma ConseqMGT: assumes modif: "\<forall>Z::'a. \<Gamma>,\<Theta> \<turnstile>\<^sub>t\<^bsub>/F\<^esub> (P' Z::'a assn) c (Q' Z),(A' Z)" assumes impl: "\<And>s. s \<in> P \<Longrightarrow> s \<in> P' s \<and> (\<forall>t. t \<in> Q' s \<longrightarrow> t \<in> Q) \<and> (\<forall>t. t \<in> A' s \<longrightarrow> t \<in> A)" shows "\<Gamma>,\<Theta> \<turnstile>\<^sub>t\<^bsub>/F\<^esub> P c Q,A" using impl by - (rule conseq [OF modif],blast) lemma conseq_extract_state_indep_prop: assumes state_indep_prop:"\<forall>s \<in> P. 
R" assumes to_show: "R \<Longrightarrow> \<Gamma>,\<Theta>\<turnstile>\<^sub>t\<^bsub>/F\<^esub> P c Q,A" shows "\<Gamma>,\<Theta>\<turnstile>\<^sub>t\<^bsub>/F\<^esub> P c Q,A" apply (rule Conseq) apply (clarify) apply (rule_tac x="P" in exI) apply (rule_tac x="Q" in exI) apply (rule_tac x="A" in exI) using state_indep_prop to_show by blast text \<open>To prove a procedure implementation correct it suffices to assume only the procedure specifications of procedures that actually occur during evaluation of the body. \<close> lemma Call_lemma: assumes Call: "\<forall>q \<in> dom \<Gamma>. \<forall>Z. \<Gamma>,\<Theta> \<turnstile>\<^sub>t\<^bsub>/F\<^esub> {s. s=Z \<and> \<Gamma>\<turnstile>\<langle>Call q,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>Call q\<down>Normal s \<and> ((s,q),(\<sigma>,p)) \<in> termi_call_steps \<Gamma>} (Call q) {t. \<Gamma>\<turnstile>\<langle>Call q,Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>Call q,Normal Z\<rangle> \<Rightarrow> Abrupt t}" shows "\<And>Z. \<Gamma>,\<Theta> \<turnstile>\<^sub>t\<^bsub>/F\<^esub> ({\<sigma>} \<inter> {s. s=Z \<and> \<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>the (\<Gamma> p)\<down>Normal s}) the (\<Gamma> p) {t. \<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal Z\<rangle> \<Rightarrow> Abrupt t}" apply (rule conseqPre) apply (rule Call_lemma') apply (rule Call) apply blast done lemma Call_lemma_switch_Call_body: assumes call: "\<forall>q \<in> dom \<Gamma>. \<forall>Z. \<Gamma>,\<Theta> \<turnstile>\<^sub>t\<^bsub>/F\<^esub> {s. s=Z \<and> \<Gamma>\<turnstile>\<langle>Call q,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>Call q\<down>Normal s \<and> ((s,q),(\<sigma>,p)) \<in> termi_call_steps \<Gamma>} (Call q) {t. \<Gamma>\<turnstile>\<langle>Call q,Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>Call q,Normal Z\<rangle> \<Rightarrow> Abrupt t}" assumes p_defined: "p \<in> dom \<Gamma>" shows "\<And>Z. \<Gamma>,\<Theta> \<turnstile>\<^sub>t\<^bsub>/F\<^esub> ({\<sigma>} \<inter> {s. s=Z \<and> \<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>Call p\<down>Normal s}) the (\<Gamma> p) {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Abrupt t}" apply (simp only: exec_Call_body' [OF p_defined] noFaultStuck_Call_body' [OF p_defined] terminates_Normal_Call_body [OF p_defined]) apply (rule conseqPre) apply (rule Call_lemma') apply (rule call) apply blast done lemma MGT_Call: "\<forall>p \<in> dom \<Gamma>. \<forall>Z. \<Gamma>,\<Theta> \<turnstile>\<^sub>t\<^bsub>/F\<^esub> {s. s=Z \<and> \<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>(Call p)\<down>Normal s} (Call p) {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Abrupt t}" apply (intro ballI allI) apply (rule CallRec' [where Procs="dom \<Gamma>" and P="\<lambda>p Z. {s. 
s=Z \<and> \<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>Call p\<down>Normal s}" and Q="\<lambda>p Z. {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Normal t}" and A="\<lambda>p Z. {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Abrupt t}" and r="termi_call_steps \<Gamma>" ]) apply simp apply simp apply (rule wf_termi_call_steps) apply (intro ballI allI) apply simp apply (rule Call_lemma_switch_Call_body [rule_format, simplified]) apply (rule hoaret.Asm) apply fastforce apply assumption done lemma CollInt_iff: "{s. P s} \<inter> {s. Q s} = {s. P s \<and> Q s}" by auto lemma image_Un_conv: "f ` (\<Union>p\<in>dom \<Gamma>. \<Union>Z. {x p Z}) = (\<Union>p\<in>dom \<Gamma>. \<Union>Z. {f (x p Z)})" by (auto iff: not_None_eq) text \<open>Another proof of \<open>MGT_Call\<close>, maybe a little more readable\<close> lemma "\<forall>p \<in> dom \<Gamma>. \<forall>Z. \<Gamma>,{} \<turnstile>\<^sub>t\<^bsub>/F\<^esub> {s. s=Z \<and> \<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>(Call p)\<down>Normal s} (Call p) {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Abrupt t}" proof - { fix p Z \<sigma> assume defined: "p \<in> dom \<Gamma>" define Specs where "Specs = (\<Union>p\<in>dom \<Gamma>. \<Union>Z. {({s. s=Z \<and> \<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>Call p\<down>Normal s}, p, {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Abrupt t})})" define Specs_wf where "Specs_wf p \<sigma> = (\<lambda>(P,q,Q,A). (P \<inter> {s. ((s,q),\<sigma>,p) \<in> termi_call_steps \<Gamma>}, q, Q, A)) ` Specs" for p \<sigma> have "\<Gamma>,Specs_wf p \<sigma> \<turnstile>\<^sub>t\<^bsub>/F\<^esub>({\<sigma>} \<inter> {s. s = Z \<and> \<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>the (\<Gamma> p)\<down>Normal s}) (the (\<Gamma> p)) {t. \<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal Z\<rangle> \<Rightarrow> Abrupt t}" apply (rule Call_lemma [rule_format]) apply (rule hoaret.Asm) apply (clarsimp simp add: Specs_wf_def Specs_def image_Un_conv) apply (rule_tac x=q in bexI) apply (rule_tac x=Z in exI) apply (clarsimp simp add: CollInt_iff) apply auto done hence "\<Gamma>,Specs_wf p \<sigma> \<turnstile>\<^sub>t\<^bsub>/F\<^esub>({\<sigma>} \<inter> {s. s = Z \<and> \<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>Call p\<down>Normal s}) (the (\<Gamma> p)) {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Abrupt t}" by (simp only: exec_Call_body' [OF defined] noFaultStuck_Call_body' [OF defined] terminates_Normal_Call_body [OF defined]) } note bdy=this show ?thesis apply (intro ballI allI) apply (rule hoaret.CallRec [where Specs="(\<Union>p\<in>dom \<Gamma>. \<Union>Z. {({s. 
s=Z \<and> \<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) \<and> \<Gamma>\<turnstile>Call p\<down>Normal s}, p, {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Normal t}, {t. \<Gamma>\<turnstile>\<langle>Call p,Normal Z\<rangle> \<Rightarrow> Abrupt t})})", OF _ wf_termi_call_steps [of \<Gamma>] refl]) apply fastforce apply clarify apply (rule conjI) apply fastforce apply (rule allI) apply (simp (no_asm_use) only : Un_empty_left) apply (rule bdy) apply auto done qed end
‘Come & See’ is, AFAIK, widely regarded as the greatest war film of all time. The story of its production (including the flagrant disregard for the health and safety of the crew and various farm animals) is legendary. WRT portrayal of the Germans/Russians… it is (at least partially) propagandistic, produced as it was to mark the 40th anniversary of the Soviet victory, but that said, the brutality of the Germans on the Eastern front cannot be exaggerated. Glad to see a positive review of it, especially after the awful treatment you doled out to Texas Chainsaw Massacre. Having said that, I’m not a huge fan of WWII films either… and while I do watch a lot of non-Anglophone cinema, Come and See’s rep as “the greatest film of all time” seems to have passed me by. Though I will admit, it’s a deserving candidate. Point taken on the depiction of Russians and Germans.
section {*FUNCTION\_\_DPDA\_DFA\_PRODUCT*} theory FUNCTION__DPDA_DFA_PRODUCT imports PRJ_10_02__ENTRY begin lemma F_DPDA_DFA_PRODUCT__produces__PDA: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> valid_pda R" apply(simp add: valid_dpda_def valid_dfa_def F_DPDA_DFA_PRODUCT_def valid_pda_def valid_epda_def) apply(clarsimp) apply(rule conjI) apply(simp add: F_DPDA_DFA_PRODUCT__states_def) apply(rule_tac t="{cons_tuple2 p q |p q. p \<in> epda_states M \<and> q \<in> epda_states D}" and s="(\<lambda>(x,y). cons_tuple2 x y) ` ((epda_states M) \<times> (epda_states D))" in ssubst) apply(force) apply(rule finite_imageI) apply(force) apply(rule conjI) apply(simp add: F_DPDA_DFA_PRODUCT__events_def) apply(rule conjI) apply(simp add: F_DPDA_DFA_PRODUCT__edges_def) apply(rule conjI) apply(simp add: F_DPDA_DFA_PRODUCT__edges_execute_def) apply(rule_tac B="(\<lambda>(e,e'). \<lparr>edge_src = cons_tuple2 (edge_src e) (edge_src e'), edge_event = edge_event e, edge_pop = edge_pop e, edge_push = edge_push e, edge_trg = cons_tuple2 (edge_trg e) (edge_trg e')\<rparr>) ` ((epda_delta M) \<times> (epda_delta D))" in finite_subset) apply(clarsimp) apply(rename_tac e e')(*strict*) apply(rule inMap) apply(rule_tac x="(e,e')" in bexI) apply(rename_tac e e')(*strict*) apply(force) apply(rename_tac e e')(*strict*) apply(force) apply(rule finite_imageI) apply(force) apply(simp add: F_DPDA_DFA_PRODUCT__edges_empty_def) apply(rule_tac B="(\<lambda>(e,p). \<lparr>edge_src = cons_tuple2 (edge_src e) p, edge_event = None, edge_pop = edge_pop e, edge_push = edge_push e, edge_trg = cons_tuple2 (edge_trg e) p\<rparr>) ` ((epda_delta M) \<times> (epda_states D))" in finite_subset) apply(clarsimp) apply(rename_tac e p)(*strict*) apply(rule inMap) apply(rule_tac x="(e,p)" in bexI) apply(rename_tac e p)(*strict*) apply(force) apply(rename_tac e p)(*strict*) apply(force) apply(rule finite_imageI) apply(force) apply(rule conjI) apply(simp add: F_DPDA_DFA_PRODUCT__states_def) apply(rule conjI) apply(simp add: F_DPDA_DFA_PRODUCT__marking_states_def F_DPDA_DFA_PRODUCT__states_def) apply(clarsimp) apply(rename_tac p q)(*strict*) apply(force) apply(rule conjI) apply(clarsimp) apply(rename_tac x)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__edges_def) apply(erule disjE) apply(rename_tac x)(*strict*) apply(simp add: valid_epda_step_label_def F_DPDA_DFA_PRODUCT__edges_execute_def) apply(clarsimp) apply(rename_tac e e')(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__marking_states_def F_DPDA_DFA_PRODUCT__states_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: option_to_set_def) apply(clarsimp) apply(rename_tac e e' x)(*strict*) apply(erule_tac x="e'" and P="\<lambda>e'. edge_src e' \<in> epda_states D \<and> edge_trg e' \<in> epda_states D \<and> {y. 
Some y = edge_event e'} \<subseteq> epda_events D \<and> [epda_box D] \<in> may_terminated_by (epda_gamma D) (epda_box D)" in ballE) apply(rename_tac e e' x)(*strict*) prefer 2 apply(force) apply(rename_tac e e' x)(*strict*) apply(force) apply(rename_tac x)(*strict*) apply(simp add: valid_epda_step_label_def F_DPDA_DFA_PRODUCT__edges_empty_def) apply(clarsimp) apply(rename_tac e p)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__marking_states_def F_DPDA_DFA_PRODUCT__states_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: option_to_set_def) apply(clarsimp) apply(rename_tac e)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__edges_def) apply(erule disjE) apply(rename_tac e)(*strict*) apply(simp add: valid_epda_step_label_def F_DPDA_DFA_PRODUCT__edges_execute_def) apply(clarsimp) apply(rename_tac e)(*strict*) apply(simp add: valid_epda_step_label_def F_DPDA_DFA_PRODUCT__edges_empty_def) apply(clarsimp) done definition slice_edge_A :: " (('stateA, 'stateB) DT_tuple2, 'event, 'stackA) epda_step_label \<Rightarrow> ('stateA, 'event, 'stackA) epda_step_label" where "slice_edge_A e \<equiv> \<lparr>edge_src = sel_tuple2_1 (edge_src e), edge_event = edge_event e, edge_pop = edge_pop e, edge_push = edge_push e, edge_trg = sel_tuple2_1 (edge_trg e)\<rparr>" definition slice_conf_A :: " (('stateA, 'stateB) DT_tuple2, 'event, 'stackA) epdaH_conf \<Rightarrow> ('stateA, 'event, 'stackA) epdaH_conf" where "slice_conf_A c \<equiv> \<lparr>epdaH_conf_state = sel_tuple2_1 (epdaH_conf_state c), epdaH_conf_history = epdaH_conf_history c, epdaH_conf_stack = epdaH_conf_stack c\<rparr>" definition slice_edge_B :: " (('stateA, 'stateB) DT_tuple2, 'event, 'stackA) epda_step_label \<Rightarrow> 'stackB list \<Rightarrow> ('stateB, 'event, 'stackB) epda_step_label" where "slice_edge_B e s \<equiv> \<lparr>edge_src = sel_tuple2_2 (edge_src e), edge_event = edge_event e, edge_pop = s, edge_push = s, edge_trg = sel_tuple2_2 (edge_trg e)\<rparr>" definition slice_conf_B :: " (('stateA, 'stateB) DT_tuple2, 'event, 'stackA) epdaH_conf \<Rightarrow> 'stackB list \<Rightarrow> ('stateB, 'event, 'stackB) epdaH_conf" where "slice_conf_B c s \<equiv> \<lparr>epdaH_conf_state = sel_tuple2_2 (epdaH_conf_state c), epdaH_conf_history = epdaH_conf_history c, epdaH_conf_stack = s\<rparr>" lemma F_DPDA_DFA_PRODUCT__reflects__derivation_initial: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> valid_pda R \<Longrightarrow> epdaH.derivation_initial R dR \<Longrightarrow> dR n = Some (pair eR \<lparr>epdaH_conf_state = cons_tuple2 q1 q2, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>) \<Longrightarrow> \<exists>dM dD nD eM eD. 
nD \<le> n \<and> epdaH.derivation_initial D dD \<and> epdaH.derivation_initial M dM \<and> dM n = Some (pair eM \<lparr>epdaH_conf_state = q1, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>) \<and> dD nD = Some (pair eD \<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr>)" apply(induct n arbitrary: eR q1 q2 h s) apply(rename_tac eR q1 q2 h s)(*strict*) apply(clarsimp) apply(rule_tac x="der1 \<lparr>epdaH_conf_state = q1, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>" in exI) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule_tac x="der1 \<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr>" in exI) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule conjI) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule epdaH.derivation_initialI) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule epdaH.der1_is_derivation) apply(rename_tac eR q1 q2 h s)(*strict*) apply(clarsimp) apply(rename_tac eR q1 q2 h s c)(*strict*) apply(simp add: get_configuration_def der1_def) apply(clarsimp) apply(rename_tac eR q1 q2 h s)(*strict*) apply(simp add: epdaH.derivation_initial_def) apply(clarsimp) apply(rename_tac q1 q2 h s)(*strict*) apply(simp add: epdaH_initial_configurations_def epdaH_configurations_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__states_def) apply(clarsimp) apply(simp add: valid_dfa_def valid_pda_def valid_dpda_def valid_epda_def) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule conjI) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule epdaH.derivation_initialI) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule epdaH.der1_is_derivation) apply(rename_tac eR q1 q2 h s)(*strict*) apply(clarsimp) apply(rename_tac eR q1 q2 h s c)(*strict*) apply(simp add: get_configuration_def der1_def) apply(clarsimp) apply(rename_tac eR q1 q2 h s)(*strict*) apply(simp add: epdaH.derivation_initial_def) apply(clarsimp) apply(rename_tac q1 q2 h s)(*strict*) apply(simp add: epdaH_initial_configurations_def epdaH_configurations_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__states_def) apply(clarsimp) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule conjI) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule_tac x="None" in exI) apply(simp add: der1_def) apply(rename_tac eR q1 q2 h s)(*strict*) apply(rule_tac x="None" in exI) apply(simp add: der1_def) apply(rename_tac n eR q1 q2 h s)(*strict*) apply(clarsimp) apply(subgoal_tac "X" for X) apply(rename_tac n eR q1 q2 h s)(*strict*) prefer 2 apply(rule_tac G="F_DPDA_DFA_PRODUCT M D" and d="dR" and n="n" and m="Suc n" in epdaH.step_detail_before_some_position) apply(rename_tac n eR q1 q2 h s)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac n eR q1 q2 h s)(*strict*) apply(force) apply(rename_tac n eR q1 q2 h s)(*strict*) apply(force) apply(rename_tac n eR q1 q2 h s)(*strict*) apply(clarsimp) apply(rename_tac n q1 q2 h s e1 e2 c1)(*strict*) apply(erule_tac x="e1" in meta_allE) apply(case_tac c1) apply(rename_tac n q1 q2 h s e1 e2 c1 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(clarsimp) apply(rename_tac n q1 q2 h s e1 e2 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(rename_tac qx hx sx) apply(rename_tac n q1 q2 h s e1 e2 qx hx sx)(*strict*) apply(case_tac qx) apply(rename_tac n q1 q2 h s e1 e2 qx hx sx a b)(*strict*) apply(clarsimp) apply(rename_tac n q1 q2 h s e1 e2 hx sx a b)(*strict*) apply(rename_tac p1 p2) apply(rename_tac n q1 q2 h s e1 e2 hx sx p1 p2)(*strict*) 
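(* Inductive step: after n steps the product automaton is in state cons_tuple2 p1 p2 with history hx and stack sx; the induction hypothesis is instantiated with exactly these components to obtain the corresponding derivations of M and D. *)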
apply(erule_tac x="p1" in meta_allE) apply(erule_tac x="p2" in meta_allE) apply(erule_tac x="hx" in meta_allE) apply(erule_tac x="sx" in meta_allE) apply(clarsimp) apply(rename_tac n q1 q2 h s e1 e2 hx sx p1 p2 dM dD nD eM eD)(*strict*) apply(case_tac e2) apply(rename_tac n q1 q2 h s e1 e2 hx sx p1 p2 dM dD nD eM eD edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(clarsimp) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(rename_tac qs read pop push qt) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(subgoal_tac "\<lparr>edge_src = qs, edge_event = read, edge_pop = pop, edge_push = push, edge_trg = qt\<rparr> \<in> F_DPDA_DFA_PRODUCT__edges M D") apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) prefer 2 apply(simp add: epdaH_step_relation_def) apply(simp add: F_DPDA_DFA_PRODUCT_def) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__edges_def) apply(subgoal_tac "\<lparr>epdaH_conf_state = cons_tuple2 p1 p2, epdaH_conf_history = hx, epdaH_conf_stack = sx\<rparr> \<in> epdaH_configurations (F_DPDA_DFA_PRODUCT M D)") apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) prefer 2 apply(rule epdaH.belongs_configurations) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(rule epdaH.derivation_initial_belongs) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(simp add: valid_pda_def) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(force) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(force) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(erule disjE) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__edges_execute_def) apply(clarsimp) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD e e')(*strict*) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(rename_tac n e1 hx dM dD nD eM eD e e' w)(*strict*) apply(thin_tac "\<lparr>edge_src = cons_tuple2 (edge_src e) (edge_src e'), edge_event = edge_event e', edge_pop = edge_pop e, edge_push = edge_push e, edge_trg = cons_tuple2 (edge_trg e) (edge_trg e')\<rparr> \<in> epda_delta (F_DPDA_DFA_PRODUCT M D)") apply(rename_tac n e1 hx dM dD nD eM eD e e' w)(*strict*) apply(case_tac "edge_event e'") apply(rename_tac n e1 hx dM dD nD eM eD e e' w)(*strict*) apply(clarsimp) apply(simp add: valid_dfa_def) apply(clarsimp) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e e' w a)(*strict*) apply(clarsimp) apply(rename_tac \<sigma>) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule_tac x="derivation_append dM (der2 \<lparr>epdaH_conf_state = edge_src e, epdaH_conf_history = hx, epdaH_conf_stack = edge_pop e @ w\<rparr> e \<lparr>epdaH_conf_state = edge_trg e, epdaH_conf_history = hx @ option_to_list (Some \<sigma>), epdaH_conf_stack = edge_push e @ w\<rparr>) n" in exI) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule_tac x="derivation_append dD (der2 \<lparr>epdaH_conf_state = edge_src e', epdaH_conf_history = hx, epdaH_conf_stack = [epda_box D]\<rparr> e' \<lparr>epdaH_conf_state = edge_trg 
e', epdaH_conf_history = hx @ option_to_list (Some \<sigma>), epdaH_conf_stack = [epda_box D]\<rparr>) nD" in exI) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule_tac x="Suc nD" in exI) apply(rule conjI) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule conjI) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation_initial) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(simp add: valid_dfa_def valid_dpda_def valid_pda_def) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule epdaH.der2_is_derivation) apply(simp add: epdaH_step_relation_def) apply(simp add: valid_dfa_def) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(clarsimp) apply(simp add: derivation_append_def der2_def) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule conjI) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation_initial) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(simp add: valid_dfa_def valid_dpda_def valid_pda_def) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule epdaH.der2_is_derivation) apply(simp add: epdaH_step_relation_def) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(simp add: valid_dpda_def valid_pda_def valid_epda_def) apply(clarsimp) apply(simp add: derivation_append_def der2_def) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule conjI) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule_tac x="Some e" in exI) apply(simp add: derivation_append_def der2_def) apply(rename_tac n e1 hx dM dD nD eM eD e e' w \<sigma>)(*strict*) apply(rule_tac x="Some e'" in exI) apply(simp add: derivation_append_def der2_def) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD qs read pop push qt)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__edges_empty_def) apply(clarsimp) apply(rename_tac n q1 q2 h s e1 hx sx p1 p2 dM dD nD eM eD e p)(*strict*) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(thin_tac "\<lparr>edge_src = cons_tuple2 (edge_src e) p, edge_event = None, edge_pop = edge_pop e, edge_push = edge_push e, edge_trg = cons_tuple2 (edge_trg e) p\<rparr> \<in> epda_delta (F_DPDA_DFA_PRODUCT M D)") apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule_tac x="derivation_append dM (der2 \<lparr>epdaH_conf_state = edge_src e, epdaH_conf_history = hx, epdaH_conf_stack = edge_pop e @ w\<rparr> e \<lparr>epdaH_conf_state = edge_trg e, epdaH_conf_history = hx @ 
option_to_list None, epdaH_conf_stack = edge_push e @ w\<rparr>) n" in exI) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule_tac x="dD" in exI) apply(rule_tac x="nD" in exI) apply(rule conjI) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule conjI) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule conjI) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation_initial) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(simp add: valid_dfa_def valid_dpda_def valid_pda_def) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule epdaH.der2_is_derivation) apply(simp add: epdaH_step_relation_def) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(simp add: valid_dfa_def) apply(clarsimp) apply(simp add: derivation_append_def der2_def) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule conjI) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule_tac x="Some e" in exI) apply(simp add: derivation_append_def der2_def) apply(rename_tac n e1 hx dM dD nD eM eD e p w)(*strict*) apply(rule_tac x="eD" in exI) apply(simp add: derivation_append_def der2_def) apply(simp add: option_to_list_def) done lemma F_DPDA_DFA_PRODUCT__reflects__derivation_initial2: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> valid_pda R \<Longrightarrow> epdaH.derivation_initial R dR \<Longrightarrow> epdaH.derivation_initial M (\<lambda>n. 
case dR n of None \<Rightarrow> None | Some (pair e c) \<Rightarrow> Some (pair (case e of None \<Rightarrow> None | Some e \<Rightarrow> Some (slice_edge_A e)) (slice_conf_A c)))" apply(rule epdaH.derivation_initialI) prefer 2 apply(simp add: get_configuration_def epdaH.derivation_initial_def epdaH.derivation_def) apply(case_tac "dR 0") apply(clarsimp) apply(rename_tac a)(*strict*) apply(clarsimp) apply(rename_tac a c)(*strict*) apply(case_tac a) apply(rename_tac a c option b)(*strict*) apply(clarsimp) apply(rename_tac b)(*strict*) apply(simp add: epdaH_initial_configurations_def epdaH_configurations_def slice_conf_A_def sel_tuple2_1_def F_DPDA_DFA_PRODUCT_def valid_dpda_def valid_pda_def valid_epda_def) apply(simp add: epdaH.derivation_def) apply(clarsimp) apply(rename_tac i)(*strict*) apply(case_tac i) apply(rename_tac i)(*strict*) apply(simp add: epdaH_initial_configurations_def epdaH_configurations_def slice_conf_A_def sel_tuple2_1_def F_DPDA_DFA_PRODUCT_def valid_dpda_def valid_pda_def valid_epda_def) apply(simp add: get_configuration_def epdaH.derivation_initial_def epdaH.derivation_def) apply(clarsimp) apply(erule_tac x="0" in allE) apply(clarsimp) apply(case_tac "dR 0") apply(clarsimp) apply(rename_tac a)(*strict*) apply(clarsimp) apply(case_tac a) apply(rename_tac a option b)(*strict*) apply(clarsimp) apply(rename_tac i nat)(*strict*) apply(clarsimp) apply(rename_tac nat)(*strict*) apply(case_tac "dR (Suc nat)") apply(rename_tac nat)(*strict*) apply(clarsimp) apply(rename_tac nat a)(*strict*) apply(clarsimp) apply(subgoal_tac "X" for X) apply(rename_tac nat a)(*strict*) prefer 2 apply(rule_tac G="F_DPDA_DFA_PRODUCT M D" and d="dR" and n="nat" and m="Suc nat" in epdaH.step_detail_before_some_position) apply(rename_tac nat a)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac nat a)(*strict*) apply(force) apply(rename_tac nat a)(*strict*) apply(force) apply(rename_tac nat a)(*strict*) apply(clarsimp) apply(rename_tac nat e1 e2 c1 c2)(*strict*) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(rename_tac nat e1 e2 c1 c2 w)(*strict*) apply(simp add: slice_edge_A_def) apply(case_tac c1) apply(rename_tac nat e1 e2 c1 c2 w epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka)(*strict*) apply(clarsimp) apply(rename_tac nat e1 e2 c2 w epdaH_conf_historya)(*strict*) apply(case_tac c2) apply(rename_tac nat e1 e2 c2 w epdaH_conf_historya epdaH_conf_statea epdaH_conf_historyaa epdaH_conf_stacka)(*strict*) apply(clarsimp) apply(rename_tac nat e1 e2 w epdaH_conf_historya)(*strict*) apply(case_tac e2) apply(rename_tac nat e1 e2 w epdaH_conf_historya edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac nat e1 w epdaH_conf_historya edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(rename_tac h qs r po pu qt) apply(rename_tac nat e1 w h qs r po pu qt)(*strict*) apply(case_tac qs) apply(rename_tac nat e1 w h qs r po pu qt a b)(*strict*) apply(case_tac qt) apply(rename_tac nat e1 w h qs r po pu qt a b aa ba)(*strict*) apply(clarsimp) apply(rename_tac nat e1 w h r po pu a b aa ba)(*strict*) apply(simp add: slice_conf_A_def) apply(simp add: F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def) apply(erule disjE) apply(rename_tac nat e1 w h r po pu a b aa ba)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__edges_execute_def) apply(clarsimp) apply(rename_tac nat e1 w h e e')(*strict*) apply(case_tac e) apply(rename_tac nat e1 w h e e' edge_srca edge_eventa edge_popa edge_pusha 
edge_trga)(*strict*) apply(clarsimp) apply(rename_tac nat e1 w h r po pu a b aa ba)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT__edges_empty_def) apply(clarsimp) apply(rename_tac nat e1 w h b e)(*strict*) apply(case_tac e) apply(rename_tac nat e1 w h b e edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) done lemma F_DPDA_DFA_PRODUCT__preserves__derivation_initial: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> valid_pda R \<Longrightarrow> epdaH.derivation_initial D dD \<Longrightarrow> epdaH.derivation_initial M dM \<Longrightarrow> length h + ints = n \<Longrightarrow> dM n = Some (pair eM \<lparr>epdaH_conf_state = q1, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>) \<Longrightarrow> dD (length h) = Some (pair eD \<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr>) \<Longrightarrow> \<exists>dR eR. epdaH.derivation_initial R dR \<and> dR n = Some (pair eR \<lparr>epdaH_conf_state = cons_tuple2 q1 q2, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>)" apply(induct n arbitrary: h q1 q2 s eM eD ints) apply(rename_tac h q1 q2 s eM eD ints)(*strict*) apply(clarsimp) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule_tac x="der1 \<lparr>epdaH_conf_state = cons_tuple2 q1 q2, epdaH_conf_history = [], epdaH_conf_stack = s\<rparr>" in exI) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule conjI) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule epdaH.derivation_initialI) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule epdaH.der1_is_derivation) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(clarsimp) apply(rename_tac q1 q2 s eM eD c)(*strict*) apply(simp add: get_configuration_def der1_def) apply(clarsimp) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(simp add: epdaH.derivation_initial_def) apply(clarsimp) apply(rename_tac q1 q2 s)(*strict*) apply(simp add: epdaH_initial_configurations_def epdaH_configurations_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__states_def) apply(clarsimp) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule_tac x="None" in exI) apply(simp add: der1_def) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(clarsimp) apply(subgoal_tac "X" for X) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) prefer 2 apply(rule_tac G="M" and d="dM" and n="n" and m="Suc n" in epdaH.step_detail_before_some_position) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(force) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(force) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(clarsimp) apply(rename_tac n h q1 q2 s eD ints e1 e2 c1)(*strict*) apply(case_tac c1) apply(rename_tac n h q1 q2 s eD ints e1 e2 c1 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(clarsimp) apply(rename_tac n h q1 q2 s eD ints e1 e2 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(rename_tac qx hx sx) apply(rename_tac n h q1 q2 s eD ints e1 e2 qx hx sx)(*strict*) apply(case_tac e2) apply(rename_tac n h q1 q2 s eD ints e1 e2 qx hx sx edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(clarsimp) apply(rename_tac n h q1 q2 s eD ints e1 qx hx sx edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(rename_tac qs read pop push qt) apply(rename_tac n h q1 q2 s eD ints e1 qx hx sx qs read pop push qt)(*strict*) apply(simp add: epdaH_step_relation_def) 
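(* Unfolding epdaH_step_relation exposes the M-edge (qs, read, pop, push, qt) taken at step n; the case split on read below distinguishes an empty move of M, where D stays in its current state, from a reading move, which is paired with the corresponding DFA transition. *)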
apply(clarsimp) apply(rename_tac n q2 eD ints e1 hx qs read pop push qt w)(*strict*) apply(erule_tac x="hx" in meta_allE) apply(erule_tac x="qs" in meta_allE) apply(clarsimp) apply(case_tac read) apply(rename_tac n q2 eD ints e1 hx qs read pop push qt w)(*strict*) apply(clarsimp) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w)(*strict*) apply(simp add: option_to_list_def) apply(erule_tac x="q2" in meta_allE) apply(erule_tac x="pop@w" in meta_allE) apply(clarsimp) apply(erule_tac x="e1" in meta_allE) apply(erule_tac x="eD" in meta_allE) apply(clarsimp) apply(case_tac ints) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w)(*strict*) apply(clarsimp) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) prefer 2 apply(rule_tac G="M" and d="dM" and n="n" in epda_at_most_one_symbol_per_step) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(simp add: valid_dpda_def valid_pda_def) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(force) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(force) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(clarsimp) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w nat)(*strict*) apply(clarsimp) apply(rename_tac q2 eD e1 hx qs pop push qt w nat)(*strict*) apply(erule_tac x="nat" in meta_allE) apply(clarsimp) apply(rename_tac q2 eD e1 hx qs pop push qt w nat dR eR)(*strict*) apply(rename_tac ints dR eR) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(subgoal_tac "\<lparr>epdaH_conf_state = cons_tuple2 qs q2, epdaH_conf_history = hx, epdaH_conf_stack = pop @ w\<rparr> \<in> epdaH_configurations (F_DPDA_DFA_PRODUCT M D)") apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule_tac x="derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs q2, epdaH_conf_history = hx, epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs q2, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt q2\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt q2, epdaH_conf_history = hx, epdaH_conf_stack = push @ w\<rparr>) (length hx + ints)" in exI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule conjI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation_initial) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: valid_dfa_def valid_dpda_def valid_pda_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(force) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.der2_is_derivation) apply(simp add: epdaH_step_relation_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def) apply(rule conjI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule disjI2) apply(simp add: F_DPDA_DFA_PRODUCT__edges_empty_def) apply(rule_tac x="\<lparr>edge_src = qs, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = qt\<rparr>" in exI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(clarsimp) apply(simp add: epdaH_configurations_def 
F_DPDA_DFA_PRODUCT__states_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: option_to_list_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: derivation_append_def der2_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule_tac x="Some \<lparr>edge_src = cons_tuple2 qs q2, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt q2\<rparr>" in exI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: derivation_append_def der2_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.belongs_configurations) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.derivation_initial_belongs) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: valid_pda_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(force) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(force) apply(rename_tac n q2 eD ints e1 hx qs read pop push qt w a)(*strict*) apply(clarsimp) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(simp add: option_to_list_def) apply(subgoal_tac "X" for X) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) prefer 2 apply(rule_tac G="D" and d="dD" and n="length hx" and m="Suc (length hx)" in epdaH.step_detail_before_some_position) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(force) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(force) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(clarsimp) apply(rename_tac q2 ints e1 hx qs pop push qt w a e1a e2 c1)(*strict*) apply(case_tac c1) apply(rename_tac q2 ints e1 hx qs pop push qt w a e1a e2 c1 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(clarsimp) apply(rename_tac q2 ints e1 hx qs pop push qt w a e1a e2 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(rename_tac q2' h' s') apply(rename_tac q2 ints e1 hx qs pop push qt w a e1a e2 q2' h' s')(*strict*) apply(erule_tac x="q2'" in meta_allE) apply(clarsimp) apply(erule_tac x="pop@w" in meta_allE) apply(clarsimp) apply(erule_tac x="e1" in meta_allE) apply(clarsimp) apply(erule_tac x="e1a" in meta_allE) apply(clarsimp) apply(erule_tac x="ints" in meta_allE) apply(clarsimp) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) apply(subgoal_tac "(\<forall>e\<in> epda_delta D. 
edge_event e \<noteq> None \<and> edge_pop e = [epda_box D] \<and> edge_push e = [epda_box D])") apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) prefer 2 apply(unfold valid_dfa_def)[1] apply(erule conjE)+ apply(force) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) apply(erule_tac x="e2" in ballE) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) prefer 2 apply(force) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) apply(clarify) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa y)(*strict*) apply(case_tac e2) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa y edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarify) apply(simp) apply(rename_tac ints e1 hx qs pop push qt w a e1a h' wa y edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(simp add: option_to_list_def) apply(clarify) apply(rename_tac ints e1 hx qs pop push qt w a e1a h' wa y edge_src edge_event edge_pop edge_push edge_trg dR eR)(*strict*) apply(simp) apply(rename_tac ints e1 qs pop push qt w e1a h' y edge_src edge_trg dR eR)(*strict*) apply(rename_tac qxs qxt dR eR) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule_tac x="derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs qxs, epdaH_conf_history = h', epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs qxs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt qxt\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt qxt, epdaH_conf_history = h' @ [y], epdaH_conf_stack = push @ w\<rparr>) (length h' + ints)" in exI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule conjI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation_initial) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(simp add: valid_dfa_def valid_dpda_def valid_pda_def) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(force) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule epdaH.der2_is_derivation) apply(simp add: epdaH_step_relation_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def) apply(simp add: option_to_list_def) apply(rule disjI1) apply(simp add: F_DPDA_DFA_PRODUCT__edges_execute_def) apply(rule_tac x="\<lparr>edge_src = qs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = qt\<rparr>" in exI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(clarsimp) apply(rule_tac x="\<lparr>edge_src = qxs, edge_event = Some y, edge_pop = [epda_box D], edge_push = [epda_box D], edge_trg = qxt\<rparr>" in exI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(clarsimp) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(simp add: derivation_append_def der2_def) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule_tac x="Some \<lparr>edge_src = cons_tuple2 qs qxs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt 
qxt\<rparr>" in exI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(simp add: derivation_append_def der2_def) done lemma F_DPDA_DFA_PRODUCT__reflects__unmarked_language: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> epdaH.unmarked_language R \<subseteq> epdaH.unmarked_language M \<inter> epdaH.unmarked_language D" apply(rule subsetI) apply(rename_tac x)(*strict*) apply(clarsimp) apply(simp add: epdaH.unmarked_language_def) apply(clarsimp) apply(rename_tac x d)(*strict*) apply(thin_tac "epdaH.derivation (F_DPDA_DFA_PRODUCT M D) d") apply(simp add: epdaH_unmarked_effect_def) apply(clarsimp) apply(rename_tac d i e c)(*strict*) apply(case_tac c) apply(rename_tac d i e c epdaH_conf_state epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(clarsimp) apply(rename_tac d i e epdaH_conf_state epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(rename_tac q h s) apply(rename_tac d i e q h s)(*strict*) apply(case_tac q) apply(rename_tac d i e q h s a b)(*strict*) apply(rename_tac q1 q2) apply(rename_tac d i e q h s q1 q2)(*strict*) apply(clarsimp) apply(rename_tac d i e h s q1 q2)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d i e h s q1 q2)(*strict*) prefer 2 apply(rule_tac eR="e" and n="i" in F_DPDA_DFA_PRODUCT__reflects__derivation_initial) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(rule F_DPDA_DFA_PRODUCT__produces__PDA) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(clarsimp) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="dM" in exI) apply(clarsimp) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="i" in exI) apply(clarsimp) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: epdaH.derivation_initial_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="dD" in exI) apply(clarsimp) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="nD" in exI) apply(clarsimp) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: epdaH.derivation_initial_def) done lemma F_DPDA_DFA_PRODUCT__preserves__unmarked_language: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> epdaH.unmarked_language M \<inter> epdaH.unmarked_language D \<subseteq> epdaH.unmarked_language R" apply(rule subsetI) apply(rename_tac x)(*strict*) apply(clarsimp) apply(simp add: epdaH.unmarked_language_def) apply(clarsimp) apply(rename_tac x d da)(*strict*) apply(thin_tac "epdaH.derivation M d") apply(thin_tac "epdaH.derivation D da") apply(simp add: epdaH_unmarked_effect_def) apply(clarsimp) apply(rename_tac d da i ia e ea c ca)(*strict*) apply(case_tac c) apply(rename_tac d da i ia e ea c ca epdaH_conf_state epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(case_tac ca) apply(rename_tac d da i ia e ea c ca epdaH_conf_state epdaH_conf_historya 
epdaH_conf_stack epdaH_conf_statea epdaH_conf_historyaa epdaH_conf_stacka)(*strict*) apply(clarsimp) apply(rename_tac d da i ia e ea epdaH_conf_state epdaH_conf_stack epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka)(*strict*) apply(rename_tac q1 s1 q2 h s2) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) prefer 2 apply(rule_tac G="D" and d="da" and n="ia" in DFA_one_symbol_per_step) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(clarsimp) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) prefer 2 apply(rule_tac G="M" and d="d" and n="i" in epda_at_most_one_symbol_per_step) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(simp add: valid_dpda_def valid_pda_def) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(clarsimp) apply(subgoal_tac "\<exists>ints. length h+ints=i") apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) prefer 2 apply(rule_tac x="i-length h" in exI) apply(force) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(clarsimp) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) prefer 2 apply(rule DFA_stack_consists_only_of_box) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(clarsimp) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) prefer 2 apply(rule_tac h="h" and ints="ints" and D="D" and M="M" in F_DPDA_DFA_PRODUCT__preserves__derivation_initial) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(rule F_DPDA_DFA_PRODUCT__produces__PDA) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(clarsimp) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(rule_tac x="dR" in exI) apply(clarsimp) apply(rule conjI) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(rule_tac x="length h+ints" in exI) apply(clarsimp) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(simp add: epdaH.derivation_initial_def) done theorem 
F_DPDA_DFA_PRODUCT__relates__unmarked_language: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> epdaH.unmarked_language R = epdaH.unmarked_language M \<inter> epdaH.unmarked_language D" apply(rule order_antisym) apply(rule F_DPDA_DFA_PRODUCT__reflects__unmarked_language) apply(force) apply(force) apply(force) apply(rule F_DPDA_DFA_PRODUCT__preserves__unmarked_language) apply(force) apply(force) apply(force) done lemma F_DPDA_DFA_PRODUCT__reflects__marked_language: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> epdaH.marked_language R \<subseteq> epdaH.marked_language M \<inter> epdaH.marked_language D" apply(rule subsetI) apply(rename_tac x)(*strict*) apply(clarsimp) apply(simp add: epdaH.marked_language_def) apply(clarsimp) apply(rename_tac x d)(*strict*) apply(thin_tac "epdaH.derivation (F_DPDA_DFA_PRODUCT M D) d") apply(subgoal_tac "(\<exists>d. epdaH.derivation_initial M d \<and> x \<in> epdaH_marked_effect M d) \<and> (\<exists>d. epdaH.derivation_initial D d \<and> x \<in> epdaH_marked_effect D d)") apply(rename_tac x d)(*strict*) apply(clarsimp) apply(rename_tac x d da db)(*strict*) apply(rule conjI) apply(rename_tac x d da db)(*strict*) apply(rule_tac x="da" in exI) apply(clarsimp) apply(simp add: epdaH.derivation_initial_def) apply(simp add: epdaH_marking_condition_def epdaH_marked_effect_def) apply(clarsimp) apply(rename_tac d da db i ia ib ic e ea eb ec c ca cb cc)(*strict*) apply(rule_tac x="ib" in exI) apply(clarsimp) apply(rename_tac x d da db)(*strict*) apply(rule_tac x="db" in exI) apply(clarsimp) apply(simp add: epdaH.derivation_initial_def) apply(simp add: epdaH_marking_condition_def epdaH_marked_effect_def) apply(clarsimp) apply(rename_tac d da db i ia ib ic e ea eb ec c ca cb cc)(*strict*) apply(rule_tac x="ic" in exI) apply(clarsimp) apply(rename_tac x d)(*strict*) apply(thin_tac "epdaH_marking_condition (F_DPDA_DFA_PRODUCT M D) d") apply(simp add: epdaH_marked_effect_def epdaH_marking_configurations_def) apply(clarsimp) apply(rename_tac d i e c)(*strict*) apply(thin_tac "\<forall>j e' c'. 
i < j \<and> d j = Some (pair e' c') \<longrightarrow> epdaH_string_state c = epdaH_string_state c'") apply(rename_tac d i e c)(*strict*) apply(thin_tac "c \<in> epdaH_configurations (F_DPDA_DFA_PRODUCT M D)") apply(case_tac c) apply(rename_tac d i e c epdaH_conf_statea epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(clarsimp) apply(rename_tac d i e epdaH_conf_statea epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(rename_tac q h s) apply(rename_tac d i e q h s)(*strict*) apply(case_tac q) apply(rename_tac d i e q h s a b)(*strict*) apply(rename_tac q1 q2) apply(rename_tac d i e q h s q1 q2)(*strict*) apply(clarsimp) apply(rename_tac d i e h s q1 q2)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d i e h s q1 q2)(*strict*) prefer 2 apply(rule_tac eR="e" and n="i" in F_DPDA_DFA_PRODUCT__reflects__derivation_initial) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(rule F_DPDA_DFA_PRODUCT__produces__PDA) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2)(*strict*) apply(clarsimp) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="derivation_take dM i" in exI) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule epdaH.derivation_take_preserves_derivation_initial) apply(force) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="i" in exI) apply(rule_tac x="eM" in exI) apply(rule_tac x="\<lparr>epdaH_conf_state = q1, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>" in exI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: derivation_take_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule epdaH.belongs_configurations) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule epdaH.derivation_initial_belongs) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: valid_dpda_def valid_pda_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(clarsimp) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD j e' c')(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="derivation_take dD nD" in exI) apply(rule 
conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule epdaH.derivation_take_preserves_derivation_initial) apply(force) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="nD" in exI) apply(rule_tac x="eD" in exI) apply(rule_tac x="\<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr>" in exI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: derivation_take_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule epdaH.belongs_configurations) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule epdaH.derivation_initial_belongs) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: valid_dfa_def valid_dpda_def valid_pda_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD)(*strict*) apply(clarsimp) apply(rename_tac d i e h s q1 q2 dM dD nD eM eD j e' c')(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def) done lemma F_DPDA_DFA_PRODUCT__preserves__marked_language: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> epdaH.marked_language M \<inter> epdaH.marked_language D \<subseteq> epdaH.marked_language R" apply(rule subsetI) apply(rename_tac x)(*strict*) apply(clarsimp) apply(simp add: epdaH.marked_language_def) apply(clarsimp) apply(rename_tac x d da)(*strict*) apply(thin_tac "epdaH.derivation M d") apply(thin_tac "epdaH.derivation D da") apply(thin_tac "epdaH_marking_condition M d") apply(thin_tac "epdaH_marking_condition D da") apply(subgoal_tac "\<exists>d. 
epdaH.derivation_initial (F_DPDA_DFA_PRODUCT M D) d \<and> x \<in> epdaH_marked_effect (F_DPDA_DFA_PRODUCT M D) d") apply(rename_tac x d da)(*strict*) apply(clarsimp) apply(rename_tac x d da db)(*strict*) apply(rule_tac x="db" in exI) apply(clarsimp) apply(simp add: epdaH.derivation_initial_def) apply(simp add: epdaH_marked_effect_def epdaH_marking_condition_def) apply(clarsimp) apply(rename_tac d da db i ia ib e ea eb c ca cb)(*strict*) apply(rule_tac x="ib" in exI) apply(clarsimp) apply(rename_tac x d da)(*strict*) apply(simp add: epdaH_marked_effect_def) apply(clarsimp) apply(rename_tac d da i ia e ea c ca)(*strict*) apply(case_tac c) apply(rename_tac d da i ia e ea c ca epdaH_conf_state epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(case_tac ca) apply(rename_tac d da i ia e ea c ca epdaH_conf_state epdaH_conf_historya epdaH_conf_stack epdaH_conf_statea epdaH_conf_historyaa epdaH_conf_stacka)(*strict*) apply(clarsimp) apply(rename_tac d da i ia e ea epdaH_conf_state epdaH_conf_stack epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka)(*strict*) apply(rename_tac q1 s1 q2 h s2) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) prefer 2 apply(rule_tac G="D" and d="da" and n="ia" in DFA_one_symbol_per_step) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i ia e ea q1 s1 q2 h s2)(*strict*) apply(clarsimp) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) prefer 2 apply(rule_tac G="M" and d="d" and n="i" in epda_at_most_one_symbol_per_step) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(simp add: valid_dpda_def valid_pda_def) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(force) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(clarsimp) apply(subgoal_tac "\<exists>ints. 
length h+ints=i") apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) prefer 2 apply(rule_tac x="i-length h" in exI) apply(force) apply(rename_tac d da i e ea q1 s1 q2 h s2)(*strict*) apply(clarsimp) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) prefer 2 apply(rule DFA_stack_consists_only_of_box) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h s2 ints)(*strict*) apply(clarsimp) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) prefer 2 apply(rule_tac h="h" and ints="ints" and D="D" and M="M" in F_DPDA_DFA_PRODUCT__preserves__derivation_initial) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(rule F_DPDA_DFA_PRODUCT__produces__PDA) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints)(*strict*) apply(clarsimp) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(rule_tac x="derivation_take dR (length h+ints)" in exI) apply(rule conjI) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(rule epdaH.derivation_take_preserves_derivation_initial) apply(force) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(rule_tac x="length h+ints" in exI) apply(rule_tac x="eR" in exI) apply(rule_tac x="\<lparr>epdaH_conf_state = cons_tuple2 q1 q2, epdaH_conf_history = h, epdaH_conf_stack = s1\<rparr>" in exI) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(rule conjI) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(simp add: derivation_take_def) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(rule conjI) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def epdaH_marking_configurations_def epdaH_configurations_def F_DPDA_DFA_PRODUCT__states_def F_DPDA_DFA_PRODUCT__events_def) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(rule conjI) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def epdaH_marking_configurations_def epdaH_configurations_def F_DPDA_DFA_PRODUCT__states_def F_DPDA_DFA_PRODUCT__events_def) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR)(*strict*) apply(clarsimp) apply(rename_tac d da e ea q1 s1 q2 h ints dR eR j e' c')(*strict*) apply(simp add: derivation_take_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__marking_states_def) done theorem 
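(* Informal reading of the next result: the marked language of the product is the intersection of the marked languages of M and D, obtained from the two inclusions F_DPDA_DFA_PRODUCT__reflects__marked_language and F_DPDA_DFA_PRODUCT__preserves__marked_language proved above and below. *)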
F_DPDA_DFA_PRODUCT__relates__marked_language: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> epdaH.marked_language R = (epdaH.marked_language M) \<inter> (epdaH.marked_language D)" apply(rule order_antisym) apply(rule F_DPDA_DFA_PRODUCT__reflects__marked_language) apply(force) apply(force) apply(force) apply(rule F_DPDA_DFA_PRODUCT__preserves__marked_language) apply(force) apply(force) apply(force) done lemma F_DPDA_DFA_PRODUCT__preserves__determinism: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> valid_pda R \<Longrightarrow> epdaS.is_forward_edge_deterministic_accessible R" apply(subgoal_tac "epdaH.is_forward_edge_deterministicHist_DB_long M") prefer 2 apply(rule DPDA_to_epdaH_determinism) apply(force) apply(subgoal_tac "epdaH.is_forward_edge_deterministicHist_DB_long R") apply (metis epda_epdaS_is_forward_edge_deterministic_accessible_equal_to_epdaH_is_forward_edge_deterministicHist_DB_long valid_pda_to_valid_epda) apply(subgoal_tac "epdaH.is_forward_edge_deterministicHist_DB_long D") prefer 2 apply(rule DPDA_to_epdaH_determinism) apply(simp add: valid_dfa_def) apply(clarsimp) apply(simp add: epdaH.is_forward_edge_deterministicHist_DB_long_def) apply(clarsimp) apply(rename_tac c d c1 c2 e1 e2 n w1 w2)(*strict*) apply(case_tac "d n") apply(rename_tac c d c1 c2 e1 e2 n w1 w2)(*strict*) apply(simp add: get_configuration_def) apply(rename_tac c d c1 c2 e1 e2 n w1 w2 a)(*strict*) apply(clarsimp) apply(simp add: get_configuration_def) apply(case_tac a) apply(rename_tac c d c1 c2 e1 e2 n w1 w2 a option b)(*strict*) apply(clarsimp) apply(rename_tac c d c1 c2 e1 e2 n w1 w2 option)(*strict*) apply(case_tac c) apply(rename_tac c d c1 c2 e1 e2 n w1 w2 option epdaH_conf_state epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 option epdaH_conf_state epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(rename_tac e q h s) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e q h s)(*strict*) apply(case_tac q) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e q h s a b)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s a b)(*strict*) apply(rename_tac q1 q2) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) prefer 2 apply(rule_tac F_DPDA_DFA_PRODUCT__reflects__derivation_initial) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(rule F_DPDA_DFA_PRODUCT__produces__PDA) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(erule_tac x="\<lparr>epdaH_conf_state = q1, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>" in allE) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(erule impE) 
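(* Roughly: the unfolded determinism assumptions are instantiated at this point, first with the M-component \<lparr>epdaH_conf_state = q1, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr> of the product configuration (its premise is discharged with the reflected derivation dM), and then with the D-component \<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr> (discharged with dD). *)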
apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="dM" in exI) apply(clarsimp) apply(rule_tac x="n" in exI) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(erule_tac x="\<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr>" in allE) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(erule impE) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="dD" in exI) apply(clarsimp) apply(rule_tac x="nD" in exI) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(erule_tac x="slice_conf_A c1" in allE) apply(erule_tac x="slice_conf_A c2" in allE) apply(erule_tac x="slice_edge_A e1" in allE) apply(erule_tac x="slice_edge_A e2" in allE) apply(erule impE) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) prefer 2 apply(simp add: slice_edge_A_def slice_edge_B_def) apply(case_tac e1) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac e2) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 n w1 w2 e h s q1 q2 dM dD nD eM eD edge_src edge_trg edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac edge_src) apply(rename_tac d c1 c2 n w1 w2 e h s q1 q2 dM dD nD eM eD edge_src edge_trg edge_srca edge_eventa edge_popa edge_pusha edge_trga a b)(*strict*) apply(clarsimp) apply(case_tac edge_srca) apply(clarsimp) apply(simp add: sel_tuple2_1_def) apply(case_tac edge_trg) apply(case_tac edge_trga) apply(clarsimp) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(case_tac "edge_eventa") apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD edge_popa edge_pusha aa ba bb ac bc wa)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def F_DPDA_DFA_PRODUCT__edges_empty_def F_DPDA_DFA_PRODUCT__edges_execute_def) apply(erule disjE)+ apply(rename_tac d c1 c2 n e h dM dD nD eM eD edge_popa edge_pusha aa ba bb ac bc wa)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa ea eb e' e'a)(*strict*) apply(simp add: option_to_list_def valid_dfa_def) apply(clarsimp) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e' e'a)(*strict*) apply(erule_tac x="e'a" in ballE) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e' e'a)(*strict*) prefer 2 apply(force) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e' e'a)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD edge_popa edge_pusha aa ba bb ac bc wa)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa ea eb e')(*strict*) apply(simp add: option_to_list_def valid_dfa_def) apply(clarsimp) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e')(*strict*) apply(erule_tac x="e'" in ballE) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e')(*strict*) prefer 2 apply(force) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e')(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD edge_popa edge_pusha aa ba bb ac bc wa)(*strict*) apply(erule disjE)+ apply(rename_tac d c1 c2 n e h dM dD nD eM eD edge_popa edge_pusha aa ba bb ac bc wa)(*strict*) apply(clarsimp) apply(rename_tac d 
c1 c2 n e h dM dD nD eM eD wa ea eb e')(*strict*) apply(simp add: option_to_list_def valid_dfa_def) apply(clarsimp) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e')(*strict*) apply(erule_tac x="e'" in ballE) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e')(*strict*) prefer 2 apply(force) apply(rename_tac d c1 c2 n e dM dD nD eM eD wa ea eb e')(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD edge_popa edge_pusha aa ba bb ac bc wa)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD edge_eventa edge_popa edge_pusha aa ba bb ac bc wa a)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD edge_popa edge_pusha aa ba bb ac bc wa a)(*strict*) apply(simp add: F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def F_DPDA_DFA_PRODUCT__edges_empty_def F_DPDA_DFA_PRODUCT__edges_execute_def) apply(clarsimp) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a)(*strict*) apply(erule_tac x="slice_conf_B c1 [epda_box D]" in allE) apply(erule_tac x="slice_conf_B c2 [epda_box D]" in allE) apply(erule_tac x="e'" in allE) apply(erule_tac x="e'a" in allE) apply(erule impE) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a)(*strict*) prefer 2 apply(force) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a)(*strict*) apply(rule_tac x="option_to_list (edge_event e')" in exI) apply(rule conjI) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a)(*strict*) apply(simp add: epda_effects_def F_DPDA_DFA_PRODUCT__events_def) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a)(*strict*) apply(rule_tac x="option_to_list (edge_event e')" in exI) apply(rule conjI) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a)(*strict*) apply(simp add: epda_effects_def F_DPDA_DFA_PRODUCT__events_def) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a)(*strict*) apply(clarsimp) apply(case_tac c1) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka)(*strict*) apply(case_tac c2) apply(rename_tac d c1 c2 n e h dM dD nD eM eD wa a ea eb e' e'a epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka epdaH_conf_stateaa epdaH_conf_historyaa epdaH_conf_stackaa)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD wa a ea eb e' e'a)(*strict*) apply(simp add: slice_conf_B_def sel_tuple2_2_def) apply(simp add: option_to_list_def valid_dfa_def) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(clarsimp) apply(thin_tac "\<forall>c1 c2 e1 e2. (\<exists>w1. w1 \<in> epda_effects D \<and> (\<exists>w2. 
w2 \<in> epda_effects D \<and> epdaH_step_relation D \<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr> e1 c1 \<and> epdaH_step_relation D \<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr> e2 c2 \<and> epdaH_conf_history c1 = h @ w1 \<and> epdaH_conf_history c2 = h @ w2 \<and> (ATS_History.history_fragment_prefixes epda_effects (@) D w1 \<subseteq> ATS_History.history_fragment_prefixes epda_effects (@) D w2 \<or> ATS_History.history_fragment_prefixes epda_effects (@) D w2 \<subseteq> ATS_History.history_fragment_prefixes epda_effects (@) D w1 \<or> ATS_History.history_fragment_prefixes epda_effects (@) D w2 = ATS_History.history_fragment_prefixes epda_effects (@) D w1))) \<longrightarrow> e1 = e2") apply(rule_tac x="w1" in exI) apply(rule conjI) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: epda_effects_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__events_def) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac x="w2" in exI) apply(rule conjI) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: epda_effects_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__events_def) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n e h q1 q2 dM dD nD eM eD w wa)(*strict*) apply(case_tac e1) apply(rename_tac d c1 c2 e1 e2 n e h q1 q2 dM dD nD eM eD w wa edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_pusha)(*strict*) apply(simp add: slice_edge_A_def) apply(case_tac c2) apply(rename_tac d c1 c2 e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_pusha epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka)(*strict*) apply(clarsimp) apply(rename_tac d c1 e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_push)(*strict*) apply(case_tac c1) apply(rename_tac d c1 e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_push epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka)(*strict*) apply(clarsimp) apply(rename_tac d e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_push epdaH_conf_statea)(*strict*) apply(case_tac e2) apply(rename_tac d e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_push epdaH_conf_statea edge_srca edge_eventaa edge_popaa edge_pusha edge_trg)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa edge_event edge_pop edge_push epdaH_conf_statea edge_eventa edge_popa edge_pusha edge_trg)(*strict*) apply(rename_tac x1 x2 x3 x4 x5 x6 x7 x8) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 x8)(*strict*) apply(simp add: sel_tuple2_1_def) apply(case_tac x4) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(simp add: slice_conf_A_def sel_tuple2_1_def) apply(simp add: F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def F_DPDA_DFA_PRODUCT__edges_empty_def F_DPDA_DFA_PRODUCT__edges_execute_def) apply(erule disjE)+ apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) 
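(* The block below appears to be an exhaustive case distinction on the shape of the product edges involved (F_DPDA_DFA_PRODUCT__edges_empty_def versus F_DPDA_DFA_PRODUCT__edges_execute_def); each branch is closed by case_tac on the edge records followed by clarsimp. *)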
apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 x8 ea)(*strict*) apply(case_tac ea) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 x8 ea edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(erule disjE)+ apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a)(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(case_tac e'a) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb edge_srcc edge_eventc edge_popc edge_pushc edge_trgc)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a)(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb 
e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(case_tac e'a) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb edge_srcc edge_eventc edge_popc edge_pushc edge_trgc)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(erule disjE)+ apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa ea eb)(*strict*) apply(case_tac ea) apply(rename_tac d n e h q2 dM dD nD eM eD w wa ea eb edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h q2 dM dD nD eM eD w wa ea eb edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e 
h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n e h q1 q2 dM dD nD eM eD w wa)(*strict*) apply(case_tac e1) apply(rename_tac d c1 c2 e1 e2 n e h q1 q2 dM dD nD eM eD w wa edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_pusha)(*strict*) apply(simp add: slice_edge_A_def) apply(case_tac c2) apply(rename_tac d c1 c2 e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_pusha epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka)(*strict*) apply(clarsimp) apply(rename_tac d c1 e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_pusha)(*strict*) apply(case_tac c1) apply(rename_tac d c1 e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_pusha epdaH_conf_statea epdaH_conf_historya epdaH_conf_stacka)(*strict*) apply(clarsimp) apply(rename_tac d e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_pusha epdaH_conf_statea)(*strict*) apply(case_tac e2) apply(rename_tac d e2 n e h q1 q2 dM dD nD eM eD w wa edge_eventa edge_popa edge_pusha epdaH_conf_statea edge_srca edge_eventaa edge_popaa edge_pushaa edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa edge_event edge_pop edge_push epdaH_conf_statea edge_eventa edge_popa edge_pusha edge_trg)(*strict*) apply(rename_tac x1 x2 x3 x4 x5 x6 x7 x8) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 x8)(*strict*) apply(simp add: sel_tuple2_1_def) apply(case_tac x8) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 x8 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(simp add: slice_conf_A_def sel_tuple2_1_def) apply(simp add: F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def F_DPDA_DFA_PRODUCT__edges_empty_def F_DPDA_DFA_PRODUCT__edges_execute_def) apply(erule disjE)+ apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac e'a) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb edge_srcc edge_eventc edge_popc edge_pushc edge_trgc)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa 
edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b ea)(*strict*) apply(case_tac ea) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b ea edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b edge_srca edge_popa edge_pusha edge_trga)(*strict*) apply(erule disjE)+ apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b edge_srca edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e' edge_srca edge_eventa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e' edge_srca edge_eventa edge_popaa edge_pushaa edge_trgaa edge_srcaa edge_eventaa edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b edge_srca edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea)(*strict*) apply(case_tac ea) apply(rename_tac d n e h q2 dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea edge_srca edge_eventa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(erule disjE)+ apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a)(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(case_tac e'a) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb edge_srcc edge_eventc edge_popc edge_pushc edge_trgc)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a)(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') 
apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(case_tac e'a) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' e'a edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb edge_srcc edge_eventc edge_popc edge_pushc edge_trgc)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(case_tac eb) apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa ea eb e' edge_srca edge_eventa edge_popa edge_pusha edge_trga edge_srcaa edge_eventaa edge_popaa edge_pushaa edge_trgaa edge_srcb edge_eventb edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q1 q2 dM dD nD eM eD w wa x1 x2 x3 x4 x5 x6 x7 a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b ea)(*strict*) apply(case_tac ea) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b ea edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b edge_srca edge_popa edge_pusha edge_trga)(*strict*) apply(erule disjE)+ apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b edge_srca edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e' edge_srca edge_eventa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e' edge_srca edge_eventa edge_popaa edge_pushaa edge_trgaa edge_srcaa edge_eventaa edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b edge_srca edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e')(*strict*) apply(case_tac ea) apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e' edge_srca edge_eventa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(case_tac e') apply(rename_tac d n e h dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea e' edge_srca edge_eventa edge_popaa edge_pushaa edge_trgaa edge_srcaa edge_eventaa edge_popb edge_pushb edge_trgb)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa x5 x6 x7 a b edge_srca edge_popa edge_pusha edge_trga)(*strict*) apply(clarsimp) apply(rename_tac d n e h q2 dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea)(*strict*) apply(case_tac ea) apply(rename_tac d n e h q2 dM dD nD eM eD w wa edge_popa edge_pusha edge_trga ea edge_srca edge_eventa edge_popaa edge_pushaa edge_trgaa)(*strict*) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) 
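(* Roughly: the remaining conjuncts identify the sliced configurations via slice_conf_A and transfer the history-fragment-prefix ordering between the product automaton and M; the subset_trans steps below pass through ATS_History.history_fragment_prefixes of F_DPDA_DFA_PRODUCT M D, unfolding F_DPDA_DFA_PRODUCT__events_def and epda_effects_def. *)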
apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: slice_conf_A_def) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule conjI) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(simp add: slice_conf_A_def) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac t="ATS_History.history_fragment_prefixes epda_effects (@) M w2 = ATS_History.history_fragment_prefixes epda_effects (@) M w1" and s="ATS_History.history_fragment_prefixes epda_effects (@) M w1 \<subseteq> ATS_History.history_fragment_prefixes epda_effects (@) M w2 \<and> ATS_History.history_fragment_prefixes epda_effects (@) M w2 \<subseteq> ATS_History.history_fragment_prefixes epda_effects (@) M w1" in ssubst) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(erule disjE) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule disjI1) apply(rule_tac B="ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1" in subset_trans) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(thin_tac "ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1 \<subseteq> ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2") apply(simp add: F_DPDA_DFA_PRODUCT_def epdaHS.history_fragment_prefixes_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: epda_effects_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac B="ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2" in subset_trans) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(thin_tac "ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1 \<subseteq> ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2") apply(simp add: F_DPDA_DFA_PRODUCT_def epdaHS.history_fragment_prefixes_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: epda_effects_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(erule disjE) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule disjI2) apply(rule disjI1) apply(rule_tac B="ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2" in subset_trans) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(thin_tac "ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2 \<subseteq> ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1") apply(simp add: F_DPDA_DFA_PRODUCT_def epdaHS.history_fragment_prefixes_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: epda_effects_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac B="ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1" in subset_trans) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(thin_tac "ATS_History.history_fragment_prefixes 
epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2 \<subseteq> ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1") apply(simp add: F_DPDA_DFA_PRODUCT_def epdaHS.history_fragment_prefixes_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: epda_effects_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule disjI2) apply(rule disjI2) apply(rule conjI) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac B="ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1" in subset_trans) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(thin_tac "ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2 = ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1") apply(simp add: F_DPDA_DFA_PRODUCT_def epdaHS.history_fragment_prefixes_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: epda_effects_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac B="ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2" in subset_trans) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(thin_tac "ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2 = ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1") apply(simp add: F_DPDA_DFA_PRODUCT_def epdaHS.history_fragment_prefixes_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: epda_effects_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac B="ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2" in subset_trans) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(thin_tac "ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2 = ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1") apply(simp add: F_DPDA_DFA_PRODUCT_def epdaHS.history_fragment_prefixes_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: epda_effects_def) apply(clarsimp) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(rule_tac B="ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1" in subset_trans) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(force) apply(rename_tac d c1 c2 e1 e2 n w1 w2 e h s q1 q2 dM dD nD eM eD)(*strict*) apply(thin_tac "ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w2 = ATS_History.history_fragment_prefixes epda_effects (@) (F_DPDA_DFA_PRODUCT M D) w1") apply(simp add: F_DPDA_DFA_PRODUCT_def epdaHS.history_fragment_prefixes_def F_DPDA_DFA_PRODUCT__events_def) apply(simp add: epda_effects_def) apply(clarsimp) done theorem F_DPDA_DFA_PRODUCT__generates__DPDA: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> valid_dpda R" apply(simp (no_asm) add: valid_dpda_def) apply(rule context_conjI) apply(rule_tac M="M" and D="D" in F_DPDA_DFA_PRODUCT__produces__PDA) apply(force) apply(force) apply(force) apply(rule F_DPDA_DFA_PRODUCT__preserves__determinism) apply(force) apply(force) apply(force) apply(force) done theorem 
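(* Informal reading of the next result: absence of livelocks carries over from M to the product; a hypothetical livelocking derivation of F_DPDA_DFA_PRODUCT M D is projected with slice_edge_A / slice_conf_A to a derivation of M, contradicting \<not> epdaH_livelock M. *)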
F_DPDA_DFA_PRODUCT__preserves__no_epdaH_livelock: " valid_dpda M \<Longrightarrow> \<not> epdaH_livelock M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> \<not> epdaH_livelock R" apply(subgoal_tac "X" for X) prefer 2 apply(rule_tac M="M" and D="D" in F_DPDA_DFA_PRODUCT__produces__PDA) apply(force) apply(force) apply(force) apply(simp add: epdaH_livelock_def) apply(clarsimp) apply(rename_tac d N)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d N)(*strict*) prefer 2 apply(rule F_DPDA_DFA_PRODUCT__reflects__derivation_initial2) apply(rename_tac d N)(*strict*) apply(force) apply(rename_tac d N)(*strict*) apply(force) apply(rename_tac d N)(*strict*) apply(force) apply(rename_tac d N)(*strict*) apply(force) apply(rename_tac d N)(*strict*) apply(force) apply(rename_tac d N)(*strict*) apply(erule_tac x="(\<lambda>n. case d n of None \<Rightarrow> None | Some (pair e c) \<Rightarrow> Some (pair (case e of None \<Rightarrow> None | Some e \<Rightarrow> Some (slice_edge_A e)) (slice_conf_A c)))" in allE) apply(rename_tac d N)(*strict*) apply(clarsimp) apply(erule disjE) apply(rename_tac d N)(*strict*) apply(clarsimp) apply(rename_tac d N n)(*strict*) apply(case_tac "d n") apply(rename_tac d N n)(*strict*) apply(clarsimp) apply(erule_tac x="n" in allE) apply(force) apply(rename_tac d N n a)(*strict*) apply(clarsimp) apply(case_tac a) apply(rename_tac d N n a option b)(*strict*) apply(force) apply(rename_tac d N)(*strict*) apply(erule_tac x="N" and P="\<lambda>N. \<exists>n\<ge>N. epdaH_conf_history (the (get_configuration (case_option None (case_derivation_configuration (\<lambda>e c. Some (pair (case e of None \<Rightarrow> None | Some e \<Rightarrow> Some (slice_edge_A e)) (slice_conf_A c)))) (d n)))) \<noteq> epdaH_conf_history (the (get_configuration (case_option None (case_derivation_configuration (\<lambda>e c. Some (pair (case e of None \<Rightarrow> None | Some e \<Rightarrow> Some (slice_edge_A e)) (slice_conf_A c)))) (d N))))" in allE) apply(rename_tac d N)(*strict*) apply(clarsimp) apply(rename_tac d N n)(*strict*) apply(erule_tac x="N" in allE') apply(erule_tac x="n" in allE') apply(erule exE)+ apply(rename_tac d N n y ya)(*strict*) apply(case_tac y) apply(rename_tac d N n y ya option b)(*strict*) apply(case_tac ya) apply(rename_tac d N n y ya option b optiona ba)(*strict*) apply(clarsimp) apply(rename_tac d N n option b optiona ba)(*strict*) apply(simp add: get_configuration_def) apply(rule_tac x="n" in exI) apply(clarsimp) apply(simp add: slice_conf_A_def) done definition FUN_DPDA_DFA_PRODUCT__SpecInput1 :: " ('statesA, 'event, 'stackA) epda \<Rightarrow> ('statesB, 'event, 'stackB) epda \<Rightarrow> bool" where "FUN_DPDA_DFA_PRODUCT__SpecInput1 G D \<equiv> valid_dpda G \<and> valid_dfa D" definition epdaH_reflection_to_DFA_exists :: " ('statesA, 'event, 'stackA) epda \<Rightarrow> ('statesB, 'event, 'stackB) epda \<Rightarrow> ('statesA \<Rightarrow> 'statesB) \<Rightarrow> bool" where "epdaH_reflection_to_DFA_exists R D F \<equiv> \<forall>d n c. epdaH.derivation_initial R d \<longrightarrow> get_configuration (d n) = Some c \<longrightarrow> (\<exists>d' m. 
epdaH.derivation_initial D d' \<and> get_configuration (d' m) = Some \<lparr>epdaH_conf_state = F (epdaH_conf_state c), epdaH_conf_history = epdaH_conf_history c, epdaH_conf_stack = [epda_box D]\<rparr>)" definition FUN_DPDA_DFA_PRODUCT__SpecOutput1 :: " ('statesA, 'event, 'stackA) epda \<Rightarrow> ('statesB, 'event, 'stackB) epda \<Rightarrow> (('statesA, 'statesB) DT_tuple2, 'event, 'stackA) epda \<Rightarrow> bool" where "FUN_DPDA_DFA_PRODUCT__SpecOutput1 Gi D Go \<equiv> valid_dpda Go \<and> epdaH.marked_language Go = (epdaH.marked_language Gi) \<inter> (epdaH.marked_language D) \<and> epdaH.unmarked_language Go = (epdaH.unmarked_language Gi) \<inter> (epdaH.unmarked_language D) \<and> epdaH_reflection_to_DFA_exists Go D sel_tuple2_2" theorem F_DPDA_DFA_PRODUCT__SOUND1: " FUN_DPDA_DFA_PRODUCT__SpecInput1 Gi D \<Longrightarrow> FUN_DPDA_DFA_PRODUCT__SpecOutput1 Gi D (F_DPDA_DFA_PRODUCT Gi D)" apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecOutput1_def FUN_DPDA_DFA_PRODUCT__SpecInput1_def) apply(clarsimp) apply(rule context_conjI) apply(rule F_DPDA_DFA_PRODUCT__generates__DPDA) prefer 3 apply(force) apply(force) apply(force) apply(rule context_conjI) apply(rule F_DPDA_DFA_PRODUCT__relates__marked_language) prefer 3 apply(force) apply(force) apply(force) apply(rule context_conjI) apply(rule F_DPDA_DFA_PRODUCT__relates__unmarked_language) prefer 3 apply(force) apply(force) apply(force) apply(simp add: epdaH_reflection_to_DFA_exists_def) apply(clarsimp) apply(rename_tac d n c)(*strict*) apply(simp add: get_configuration_def) apply(case_tac "d n") apply(rename_tac d n c)(*strict*) apply(clarsimp) apply(rename_tac d n c a)(*strict*) apply(clarsimp) apply(case_tac a) apply(rename_tac d n c a option b)(*strict*) apply(clarsimp) apply(rename_tac d n c option)(*strict*) apply(case_tac c) apply(rename_tac d n c option epdaH_conf_statea epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(rename_tac e q h s) apply(rename_tac d n c e q h s)(*strict*) apply(case_tac q) apply(rename_tac d n c e q h s a b)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d n c e q h s a b)(*strict*) prefer 2 apply(rule_tac n="n" and M="Gi" and D="D" in F_DPDA_DFA_PRODUCT__reflects__derivation_initial) apply(rename_tac d n c e q h s a b)(*strict*) prefer 3 apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(simp add: valid_dpda_def) apply(rename_tac d n c e q h s a b)(*strict*) apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h s a b dM dD nD eM eD)(*strict*) apply(rule_tac x="dD" in exI) apply(clarsimp) apply(rule_tac x="nD" in exI) apply(clarsimp) done lemma F_DPDA_DFA_PRODUCT__preserves__nonblockingness_language: " valid_dpda G \<Longrightarrow> valid_dfa D \<Longrightarrow> nonblockingness_language (epdaH.unmarked_language G) (epdaH.marked_language G) \<Longrightarrow> R = F_DPDA_DFA_PRODUCT G D \<Longrightarrow> epdaH.marked_language G \<subseteq> epdaH.marked_language D \<Longrightarrow> epdaH.unmarked_language G \<subseteq> epdaH.unmarked_language D \<Longrightarrow> nonblockingness_language (epdaH.unmarked_language R) (epdaH.marked_language R)" apply(subgoal_tac "X" for X) prefer 2 apply(rule F_DPDA_DFA_PRODUCT__SOUND1) apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecInput1_def) apply(clarsimp) apply(rule_tac t="epdaH.unmarked_language (F_DPDA_DFA_PRODUCT G D)" and 
s="epdaH.unmarked_language G \<inter> epdaH.unmarked_language D" in ssubst) apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecOutput1_def) apply(rule_tac t="epdaH.marked_language (F_DPDA_DFA_PRODUCT G D)" and s="epdaH.marked_language G \<inter> epdaH.marked_language D" in ssubst) apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecOutput1_def) apply(rule_tac t="epdaH.unmarked_language G \<inter> epdaH.unmarked_language D" and s="epdaH.unmarked_language G" in ssubst) apply(force) apply(rule_tac t="epdaH.marked_language G \<inter> epdaH.marked_language D" and s="epdaH.marked_language G" in ssubst) apply(force) apply(force) done lemma F_DPDA_DFA_PRODUCT__preserves__controllable_sublanguage: " valid_dpda G \<Longrightarrow> valid_dfa D \<Longrightarrow> nonblockingness_language (epdaH.unmarked_language G) (epdaH.marked_language G) \<Longrightarrow> R = F_DPDA_DFA_PRODUCT G D \<Longrightarrow> epdaH.marked_language G \<subseteq> epdaH.marked_language D \<Longrightarrow> epdaH.unmarked_language G \<subseteq> epdaH.unmarked_language D \<Longrightarrow> (controllable_sublanguage (epdaH.unmarked_language G) (alphabet_to_language \<Sigma>UC) (epdaH.unmarked_language D) (epdaH.unmarked_language G) \<longleftrightarrow> controllable_sublanguage (epdaH.unmarked_language R) (alphabet_to_language \<Sigma>UC) (epdaH.unmarked_language D) (epdaH.unmarked_language R))" apply(subgoal_tac "X" for X) prefer 2 apply(rule F_DPDA_DFA_PRODUCT__SOUND1) apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecInput1_def) apply(clarsimp) apply(rule_tac t="epdaH.unmarked_language (F_DPDA_DFA_PRODUCT G D)" and s="epdaH.unmarked_language G \<inter> epdaH.unmarked_language D" in ssubst) apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecOutput1_def) apply(rule_tac t="epdaH.marked_language (F_DPDA_DFA_PRODUCT G D)" and s="epdaH.marked_language G \<inter> epdaH.marked_language D" in ssubst) apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecOutput1_def) apply(rule_tac t="epdaH.unmarked_language G \<inter> epdaH.unmarked_language D" and s="epdaH.unmarked_language G" in ssubst) apply(force) apply(rule_tac t="epdaH.marked_language G \<inter> epdaH.marked_language D" and s="epdaH.marked_language G" in ssubst) apply(force) apply(force) done definition FUN_DPDA_DFA_PRODUCT__SpecInput2 :: " ('stateA, 'event, 'stackA) epda \<Rightarrow> ('stateB, 'event, 'stackB) epda \<Rightarrow> bool" where "FUN_DPDA_DFA_PRODUCT__SpecInput2 G D \<equiv> valid_dpda G \<and> valid_dfa D \<and> nonblockingness_language (epdaH.unmarked_language G) (epdaH.marked_language G) \<and> epdaH.marked_language G \<subseteq> epdaH.marked_language D \<and> epdaH.unmarked_language G \<subseteq> epdaH.unmarked_language D \<and> \<not> epdaH_livelock G" definition FUN_DPDA_DFA_PRODUCT__SpecOutput2 :: " ('stateA, 'event, 'stackA) epda \<Rightarrow> (('stateB, 'event, 'stackB) epda \<times> 'event set) \<Rightarrow> (('stateA, 'stateB) DT_tuple2, 'event, 'stackA) epda \<Rightarrow> bool" where "FUN_DPDA_DFA_PRODUCT__SpecOutput2 Gi X Go \<equiv> case X of (P, \<Sigma>UC) \<Rightarrow> valid_dpda Go \<and> epdaH.marked_language Go = (epdaH.marked_language Gi) \<inter> (epdaH.marked_language P) \<and> epdaH.unmarked_language Go = (epdaH.unmarked_language Gi) \<inter> (epdaH.unmarked_language P) \<and> nonblockingness_language (epdaH.unmarked_language Go) (epdaH.marked_language Go) \<and> epdaH_reflection_to_DFA_exists Go P sel_tuple2_2 \<and> \<not> epdaH_livelock Go \<and> (controllable_language (epdaH.unmarked_language Gi) \<Sigma>UC (epdaH.unmarked_language P) \<longleftrightarrow> controllable_language 
(epdaH.unmarked_language Go) \<Sigma>UC (epdaH.unmarked_language P))" theorem F_DPDA_DFA_PRODUCT__SOUND2: " FUN_DPDA_DFA_PRODUCT__SpecInput2 Gi P \<Longrightarrow> FUN_DPDA_DFA_PRODUCT__SpecOutput2 Gi (P, \<Sigma>UC) (F_DPDA_DFA_PRODUCT Gi P)" apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecInput2_def FUN_DPDA_DFA_PRODUCT__SpecOutput2_def) apply(clarsimp) apply(rule context_conjI) apply(rule F_DPDA_DFA_PRODUCT__generates__DPDA) prefer 3 apply(force) apply(force) apply(force) apply(rule context_conjI) apply(rule F_DPDA_DFA_PRODUCT__relates__marked_language) prefer 3 apply(force) apply(force) apply(force) apply(rule context_conjI) apply(rule F_DPDA_DFA_PRODUCT__relates__unmarked_language) apply(force) apply(force) apply(force) apply(rule context_conjI) apply(rule F_DPDA_DFA_PRODUCT__preserves__nonblockingness_language) apply(force) apply(force) apply(force) apply(force) apply(force) apply(force) apply(rule context_conjI) apply(simp add: epdaH_reflection_to_DFA_exists_def) apply(clarsimp) apply(rename_tac d n c)(*strict*) apply(simp add: get_configuration_def) apply(case_tac "d n") apply(rename_tac d n c)(*strict*) apply(clarsimp) apply(rename_tac d n c a)(*strict*) apply(clarsimp) apply(case_tac a) apply(rename_tac d n c a option b)(*strict*) apply(clarsimp) apply(rename_tac d n c option)(*strict*) apply(case_tac c) apply(rename_tac d n c option epdaH_conf_statea epdaH_conf_historya epdaH_conf_stack)(*strict*) apply(rename_tac e q h s) apply(rename_tac d n c e q h s)(*strict*) apply(case_tac q) apply(rename_tac d n c e q h s a b)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac d n c e q h s a b)(*strict*) prefer 2 apply(rule_tac n="n" and M="Gi" and D="P" in F_DPDA_DFA_PRODUCT__reflects__derivation_initial) apply(rename_tac d n c e q h s a b)(*strict*) prefer 3 apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(simp add: valid_dpda_def) apply(rename_tac d n c e q h s a b)(*strict*) apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(force) apply(rename_tac d n c e q h s a b)(*strict*) apply(clarsimp) apply(rename_tac d n e h s a b dM dD nD eM eD)(*strict*) apply(rule_tac x="dD" in exI) apply(clarsimp) apply(rule_tac x="nD" in exI) apply(clarsimp) apply(rule conjI) apply(rule F_DPDA_DFA_PRODUCT__preserves__no_epdaH_livelock) prefer 4 apply(force) apply(force) apply(force) apply(force) apply(rule_tac t="epdaH.unmarked_language (F_DPDA_DFA_PRODUCT Gi P)" and s="epdaH.unmarked_language Gi" in ssubst) apply(force) apply(force) done theorem F_DPDA_DFA_PRODUCT__SOUND3: " valid_dpda G \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT G D \<Longrightarrow> valid_dpda R \<and> epdaH.marked_language R = epdaH.marked_language G \<inter> epdaH.marked_language D \<and> epdaH.unmarked_language R = epdaH.unmarked_language G \<inter> epdaH.unmarked_language D \<and> epdaH_reflection_to_DFA_exists R D sel_tuple2_2 \<and> (\<not> epdaH_livelock G \<longrightarrow> \<not> epdaH_livelock R) \<and> (epdaH.marked_language G \<subseteq> epdaH.marked_language D \<longrightarrow> epdaH.unmarked_language G \<subseteq> epdaH.unmarked_language D \<longrightarrow> ((nonblockingness_language (epdaH.unmarked_language G) (epdaH.marked_language G) \<longrightarrow> nonblockingness_language (epdaH.unmarked_language R) (epdaH.marked_language R)) \<and> (controllable_language (epdaH.unmarked_language G) \<Sigma>UC (epdaH.unmarked_language D) \<longleftrightarrow> 
controllable_language (epdaH.unmarked_language R) \<Sigma>UC (epdaH.unmarked_language D))))" apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecInput2_def FUN_DPDA_DFA_PRODUCT__SpecOutput2_def) apply(subgoal_tac "X" for X) prefer 2 apply(rule F_DPDA_DFA_PRODUCT__SOUND1) apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecInput1_def) apply(clarsimp) apply(simp add: FUN_DPDA_DFA_PRODUCT__SpecOutput1_def) apply(clarsimp) apply(rule context_conjI) apply(rule impI) apply(rule F_DPDA_DFA_PRODUCT__preserves__no_epdaH_livelock) prefer 4 apply(force) apply(force) apply(force) apply(force) apply(rule impI) apply(rule impI) apply(rule_tac t="epdaH.unmarked_language G \<inter> epdaH.unmarked_language D" and s="epdaH.unmarked_language G" in ssubst) apply(force) apply(rule_tac t="epdaH.marked_language G \<inter> epdaH.marked_language D" and s="epdaH.marked_language G" in ssubst) apply(force) apply(force) done definition strip_plant :: " (('controller_state, 'plant_state) DT_tuple2, 'event, 'controller_stack) epda_step_label option \<Rightarrow> ('controller_state, 'event, 'controller_stack) epda_step_label option" where "strip_plant e \<equiv> (case e of None \<Rightarrow> None | Some e \<Rightarrow> Some \<lparr>edge_src = sel_tuple2_1 (edge_src e), edge_event = edge_event e, edge_pop = edge_pop e, edge_push = edge_push e, edge_trg = sel_tuple2_1 (edge_trg e)\<rparr>)" lemma F_DPDA_DFA_PRODUCT__preserves__derivation_initial_strengthend: " valid_dpda M \<Longrightarrow> valid_dfa D \<Longrightarrow> R = F_DPDA_DFA_PRODUCT M D \<Longrightarrow> valid_pda R \<Longrightarrow> epdaH.derivation_initial D dD \<Longrightarrow> epdaH.derivation_initial M dM \<Longrightarrow> length h + ints = n \<Longrightarrow> dM n = Some (pair eM \<lparr>epdaH_conf_state = q1, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>) \<Longrightarrow> dD (length h) = Some (pair eD \<lparr>epdaH_conf_state = q2, epdaH_conf_history = h, epdaH_conf_stack = [epda_box D]\<rparr>) \<Longrightarrow> \<exists>dR eR. 
epdaH.derivation_initial R dR \<and> dR n = Some (pair eR \<lparr>epdaH_conf_state = cons_tuple2 q1 q2, epdaH_conf_history = h, epdaH_conf_stack = s\<rparr>) \<and> get_labels dM n = map strip_plant (get_labels dR n)" apply(induct n arbitrary: h q1 q2 s eM eD ints) apply(rename_tac h q1 q2 s eM eD ints)(*strict*) apply(clarsimp) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule_tac x="der1 \<lparr>epdaH_conf_state = cons_tuple2 q1 q2, epdaH_conf_history = [], epdaH_conf_stack = s\<rparr>" in exI) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule conjI) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule epdaH.derivation_initialI) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule epdaH.der1_is_derivation) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(clarsimp) apply(rename_tac q1 q2 s eM eD c)(*strict*) apply(simp add: get_configuration_def der1_def) apply(clarsimp) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(simp add: epdaH.derivation_initial_def) apply(clarsimp) apply(rename_tac q1 q2 s)(*strict*) apply(simp add: epdaH_initial_configurations_def epdaH_configurations_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__states_def) apply(clarsimp) apply(rename_tac q1 q2 s eM eD)(*strict*) apply(rule conjI) apply(rule_tac x="None" in exI) apply(simp add: der1_def) apply(simp add: get_labels_def) apply (metis (full_types) Suc_n_not_le_n le_0_eq nat_seq_in_interval) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(clarsimp) apply(subgoal_tac "X" for X) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) prefer 2 apply(rule_tac G="M" and d="dM" and n="n" and m="Suc n" in epdaH.step_detail_before_some_position) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(force) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(force) apply(rename_tac n h q1 q2 s eM eD ints)(*strict*) apply(clarsimp) apply(rename_tac n h q1 q2 s eD ints e1 e2 c1)(*strict*) apply(case_tac c1) apply(rename_tac n h q1 q2 s eD ints e1 e2 c1 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(clarsimp) apply(rename_tac n h q1 q2 s eD ints e1 e2 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(rename_tac qx hx sx) apply(rename_tac n h q1 q2 s eD ints e1 e2 qx hx sx)(*strict*) apply(case_tac e2) apply(rename_tac n h q1 q2 s eD ints e1 e2 qx hx sx edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(clarsimp) apply(rename_tac n h q1 q2 s eD ints e1 qx hx sx edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(rename_tac qs read pop push qt) apply(rename_tac n h q1 q2 s eD ints e1 qx hx sx qs read pop push qt)(*strict*) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(rename_tac n q2 eD ints e1 hx qs read pop push qt w)(*strict*) apply(erule_tac x="hx" in meta_allE) apply(erule_tac x="qs" in meta_allE) apply(clarsimp) apply(case_tac read) apply(rename_tac n q2 eD ints e1 hx qs read pop push qt w)(*strict*) apply(clarsimp) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w)(*strict*) apply(simp add: option_to_list_def) apply(erule_tac x="q2" in meta_allE) apply(erule_tac x="pop@w" in meta_allE) apply(clarsimp) apply(erule_tac x="e1" in meta_allE) apply(erule_tac x="eD" in meta_allE) apply(clarsimp) apply(case_tac ints) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w)(*strict*) apply(clarsimp) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(subgoal_tac "X" for X) apply(rename_tac n q2 eD e1 
hx qs pop push qt w)(*strict*) prefer 2 apply(rule_tac G="M" and d="dM" and n="n" in epda_at_most_one_symbol_per_step) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(simp add: valid_dpda_def valid_pda_def) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(force) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(force) apply(rename_tac n q2 eD e1 hx qs pop push qt w)(*strict*) apply(clarsimp) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w nat)(*strict*) apply(clarsimp) apply(rename_tac q2 eD e1 hx qs pop push qt w nat)(*strict*) apply(erule_tac x="nat" in meta_allE) apply(clarsimp) apply(rename_tac q2 eD e1 hx qs pop push qt w nat dR eR)(*strict*) apply(rename_tac ints dR eR) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(subgoal_tac "\<lparr>epdaH_conf_state = cons_tuple2 qs q2, epdaH_conf_history = hx, epdaH_conf_stack = pop @ w\<rparr> \<in> epdaH_configurations (F_DPDA_DFA_PRODUCT M D)") apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule_tac x="derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs q2, epdaH_conf_history = hx, epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs q2, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt q2\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt q2, epdaH_conf_history = hx, epdaH_conf_stack = push @ w\<rparr>) (length hx + ints)" in exI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule conjI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation_initial) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: valid_dfa_def valid_dpda_def valid_pda_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(force) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.der2_is_derivation) apply(simp add: epdaH_step_relation_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def) apply(rule conjI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule disjI2) apply(simp add: F_DPDA_DFA_PRODUCT__edges_empty_def) apply(rule_tac x="\<lparr>edge_src = qs, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = qt\<rparr>" in exI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(clarsimp) apply(simp add: epdaH_configurations_def F_DPDA_DFA_PRODUCT__states_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: option_to_list_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: derivation_append_def der2_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule conjI) apply(rule_tac x="Some \<lparr>edge_src = cons_tuple2 qs q2, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt q2\<rparr>" in exI) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: derivation_append_def der2_def) apply(rule_tac t="get_labels dM (Suc (length hx + ints))" and s="get_labels dM (length hx + ints) @ [Some \<lparr>edge_src = qs, edge_event = None, edge_pop = pop, edge_push = 
push, edge_trg = qt\<rparr>]" in ssubst) apply(rule get_labels__seperate_last) apply(force) apply(rule_tac t="get_labels (derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs q2, epdaH_conf_history = hx, epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs q2, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt q2\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt q2, epdaH_conf_history = hx, epdaH_conf_stack = push @ w\<rparr>) (length hx + ints)) (Suc (length hx + ints))" and s="get_labels (derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs q2, epdaH_conf_history = hx, epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs q2, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt q2\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt q2, epdaH_conf_history = hx, epdaH_conf_stack = push @ w\<rparr>) (length hx + ints)) ((length hx + ints)) @[Some \<lparr>edge_src = cons_tuple2 qs q2, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt q2\<rparr>]" in ssubst) apply(rule_tac c="\<lparr>epdaH_conf_state = cons_tuple2 qt q2, epdaH_conf_history = hx, epdaH_conf_stack = push @ w\<rparr>" in get_labels__seperate_last) apply(simp add: derivation_append_def der2_def) apply(clarsimp) apply(rule conjI) prefer 2 apply(simp add: strip_plant_def) apply(rule_tac t="(get_labels (derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs q2, epdaH_conf_history = hx, epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs q2, edge_event = None, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt q2\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt q2, epdaH_conf_history = hx, epdaH_conf_stack = push @ w\<rparr>) (length hx + ints)) (length hx + ints))" in ssubst) apply(rule get_labels__derivation_append__trivial) apply(force) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.belongs_configurations) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(rule epdaH.derivation_initial_belongs) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(simp add: valid_pda_def) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(force) apply(rename_tac q2 eD e1 hx qs pop push qt w ints dR eR)(*strict*) apply(force) apply(rename_tac n q2 eD ints e1 hx qs read pop push qt w a)(*strict*) apply(clarsimp) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(simp add: option_to_list_def) apply(subgoal_tac "X" for X) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) prefer 2 apply(rule_tac G="D" and d="dD" and n="length hx" and m="Suc (length hx)" in epdaH.step_detail_before_some_position) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(force) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(force) apply(rename_tac n q2 eD ints e1 hx qs pop push qt w a)(*strict*) apply(clarsimp) apply(rename_tac q2 ints e1 hx qs pop push qt w a e1a e2 c1)(*strict*) apply(case_tac c1) apply(rename_tac q2 ints e1 hx qs pop push qt w a e1a e2 c1 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(clarsimp) apply(rename_tac q2 ints e1 hx qs pop push qt w a e1a e2 epdaH_conf_state epdaH_conf_history epdaH_conf_stack)(*strict*) apply(rename_tac q2' h' s') 
apply(rename_tac q2 ints e1 hx qs pop push qt w a e1a e2 q2' h' s')(*strict*) apply(erule_tac x="q2'" in meta_allE) apply(clarsimp) apply(erule_tac x="pop@w" in meta_allE) apply(clarsimp) apply(erule_tac x="e1" in meta_allE) apply(clarsimp) apply(erule_tac x="e1a" in meta_allE) apply(clarsimp) apply(erule_tac x="ints" in meta_allE) apply(clarsimp) apply(simp add: epdaH_step_relation_def) apply(clarsimp) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) apply(subgoal_tac "(\<forall>e\<in> epda_delta D. edge_event e \<noteq> None \<and> edge_pop e = [epda_box D] \<and> edge_push e = [epda_box D])") apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) prefer 2 apply(unfold valid_dfa_def)[1] apply(erule conjE)+ apply(force) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) apply(erule_tac x="e2" in ballE) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) prefer 2 apply(force) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa)(*strict*) apply(clarify) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa y)(*strict*) apply(case_tac e2) apply(rename_tac ints e1 hx qs pop push qt w a e1a e2 h' wa y edge_srca edge_eventa edge_popa edge_pusha edge_trga)(*strict*) apply(clarify) apply(simp) apply(rename_tac ints e1 hx qs pop push qt w a e1a h' wa y edge_src edge_event edge_pop edge_push edge_trg)(*strict*) apply(simp add: option_to_list_def) apply(clarify) apply(rename_tac ints e1 hx qs pop push qt w a e1a h' wa y edge_src edge_event edge_pop edge_push edge_trg dR eR)(*strict*) apply(simp) apply(rename_tac ints e1 qs pop push qt w e1a h' y edge_src edge_trg dR eR)(*strict*) apply(rename_tac qxs qxt dR eR) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule_tac x="derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs qxs, epdaH_conf_history = h', epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs qxs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt qxt\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt qxt, epdaH_conf_history = h' @ [y], epdaH_conf_stack = push @ w\<rparr>) (length h' + ints)" in exI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule conjI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation_initial) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(simp add: valid_dfa_def valid_dpda_def valid_pda_def) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(force) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule epdaH.derivation_append_preserves_derivation) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule epdaH.derivation_initial_is_derivation) apply(force) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule epdaH.der2_is_derivation) apply(simp add: epdaH_step_relation_def F_DPDA_DFA_PRODUCT_def F_DPDA_DFA_PRODUCT__edges_def) apply(simp add: option_to_list_def) apply(rule disjI1) apply(simp add: F_DPDA_DFA_PRODUCT__edges_execute_def) apply(rule_tac x="\<lparr>edge_src = qs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = qt\<rparr>" in exI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(clarsimp) apply(rule_tac x="\<lparr>edge_src = qxs, edge_event = Some y, 
edge_pop = [epda_box D], edge_push = [epda_box D], edge_trg = qxt\<rparr>" in exI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(clarsimp) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(simp add: derivation_append_def der2_def) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(rule conjI) apply(rule_tac x="Some \<lparr>edge_src = cons_tuple2 qs qxs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt qxt\<rparr>" in exI) apply(rename_tac ints e1 qs pop push qt w e1a h' y qxs qxt dR eR)(*strict*) apply(simp add: derivation_append_def der2_def) apply(rule_tac t="get_labels dM (Suc (length h' + ints))" and s="get_labels dM (length h' + ints) @ [Some \<lparr>edge_src = qs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = qt\<rparr>]" in ssubst) apply(rule get_labels__seperate_last) apply(force) apply(rule_tac t="(get_labels (derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs qxs, epdaH_conf_history = h', epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs qxs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt qxt\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt qxt, epdaH_conf_history = h' @ [y], epdaH_conf_stack = push @ w\<rparr>) (length h' + ints)) (Suc (length h' + ints)))" and s="(get_labels (derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs qxs, epdaH_conf_history = h', epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs qxs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt qxt\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt qxt, epdaH_conf_history = h' @ [y], epdaH_conf_stack = push @ w\<rparr>) (length h' + ints)) ( (length h' + ints))) @[Some \<lparr>edge_src = cons_tuple2 qs qxs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt qxt\<rparr>]" in ssubst) apply(rule_tac c="\<lparr>epdaH_conf_state = cons_tuple2 qt qxt, epdaH_conf_history = h' @ [y], epdaH_conf_stack = push @ w\<rparr>" in get_labels__seperate_last) apply(simp add: derivation_append_def der2_def) apply(clarsimp) apply(rule conjI) prefer 2 apply(simp add: strip_plant_def) apply(rule_tac t="(get_labels (derivation_append dR (der2 \<lparr>epdaH_conf_state = cons_tuple2 qs qxs, epdaH_conf_history = h', epdaH_conf_stack = pop @ w\<rparr> \<lparr>edge_src = cons_tuple2 qs qxs, edge_event = Some y, edge_pop = pop, edge_push = push, edge_trg = cons_tuple2 qt qxt\<rparr> \<lparr>epdaH_conf_state = cons_tuple2 qt qxt, epdaH_conf_history = h' @ [y], epdaH_conf_stack = push @ w\<rparr>) (length h' + ints)) (length h' + ints))" in ssubst) apply(rule get_labels__derivation_append__trivial) apply(force) done lemma F_DPDA_DFA_PRODUCT__epda_to_des: " valid_dfa P \<Longrightarrow> valid_dpda G \<Longrightarrow> inf (epda_to_des G) (epda_to_des P) = epda_to_des (F_DPDA_DFA_PRODUCT G P)" apply(simp add: epda_to_des_def inf_DES_ext_def infDES_def) apply (metis FUN_DPDA_DFA_PRODUCT__SpecInput1_def FUN_DPDA_DFA_PRODUCT__SpecOutput1_def F_DPDA_DFA_PRODUCT__SOUND1) done lemma no_epdaH_livelock_implies_F_DPDA_DFA_PRODUCT__epdaH_livelock_freedom: " valid_dfa P \<Longrightarrow> valid_dpda SOL \<Longrightarrow> \<not> epdaH_livelock SOL \<Longrightarrow> epdaH_livelock_freedom (F_DPDA_DFA_PRODUCT SOL P)" apply(subgoal_tac "X" for X) prefer 2 apply(rule F_DPDA_DFA_PRODUCT__preserves__no_epdaH_livelock) apply(force) apply(force) 
apply(force) apply(force) apply(simp add: epdaH_livelock_freedom_def) apply(thin_tac "\<not> epdaH_livelock SOL") apply (metis F_DPDA_DFA_PRODUCT__generates__DPDA valid_pda_to_valid_epda epdaH_has_livelock_implies_livelock valid_dpda_to_valid_pda) done lemma F_DPDA_DFA_PRODUCT__Nonblockingness_branching: " valid_dpda S \<Longrightarrow> valid_dfa P \<Longrightarrow> DES_nonblockingness (epda_to_des SOL) \<Longrightarrow> DES_controllability \<Sigma>UC (epda_to_des P) (epda_to_des SOL) \<Longrightarrow> valid_dpda SOL \<Longrightarrow> inf (epda_to_des SOL) (epda_to_des P) = epda_to_des (F_DPDA_DFA_PRODUCT SOL P) \<Longrightarrow> epda_to_des (F_DPDA_DFA_PRODUCT SOL P) \<in> SCP_Closed_Loop_Satisfactory_Direct (epda_to_des P) (inf (epda_to_des P) (epda_to_des S)) \<Sigma>UC \<Longrightarrow> ATS_Language0.Nonblockingness_branching epdaH_configurations epdaH_initial_configurations epda_step_labels epdaH_step_relation epdaH_marking_condition (F_DPDA_DFA_PRODUCT SOL P)" apply(simp add: SCP_Closed_Loop_Satisfactory_Direct_def) apply(clarsimp) apply(thin_tac "DES_specification_satisfied (inf (epda_to_des P) (epda_to_des S)) (epda_to_des (F_DPDA_DFA_PRODUCT SOL P))") apply(thin_tac "DES_controllability \<Sigma>UC (epda_to_des P) (epda_to_des (F_DPDA_DFA_PRODUCT SOL P))") apply(thin_tac "DES_controllability \<Sigma>UC (epda_to_des P) (epda_to_des SOL)") apply(thin_tac "epda_to_des (F_DPDA_DFA_PRODUCT SOL P) \<le> epda_to_des P") apply(thin_tac "DES_nonblockingness (epda_to_des SOL)") apply(thin_tac "valid_dpda S") apply(subgoal_tac "valid_dpda (F_DPDA_DFA_PRODUCT SOL P)") prefer 2 apply (metis F_DPDA_DFA_PRODUCT__generates__DPDA) apply(thin_tac "inf (epda_to_des SOL) (epda_to_des P) = epda_to_des (F_DPDA_DFA_PRODUCT SOL P)") apply(thin_tac "IsDES (epda_to_des (F_DPDA_DFA_PRODUCT SOL P))") apply(simp add: DES_nonblockingness_def des_langUM_def des_langM_def epda_to_des_def) apply(thin_tac "valid_dfa P") apply(thin_tac "valid_dpda SOL") apply(metis epdaH_operational_Nonblockingness_from_language_Nonblockingness epda_inter_semantic_relationship valid_dpda_def valid_pda_def) done lemma epdaHNonblockingness_branching__to__epdaH_deadlock_freedom: " valid_dfa P \<Longrightarrow> valid_dpda SOL \<Longrightarrow> ATS_Language0.Nonblockingness_branching epdaH_configurations epdaH_initial_configurations epda_step_labels epdaH_step_relation epdaH_marking_condition (F_DPDA_DFA_PRODUCT SOL P) \<Longrightarrow> epdaH_deadlock_freedom (F_DPDA_DFA_PRODUCT SOL P)" apply(subgoal_tac "valid_dpda (F_DPDA_DFA_PRODUCT SOL P)") prefer 2 apply (metis F_DPDA_DFA_PRODUCT__generates__DPDA) apply(thin_tac "valid_dfa P") apply(thin_tac "valid_dpda SOL") apply(simp add: epdaH_deadlock_freedom_def epdaH.Nonblockingness_branching_def) apply(clarsimp) apply(erule_tac x="d" in allE) apply(clarsimp) apply(erule_tac x="n" in allE) apply(clarsimp) apply(case_tac x) apply(clarsimp) apply(simp add: epdaH_marking_condition_def) apply(clarsimp) apply(subgoal_tac "i\<le>n") prefer 2 apply(rule_tac M="(F_DPDA_DFA_PRODUCT SOL P)" and d="derivation_append d dc n" in epdaH.allPreMaxDomSome_prime) apply (metis (full_types) epdaH.derivation_append_from_derivation_append_fit epdaH.derivation_initial_is_derivation valid_dpda_def valid_pda_def) apply(force) apply (metis concat_has_max_dom monoid_add_class.add.right_neutral) apply(case_tac "i=n") apply(clarsimp) apply(erule_tac x="n" and P="%i. \<forall>e c. c \<in> epdaH_marking_configurations (F_DPDA_DFA_PRODUCT SOL P) \<longrightarrow> d i = Some (pair e c) \<longrightarrow> (\<exists>j>i. 
\<exists>e' c'. d j = Some (pair e' c') \<and> epdaH_string_state c \<noteq> epdaH_string_state c')" in allE) apply(clarsimp) apply(case_tac "c \<in> epdaH_marking_configurations (F_DPDA_DFA_PRODUCT SOL P)") apply(clarsimp) apply (metis epdaH.derivation_initial_is_derivation epdaH.noSomeAfterMaxDom not_None_eq) apply(clarsimp) apply(simp add: derivation_append_def) apply(subgoal_tac "i<n") prefer 2 apply(force) apply(clarsimp) apply(subgoal_tac "d i = Some (pair ea ca)") prefer 2 apply(simp add: derivation_append_def) apply(erule_tac x="i" and P="%i. \<forall> e c. c \<in> epdaH_marking_configurations (F_DPDA_DFA_PRODUCT SOL P) \<longrightarrow> d i = Some (pair e c) \<longrightarrow> (\<exists>j>i. \<exists>e' c'. d j = Some (pair e' c') \<and> epdaH_string_state c \<noteq> epdaH_string_state c')" in allE) apply(clarsimp) apply(subgoal_tac "j\<le>n") prefer 2 apply (metis (poly_guards_query) epdaH.derivation_initial_is_derivation epdaH.noSomeAfterMaxDom le_neq_implies_less nat_le_linear option.distinct(2)) apply(clarsimp) apply(erule_tac x="j" in allE) apply(simp add: derivation_append_def) apply(clarsimp) apply(rename_tac nc) apply(subgoal_tac "X" for X) prefer 2 apply(rule_tac G="F_DPDA_DFA_PRODUCT SOL P" and n="n" and m="n+Suc nc" and d="derivation_append d dc n" in epdaH.step_detail_before_some_position) apply(simp add: epdaH.derivation_initial_def) apply (metis epdaH.derivation_append_from_derivation_append_fit valid_dpda_def valid_pda_def) apply(simp add: derivation_append_def maximum_of_domain_def) apply(force) apply(clarsimp) apply(simp add: derivation_append_def maximum_of_domain_def) apply(clarsimp) apply(rule_tac x="e2" in exI) apply(rule_tac x="c2" in exI) apply(clarsimp) done end
State Before: α : Type u_1 β : Type ?u.364270 γ : Type ?u.364273 σ : Type ?u.364276 inst✝³ : Primcodable α inst✝² : Primcodable β inst✝¹ : Primcodable γ inst✝ : Primcodable σ n : ℕ a : Vector α n × Fin n ⊢ List.get? (Vector.toList a.fst) ↑a.snd = some (Vector.get a.fst a.snd) State After: α : Type u_1 β : Type ?u.364270 γ : Type ?u.364273 σ : Type ?u.364276 inst✝³ : Primcodable α inst✝² : Primcodable β inst✝¹ : Primcodable γ inst✝ : Primcodable σ n : ℕ a : Vector α n × Fin n ⊢ List.get? (Vector.toList a.fst) ↑a.snd = List.get? (Vector.toList a.fst) ↑(↑(Fin.cast (_ : n = List.length (Vector.toList a.fst))) a.snd) Tactic: rw [Vector.get_eq_get, ← List.get?_eq_get] State Before: α : Type u_1 β : Type ?u.364270 γ : Type ?u.364273 σ : Type ?u.364276 inst✝³ : Primcodable α inst✝² : Primcodable β inst✝¹ : Primcodable γ inst✝ : Primcodable σ n : ℕ a : Vector α n × Fin n ⊢ List.get? (Vector.toList a.fst) ↑a.snd = List.get? (Vector.toList a.fst) ↑(↑(Fin.cast (_ : n = List.length (Vector.toList a.fst))) a.snd) State After: no goals Tactic: rfl
/- Copyright (c) 2018 Mario Carneiro. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Mario Carneiro -/ /-! # Extra definitions on `option` This file defines more operations involving `option α`. Lemmas about them are located in other files under `data.option.`. Other basic operations on `option` are defined in the core library. -/ namespace option variables {α : Type*} {β : Type*} attribute [inline] option.is_some option.is_none /-- An elimination principle for `option`. It is a nondependent version of `option.rec_on`. -/ @[simp] protected def elim : option α → β → (α → β) → β | (some x) y f := f x | none y f := y instance has_mem : has_mem α (option α) := ⟨λ a b, b = some a⟩ @[simp] theorem mem_def {a : α} {b : option α} : a ∈ b ↔ b = some a := iff.rfl lemma mem_iff {a : α} {b : option α} : a ∈ b ↔ b = a := iff.rfl theorem is_none_iff_eq_none {o : option α} : o.is_none = tt ↔ o = none := ⟨option.eq_none_of_is_none, λ e, e.symm ▸ rfl⟩ theorem some_inj {a b : α} : some a = some b ↔ a = b := by simp /-- `o = none` is decidable even if the wrapped type does not have decidable equality. This is not an instance because it is not definitionally equal to `option.decidable_eq`. Try to use `o.is_none` or `o.is_some` instead. -/ @[inline] def decidable_eq_none {o : option α} : decidable (o = none) := decidable_of_decidable_of_iff (bool.decidable_eq _ _) is_none_iff_eq_none instance decidable_forall_mem {p : α → Prop} [decidable_pred p] : ∀ o : option α, decidable (∀ a ∈ o, p a) | none := is_true (by simp [false_implies_iff]) | (some a) := if h : p a then is_true $ λ o e, some_inj.1 e ▸ h else is_false $ mt (λ H, H _ rfl) h instance decidable_exists_mem {p : α → Prop} [decidable_pred p] : ∀ o : option α, decidable (∃ a ∈ o, p a) | none := is_false (λ ⟨a, ⟨h, _⟩⟩, by cases h) | (some a) := if h : p a then is_true $ ⟨_, rfl, h⟩ else is_false $ λ ⟨_, ⟨rfl, hn⟩⟩, h hn /-- Inhabited `get` function. Returns `a` if the input is `some a`, otherwise returns `default`. -/ @[reducible] def iget [inhabited α] : option α → α | (some x) := x | none := default α @[simp] theorem iget_some [inhabited α] {a : α} : (some a).iget = a := rfl /-- `guard p a` returns `some a` if `p a` holds, otherwise `none`. -/ def guard (p : α → Prop) [decidable_pred p] (a : α) : option α := if p a then some a else none /-- `filter p o` returns `some a` if `o` is `some a` and `p a` holds, otherwise `none`. -/ def filter (p : α → Prop) [decidable_pred p] (o : option α) : option α := o.bind (guard p) /-- Cast of `option` to `list `. Returns `[a]` if the input is `some a`, and `[]` if it is `none`. -/ def to_list : option α → list α | none := [] | (some a) := [a] @[simp] theorem mem_to_list {a : α} {o : option α} : a ∈ to_list o ↔ a ∈ o := by cases o; simp [to_list, eq_comm] /-- Two arguments failsafe function. Returns `f a b` if the inputs are `some a` and `some b`, and "does nothing" otherwise. 
-/ def lift_or_get (f : α → α → α) : option α → option α → option α | none none := none | (some a) none := some a -- get a | none (some b) := some b -- get b | (some a) (some b) := some (f a b) -- lift f instance lift_or_get_comm (f : α → α → α) [h : is_commutative α f] : is_commutative (option α) (lift_or_get f) := ⟨λ a b, by cases a; cases b; simp [lift_or_get, h.comm]⟩ instance lift_or_get_assoc (f : α → α → α) [h : is_associative α f] : is_associative (option α) (lift_or_get f) := ⟨λ a b c, by cases a; cases b; cases c; simp [lift_or_get, h.assoc]⟩ instance lift_or_get_idem (f : α → α → α) [h : is_idempotent α f] : is_idempotent (option α) (lift_or_get f) := ⟨λ a, by cases a; simp [lift_or_get, h.idempotent]⟩ instance lift_or_get_is_left_id (f : α → α → α) : is_left_id (option α) (lift_or_get f) none := ⟨λ a, by cases a; simp [lift_or_get]⟩ instance lift_or_get_is_right_id (f : α → α → α) : is_right_id (option α) (lift_or_get f) none := ⟨λ a, by cases a; simp [lift_or_get]⟩ /-- Lifts a relation `α → β → Prop` to a relation `option α → option β → Prop` by just adding `none ~ none`. -/ inductive rel (r : α → β → Prop) : option α → option β → Prop /-- If `a ~ b`, then `some a ~ some b` -/ | some {a b} : r a b → rel (some a) (some b) /-- `none ~ none` -/ | none : rel none none /-- Partial bind. If for some `x : option α`, `f : Π (a : α), a ∈ x → option β` is a partial function defined on `a : α` giving an `option β`, where `some a = x`, then `pbind x f h` is essentially the same as `bind x f` but is defined only when all `x = some a`, using the proof to apply `f`. -/ @[simp] def pbind : Π (x : option α), (Π (a : α), a ∈ x → option β) → option β | none _ := none | (some a) f := f a rfl /-- Partial map. If `f : Π a, p a → β` is a partial function defined on `a : α` satisfying `p`, then `pmap f x h` is essentially the same as `map f x` but is defined only when all members of `x` satisfy `p`, using the proof to apply `f`. -/ @[simp] def pmap {p : α → Prop} (f : Π (a : α), p a → β) : Π x : option α, (∀ a ∈ x, p a) → option β | none _ := none | (some a) H := some (f a (H a (mem_def.mpr rfl))) /-- Flatten an `option` of `option`, a specialization of `mjoin`. -/ @[simp] def join : option (option α) → option α := λ x, bind x id protected def {u v} traverse {F : Type u → Type v} [applicative F] {α β : Type*} (f : α → F β) : option α → F (option β) | none := pure none | (some x) := some <$> f x /- By analogy with `monad.sequence` in `init/category/combinators.lean`. -/ /-- If you maybe have a monadic computation in a `[monad m]` which produces a term of type `α`, then there is a naturally associated way to always perform a computation in `m` which maybe produces a result. -/ def {u v} maybe {m : Type u → Type v} [monad m] {α : Type u} : option (m α) → m (option α) | none := return none | (some fn) := some <$> fn /-- Map a monadic function `f : α → m β` over an `o : option α`, maybe producing a result. -/ def {u v w} mmap {m : Type u → Type v} [monad m] {α : Type w} {β : Type u} (f : α → m β) (o : option α) : m (option β) := (o.map f).maybe /-- A monadic analogue of `option.elim`. -/ def melim {α β : Type*} {m : Type* → Type*} [monad m] (x : m (option α)) (y : m β) (z : α → m β) : m β := x >>= λ o, option.elim o y z /-- A monadic analogue of `option.get_or_else`. -/ def mget_or_else {α : Type*} {m : Type* → Type*} [monad m] (x : m (option α)) (y : m α) : m α := melim x y pure end option
Formal statement is: lemma nullstellensatz_univariate: "(\<forall>x. poly p x = (0::complex) \<longrightarrow> poly q x = 0) \<longleftrightarrow> p dvd (q ^ (degree p)) \<or> (p = 0 \<and> q = 0)" Informal statement is: For univariate complex polynomials $p$ and $q$, every root of $p$ is also a root of $q$ if and only if $p$ divides $q^{\deg(p)}$, or $p = 0$ and $q = 0$.
-- --------------------------------------------------------------- [ Query.idr ] -- Module : Query.idr -- Copyright : (c) Jan de Muijnck-Hughes -- License : see LICENSE -- --------------------------------------------------------------------- [ EOH ] module Freyja.Fetch import Edda import Edda.Reader.Org import XML.DOM import XML.XPath import XML.XPath.Query import Freyja import Freyja.Error import Freyja.Utils %access export -- ----------------------------------------------------------------- [ Queries ] getNodes : (node : Document ty) -> {auto prf : CanQuery ty} -> (qstr : String) -> Extract (List XMLNode) getNodes doc qstr = Query.getNodes ExtractionError qstr doc getNode : (node : Document ty) -> {auto prf : CanQuery ty} -> (qstr : String) -> Extract XMLNode getNode doc qstr = Query.getNode ExtractionError qstr doc getNamedAttrs : (node : Document ty) -> {auto prf : CanQuery ty} -> (name : String) -> (qstr : String) -> Extract (List String) getNamedAttrs doc name qstr = Query.getNamedAttrs name ExtractionError qstr doc getNamedAttr : (node : Document ty) -> {auto prf : CanQuery ty} -> (name : String) -> (qstr : String) -> Extract String getNamedAttr doc name qstr = Query.getNamedAttr name ExtractionError qstr doc getTextNodes : (node : Document ty) -> {auto prf : CanQuery ty} -> (qstr : String) -> Extract (List String) getTextNodes doc qstr = Query.getTextNodes ExtractionError qstr doc getTextNode : (node : Document ty) -> {auto prf : CanQuery ty} -> (qstr : String) -> Extract String getTextNode doc qstr = Query.getTextNode ExtractionError qstr doc getCDataNode : (node : Document ty) -> {auto prf : CanQuery ty} -> (qstr : String) -> Extract String getCDataNode doc qstr = Query.getCDataNode ExtractionError qstr doc -- -------------------------------------------------- [ Markup Related Queries ] getEddaStrings : Document ty -> {auto prf : CanQuery ty} -> String -> Extract (List EddaString) getEddaStrings e qstr = do res <- getTextNodes ExtractionError qstr e ss <- mapEither getString res pure ss where getString : String -> Extract $ EddaString getString s = case readOrgInline s of Left err => Left $ TextConvError err Right str => Right str getEddaString : Document ty -> {auto prf : CanQuery ty} -> String -> Extract EddaString getEddaString e qstr = do res <- getTextNode ExtractionError qstr e case readOrgInline res of Left err => Left $ TextConvError err Right str => Right str getEddaBlock : Document ty -> {auto prf : CanQuery ty} -> String -> Extract EddaBody getEddaBlock e qstr = do res <- getTextNode ExtractionError qstr e case readOrgBody res of Left err => Left $ TextConvError err Right str => Right str -- --------------------------------------------------------------------- [ EOF ]
------------------------------------------------------------------------------ -- First-order logic with equality ------------------------------------------------------------------------------ {-# OPTIONS --exact-split #-} {-# OPTIONS --no-sized-types #-} {-# OPTIONS --no-universe-polymorphism #-} {-# OPTIONS --without-K #-} -- This module exported all the logical constants and the -- propositional equality. This module is re-exported by the "base" -- modules whose theories are defined on first-order logic + equality. module Common.FOL.FOL-Eq where -- First-order logic (without equality). open import Common.FOL.FOL public -- Propositional equality. open import Common.FOL.Relation.Binary.PropositionalEquality public
# Using the hybrid database with BACI price info. This notebook is an illustration of how BACI price data can be used in combination with the $C_\mathrm{u}$ matrix to hybridise ecoinvent 3.5. ```python import pandas as pd import numpy as np import sys # your path to pylcaio sys.path.append('/home/jakobs/Documents/IndEcol/OASES/pylcaio/src/') import pylcaio import gzip import pickle import pandas as pd import matplotlib.pyplot as plt import os import time import seaborn as sns from matplotlib import rc import matplotlib.cm as colormaps import feather import scipy from pypardiso import spsolve, factorized import scipy.stats as stats sys.path.append(os.path.realpath('../')) from price_variance_MC import generate_price_vector, do_price_MC ``` /home/jakobs/anaconda3/envs/pylcaio_paradiso/lib/python3.7/site-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. import pandas.util.testing as tm ```python from matplotlib import rc # sns.set() rc('font',**{'family':'DejaVu Sans', 'size':18}) rc('text', usetex=True) ``` ## First load in the data: - ecoinvent, exiobase and the Cu matrix are stored in one pickle file. - The price data is stored as a feather file. ```python # Define double counting correction strategy and year double_counting = 'STAM' # 'STAM' or 'binary' year = 2012 # read in pickle analysis_object = pylcaio.Analysis('/home/jakobs/Documents/IndEcol/OASES/pylcaio/src/Databases/ecoinvent3.5_exiobase3_{}/hybrid_system_{}.pickle'.format(year, double_counting)) ``` ### Read in correct characterisation matrix for EXIOBASE 3.6 Because we use a different version of EXIOBASE than was used in pylcaio, we need to read it in separately and use it here. We will use the GWP100 indicator, which is part of the ReCiPe 2008 method. ```python C_io = pd.read_excel('/home/jakobs/Documents/IndEcol/Data/EXIOBASE/exiobase3_6/Characterization_EB36.xlsx', index_col=0, header=0) C_io = C_io.T Impact_names_io = C_io.index.values C_io = scipy.sparse.csr_matrix(C_io) C_io ``` <36x1707 sparse matrix of type '<class 'numpy.int64'>' with 1806 stored elements in Compressed Sparse Row format> ```python %%time # Calculate the Leontief inverse for the LCA technology matrix. 
(Note the use of the IO convention here, hence we first subtract the A matrix from an identity matrix of appropriate size) A_lca = scipy.sparse.eye(analysis_object.A_ff.shape[0])-analysis_object.A_ff Lp = spsolve(A_lca, np.eye(A_lca.shape[0])) ``` CPU times: user 5min 14s, sys: 3.01 s, total: 5min 17s Wall time: 2min 47s ```python %%time # Calculate the Leontief inverse for EXIOBASE Lio = scipy.sparse.csr_matrix( scipy.linalg.solve(np.eye(analysis_object.A_io.shape[0])-analysis_object.A_io.todense(), np.eye(analysis_object.A_io.shape[0]))) ``` CPU times: user 1min 22s, sys: 3.59 s, total: 1min 26s Wall time: 50.3 s ```python # PRO contains the important meta data of the ecoinvent processes/activities PRO = pd.DataFrame.from_dict(analysis_object.PRO_f) print(PRO.columns) PRO ``` Index(['activityId', 'productId', 'activityName', 'ISIC', 'price', 'priceUnit', 'EcoSpoldCategory', 'geography', 'technologyLevel', 'macroEconomicScenario', 'productionVolume', 'productName', 'unitName', 'cpc', 'dry mass [kg]', 'wet mass [kg]', 'activityNameId', 'activityType', 'startDate', 'endDate', 'io_geography', 'ProductTypeName', 'priceless_scale_vector'], dtype='object') [16022 rows × 23 columns] pylcaio estimates scale factors for groups of activities for which the ecoinvent price information is deliberately not used or is missing. These factors effectively act as a price (also in terms of their unit). From the ecoinvent price info and these scale factors we create a new property 'effective_price'. ```python # In case the scale factor is available use that instead of the ecoinvent price. for index in PRO.index.values: if PRO.loc[index, 'priceless_scale_vector'] > 0: PRO.loc[index,'effective_price'] = PRO.loc[index, 'priceless_scale_vector'] else: PRO.loc[index,'effective_price'] = PRO.loc[index, 'price'] PRO['effective_price'] ``` 6885fd40-ff73-40a4-8f71-225577ec684e_aeaf5266-3f9c-4074-bd34-eba76a61760c 0.14384 a96cb241-a4a9-4980-a16a-ba4b6a80175e_aeaf5266-3f9c-4074-bd34-eba76a61760c 0.14384 d1c3b8ad-58b6-4ea4-8877-a51b356556bc_281fc4f0-c05d-410a-a784-06e3508e78e6 0.14384 92068396-88c7-45ed-9008-622008a299f3_0d860eb4-1a25-41b4-a821-81f5726d86e5 0.18444 35ddb020-9812-4808-bdfb-6845a454a73c_0d860eb4-1a25-41b4-a821-81f5726d86e5 0.18444 ... 31e6ab74-141d-49f4-9e75-5974bd62742b_b3184435-d00c-5713-823d-5ff6741bfbbc 0.00000 b50b032c-8bf0-41a4-bb7a-ce5e12cabf01_d13ae29b-7d6a-55d2-8380-86343a80fed8 0.00000 307b681d-c6bc-44cb-ab3e-a9067a4e115a_ed25f589-c6c8-56a7-a518-b01dd3e8a453 0.00000 73c9e29c-885e-490e-95f3-087227443758_101e55f2-2da0-528b-96fd-21687e242c1c 0.00000 91ed532a-08f2-4fdd-b5e9-28bb2afd1336_451550a8-d9e6-4396-9f9a-97e752111228 0.00000 Name: effective_price, Length: 16022, dtype: float64 ## Load in BACI price data The feather file contains BACI price data for those activities that have a match; otherwise it contains samples from a lognormal distribution with a mean at the ecoinvent price (inflation corrected to 2012) and a CoV of 1.05 (the median CoV of the processes with a BACI price). 
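The fallback sampling itself happens inside the local `price_variance_MC` module and is not shown in this notebook. Purely as an illustrative sketch (the function name and signature below are hypothetical, not the actual `generate_price_vector` API), lognormal price samples with a given arithmetic mean and coefficient of variation could be drawn like this:

```python
import numpy as np

def sample_fallback_prices(mean_price, cov=1.05, n=3000, rng=None):
    """Illustrative sketch: draw lognormal samples whose arithmetic mean is
    `mean_price` and whose coefficient of variation is `cov`."""
    rng = np.random.default_rng() if rng is None else rng
    sigma2 = np.log(1.0 + cov ** 2)           # shape parameter from the CoV
    mu = np.log(mean_price) - 0.5 * sigma2    # shift so the arithmetic mean equals mean_price
    return rng.lognormal(mean=mu, sigma=np.sqrt(sigma2), size=n)
```

The parametrisation $\sigma^2 = \ln(1 + \mathrm{CoV}^2)$ and $\mu = \ln(\bar{p}) - \sigma^2/2$ is what ties the sampled distribution to the ecoinvent mean price and the chosen CoV of 1.05.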
```python # Read in feather file price_data = pd.read_feather('/home/jakobs/Documents/IndEcol/OASES/pylcaio/src/Databases/ecoinvent3.5_exiobase3_2012/price_dataframe.ftr') price_data.set_index('index', inplace=True) price_data ``` [16022 rows × 3000 columns] ## Precalculate EXIOBASE multipliers: \begin{align} M = C_\mathrm{io}S_\mathrm{io}L_\mathrm{io} \end{align} ```python %%time M_io = C_io.dot(analysis_object.F_io).dot(Lio) # get the unscaled cut-off matrix and the price data just as an array Cu_unscaled = analysis_object.A_io_f.copy() price_data_samples = price_data.to_numpy() ``` CPU times: user 1.78 s, sys: 59.9 ms, total: 1.84 s Wall time: 1.82 s ## Run MC We run the MC for one impact category only (for speed and memory reasons), although it is possible to run it for all categories. In that case one might want to write the results line by line so as not to fill up memory. ```python %%time # define number of samples, the number of independent samples drawn for each activity is 3000. nsamples = 3000 # The MC only calculates the impacts from the hybrid/IO part. 
impacts_io_samples = do_price_MC(M_io[4,:], Lp.todense(), Cu_unscaled, price_data_samples, Nruns=nsamples) ``` Starting run at Sat Jan 30 10:56:38 2021 Results shape: (1, 16022, 10000) Run 1 Run 201 Run 401 Run 601 Run 801 Run 1001 Run 1201 Run 1401 Run 1601 Run 1801 Run 2001 Run 2201 Run 2401 Run 2601 Run 2801 Run 3001 Run 3201 Run 3401 Run 3601 Run 3801 Run 4001 Run 4201 Run 4401 Run 4601 Run 4801 Run 5001 Run 5201 Run 5401 Run 5601 Run 5801 Run 6001 Run 6201 Run 6401 Run 6601 Run 6801 Run 7001 Run 7201 Run 7401 Run 7601 Run 7801 Run 8001 Run 8201 Run 8401 Run 8601 Run 8801 Run 9001 Run 9201 Run 9401 Run 9601 Run 9801 Finished run at Sat Jan 30 12:27:55 2021 Finished 10000 runs in 91.0 minutes and 17.405287504196167 seconds CPU times: user 2h 13min 36s, sys: 37min 5s, total: 2h 50min 41s Wall time: 1h 31min 17s ## Save data to file: ```python filename = '/home/jakobs/Documents/IndEcol/OASES/pylcaio/src/Databases/ecoinvent3.5_exiobase3_{}/MonteCarlo_results/IO_GWP100_impacts_{}_{}_MC_{}_lhs_runs_cpc21_hs12_mapping_Proxy_data_for_unmapped_processes_median_cov.npy'.format(year, double_counting, year, nsamples) impacts_io_samples.dump(filename) ``` ## Load data ```python nsamples = 10000 filename = '/home/jakobs/Documents/IndEcol/OASES/pylcaio/src/Databases/ecoinvent3.5_exiobase3_{}/MonteCarlo_results/IO_GWP100_impacts_{}_{}_MC_{}_lhs_runs_cpc21_hs12_mapping_Proxy_data_for_unmapped_processes_median_cov.npy'.format(year, double_counting, year, nsamples) impacts_io_samples = np.load(filename, allow_pickle=True) ``` ## Quick analysis ```python # LCA impacts per process: lca_impacts = analysis_object.C_f.dot(analysis_object.F_f.dot(Lp)) ``` ```python ``` ```python # Categorise processes: with open('/home/jakobs/Documents/IndEcol/OASES/pylcaio/src/Databases/ecoinvent3.5_exiobase3_{}/list_processes_with_baci_price.txt'.format(year), 'r') as fh: procs_with_baci_price = [line.rstrip() for line in fh] BACI_hybrid_list = list(set(procs_with_baci_price).intersection(analysis_object.hybridized_processes)) hybrid_processes_without_baci_price = set(analysis_object.hybridized_processes).difference(procs_with_baci_price) # Make boolean masks hybrid_processes_without_baci_price_boolean_mask = PRO.index.isin(hybrid_processes_without_baci_price) BACI_hybrid_list_booolean_mask = PRO.index.isin(BACI_hybrid_list) hybrid_process_boolean_mask = PRO.index.isin(analysis_object.hybridized_processes) ``` ```python color = ['c', 'orange', 'r', 'orange', 'c'] linestyle = [':', '--', '-', '--', ':'] #hybrid_baci, hybrid_non_baci, hybrid all masklist = [hybrid_process_boolean_mask, BACI_hybrid_list_booolean_mask, hybrid_processes_without_baci_price_boolean_mask] mask_name = ['B', 'C' , 'D'] for j,(mask,name) in enumerate(zip(masklist, mask_name)): print(name) fig = plt.figure(j) ax = fig.add_subplot(111) lca_impact = lca_impacts[425,mask] dist = np.nan_to_num(np.nanmedian(impacts_io_samples[0,mask, :], axis=1)/lca_impact*100) # print(type(dist)) bin_edges = np.linspace(*np.percentile(dist, [0, 98]), 30)# np.histogram_bin_edges(dist, bins='doane') print(bin_edges.min(), bin_edges.max()) ax.hist(dist, bins=bin_edges, color='k', lw=1.5, histtype='step', density=True) #''' for i,x in enumerate(np.percentile(dist, [2.5, 16, 50, 84, 97.5])): ax.axvline(x=x, c=color[i], ls=linestyle[i], lw=1.3) #ax.axvline(x=dist.mean(), color='r', ls='-.', lw=1.3) #ax.axvline(x=impacts_io_price_from_pylcaio[4,mask].sum()/lca_impact*100, color='k', ls='--', lw=1.3) ax.text(0.83, 0.9, s='Subset: {} '.format(name), horizontalalignment='center', 
verticalalignment='top', transform=ax.transAxes) ax.text(0.83, 0.8, s='$N_\mathrm{{proc}}$={}'.format(sum(mask)), horizontalalignment='center', verticalalignment='top', transform=ax.transAxes) ax.set_xlabel(r'Relative increase in Carbon Footprint [\%]') ax.set_ylabel(r'Probability density') #''' ax.set_yscale('log') plt.show() ``` ```python ```
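The "Run MC" cell above notes that results could be written out line by line to keep memory use down. A minimal sketch of one way to do that is below: it streams each batch of runs into a memory-mapped `.npy` file instead of holding all samples in RAM. It assumes `do_price_MC` can be called repeatedly with a smaller `Nruns` per batch (same signature as used above) and returns an array of shape `(1, n_processes, Nruns)`; the chunk size, output path, and dtype are illustrative assumptions, and the chunked batches are independent draws rather than a single Latin-hypercube design over all runs.


```python
import numpy as np

# Illustrative settings (assumed, not from the notebook); chunk_size should divide nsamples.
chunk_size = 500
n_chunks = nsamples // chunk_size
n_processes = Cu_unscaled.shape[1]                      # assumed: one column per hybridised process
chunked_filename = 'IO_GWP100_impacts_MC_chunked.npy'   # hypothetical output path

# Memory-mapped output array on disk; only one batch of runs lives in RAM at a time.
out = np.lib.format.open_memmap(chunked_filename, mode='w+', dtype=np.float64,
                                shape=(1, n_processes, nsamples))
for k in range(n_chunks):
    # Each call draws an independent batch of price samples and computes the IO impacts for it.
    batch = do_price_MC(M_io[4, :], Lp.todense(), Cu_unscaled,
                        price_data_samples, Nruns=chunk_size)
    out[:, :, k * chunk_size:(k + 1) * chunk_size] = batch
out.flush()
```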
{-# OPTIONS --without-K #-} module TypeEquiv where import Level using (zero; suc) open import Data.Empty using (⊥) open import Data.Unit using (⊤; tt) open import Data.Sum using (_⊎_; inj₁; inj₂) open import Data.Product using (_×_; proj₁; proj₂; _,_) open import Algebra using (CommutativeSemiring) open import Algebra.Structures using (IsSemigroup; IsCommutativeMonoid; IsCommutativeSemiring) open import Function renaming (_∘_ to _○_) open import Relation.Binary.PropositionalEquality using (refl) open import Equiv using (_∼_; refl∼; _≃_; id≃; sym≃; ≃IsEquiv; qinv; _⊎≃_; _×≃_) ------------------------------------------------------------------------------ -- Type Equivalences -- for each type combinator, define two functions that are inverses, and -- establish an equivalence. These are all in the 'semantic space' with -- respect to Pi combinators. -- swap₊ swap₊ : {A B : Set} → A ⊎ B → B ⊎ A swap₊ (inj₁ a) = inj₂ a swap₊ (inj₂ b) = inj₁ b abstract swapswap₊ : {A B : Set} → swap₊ ○ swap₊ {A} {B} ∼ id swapswap₊ (inj₁ a) = refl swapswap₊ (inj₂ b) = refl swap₊equiv : {A B : Set} → (A ⊎ B) ≃ (B ⊎ A) swap₊equiv = (swap₊ , qinv swap₊ swapswap₊ swapswap₊) -- unite₊ and uniti₊ unite₊ : {A : Set} → ⊥ ⊎ A → A unite₊ (inj₁ ()) unite₊ (inj₂ y) = y uniti₊ : {A : Set} → A → ⊥ ⊎ A uniti₊ a = inj₂ a abstract uniti₊∘unite₊ : {A : Set} → uniti₊ ○ unite₊ ∼ id {A = ⊥ ⊎ A} uniti₊∘unite₊ (inj₁ ()) uniti₊∘unite₊ (inj₂ y) = refl -- this is so easy, Agda can figure it out by itself (see below) unite₊∘uniti₊ : {A : Set} → unite₊ ○ uniti₊ ∼ id {A = A} unite₊∘uniti₊ _ = refl unite₊equiv : {A : Set} → (⊥ ⊎ A) ≃ A unite₊equiv = (unite₊ , qinv uniti₊ unite₊∘uniti₊ uniti₊∘unite₊) uniti₊equiv : {A : Set} → A ≃ (⊥ ⊎ A) uniti₊equiv = sym≃ unite₊equiv -- unite₊′ and uniti₊′ unite₊′ : {A : Set} → A ⊎ ⊥ → A unite₊′ (inj₁ x) = x unite₊′ (inj₂ ()) uniti₊′ : {A : Set} → A → A ⊎ ⊥ uniti₊′ a = inj₁ a abstract uniti₊′∘unite₊′ : {A : Set} → uniti₊′ ○ unite₊′ ∼ id {A = A ⊎ ⊥} uniti₊′∘unite₊′ (inj₁ _) = refl uniti₊′∘unite₊′ (inj₂ ()) -- this is so easy, Agda can figure it out by itself (see below) unite₊′∘uniti₊′ : {A : Set} → unite₊′ ○ uniti₊′ ∼ id {A = A} unite₊′∘uniti₊′ _ = refl unite₊′equiv : {A : Set} → (A ⊎ ⊥) ≃ A unite₊′equiv = (unite₊′ , qinv uniti₊′ refl∼ uniti₊′∘unite₊′) uniti₊′equiv : {A : Set} → A ≃ (A ⊎ ⊥) uniti₊′equiv = sym≃ unite₊′equiv -- unite⋆ and uniti⋆ unite⋆ : {A : Set} → ⊤ × A → A unite⋆ (tt , x) = x uniti⋆ : {A : Set} → A → ⊤ × A uniti⋆ x = tt , x abstract uniti⋆∘unite⋆ : {A : Set} → uniti⋆ ○ unite⋆ ∼ id {A = ⊤ × A} uniti⋆∘unite⋆ (tt , x) = refl unite⋆equiv : {A : Set} → (⊤ × A) ≃ A unite⋆equiv = unite⋆ , qinv uniti⋆ refl∼ uniti⋆∘unite⋆ uniti⋆equiv : {A : Set} → A ≃ (⊤ × A) uniti⋆equiv = sym≃ unite⋆equiv -- unite⋆′ and uniti⋆′ unite⋆′ : {A : Set} → A × ⊤ → A unite⋆′ (x , tt) = x uniti⋆′ : {A : Set} → A → A × ⊤ uniti⋆′ x = x , tt abstract uniti⋆′∘unite⋆′ : {A : Set} → uniti⋆′ ○ unite⋆′ ∼ id {A = A × ⊤} uniti⋆′∘unite⋆′ (x , tt) = refl unite⋆′equiv : {A : Set} → (A × ⊤) ≃ A unite⋆′equiv = unite⋆′ , qinv uniti⋆′ refl∼ uniti⋆′∘unite⋆′ uniti⋆′equiv : {A : Set} → A ≃ (A × ⊤) uniti⋆′equiv = sym≃ unite⋆′equiv -- swap⋆ swap⋆ : {A B : Set} → A × B → B × A swap⋆ (a , b) = (b , a) abstract swapswap⋆ : {A B : Set} → swap⋆ ○ swap⋆ ∼ id {A = A × B} swapswap⋆ (a , b) = refl swap⋆equiv : {A B : Set} → (A × B) ≃ (B × A) swap⋆equiv = swap⋆ , qinv swap⋆ swapswap⋆ swapswap⋆ -- assocl₊ and assocr₊ assocl₊ : {A B C : Set} → (A ⊎ (B ⊎ C)) → ((A ⊎ B) ⊎ C) assocl₊ (inj₁ a) = inj₁ (inj₁ a) assocl₊ (inj₂ (inj₁ b)) = inj₁ (inj₂ b) assocl₊ (inj₂ 
(inj₂ c)) = inj₂ c assocr₊ : {A B C : Set} → ((A ⊎ B) ⊎ C) → (A ⊎ (B ⊎ C)) assocr₊ (inj₁ (inj₁ a)) = inj₁ a assocr₊ (inj₁ (inj₂ b)) = inj₂ (inj₁ b) assocr₊ (inj₂ c) = inj₂ (inj₂ c) abstract assocl₊∘assocr₊ : {A B C : Set} → assocl₊ ○ assocr₊ ∼ id {A = ((A ⊎ B) ⊎ C)} assocl₊∘assocr₊ (inj₁ (inj₁ a)) = refl assocl₊∘assocr₊ (inj₁ (inj₂ b)) = refl assocl₊∘assocr₊ (inj₂ c) = refl assocr₊∘assocl₊ : {A B C : Set} → assocr₊ ○ assocl₊ ∼ id {A = (A ⊎ (B ⊎ C))} assocr₊∘assocl₊ (inj₁ a) = refl assocr₊∘assocl₊ (inj₂ (inj₁ b)) = refl assocr₊∘assocl₊ (inj₂ (inj₂ c)) = refl assocr₊equiv : {A B C : Set} → ((A ⊎ B) ⊎ C) ≃ (A ⊎ (B ⊎ C)) assocr₊equiv = assocr₊ , qinv assocl₊ assocr₊∘assocl₊ assocl₊∘assocr₊ assocl₊equiv : {A B C : Set} → (A ⊎ (B ⊎ C)) ≃ ((A ⊎ B) ⊎ C) assocl₊equiv = sym≃ assocr₊equiv -- assocl⋆ and assocr⋆ assocl⋆ : {A B C : Set} → (A × (B × C)) → ((A × B) × C) assocl⋆ (a , (b , c)) = ((a , b) , c) assocr⋆ : {A B C : Set} → ((A × B) × C) → (A × (B × C)) assocr⋆ ((a , b) , c) = (a , (b , c)) abstract assocl⋆∘assocr⋆ : {A B C : Set} → assocl⋆ ○ assocr⋆ ∼ id {A = ((A × B) × C)} assocl⋆∘assocr⋆ = refl∼ assocr⋆∘assocl⋆ : {A B C : Set} → assocr⋆ ○ assocl⋆ ∼ id {A = (A × (B × C))} assocr⋆∘assocl⋆ = refl∼ assocl⋆equiv : {A B C : Set} → (A × (B × C)) ≃ ((A × B) × C) assocl⋆equiv = assocl⋆ , qinv assocr⋆ assocl⋆∘assocr⋆ assocr⋆∘assocl⋆ assocr⋆equiv : {A B C : Set} → ((A × B) × C) ≃ (A × (B × C)) assocr⋆equiv = sym≃ assocl⋆equiv -- distz and factorz, on left distz : { A : Set} → (⊥ × A) → ⊥ distz = proj₁ factorz : {A : Set} → ⊥ → (⊥ × A) factorz () abstract distz∘factorz : {A : Set} → distz ○ factorz {A} ∼ id distz∘factorz () factorz∘distz : {A : Set} → factorz {A} ○ distz ∼ id factorz∘distz (() , proj₂) distzequiv : {A : Set} → (⊥ × A) ≃ ⊥ distzequiv {A} = distz , qinv factorz (distz∘factorz {A}) factorz∘distz factorzequiv : {A : Set} → ⊥ ≃ (⊥ × A) factorzequiv {A} = sym≃ distzequiv -- distz and factorz, on right distzr : { A : Set} → (A × ⊥) → ⊥ distzr = proj₂ factorzr : {A : Set} → ⊥ → (A × ⊥) factorzr () abstract distzr∘factorzr : {A : Set} → distzr ○ factorzr {A} ∼ id distzr∘factorzr () factorzr∘distzr : {A : Set} → factorzr {A} ○ distzr ∼ id factorzr∘distzr (_ , ()) distzrequiv : {A : Set} → (A × ⊥) ≃ ⊥ distzrequiv {A} = distzr , qinv factorzr (distzr∘factorzr {A}) factorzr∘distzr factorzrequiv : {A : Set} → ⊥ ≃ (A × ⊥) factorzrequiv {A} = sym≃ distzrequiv -- dist and factor, on right dist : {A B C : Set} → ((A ⊎ B) × C) → (A × C) ⊎ (B × C) dist (inj₁ x , c) = inj₁ (x , c) dist (inj₂ y , c) = inj₂ (y , c) factor : {A B C : Set} → (A × C) ⊎ (B × C) → ((A ⊎ B) × C) factor (inj₁ (a , c)) = inj₁ a , c factor (inj₂ (b , c)) = inj₂ b , c abstract dist∘factor : {A B C : Set} → dist {A} {B} {C} ○ factor ∼ id dist∘factor (inj₁ x) = refl dist∘factor (inj₂ y) = refl factor∘dist : {A B C : Set} → factor {A} {B} {C} ○ dist ∼ id factor∘dist (inj₁ x , c) = refl factor∘dist (inj₂ y , c) = refl distequiv : {A B C : Set} → ((A ⊎ B) × C) ≃ ((A × C) ⊎ (B × C)) distequiv = dist , qinv factor dist∘factor factor∘dist factorequiv : {A B C : Set} → ((A × C) ⊎ (B × C)) ≃ ((A ⊎ B) × C) factorequiv = sym≃ distequiv -- dist and factor, on left distl : {A B C : Set} → A × (B ⊎ C) → (A × B) ⊎ (A × C) distl (x , inj₁ x₁) = inj₁ (x , x₁) distl (x , inj₂ y) = inj₂ (x , y) factorl : {A B C : Set} → (A × B) ⊎ (A × C) → A × (B ⊎ C) factorl (inj₁ (x , y)) = x , inj₁ y factorl (inj₂ (x , y)) = x , inj₂ y abstract distl∘factorl : {A B C : Set} → distl {A} {B} {C} ○ factorl ∼ id distl∘factorl (inj₁ (x , y)) = refl distl∘factorl (inj₂ (x , 
y)) = refl factorl∘distl : {A B C : Set} → factorl {A} {B} {C} ○ distl ∼ id factorl∘distl (a , inj₁ x) = refl factorl∘distl (a , inj₂ y) = refl distlequiv : {A B C : Set} → (A × (B ⊎ C)) ≃ ((A × B) ⊎ (A × C)) distlequiv = distl , qinv factorl distl∘factorl factorl∘distl factorlequiv : {A B C : Set} → ((A × B) ⊎ (A × C)) ≃ (A × (B ⊎ C)) factorlequiv = sym≃ distlequiv ------------------------------------------------------------------------------ -- Commutative semiring structure typesPlusIsSG : IsSemigroup {Level.suc Level.zero} {Level.zero} {Set} _≃_ _⊎_ typesPlusIsSG = record { isMagma = record { isEquivalence = ≃IsEquiv ; ∙-cong = _⊎≃_ } ; assoc = λ t₁ t₂ t₃ → assocr₊equiv {t₁} {t₂} {t₃} } typesTimesIsSG : IsSemigroup {Level.suc Level.zero} {Level.zero} {Set} _≃_ _×_ typesTimesIsSG = record { isMagma = record { isEquivalence = ≃IsEquiv ; ∙-cong = _×≃_ } ; assoc = λ t₁ t₂ t₃ → assocr⋆equiv {t₁} {t₂} {t₃} } typesPlusIsCM : IsCommutativeMonoid _≃_ _⊎_ ⊥ typesPlusIsCM = record { isSemigroup = typesPlusIsSG ; identityˡ = λ t → unite₊equiv {t} ; comm = λ t₁ t₂ → swap₊equiv {t₁} {t₂} } typesTimesIsCM : IsCommutativeMonoid _≃_ _×_ ⊤ typesTimesIsCM = record { isSemigroup = typesTimesIsSG ; identityˡ = λ t → unite⋆equiv {t} ; comm = λ t₁ t₂ → swap⋆equiv {t₁} {t₂} } typesIsCSR : IsCommutativeSemiring _≃_ _⊎_ _×_ ⊥ ⊤ typesIsCSR = record { +-isCommutativeMonoid = typesPlusIsCM ; *-isCommutativeMonoid = typesTimesIsCM ; distribʳ = λ t₁ t₂ t₃ → distequiv {t₂} {t₃} {t₁} ; zeroˡ = λ t → distzequiv {t} } typesCSR : CommutativeSemiring (Level.suc Level.zero) Level.zero typesCSR = record { Carrier = Set ; _≈_ = _≃_ ; _+_ = _⊎_ ; _*_ = _×_ ; 0# = ⊥ ; 1# = ⊤ ; isCommutativeSemiring = typesIsCSR } ------------------------------------------------------------------------------
module Main pythag : Int -> List (Int, Int, Int) pythag max = [(x, y, z) | z <- [1..max], y <- [1..z], x <- [1..y], x * x + y * y == z * z] main : IO () main = printLn (pythag 100)
K80(κ::Float64, safe::Bool=true) = K80rel(κ, safe)
K80(α::Float64, β::Float64, safe::Bool=true) = K80abs(α, β, safe)

function K80(θ_vec::A, safe::Bool=true) where A <: AbstractArray
  if length(θ_vec) == 1
    return K80rel(θ_vec[1], safe)
  elseif length(θ_vec) == 2
    return K80abs(θ_vec[1], θ_vec[2], safe)
  else
    error("Parameter vector length incompatible with absolute or relative rate form of substitution model")
  end
end

function K80rel(θ_vec::A, safe::Bool=true) where A <: AbstractArray
  if safe
    if length(θ_vec) != 1
      error("Incorrect parameter vector length")
    end
  end
  return K80rel(θ_vec[1], safe)
end

function K80abs(θ_vec::A, safe::Bool=true) where A <: AbstractArray
  if safe
    if length(θ_vec) != 2
      error("Incorrect parameter vector length")
    end
  end
  return K80abs(θ_vec[1], θ_vec[2], safe)
end
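# Usage sketch, assuming the K80rel/K80abs constructors dispatched to above exist
# with these arities elsewhere in the package; the numeric values are arbitrary.
model_rel = K80([2.0])          # one parameter  → relative-rate form, K80rel(2.0)
model_abs = K80([0.5, 0.25])    # two parameters → absolute-rate form, K80abs(0.5, 0.25)
# K80([1.0, 2.0, 3.0])          # would throw: incompatible parameter vector length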
/- Copyright (c) 2022 Yury Kudryashov. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yury Kudryashov ! This file was ported from Lean 3 source module topology.algebra.order.t5 ! leanprover-community/mathlib commit 50832daea47b195a48b5b33b1c8b2162c48c3afc ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathbin.Topology.Order.Basic import Mathbin.Data.Set.Intervals.OrdConnectedComponent /-! # Linear order is a completely normal Hausdorff topological space > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. In this file we prove that a linear order with order topology is a completely normal Hausdorff topological space. -/ open Filter Set Function OrderDual open Topology Filter Interval variable {X : Type _} [LinearOrder X] [TopologicalSpace X] [OrderTopology X] {a b c : X} {s t : Set X} namespace Set #print Set.ordConnectedComponent_mem_nhds /- @[simp] theorem ordConnectedComponent_mem_nhds : ordConnectedComponent s a ∈ 𝓝 a ↔ s ∈ 𝓝 a := by refine' ⟨fun h => mem_of_superset h ord_connected_component_subset, fun h => _⟩ rcases exists_Icc_mem_subset_of_mem_nhds h with ⟨b, c, ha, ha', hs⟩ exact mem_of_superset ha' (subset_ord_connected_component ha hs) #align set.ord_connected_component_mem_nhds Set.ordConnectedComponent_mem_nhds -/ /- warning: set.compl_section_ord_separating_set_mem_nhds_within_Ici -> Set.compl_section_ordSeparatingSet_mem_nhdsWithin_Ici is a dubious translation: lean 3 declaration is forall {X : Type.{u1}} [_inst_1 : LinearOrder.{u1} X] [_inst_2 : TopologicalSpace.{u1} X] [_inst_3 : OrderTopology.{u1} X _inst_2 (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (LinearOrder.toLattice.{u1} X _inst_1))))] {a : X} {s : Set.{u1} X} {t : Set.{u1} X}, (Disjoint.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.completeBooleanAlgebra.{u1} X)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} X) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} X) (Set.booleanAlgebra.{u1} X))) s (closure.{u1} X _inst_2 t)) -> (Membership.Mem.{u1, u1} X (Set.{u1} X) (Set.hasMem.{u1} X) a s) -> (Membership.Mem.{u1, u1} (Set.{u1} X) (Filter.{u1} X) (Filter.hasMem.{u1} X) (HasCompl.compl.{u1} (Set.{u1} X) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} X) (Set.booleanAlgebra.{u1} X)) (Set.ordConnectedSection.{u1} X _inst_1 (Set.ordSeparatingSet.{u1} X _inst_1 s t))) (nhdsWithin.{u1} X _inst_2 a (Set.Ici.{u1} X (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (LinearOrder.toLattice.{u1} X _inst_1)))) a))) but is expected to have type forall {X : Type.{u1}} [_inst_1 : LinearOrder.{u1} X] [_inst_2 : TopologicalSpace.{u1} X] [_inst_3 : OrderTopology.{u1} X _inst_2 (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (DistribLattice.toLattice.{u1} X (instDistribLattice.{u1} X _inst_1)))))] {a : X} {s : Set.{u1} X} {t : Set.{u1} X}, (Disjoint.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} 
(Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))) (BoundedOrder.toOrderBot.{u1} (Set.{u1} X) (Preorder.toLE.{u1} (Set.{u1} X) (PartialOrder.toPreorder.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))))) (CompleteLattice.toBoundedOrder.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))) s (closure.{u1} X _inst_2 t)) -> (Membership.mem.{u1, u1} X (Set.{u1} X) (Set.instMembershipSet.{u1} X) a s) -> (Membership.mem.{u1, u1} (Set.{u1} X) (Filter.{u1} X) (instMembershipSetFilter.{u1} X) (HasCompl.compl.{u1} (Set.{u1} X) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} X) (Set.instBooleanAlgebraSet.{u1} X)) (Set.ordConnectedSection.{u1} X _inst_1 (Set.ordSeparatingSet.{u1} X _inst_1 s t))) (nhdsWithin.{u1} X _inst_2 a (Set.Ici.{u1} X (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (DistribLattice.toLattice.{u1} X (instDistribLattice.{u1} X _inst_1))))) a))) Case conversion may be inaccurate. Consider using '#align set.compl_section_ord_separating_set_mem_nhds_within_Ici Set.compl_section_ordSeparatingSet_mem_nhdsWithin_Iciₓ'. -/ theorem compl_section_ordSeparatingSet_mem_nhdsWithin_Ici (hd : Disjoint s (closure t)) (ha : a ∈ s) : (ordConnectedSection <| ordSeparatingSet s t)ᶜ ∈ 𝓝[≥] a := by have hmem : tᶜ ∈ 𝓝[≥] a := by refine' mem_nhdsWithin_of_mem_nhds _ rw [← mem_interior_iff_mem_nhds, interior_compl] exact disjoint_left.1 hd ha rcases exists_Icc_mem_subset_of_mem_nhdsWithin_Ici hmem with ⟨b, hab, hmem', hsub⟩ by_cases H : Disjoint (Icc a b) (ord_connected_section <| ord_separating_set s t) · exact mem_of_superset hmem' (disjoint_left.1 H) · simp only [Set.disjoint_left, not_forall, Classical.not_not] at H rcases H with ⟨c, ⟨hac, hcb⟩, hc⟩ have hsub' : Icc a b ⊆ ord_connected_component (tᶜ) a := subset_ord_connected_component (left_mem_Icc.2 hab) hsub replace hac : a < c := hac.lt_of_ne (Ne.symm <| ne_of_mem_of_not_mem hc <| disjoint_left.1 (disjoint_left_ord_separating_set.mono_right ord_connected_section_subset) ha) refine' mem_of_superset (Ico_mem_nhdsWithin_Ici (left_mem_Ico.2 hac)) fun x hx hx' => _ refine' hx.2.Ne (eq_of_mem_ord_connected_section_of_uIcc_subset hx' hc _) refine' subset_inter (subset_Union₂_of_subset a ha _) _ · exact ord_connected.uIcc_subset inferInstance (hsub' ⟨hx.1, hx.2.le.trans hcb⟩) (hsub' ⟨hac.le, hcb⟩) · rcases mem_Union₂.1 (ord_connected_section_subset hx').2 with ⟨y, hyt, hxy⟩ refine' subset_Union₂_of_subset y hyt (ord_connected.uIcc_subset inferInstance hxy _) refine' subset_ord_connected_component left_mem_uIcc hxy _ suffices c < y by rw [uIcc_of_ge (hx.2.trans this).le] exact ⟨hx.2.le, this.le⟩ refine' lt_of_not_le fun hyc => _ have hya : y < a := not_le.1 fun hay => hsub ⟨hay, hyc.trans hcb⟩ hyt exact hxy (Icc_subset_uIcc ⟨hya.le, hx.1⟩) ha #align set.compl_section_ord_separating_set_mem_nhds_within_Ici Set.compl_section_ordSeparatingSet_mem_nhdsWithin_Ici /- warning: 
set.compl_section_ord_separating_set_mem_nhds_within_Iic -> Set.compl_section_ordSeparatingSet_mem_nhdsWithin_Iic is a dubious translation: lean 3 declaration is forall {X : Type.{u1}} [_inst_1 : LinearOrder.{u1} X] [_inst_2 : TopologicalSpace.{u1} X] [_inst_3 : OrderTopology.{u1} X _inst_2 (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (LinearOrder.toLattice.{u1} X _inst_1))))] {a : X} {s : Set.{u1} X} {t : Set.{u1} X}, (Disjoint.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.completeBooleanAlgebra.{u1} X)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} X) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} X) (Set.booleanAlgebra.{u1} X))) s (closure.{u1} X _inst_2 t)) -> (Membership.Mem.{u1, u1} X (Set.{u1} X) (Set.hasMem.{u1} X) a s) -> (Membership.Mem.{u1, u1} (Set.{u1} X) (Filter.{u1} X) (Filter.hasMem.{u1} X) (HasCompl.compl.{u1} (Set.{u1} X) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} X) (Set.booleanAlgebra.{u1} X)) (Set.ordConnectedSection.{u1} X _inst_1 (Set.ordSeparatingSet.{u1} X _inst_1 s t))) (nhdsWithin.{u1} X _inst_2 a (Set.Iic.{u1} X (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (LinearOrder.toLattice.{u1} X _inst_1)))) a))) but is expected to have type forall {X : Type.{u1}} [_inst_1 : LinearOrder.{u1} X] [_inst_2 : TopologicalSpace.{u1} X] [_inst_3 : OrderTopology.{u1} X _inst_2 (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (DistribLattice.toLattice.{u1} X (instDistribLattice.{u1} X _inst_1)))))] {a : X} {s : Set.{u1} X} {t : Set.{u1} X}, (Disjoint.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))) (BoundedOrder.toOrderBot.{u1} (Set.{u1} X) (Preorder.toLE.{u1} (Set.{u1} X) (PartialOrder.toPreorder.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))))) (CompleteLattice.toBoundedOrder.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))) s (closure.{u1} X _inst_2 t)) -> (Membership.mem.{u1, u1} X (Set.{u1} X) (Set.instMembershipSet.{u1} X) a s) -> (Membership.mem.{u1, u1} (Set.{u1} X) (Filter.{u1} X) (instMembershipSetFilter.{u1} X) (HasCompl.compl.{u1} (Set.{u1} X) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} X) (Set.instBooleanAlgebraSet.{u1} X)) (Set.ordConnectedSection.{u1} X _inst_1 (Set.ordSeparatingSet.{u1} X _inst_1 s t))) (nhdsWithin.{u1} X _inst_2 a (Set.Iic.{u1} X (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X 
(Lattice.toSemilatticeInf.{u1} X (DistribLattice.toLattice.{u1} X (instDistribLattice.{u1} X _inst_1))))) a))) Case conversion may be inaccurate. Consider using '#align set.compl_section_ord_separating_set_mem_nhds_within_Iic Set.compl_section_ordSeparatingSet_mem_nhdsWithin_Iicₓ'. -/ theorem compl_section_ordSeparatingSet_mem_nhdsWithin_Iic (hd : Disjoint s (closure t)) (ha : a ∈ s) : (ordConnectedSection <| ordSeparatingSet s t)ᶜ ∈ 𝓝[≤] a := by have hd' : Disjoint (ofDual ⁻¹' s) (closure <| ofDual ⁻¹' t) := hd have ha' : toDual a ∈ ofDual ⁻¹' s := ha simpa only [dual_ord_separating_set, dual_ord_connected_section] using compl_section_ord_separating_set_mem_nhds_within_Ici hd' ha' #align set.compl_section_ord_separating_set_mem_nhds_within_Iic Set.compl_section_ordSeparatingSet_mem_nhdsWithin_Iic /- warning: set.compl_section_ord_separating_set_mem_nhds -> Set.compl_section_ordSeparatingSet_mem_nhds is a dubious translation: lean 3 declaration is forall {X : Type.{u1}} [_inst_1 : LinearOrder.{u1} X] [_inst_2 : TopologicalSpace.{u1} X] [_inst_3 : OrderTopology.{u1} X _inst_2 (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (LinearOrder.toLattice.{u1} X _inst_1))))] {a : X} {s : Set.{u1} X} {t : Set.{u1} X}, (Disjoint.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.completeBooleanAlgebra.{u1} X)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} X) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} X) (Set.booleanAlgebra.{u1} X))) s (closure.{u1} X _inst_2 t)) -> (Membership.Mem.{u1, u1} X (Set.{u1} X) (Set.hasMem.{u1} X) a s) -> (Membership.Mem.{u1, u1} (Set.{u1} X) (Filter.{u1} X) (Filter.hasMem.{u1} X) (HasCompl.compl.{u1} (Set.{u1} X) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} X) (Set.booleanAlgebra.{u1} X)) (Set.ordConnectedSection.{u1} X _inst_1 (Set.ordSeparatingSet.{u1} X _inst_1 s t))) (nhds.{u1} X _inst_2 a)) but is expected to have type forall {X : Type.{u1}} [_inst_1 : LinearOrder.{u1} X] [_inst_2 : TopologicalSpace.{u1} X] [_inst_3 : OrderTopology.{u1} X _inst_2 (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (DistribLattice.toLattice.{u1} X (instDistribLattice.{u1} X _inst_1)))))] {a : X} {s : Set.{u1} X} {t : Set.{u1} X}, (Disjoint.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))) (BoundedOrder.toOrderBot.{u1} (Set.{u1} X) (Preorder.toLE.{u1} (Set.{u1} X) (PartialOrder.toPreorder.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))))) (CompleteLattice.toBoundedOrder.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) 
(CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))) s (closure.{u1} X _inst_2 t)) -> (Membership.mem.{u1, u1} X (Set.{u1} X) (Set.instMembershipSet.{u1} X) a s) -> (Membership.mem.{u1, u1} (Set.{u1} X) (Filter.{u1} X) (instMembershipSetFilter.{u1} X) (HasCompl.compl.{u1} (Set.{u1} X) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} X) (Set.instBooleanAlgebraSet.{u1} X)) (Set.ordConnectedSection.{u1} X _inst_1 (Set.ordSeparatingSet.{u1} X _inst_1 s t))) (nhds.{u1} X _inst_2 a)) Case conversion may be inaccurate. Consider using '#align set.compl_section_ord_separating_set_mem_nhds Set.compl_section_ordSeparatingSet_mem_nhdsₓ'. -/ theorem compl_section_ordSeparatingSet_mem_nhds (hd : Disjoint s (closure t)) (ha : a ∈ s) : (ordConnectedSection <| ordSeparatingSet s t)ᶜ ∈ 𝓝 a := by rw [← nhds_left_sup_nhds_right, mem_sup] exact ⟨compl_section_ord_separating_set_mem_nhds_within_Iic hd ha, compl_section_ord_separating_set_mem_nhds_within_Ici hd ha⟩ #align set.compl_section_ord_separating_set_mem_nhds Set.compl_section_ordSeparatingSet_mem_nhds /- warning: set.ord_t5_nhd_mem_nhds_set -> Set.ordT5Nhd_mem_nhdsSet is a dubious translation: lean 3 declaration is forall {X : Type.{u1}} [_inst_1 : LinearOrder.{u1} X] [_inst_2 : TopologicalSpace.{u1} X] [_inst_3 : OrderTopology.{u1} X _inst_2 (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (LinearOrder.toLattice.{u1} X _inst_1))))] {s : Set.{u1} X} {t : Set.{u1} X}, (Disjoint.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.completeBooleanAlgebra.{u1} X)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} X) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} X) (Set.booleanAlgebra.{u1} X))) s (closure.{u1} X _inst_2 t)) -> (Membership.Mem.{u1, u1} (Set.{u1} X) (Filter.{u1} X) (Filter.hasMem.{u1} X) (Set.ordT5Nhd.{u1} X _inst_1 s t) (nhdsSet.{u1} X _inst_2 s)) but is expected to have type forall {X : Type.{u1}} [_inst_1 : LinearOrder.{u1} X] [_inst_2 : TopologicalSpace.{u1} X] [_inst_3 : OrderTopology.{u1} X _inst_2 (PartialOrder.toPreorder.{u1} X (SemilatticeInf.toPartialOrder.{u1} X (Lattice.toSemilatticeInf.{u1} X (DistribLattice.toLattice.{u1} X (instDistribLattice.{u1} X _inst_1)))))] {s : Set.{u1} X} {t : Set.{u1} X}, (Disjoint.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))) (BoundedOrder.toOrderBot.{u1} (Set.{u1} X) (Preorder.toLE.{u1} (Set.{u1} X) (PartialOrder.toPreorder.{u1} (Set.{u1} X) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} X) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))))) (CompleteLattice.toBoundedOrder.{u1} (Set.{u1} X) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} X) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} X) 
(CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} X) (Set.instCompleteBooleanAlgebraSet.{u1} X)))))) s (closure.{u1} X _inst_2 t)) -> (Membership.mem.{u1, u1} (Set.{u1} X) (Filter.{u1} X) (instMembershipSetFilter.{u1} X) (Set.ordT5Nhd.{u1} X _inst_1 s t) (nhdsSet.{u1} X _inst_2 s)) Case conversion may be inaccurate. Consider using '#align set.ord_t5_nhd_mem_nhds_set Set.ordT5Nhd_mem_nhdsSetₓ'. -/ theorem ordT5Nhd_mem_nhdsSet (hd : Disjoint s (closure t)) : ordT5Nhd s t ∈ 𝓝ˢ s := bUnion_mem_nhdsSet fun x hx => ordConnectedComponent_mem_nhds.2 <| inter_mem (by rw [← mem_interior_iff_mem_nhds, interior_compl] exact disjoint_left.1 hd hx) (compl_section_ordSeparatingSet_mem_nhds hd hx) #align set.ord_t5_nhd_mem_nhds_set Set.ordT5Nhd_mem_nhdsSet end Set open Set #print OrderTopology.t5Space /- /-- A linear order with order topology is a completely normal Hausdorff topological space. -/ instance (priority := 100) OrderTopology.t5Space : T5Space X := ⟨fun s t h₁ h₂ => Filter.disjoint_iff.2 ⟨ordT5Nhd s t, ordT5Nhd_mem_nhdsSet h₂, ordT5Nhd t s, ordT5Nhd_mem_nhdsSet h₁.symm, disjoint_ordT5Nhd⟩⟩ #align order_topology.t5_space OrderTopology.t5Space -/
{- Eilenberg–Mac Lane type K(G, 1) -} {-# OPTIONS --cubical --no-import-sorts --safe --experimental-lossy-unification #-} module Cubical.HITs.EilenbergMacLane1.Properties where open import Cubical.HITs.EilenbergMacLane1.Base open import Cubical.Core.Everything open import Cubical.Foundations.Prelude open import Cubical.Foundations.Isomorphism open import Cubical.Foundations.Equiv open import Cubical.Foundations.HLevels open import Cubical.Foundations.GroupoidLaws open import Cubical.Foundations.Univalence open import Cubical.Data.Sigma open import Cubical.Data.Empty renaming (rec to ⊥-rec) hiding (elim) open import Cubical.Algebra.Group.Base open import Cubical.Algebra.Group.Properties open import Cubical.Algebra.AbGroup.Base open import Cubical.Functions.Morphism private variable ℓG ℓ : Level module _ ((G , str) : Group ℓG) where open GroupStr str elimGroupoid : {B : EM₁ (G , str) → Type ℓ} → ((x : EM₁ (G , str)) → isGroupoid (B x)) → (b : B embase) → (bloop : ((g : G) → PathP (λ i → B (emloop g i)) b b)) → ((g h : G) → PathP (λ i → PathP (λ j → B (emcomp g h j i)) (bloop g i) (bloop (g · h) i)) (λ _ → b) (bloop h)) → (x : EM₁ (G , str)) → B x elimGroupoid Bgroup b bloop bcomp embase = b elimGroupoid Bgroup b bloop bcomp (emloop x i) = bloop x i elimGroupoid Bgroup b bloop bcomp (emcomp g h j i) = bcomp g h i j elimGroupoid {B = B} Bgroup b bloop bcomp (emsquash g h p q r s i j k) = help i j k where help : PathP (λ i → PathP (λ j → PathP (λ k → B (emsquash g h p q r s i j k)) (elimGroupoid Bgroup b bloop bcomp g) (elimGroupoid Bgroup b bloop bcomp h)) (λ k → elimGroupoid Bgroup b bloop bcomp (p k)) λ k → elimGroupoid Bgroup b bloop bcomp (q k)) (λ j k → elimGroupoid Bgroup b bloop bcomp (r j k)) λ j k → elimGroupoid Bgroup b bloop bcomp (s j k) help = toPathP (isOfHLevelPathP' 1 (isOfHLevelPathP' 2 (Bgroup _) _ _) _ _ _ _) elimSet : {B : EM₁ (G , str) → Type ℓ} → ((x : EM₁ (G , str)) → isSet (B x)) → (b : B embase) → ((g : G) → PathP (λ i → B (emloop g i)) b b) → (x : EM₁ (G , str)) → B x elimSet Bset b bloop embase = b elimSet Bset b bloop (emloop g i) = bloop g i elimSet Bset b bloop (emcomp g h i j) = isSet→SquareP (λ i j → Bset (emcomp g h i j)) (λ j → bloop g j) (λ j → bloop (g · h) j) (λ i → b) (λ i → bloop h i) i j elimSet Bset b bloop (emsquash x y p q r s i j k) = isOfHLevel→isOfHLevelDep 3 (λ x → isSet→isGroupoid (Bset x)) _ _ _ _ (λ j k → g (r j k)) (λ j k → g (s j k)) (emsquash x y p q r s) i j k where g = elimSet Bset b bloop elimProp : {B : EM₁ (G , str) → Type ℓ} → ((x : EM₁ (G , str)) → isProp (B x)) → B embase → (x : EM₁ (G , str)) → B x elimProp Bprop b x = elimSet (λ x → isProp→isSet (Bprop x)) b (λ g → isProp→PathP (λ i → Bprop ((emloop g) i)) b b) x elimProp2 : {C : EM₁ (G , str) → EM₁ (G , str) → Type ℓ} → ((x y : EM₁ (G , str)) → isProp (C x y)) → C embase embase → (x y : EM₁ (G , str)) → C x y elimProp2 Cprop c = elimProp (λ x → isPropΠ (λ y → Cprop x y)) (elimProp (λ y → Cprop embase y) c) elim : {B : EM₁ (G , str) → Type ℓ} → ((x : EM₁ (G , str)) → isGroupoid (B x)) → (b : B embase) → (bloop : (g : G) → PathP (λ i → B (emloop g i)) b b) → ((g h : G) → SquareP (λ i j → B (emcomp g h i j)) (bloop g) (bloop (g · h)) (λ j → b) (bloop h)) → (x : EM₁ (G , str)) → B x elim Bgpd b bloop bcomp embase = b elim Bgpd b bloop bcomp (emloop g i) = bloop g i elim Bgpd b bloop bcomp (emcomp g h i j) = bcomp g h i j elim Bgpd b bloop bcomp (emsquash x y p q r s i j k) = isOfHLevel→isOfHLevelDep 3 Bgpd _ _ _ _ (λ j k → g (r j k)) (λ j k → g (s j k)) (emsquash x y p q r s) i j k 
where g = elim Bgpd b bloop bcomp rec : {B : Type ℓ} → isGroupoid B → (b : B) → (bloop : G → b ≡ b) → ((g h : G) → Square (bloop g) (bloop (g · h)) refl (bloop h)) → (x : EM₁ (G , str)) → B rec Bgpd = elim (λ _ → Bgpd)
[GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α x : α U : Set α h_acc : AccPt x (𝓟 C) hU : U ∈ 𝓝 x ⊢ AccPt x (𝓟 (U ∩ C)) [PROOFSTEP] have : 𝓝[≠] x ≤ 𝓟 U := by rw [le_principal_iff] exact mem_nhdsWithin_of_mem_nhds hU [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α x : α U : Set α h_acc : AccPt x (𝓟 C) hU : U ∈ 𝓝 x ⊢ 𝓝[{x}ᶜ] x ≤ 𝓟 U [PROOFSTEP] rw [le_principal_iff] [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α x : α U : Set α h_acc : AccPt x (𝓟 C) hU : U ∈ 𝓝 x ⊢ U ∈ 𝓝[{x}ᶜ] x [PROOFSTEP] exact mem_nhdsWithin_of_mem_nhds hU [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α x : α U : Set α h_acc : AccPt x (𝓟 C) hU : U ∈ 𝓝 x this : 𝓝[{x}ᶜ] x ≤ 𝓟 U ⊢ AccPt x (𝓟 (U ∩ C)) [PROOFSTEP] rw [AccPt, ← inf_principal, ← inf_assoc, inf_of_le_left this] [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α x : α U : Set α h_acc : AccPt x (𝓟 C) hU : U ∈ 𝓝 x this : 𝓝[{x}ᶜ] x ≤ 𝓟 U ⊢ NeBot (𝓝[{x}ᶜ] x ⊓ 𝓟 C) [PROOFSTEP] exact h_acc [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α ⊢ Preperfect C ↔ ∀ (x : α), x ∈ C → ∀ (U : Set α), U ∈ 𝓝 x → ∃ y, y ∈ U ∩ C ∧ y ≠ x [PROOFSTEP] simp only [Preperfect, accPt_iff_nhds] [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C U : Set α hC : Preperfect C hU : IsOpen U ⊢ Preperfect (U ∩ C) [PROOFSTEP] rintro x ⟨xU, xC⟩ [GOAL] case intro α : Type u_1 inst✝ : TopologicalSpace α C U : Set α hC : Preperfect C hU : IsOpen U x : α xU : x ∈ U xC : x ∈ C ⊢ AccPt x (𝓟 (U ∩ C)) [PROOFSTEP] apply (hC _ xC).nhds_inter [GOAL] case intro α : Type u_1 inst✝ : TopologicalSpace α C U : Set α hC : Preperfect C hU : IsOpen U x : α xU : x ∈ U xC : x ∈ C ⊢ U ∈ 𝓝 x [PROOFSTEP] exact hU.mem_nhds xU [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C ⊢ Perfect (closure C) [PROOFSTEP] constructor [GOAL] case closed α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C ⊢ IsClosed (closure C) [PROOFSTEP] exact isClosed_closure [GOAL] case acc α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C ⊢ Preperfect (closure C) [PROOFSTEP] intro x hx [GOAL] case acc α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ closure C ⊢ AccPt x (𝓟 (closure C)) [PROOFSTEP] by_cases h : x ∈ C [GOAL] case pos α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ closure C h : x ∈ C ⊢ AccPt x (𝓟 (closure C)) [PROOFSTEP] apply AccPt.mono _ (principal_mono.mpr subset_closure) [GOAL] case neg α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ closure C h : ¬x ∈ C ⊢ AccPt x (𝓟 (closure C)) [PROOFSTEP] apply AccPt.mono _ (principal_mono.mpr subset_closure) [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ closure C h : x ∈ C ⊢ AccPt x (𝓟 C) [PROOFSTEP] exact hC _ h [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ closure C h : ¬x ∈ C ⊢ AccPt x (𝓟 C) [PROOFSTEP] have : { x }ᶜ ∩ C = C := by simp [h] [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ closure C h : ¬x ∈ C ⊢ {x}ᶜ ∩ C = C [PROOFSTEP] simp [h] [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ closure C h : ¬x ∈ C this : {x}ᶜ ∩ C = C ⊢ AccPt x (𝓟 C) [PROOFSTEP] rw [AccPt, nhdsWithin, inf_assoc, inf_principal, this] [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ closure C h : ¬x ∈ C this : {x}ᶜ ∩ C = C ⊢ NeBot (𝓝 x ⊓ 𝓟 C) [PROOFSTEP] rw [closure_eq_cluster_pts] at hx [GOAL] α : Type 
u_1 inst✝ : TopologicalSpace α C : Set α hC : Preperfect C x : α hx : x ∈ {a | ClusterPt a (𝓟 C)} h : ¬x ∈ C this : {x}ᶜ ∩ C = C ⊢ NeBot (𝓝 x ⊓ 𝓟 C) [PROOFSTEP] exact hx [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α ⊢ Preperfect C ↔ Perfect (closure C) [PROOFSTEP] constructor [GOAL] case mp α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α ⊢ Preperfect C → Perfect (closure C) [PROOFSTEP] intro h [GOAL] case mpr α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α ⊢ Perfect (closure C) → Preperfect C [PROOFSTEP] intro h [GOAL] case mp α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Preperfect C ⊢ Perfect (closure C) [PROOFSTEP] exact h.perfect_closure [GOAL] case mpr α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) ⊢ Preperfect C [PROOFSTEP] intro x xC [GOAL] case mpr α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) x : α xC : x ∈ C ⊢ AccPt x (𝓟 C) [PROOFSTEP] have H : AccPt x (𝓟 (closure C)) := h.acc _ (subset_closure xC) [GOAL] case mpr α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) x : α xC : x ∈ C H : AccPt x (𝓟 (closure C)) ⊢ AccPt x (𝓟 C) [PROOFSTEP] rw [accPt_iff_frequently] at * [GOAL] case mpr α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) x : α xC : x ∈ C H : ∃ᶠ (y : α) in 𝓝 x, y ≠ x ∧ y ∈ closure C ⊢ ∃ᶠ (y : α) in 𝓝 x, y ≠ x ∧ y ∈ C [PROOFSTEP] have : ∀ y, y ≠ x ∧ y ∈ closure C → ∃ᶠ z in 𝓝 y, z ≠ x ∧ z ∈ C := by rintro y ⟨hyx, yC⟩ simp only [← mem_compl_singleton_iff, and_comm, ← frequently_nhdsWithin_iff, hyx.nhdsWithin_compl_singleton, ← mem_closure_iff_frequently] exact yC [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) x : α xC : x ∈ C H : ∃ᶠ (y : α) in 𝓝 x, y ≠ x ∧ y ∈ closure C ⊢ ∀ (y : α), y ≠ x ∧ y ∈ closure C → ∃ᶠ (z : α) in 𝓝 y, z ≠ x ∧ z ∈ C [PROOFSTEP] rintro y ⟨hyx, yC⟩ [GOAL] case intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) x : α xC : x ∈ C H : ∃ᶠ (y : α) in 𝓝 x, y ≠ x ∧ y ∈ closure C y : α hyx : y ≠ x yC : y ∈ closure C ⊢ ∃ᶠ (z : α) in 𝓝 y, z ≠ x ∧ z ∈ C [PROOFSTEP] simp only [← mem_compl_singleton_iff, and_comm, ← frequently_nhdsWithin_iff, hyx.nhdsWithin_compl_singleton, ← mem_closure_iff_frequently] [GOAL] case intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) x : α xC : x ∈ C H : ∃ᶠ (y : α) in 𝓝 x, y ≠ x ∧ y ∈ closure C y : α hyx : y ≠ x yC : y ∈ closure C ⊢ y ∈ closure C [PROOFSTEP] exact yC [GOAL] case mpr α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) x : α xC : x ∈ C H : ∃ᶠ (y : α) in 𝓝 x, y ≠ x ∧ y ∈ closure C this : ∀ (y : α), y ≠ x ∧ y ∈ closure C → ∃ᶠ (z : α) in 𝓝 y, z ≠ x ∧ z ∈ C ⊢ ∃ᶠ (y : α) in 𝓝 x, y ≠ x ∧ y ∈ C [PROOFSTEP] rw [← frequently_frequently_nhds] [GOAL] case mpr α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T1Space α h : Perfect (closure C) x : α xC : x ∈ C H : ∃ᶠ (y : α) in 𝓝 x, y ≠ x ∧ y ∈ closure C this : ∀ (y : α), y ≠ x ∧ y ∈ closure C → ∃ᶠ (z : α) in 𝓝 y, z ≠ x ∧ z ∈ C ⊢ ∃ᶠ (y : α) in 𝓝 x, ∃ᶠ (x_1 : α) in 𝓝 y, x_1 ≠ x ∧ x_1 ∈ C [PROOFSTEP] exact H.mono this [GOAL] α : Type u_1 inst✝ : TopologicalSpace α C U : Set α hC : Perfect C x : α xC : x ∈ C xU : x ∈ U Uop : IsOpen U ⊢ Perfect (closure (U ∩ C)) ∧ Set.Nonempty (closure (U ∩ C)) [PROOFSTEP] constructor 
[GOAL] case left α : Type u_1 inst✝ : TopologicalSpace α C U : Set α hC : Perfect C x : α xC : x ∈ C xU : x ∈ U Uop : IsOpen U ⊢ Perfect (closure (U ∩ C)) [PROOFSTEP] apply Preperfect.perfect_closure [GOAL] case left.hC α : Type u_1 inst✝ : TopologicalSpace α C U : Set α hC : Perfect C x : α xC : x ∈ C xU : x ∈ U Uop : IsOpen U ⊢ Preperfect (U ∩ C) [PROOFSTEP] exact hC.acc.open_inter Uop [GOAL] case right α : Type u_1 inst✝ : TopologicalSpace α C U : Set α hC : Perfect C x : α xC : x ∈ C xU : x ∈ U Uop : IsOpen U ⊢ Set.Nonempty (closure (U ∩ C)) [PROOFSTEP] apply Nonempty.closure [GOAL] case right.a α : Type u_1 inst✝ : TopologicalSpace α C U : Set α hC : Perfect C x : α xC : x ∈ C xU : x ∈ U Uop : IsOpen U ⊢ Set.Nonempty (U ∩ C) [PROOFSTEP] exact ⟨x, ⟨xU, xC⟩⟩ [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C hnonempty : Set.Nonempty C ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C) ∧ Disjoint C₀ C₁ [PROOFSTEP] cases' hnonempty with y yC [GOAL] case intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C) ∧ Disjoint C₀ C₁ [PROOFSTEP] obtain ⟨x, xC, hxy⟩ : ∃ x ∈ C, x ≠ y := by have := hC.acc _ yC rw [accPt_iff_nhds] at this rcases this univ univ_mem with ⟨x, xC, hxy⟩ exact ⟨x, xC.2, hxy⟩ [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C ⊢ ∃ x, x ∈ C ∧ x ≠ y [PROOFSTEP] have := hC.acc _ yC [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C this : AccPt y (𝓟 C) ⊢ ∃ x, x ∈ C ∧ x ≠ y [PROOFSTEP] rw [accPt_iff_nhds] at this [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C this : ∀ (U : Set α), U ∈ 𝓝 y → ∃ y_1, y_1 ∈ U ∩ C ∧ y_1 ≠ y ⊢ ∃ x, x ∈ C ∧ x ≠ y [PROOFSTEP] rcases this univ univ_mem with ⟨x, xC, hxy⟩ [GOAL] case intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C this : ∀ (U : Set α), U ∈ 𝓝 y → ∃ y_1, y_1 ∈ U ∩ C ∧ y_1 ≠ y x : α xC : x ∈ univ ∩ C hxy : x ≠ y ⊢ ∃ x, x ∈ C ∧ x ≠ y [PROOFSTEP] exact ⟨x, xC.2, hxy⟩ [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C) ∧ Disjoint C₀ C₁ [PROOFSTEP] obtain ⟨U, xU, Uop, V, yV, Vop, hUV⟩ := exists_open_nhds_disjoint_closure hxy [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C) ∧ Disjoint C₀ C₁ [PROOFSTEP] use closure (U ∩ C), closure (V ∩ C) [GOAL] case h α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ (Perfect (closure (U ∩ C)) ∧ Set.Nonempty (closure (U ∩ C)) ∧ closure (U ∩ C) ⊆ C) ∧ (Perfect (closure (V ∩ C)) ∧ Set.Nonempty (closure (V ∩ C)) ∧ 
closure (V ∩ C) ⊆ C) ∧ Disjoint (closure (U ∩ C)) (closure (V ∩ C)) [PROOFSTEP] constructor [GOAL] case h.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ Perfect (closure (U ∩ C)) ∧ Set.Nonempty (closure (U ∩ C)) ∧ closure (U ∩ C) ⊆ C [PROOFSTEP] rw [← and_assoc] [GOAL] case h.right α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ (Perfect (closure (V ∩ C)) ∧ Set.Nonempty (closure (V ∩ C)) ∧ closure (V ∩ C) ⊆ C) ∧ Disjoint (closure (U ∩ C)) (closure (V ∩ C)) [PROOFSTEP] rw [← and_assoc] [GOAL] case h.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ (Perfect (closure (U ∩ C)) ∧ Set.Nonempty (closure (U ∩ C))) ∧ closure (U ∩ C) ⊆ C [PROOFSTEP] refine' ⟨hC.closure_nhds_inter x xC xU Uop, _⟩ [GOAL] case h.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ closure (U ∩ C) ⊆ C [PROOFSTEP] rw [hC.closed.closure_subset_iff] [GOAL] case h.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ U ∩ C ⊆ C [PROOFSTEP] exact inter_subset_right _ _ [GOAL] case h.right α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ ((Perfect (closure (V ∩ C)) ∧ Set.Nonempty (closure (V ∩ C))) ∧ closure (V ∩ C) ⊆ C) ∧ Disjoint (closure (U ∩ C)) (closure (V ∩ C)) [PROOFSTEP] constructor [GOAL] case h.right.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ (Perfect (closure (V ∩ C)) ∧ Set.Nonempty (closure (V ∩ C))) ∧ closure (V ∩ C) ⊆ C [PROOFSTEP] refine' ⟨hC.closure_nhds_inter y yC yV Vop, _⟩ [GOAL] case h.right.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ closure (V ∩ C) ⊆ C [PROOFSTEP] rw [hC.closed.closure_subset_iff] [GOAL] case h.right.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ V ∩ C ⊆ C [PROOFSTEP] exact inter_subset_right _ _ [GOAL] case h.right.right α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : 
α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ Disjoint (closure (U ∩ C)) (closure (V ∩ C)) [PROOFSTEP] apply Disjoint.mono _ _ hUV [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ closure (U ∩ C) ≤ closure U [PROOFSTEP] apply closure_mono [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ closure (V ∩ C) ≤ closure V [PROOFSTEP] apply closure_mono [GOAL] case h α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ U ∩ C ⊆ U [PROOFSTEP] exact inter_subset_left _ _ [GOAL] case h α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : T25Space α hC : Perfect C y : α yC : y ∈ C x : α xC : x ∈ C hxy : x ≠ y U : Set α xU : x ∈ U Uop : IsOpen U V : Set α yV : y ∈ V Vop : IsOpen V hUV : Disjoint (closure U) (closure V) ⊢ V ∩ C ⊆ V [PROOFSTEP] exact inter_subset_left _ _ [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C ⊢ ∃ V D, Set.Countable V ∧ Perfect D ∧ C = V ∪ D [PROOFSTEP] obtain ⟨b, bct, _, bbasis⟩ := TopologicalSpace.exists_countable_basis α [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b ⊢ ∃ V D, Set.Countable V ∧ Perfect D ∧ C = V ∪ D [PROOFSTEP] let v := {U ∈ b | (U ∩ C).Countable} [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} ⊢ ∃ V D, Set.Countable V ∧ Perfect D ∧ C = V ∪ D [PROOFSTEP] let V := ⋃ U ∈ v, U [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U ⊢ ∃ V D, Set.Countable V ∧ Perfect D ∧ C = V ∪ D [PROOFSTEP] let D := C \ V [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V ⊢ ∃ V D, Set.Countable V ∧ Perfect D ∧ C = V ∪ D [PROOFSTEP] have Vct : (V ∩ C).Countable := by simp only [iUnion_inter, mem_sep_iff] apply Countable.biUnion · exact Countable.mono (inter_subset_left _ _) bct · exact inter_subset_right _ _ [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set 
α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V ⊢ Set.Countable (V ∩ C) [PROOFSTEP] simp only [iUnion_inter, mem_sep_iff] [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V ⊢ Set.Countable (⋃ (i : Set α) (_ : i ∈ b ∧ Set.Countable (i ∩ C)), i ∩ C) [PROOFSTEP] apply Countable.biUnion [GOAL] case hs α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V ⊢ Set.Countable fun a => a ∈ b ∧ Set.Countable (a ∩ C) [PROOFSTEP] exact Countable.mono (inter_subset_left _ _) bct [GOAL] case a α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V ⊢ ∀ (a : Set α), (a ∈ fun a => a ∈ b ∧ Set.Countable (a ∩ C)) → Set.Countable (a ∩ C) [PROOFSTEP] exact inter_subset_right _ _ [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) ⊢ ∃ V D, Set.Countable V ∧ Perfect D ∧ C = V ∪ D [PROOFSTEP] refine' ⟨V ∩ C, D, Vct, ⟨_, _⟩, _⟩ [GOAL] case intro.intro.intro.refine'_1 α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) ⊢ IsClosed D [PROOFSTEP] refine' hclosed.sdiff (isOpen_biUnion fun _ ↦ _) [GOAL] case intro.intro.intro.refine'_1 α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x✝ : Set α ⊢ x✝ ∈ v → IsOpen x✝ [PROOFSTEP] exact fun ⟨Ub, _⟩ ↦ IsTopologicalBasis.isOpen bbasis Ub [GOAL] case intro.intro.intro.refine'_2 α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) ⊢ Preperfect D [PROOFSTEP] rw [preperfect_iff_nhds] [GOAL] case intro.intro.intro.refine'_2 α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ 
Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) ⊢ ∀ (x : α), x ∈ D → ∀ (U : Set α), U ∈ 𝓝 x → ∃ y, y ∈ U ∩ D ∧ y ≠ x [PROOFSTEP] intro x xD E xE [GOAL] case intro.intro.intro.refine'_2 α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x ⊢ ∃ y, y ∈ E ∩ D ∧ y ≠ x [PROOFSTEP] have : ¬(E ∩ D).Countable := by intro h obtain ⟨U, hUb, xU, hU⟩ : ∃ U ∈ b, x ∈ U ∧ U ⊆ E := (IsTopologicalBasis.mem_nhds_iff bbasis).mp xE have hU_cnt : (U ∩ C).Countable := by apply @Countable.mono _ _ (E ∩ D ∪ V ∩ C) · rintro y ⟨yU, yC⟩ by_cases h : y ∈ V · exact mem_union_right _ (mem_inter h yC) · exact mem_union_left _ (mem_inter (hU yU) ⟨yC, h⟩) exact Countable.union h Vct have : U ∈ v := ⟨hUb, hU_cnt⟩ apply xD.2 exact mem_biUnion this xU [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x ⊢ ¬Set.Countable (E ∩ D) [PROOFSTEP] intro h [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) ⊢ False [PROOFSTEP] obtain ⟨U, hUb, xU, hU⟩ : ∃ U ∈ b, x ∈ U ∧ U ⊆ E := (IsTopologicalBasis.mem_nhds_iff bbasis).mp xE [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E ⊢ False [PROOFSTEP] have hU_cnt : (U ∩ C).Countable := by apply @Countable.mono _ _ (E ∩ D ∪ V ∩ C) · rintro y ⟨yU, yC⟩ by_cases h : y ∈ V · exact mem_union_right _ (mem_inter h yC) · exact mem_union_left _ (mem_inter (hU yU) ⟨yC, h⟩) exact Countable.union h Vct [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E ⊢ Set.Countable (U ∩ C) [PROOFSTEP] apply @Countable.mono _ _ (E ∩ D ∪ V ∩ C) [GOAL] case h α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b 
∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E ⊢ U ∩ C ⊆ E ∩ D ∪ V ∩ C [PROOFSTEP] rintro y ⟨yU, yC⟩ [GOAL] case h.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E y : α yU : y ∈ U yC : y ∈ C ⊢ y ∈ E ∩ D ∪ V ∩ C [PROOFSTEP] by_cases h : y ∈ V [GOAL] case pos α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h✝ : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E y : α yU : y ∈ U yC : y ∈ C h : y ∈ V ⊢ y ∈ E ∩ D ∪ V ∩ C [PROOFSTEP] exact mem_union_right _ (mem_inter h yC) [GOAL] case neg α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h✝ : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E y : α yU : y ∈ U yC : y ∈ C h : ¬y ∈ V ⊢ y ∈ E ∩ D ∪ V ∩ C [PROOFSTEP] exact mem_union_left _ (mem_inter (hU yU) ⟨yC, h⟩) [GOAL] case a α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E ⊢ Set.Countable (E ∩ D ∪ V ∩ C) [PROOFSTEP] exact Countable.union h Vct [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E hU_cnt : Set.Countable (U ∩ C) ⊢ False [PROOFSTEP] have : U ∈ v := ⟨hUb, hU_cnt⟩ [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E hU_cnt : Set.Countable (U 
∩ C) this : U ∈ v ⊢ False [PROOFSTEP] apply xD.2 [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x h : Set.Countable (E ∩ D) U : Set α hUb : U ∈ b xU : x ∈ U hU : U ⊆ E hU_cnt : Set.Countable (U ∩ C) this : U ∈ v ⊢ x ∈ V [PROOFSTEP] exact mem_biUnion this xU [GOAL] case intro.intro.intro.refine'_2 α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x this : ¬Set.Countable (E ∩ D) ⊢ ∃ y, y ∈ E ∩ D ∧ y ≠ x [PROOFSTEP] by_contra' h [GOAL] case intro.intro.intro.refine'_2 α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) x : α xD : x ∈ D E : Set α xE : E ∈ 𝓝 x this : ¬Set.Countable (E ∩ D) h : ∀ (y : α), y ∈ E ∩ (C \ V) → y = x ⊢ False [PROOFSTEP] exact absurd (Countable.mono h (Set.countable_singleton _)) this [GOAL] case intro.intro.intro.refine'_3 α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C b : Set (Set α) bct : Set.Countable b left✝ : ¬∅ ∈ b bbasis : IsTopologicalBasis b v : Set (Set α) := {U | U ∈ b ∧ Set.Countable (U ∩ C)} V : Set α := ⋃ (U : Set α) (_ : U ∈ v), U D : Set α := C \ V Vct : Set.Countable (V ∩ C) ⊢ C = V ∩ C ∪ D [PROOFSTEP] rw [inter_comm, inter_union_diff] [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C ⊢ ∃ D, Perfect D ∧ Set.Nonempty D ∧ D ⊆ C [PROOFSTEP] rcases exists_countable_union_perfect_of_isClosed hclosed with ⟨V, D, Vct, Dperf, VD⟩ [GOAL] case intro.intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V ∪ D ⊢ ∃ D, Perfect D ∧ Set.Nonempty D ∧ D ⊆ C [PROOFSTEP] refine' ⟨D, ⟨Dperf, _⟩⟩ [GOAL] case intro.intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V ∪ D ⊢ Set.Nonempty D ∧ D ⊆ C [PROOFSTEP] constructor [GOAL] case intro.intro.intro.intro.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V ∪ D ⊢ Set.Nonempty D [PROOFSTEP] rw [nonempty_iff_ne_empty] [GOAL] case intro.intro.intro.intro.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V ∪ D ⊢ D ≠ ∅ [PROOFSTEP] by_contra h [GOAL] case 
intro.intro.intro.intro.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V ∪ D h : D = ∅ ⊢ False [PROOFSTEP] rw [h, union_empty] at VD [GOAL] case intro.intro.intro.intro.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V h : D = ∅ ⊢ False [PROOFSTEP] rw [VD] at hunc [GOAL] case intro.intro.intro.intro.left α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C V : Set α hunc : ¬Set.Countable V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V h : D = ∅ ⊢ False [PROOFSTEP] contradiction [GOAL] case intro.intro.intro.intro.right α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V ∪ D ⊢ D ⊆ C [PROOFSTEP] rw [VD] [GOAL] case intro.intro.intro.intro.right α : Type u_1 inst✝¹ : TopologicalSpace α C : Set α inst✝ : SecondCountableTopology α hclosed : IsClosed C hunc : ¬Set.Countable C V D : Set α Vct : Set.Countable V Dperf : Perfect D VD : C = V ∪ D ⊢ D ⊆ V ∪ D [PROOFSTEP] exact subset_union_right _ _ [GOAL] α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C ⊢ let D := closure (EMetric.ball x (ε / 2) ∩ C); Perfect D ∧ Set.Nonempty D ∧ D ⊆ C ∧ EMetric.diam D ≤ ε [PROOFSTEP] have : x ∈ EMetric.ball x (ε / 2) := by apply EMetric.mem_ball_self rw [ENNReal.div_pos_iff] exact ⟨ne_of_gt ε_pos, by norm_num⟩ [GOAL] α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C ⊢ x ∈ EMetric.ball x (ε / 2) [PROOFSTEP] apply EMetric.mem_ball_self [GOAL] case h α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C ⊢ 0 < ε / 2 [PROOFSTEP] rw [ENNReal.div_pos_iff] [GOAL] case h α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C ⊢ ε ≠ 0 ∧ 2 ≠ ⊤ [PROOFSTEP] exact ⟨ne_of_gt ε_pos, by norm_num⟩ [GOAL] α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C ⊢ 2 ≠ ⊤ [PROOFSTEP] norm_num [GOAL] α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this : x ∈ EMetric.ball x (ε / 2) ⊢ let D := closure (EMetric.ball x (ε / 2) ∩ C); Perfect D ∧ Set.Nonempty D ∧ D ⊆ C ∧ EMetric.diam D ≤ ε [PROOFSTEP] have := hC.closure_nhds_inter x xC this EMetric.isOpen_ball [GOAL] α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ let D := closure (EMetric.ball x (ε / 2) ∩ C); Perfect D ∧ Set.Nonempty D ∧ D ⊆ C ∧ EMetric.diam D ≤ ε [PROOFSTEP] refine' ⟨this.1, this.2, _, _⟩ [GOAL] case refine'_1 α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ closure (EMetric.ball x (ε / 2) ∩ C) ⊆ C [PROOFSTEP] rw [IsClosed.closure_subset_iff hC.closed] [GOAL] case refine'_1 α : Type u_1 inst✝ : MetricSpace α C : 
Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ EMetric.ball x (ε / 2) ∩ C ⊆ C [PROOFSTEP] apply inter_subset_right [GOAL] case refine'_2 α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ EMetric.diam (closure (EMetric.ball x (ε / 2) ∩ C)) ≤ ε [PROOFSTEP] rw [EMetric.diam_closure] [GOAL] case refine'_2 α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ EMetric.diam (EMetric.ball x (ε / 2) ∩ C) ≤ ε [PROOFSTEP] apply le_trans (EMetric.diam_mono (inter_subset_left _ _)) [GOAL] case refine'_2 α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ EMetric.diam (EMetric.ball x (ε / 2)) ≤ ε [PROOFSTEP] convert EMetric.diam_ball (x := x) [GOAL] case h.e'_4 α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ ε = 2 * (ε / 2) [PROOFSTEP] rw [mul_comm, ENNReal.div_mul_cancel] [GOAL] case h.e'_4.h0 α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ 2 ≠ 0 [PROOFSTEP] norm_num [GOAL] case h.e'_4.hI α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ ε_pos : 0 < ε x : α xC : x ∈ C this✝ : x ∈ EMetric.ball x (ε / 2) this : Perfect (closure (EMetric.ball x (ε / 2) ∩ C)) ∧ Set.Nonempty (closure (EMetric.ball x (ε / 2) ∩ C)) ⊢ 2 ≠ ⊤ [PROOFSTEP] norm_num [GOAL] α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C ∧ EMetric.diam C₀ ≤ ε) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C ∧ EMetric.diam C₁ ≤ ε) ∧ Disjoint C₀ C₁ [PROOFSTEP] rcases hC.splitting hnonempty with ⟨D₀, D₁, ⟨perf0, non0, sub0⟩, ⟨perf1, non1, sub1⟩, hdisj⟩ [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε D₀ D₁ : Set α perf0 : Perfect D₀ non0 : Set.Nonempty D₀ sub0 : D₀ ⊆ C hdisj : Disjoint D₀ D₁ perf1 : Perfect D₁ non1 : Set.Nonempty D₁ sub1 : D₁ ⊆ C ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C ∧ EMetric.diam C₀ ≤ ε) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C ∧ EMetric.diam C₁ ≤ ε) ∧ Disjoint C₀ C₁ [PROOFSTEP] cases' non0 with x₀ hx₀ [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε D₀ D₁ : Set α perf0 : Perfect D₀ sub0 : D₀ ⊆ C hdisj : Disjoint D₀ D₁ perf1 : Perfect D₁ non1 : Set.Nonempty D₁ sub1 : D₁ ⊆ C x₀ : α hx₀ : x₀ ∈ D₀ ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C ∧ 
EMetric.diam C₀ ≤ ε) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C ∧ EMetric.diam C₁ ≤ ε) ∧ Disjoint C₀ C₁ [PROOFSTEP] cases' non1 with x₁ hx₁ [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε D₀ D₁ : Set α perf0 : Perfect D₀ sub0 : D₀ ⊆ C hdisj : Disjoint D₀ D₁ perf1 : Perfect D₁ sub1 : D₁ ⊆ C x₀ : α hx₀ : x₀ ∈ D₀ x₁ : α hx₁ : x₁ ∈ D₁ ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C ∧ EMetric.diam C₀ ≤ ε) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C ∧ EMetric.diam C₁ ≤ ε) ∧ Disjoint C₀ C₁ [PROOFSTEP] rcases perf0.small_diam_aux ε_pos hx₀ with ⟨perf0', non0', sub0', diam0⟩ [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε D₀ D₁ : Set α perf0 : Perfect D₀ sub0 : D₀ ⊆ C hdisj : Disjoint D₀ D₁ perf1 : Perfect D₁ sub1 : D₁ ⊆ C x₀ : α hx₀ : x₀ ∈ D₀ x₁ : α hx₁ : x₁ ∈ D₁ perf0' : Perfect (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) non0' : Set.Nonempty (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) sub0' : closure (EMetric.ball x₀ (ε / 2) ∩ D₀) ⊆ D₀ diam0 : EMetric.diam (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) ≤ ε ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C ∧ EMetric.diam C₀ ≤ ε) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C ∧ EMetric.diam C₁ ≤ ε) ∧ Disjoint C₀ C₁ [PROOFSTEP] rcases perf1.small_diam_aux ε_pos hx₁ with ⟨perf1', non1', sub1', diam1⟩ [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε D₀ D₁ : Set α perf0 : Perfect D₀ sub0 : D₀ ⊆ C hdisj : Disjoint D₀ D₁ perf1 : Perfect D₁ sub1 : D₁ ⊆ C x₀ : α hx₀ : x₀ ∈ D₀ x₁ : α hx₁ : x₁ ∈ D₁ perf0' : Perfect (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) non0' : Set.Nonempty (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) sub0' : closure (EMetric.ball x₀ (ε / 2) ∩ D₀) ⊆ D₀ diam0 : EMetric.diam (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) ≤ ε perf1' : Perfect (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) non1' : Set.Nonempty (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) sub1' : closure (EMetric.ball x₁ (ε / 2) ∩ D₁) ⊆ D₁ diam1 : EMetric.diam (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) ≤ ε ⊢ ∃ C₀ C₁, (Perfect C₀ ∧ Set.Nonempty C₀ ∧ C₀ ⊆ C ∧ EMetric.diam C₀ ≤ ε) ∧ (Perfect C₁ ∧ Set.Nonempty C₁ ∧ C₁ ⊆ C ∧ EMetric.diam C₁ ≤ ε) ∧ Disjoint C₀ C₁ [PROOFSTEP] refine' ⟨closure (EMetric.ball x₀ (ε / 2) ∩ D₀), closure (EMetric.ball x₁ (ε / 2) ∩ D₁), ⟨perf0', non0', sub0'.trans sub0, diam0⟩, ⟨perf1', non1', sub1'.trans sub1, diam1⟩, _⟩ [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε D₀ D₁ : Set α perf0 : Perfect D₀ sub0 : D₀ ⊆ C hdisj : Disjoint D₀ D₁ perf1 : Perfect D₁ sub1 : D₁ ⊆ C x₀ : α hx₀ : x₀ ∈ D₀ x₁ : α hx₁ : x₁ ∈ D₁ perf0' : Perfect (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) non0' : Set.Nonempty (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) sub0' : closure (EMetric.ball x₀ (ε / 2) ∩ D₀) ⊆ D₀ diam0 : EMetric.diam (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) ≤ ε perf1' : Perfect (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) non1' : Set.Nonempty (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) sub1' : closure (EMetric.ball x₁ (ε / 2) ∩ D₁) ⊆ D₁ diam1 : EMetric.diam (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) ≤ ε ⊢ 
Disjoint (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) [PROOFSTEP] apply Disjoint.mono _ _ hdisj [GOAL] α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε D₀ D₁ : Set α perf0 : Perfect D₀ sub0 : D₀ ⊆ C hdisj : Disjoint D₀ D₁ perf1 : Perfect D₁ sub1 : D₁ ⊆ C x₀ : α hx₀ : x₀ ∈ D₀ x₁ : α hx₁ : x₁ ∈ D₁ perf0' : Perfect (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) non0' : Set.Nonempty (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) sub0' : closure (EMetric.ball x₀ (ε / 2) ∩ D₀) ⊆ D₀ diam0 : EMetric.diam (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) ≤ ε perf1' : Perfect (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) non1' : Set.Nonempty (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) sub1' : closure (EMetric.ball x₁ (ε / 2) ∩ D₁) ⊆ D₁ diam1 : EMetric.diam (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) ≤ ε ⊢ closure (EMetric.ball x₀ (ε / 2) ∩ D₀) ≤ D₀ [PROOFSTEP] assumption [GOAL] α : Type u_1 inst✝ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C ε_pos : 0 < ε D₀ D₁ : Set α perf0 : Perfect D₀ sub0 : D₀ ⊆ C hdisj : Disjoint D₀ D₁ perf1 : Perfect D₁ sub1 : D₁ ⊆ C x₀ : α hx₀ : x₀ ∈ D₀ x₁ : α hx₁ : x₁ ∈ D₁ perf0' : Perfect (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) non0' : Set.Nonempty (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) sub0' : closure (EMetric.ball x₀ (ε / 2) ∩ D₀) ⊆ D₀ diam0 : EMetric.diam (closure (EMetric.ball x₀ (ε / 2) ∩ D₀)) ≤ ε perf1' : Perfect (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) non1' : Set.Nonempty (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) sub1' : closure (EMetric.ball x₁ (ε / 2) ∩ D₁) ⊆ D₁ diam1 : EMetric.diam (closure (EMetric.ball x₁ (ε / 2) ∩ D₁)) ≤ ε ⊢ closure (EMetric.ball x₁ (ε / 2) ∩ D₁) ≤ D₁ [PROOFSTEP] assumption [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] obtain ⟨u, -, upos', hu⟩ := exists_seq_strictAnti_tendsto' (zero_lt_one' ℝ≥0∞) [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] have upos := fun n => (upos' n).1 [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] let P := Subtype fun E : Set α => Perfect E ∧ E.Nonempty [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] choose C0 C1 h0 h1 hdisj using fun {C : Set α} (hC : Perfect C) (hnonempty : C.Nonempty) {ε : ℝ≥0∞} (hε : 0 < ε) => hC.small_diam_splitting hnonempty hε [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → 
Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] let DP : List Bool → P := fun l => by induction' l with a l ih; · exact ⟨C, ⟨hC, hnonempty⟩⟩ cases a · use C0 ih.property.1 ih.property.2 (upos l.length.succ) exact ⟨(h0 _ _ _).1, (h0 _ _ _).2.1⟩ use C1 ih.property.1 ih.property.2 (upos l.length.succ) exact ⟨(h1 _ _ _).1, (h1 _ _ _).2.1⟩ [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) l : List Bool ⊢ P [PROOFSTEP] induction' l with a l ih [GOAL] case nil α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) ⊢ P [PROOFSTEP] exact ⟨C, ⟨hC, hnonempty⟩⟩ [GOAL] case cons α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε 
h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) a : Bool l : List Bool ih : P ⊢ P [PROOFSTEP] cases a [GOAL] case cons.false α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) l : List Bool ih : P ⊢ P [PROOFSTEP] use C0 ih.property.1 ih.property.2 (upos l.length.succ) [GOAL] case property α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) l : List Bool ih : P ⊢ Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) [PROOFSTEP] exact ⟨(h0 _ _ _).1, (h0 _ _ _).2.1⟩ [GOAL] case cons.true α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : 
Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) l : List Bool ih : P ⊢ P [PROOFSTEP] use C1 ih.property.1 ih.property.2 (upos l.length.succ) [GOAL] case property α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) l : List Bool ih : P ⊢ Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) [PROOFSTEP] exact ⟨(h1 _ _ _).1, (h1 _ _ _).2.1⟩ [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] let D : List Bool → Set α := fun l => (DP l).val [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ 
(n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] have hanti : ClosureAntitone D := by refine' Antitone.closureAntitone _ fun l => (DP l).property.1.closed intro l a cases a · exact (h0 _ _ _).2.2.1 exact (h1 _ _ _).2.2.1 [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ 
: Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) ⊢ ClosureAntitone D [PROOFSTEP] refine' Antitone.closureAntitone _ fun l => (DP l).property.1.closed [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) ⊢ CantorScheme.Antitone D [PROOFSTEP] intro l a [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = 
a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) l : List Bool a : Bool ⊢ D (a :: l) ⊆ D l [PROOFSTEP] cases a [GOAL] case false α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) l : List Bool ⊢ D (false :: l) ⊆ D l [PROOFSTEP] exact (h0 _ _ _).2.2.1 [GOAL] case true α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ 
(List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) l : List Bool ⊢ D (true :: l) ⊆ D l [PROOFSTEP] exact (h1 _ _ _).2.2.1 [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] have hdiam : VanishingDiam D := by intro x apply tendsto_of_tendsto_of_tendsto_of_le_of_le' tendsto_const_nhds hu · simp rw [eventually_atTop] refine' ⟨1, fun m (hm : 1 ≤ m) => _⟩ rw [Nat.one_le_iff_ne_zero] at hm rcases Nat.exists_eq_succ_of_ne_zero hm with ⟨n, rfl⟩ dsimp cases x n · convert (h0 _ _ _).2.2.2 rw [PiNat.res_length] convert (h1 _ _ _).2.2.2 rw [PiNat.res_length] [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ 
EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D ⊢ VanishingDiam D [PROOFSTEP] intro x [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool ⊢ Tendsto (fun n => EMetric.diam (D (PiNat.res x n))) atTop (𝓝 0) [PROOFSTEP] apply tendsto_of_tendsto_of_tendsto_of_le_of_le' tendsto_const_nhds hu [GOAL] case hgf α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : 
Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool ⊢ ∀ᶠ (b : ℕ) in atTop, 0 ≤ EMetric.diam (D (PiNat.res x b)) [PROOFSTEP] simp [GOAL] case hfh α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = 
a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool ⊢ ∀ᶠ (b : ℕ) in atTop, EMetric.diam (D (PiNat.res x b)) ≤ u b [PROOFSTEP] rw [eventually_atTop] [GOAL] case hfh α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool ⊢ ∃ a, ∀ (b : ℕ), b ≥ a → EMetric.diam (D (PiNat.res x b)) ≤ u b [PROOFSTEP] refine' ⟨1, fun m (hm : 1 ≤ m) => _⟩ [GOAL] case hfh α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : 
Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool m : ℕ hm : 1 ≤ m ⊢ EMetric.diam (D (PiNat.res x m)) ≤ u m [PROOFSTEP] rw [Nat.one_le_iff_ne_zero] at hm [GOAL] case hfh α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool m : ℕ hm : m ≠ 0 ⊢ EMetric.diam (D (PiNat.res x m)) ≤ u m [PROOFSTEP] rcases Nat.exists_eq_succ_of_ne_zero hm with ⟨n, rfl⟩ [GOAL] case hfh.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP 
: List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool n : ℕ hm : Nat.succ n ≠ 0 ⊢ EMetric.diam (D (PiNat.res x (Nat.succ n))) ≤ u (Nat.succ n) [PROOFSTEP] dsimp [GOAL] case hfh.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool n : ℕ hm : Nat.succ n ≠ 0 ⊢ EMetric.diam ↑(Bool.rec (motive := fun t => x n = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = x n) ▸ { val := C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ 
(List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))), property := (_ : Perfect (C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n))))) ∧ Set.Nonempty (C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u 
(Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))))) }) (fun h => (_ : true = x n) ▸ { val := C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))), property := (_ : Perfect (C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : 
Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n))))) ∧ Set.Nonempty (C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))))) }) (x n) (_ : x n = x n)) ≤ u (Nat.succ n) [PROOFSTEP] cases x n [GOAL] case hfh.intro.false α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε 
: ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool n : ℕ hm : Nat.succ n ≠ 0 ⊢ EMetric.diam ↑(Bool.rec (motive := fun t => false = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = false) ▸ { val := C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u 
(Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))), property := (_ : Perfect (C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n))))) ∧ Set.Nonempty (C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u 
(Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))))) }) (fun h => (_ : true = false) ▸ { val := C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))), property := (_ : Perfect (C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) 
(_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n))))) ∧ Set.Nonempty (C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))))) }) false (_ : false = false)) ≤ u (Nat.succ n) [PROOFSTEP] convert (h0 _ _ _).2.2.2 [GOAL] case h.e'_4.h.e'_1.h.e'_1 α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : 
Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool n : ℕ hm : Nat.succ n ≠ 0 ⊢ n = List.length (PiNat.res x n) [PROOFSTEP] rw [PiNat.res_length] [GOAL] case hfh.intro.true α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool n : ℕ hm : Nat.succ n ≠ 0 ⊢ EMetric.diam ↑(Bool.rec (motive := fun t => true = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = true) ▸ { val := C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty 
↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))), property := (_ : Perfect (C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n))))) ∧ Set.Nonempty (C0 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty 
↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))))) }) (fun h => (_ : true = true) ▸ { val := C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))), property := (_ : Perfect (C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : 
Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n))))) ∧ Set.Nonempty (C1 (_ : Perfect ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : Set.Nonempty ↑(List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.rec (motive := fun t => a = t → { E // Perfect E ∧ Set.Nonempty E }) (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) a (_ : a = a)) (PiNat.res x n))) (_ : 0 < u (Nat.succ (List.length (PiNat.res x n)))))) }) true (_ : true = true)) ≤ u (Nat.succ n) [PROOFSTEP] convert (h1 _ _ _).2.2.2 [GOAL] case h.e'_4.h.e'_1.h.e'_1 α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect 
E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D x : ℕ → Bool n : ℕ hm : Nat.succ n ≠ 0 ⊢ n = List.length (PiNat.res x n) [PROOFSTEP] rw [PiNat.res_length] [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) 
hanti : ClosureAntitone D hdiam : VanishingDiam D ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] have hdisj' : CantorScheme.Disjoint D := by rintro l (a | a) (b | b) hab <;> try contradiction · exact hdisj _ _ _ exact (hdisj _ _ _).symm [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D ⊢ CantorScheme.Disjoint D [PROOFSTEP] rintro l (a | a) (b | b) hab [GOAL] case false.false α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ 
Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : false ≠ false ⊢ Disjoint (D (false :: l)) (D (false :: l)) [PROOFSTEP] try contradiction [GOAL] case false.false α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : false ≠ false ⊢ Disjoint (D (false :: l)) (D (false :: l)) [PROOFSTEP] contradiction [GOAL] case false.true α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < 
ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : false ≠ true ⊢ Disjoint (D (false :: l)) (D (true :: l)) [PROOFSTEP] try contradiction [GOAL] case false.true α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : false ≠ true ⊢ Disjoint (D (false :: l)) (D (true :: l)) [PROOFSTEP] contradiction [GOAL] case true.false α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < 
ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : true ≠ false ⊢ Disjoint (D (true :: l)) (D (false :: l)) [PROOFSTEP] try contradiction [GOAL] case true.false α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : true ≠ false ⊢ Disjoint (D (true :: l)) (D (false :: l)) [PROOFSTEP] contradiction [GOAL] case 
true.true α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : true ≠ true ⊢ Disjoint (D (true :: l)) (D (true :: l)) [PROOFSTEP] try contradiction [GOAL] case true.true α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length 
l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : true ≠ true ⊢ Disjoint (D (true :: l)) (D (true :: l)) [PROOFSTEP] contradiction [GOAL] case false.true α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : false ≠ true ⊢ Disjoint (D (false :: l)) (D (true :: l)) [PROOFSTEP] exact hdisj _ _ _ [GOAL] case true.false α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => 
(_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D l : List Bool hab : true ≠ false ⊢ Disjoint (D (true :: l)) (D (false :: l)) [PROOFSTEP] exact (hdisj _ _ _).symm [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] have hdom : ∀ {x : ℕ → Bool}, x ∈ (inducedMap D).1 := fun {x} => by rw [hanti.map_of_vanishingDiam hdiam fun l => (DP l).property.2] apply mem_univ [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε 
⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D x : ℕ → Bool ⊢ x ∈ (inducedMap D).fst [PROOFSTEP] rw [hanti.map_of_vanishingDiam hdiam fun l => (DP l).property.2] [GOAL] α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D x : ℕ → Bool ⊢ x ∈ univ [PROOFSTEP] apply mem_univ [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ 
hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D hdom : ∀ {x : ℕ → Bool}, x ∈ (inducedMap D).fst ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Injective f [PROOFSTEP] refine' ⟨fun x => (inducedMap D).2 ⟨x, hdom⟩, _, _, _⟩ [GOAL] case intro.intro.intro.refine'_1 α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < 
u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D hdom : ∀ {x : ℕ → Bool}, x ∈ (inducedMap D).fst ⊢ (range fun x => Sigma.snd (inducedMap D) { val := x, property := (_ : x ∈ (inducedMap D).fst) }) ⊆ C [PROOFSTEP] rintro y ⟨x, rfl⟩ [GOAL] case intro.intro.intro.refine'_1.intro α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D hdom : ∀ {x : ℕ → Bool}, x ∈ (inducedMap D).fst x : ℕ → Bool ⊢ (fun x => Sigma.snd (inducedMap D) { val := x, property := (_ : x ∈ (inducedMap D).fst) }) x ∈ C [PROOFSTEP] exact map_mem ⟨_, hdom⟩ 0 [GOAL] case intro.intro.intro.refine'_2 α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ 
{C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D hdom : ∀ {x : ℕ → Bool}, x ∈ (inducedMap D).fst ⊢ Continuous fun x => Sigma.snd (inducedMap D) { val := x, property := (_ : x ∈ (inducedMap D).fst) } [PROOFSTEP] apply hdiam.map_continuous.comp [GOAL] case intro.intro.intro.refine'_2 α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D hdom : ∀ {x : ℕ → Bool}, x ∈ (inducedMap D).fst ⊢ Continuous fun x => { val := x, property := (_ : x ∈ (inducedMap D).fst) } [PROOFSTEP] continuity [GOAL] case intro.intro.intro.refine'_3 α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ 
→ ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D hdom : ∀ {x : ℕ → Bool}, x ∈ (inducedMap D).fst ⊢ Injective fun x => Sigma.snd (inducedMap D) { val := x, property := (_ : x ∈ (inducedMap D).fst) } [PROOFSTEP] intro x y hxy [GOAL] case intro.intro.intro.refine'_3 α : Type u_1 inst✝¹ : MetricSpace α C : Set α hC : Perfect C ε : ℝ≥0∞ hnonempty : Set.Nonempty C inst✝ : CompleteSpace α u : ℕ → ℝ≥0∞ upos' : ∀ (n : ℕ), u n ∈ Ioo 0 1 hu : Tendsto u atTop (𝓝 0) upos : ∀ (n : ℕ), 0 < u n P : Type u_1 := { E // Perfect E ∧ Set.Nonempty E } C0 C1 : {C : Set α} → Perfect C → Set.Nonempty C → {ε : ℝ≥0∞} → 0 < ε → Set α h0 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C0 hC hnonempty hε) ∧ Set.Nonempty (C0 hC hnonempty hε) ∧ C0 hC hnonempty hε ⊆ C ∧ EMetric.diam (C0 hC hnonempty hε) ≤ ε h1 : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Perfect (C1 hC hnonempty hε) ∧ Set.Nonempty (C1 hC hnonempty hε) ∧ C1 hC hnonempty hε ⊆ C ∧ EMetric.diam (C1 hC hnonempty hε) ≤ ε hdisj : ∀ {C : Set α} (hC : Perfect C) (hnonempty : Set.Nonempty C) {ε : ℝ≥0∞} (hε : 0 < ε), Disjoint (C0 hC hnonempty hε) (C1 hC hnonempty hε) DP : List Bool → P := fun l => List.rec { val := C, property := (_ : Perfect C ∧ Set.Nonempty C) } (fun a l ih => Bool.casesOn (motive := fun t => a = t → P) a (fun h => (_ : false = a) ▸ { val := C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ : Perfect (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C0 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (fun h => (_ : true = a) ▸ { val := C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))), property := (_ 
: Perfect (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l)))) ∧ Set.Nonempty (C1 (_ : Perfect ↑ih) (_ : Set.Nonempty ↑ih) (_ : 0 < u (Nat.succ (List.length l))))) }) (_ : a = a)) l D : List Bool → Set α := fun l => ↑(DP l) hanti : ClosureAntitone D hdiam : VanishingDiam D hdisj' : CantorScheme.Disjoint D hdom : ∀ {x : ℕ → Bool}, x ∈ (inducedMap D).fst x y : ℕ → Bool hxy : (fun x => Sigma.snd (inducedMap D) { val := x, property := (_ : x ∈ (inducedMap D).fst) }) x = (fun x => Sigma.snd (inducedMap D) { val := x, property := (_ : x ∈ (inducedMap D).fst) }) y ⊢ x = y [PROOFSTEP] simpa only [← Subtype.val_inj] using hdisj'.map_injective hxy [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α inst✝ : PolishSpace α C : Set α hC : IsClosed C hunc : ¬Set.Countable C ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Function.Injective f [PROOFSTEP] letI := upgradePolishSpace α [GOAL] α : Type u_1 inst✝¹ : TopologicalSpace α inst✝ : PolishSpace α C : Set α hC : IsClosed C hunc : ¬Set.Countable C this : UpgradedPolishSpace α := upgradePolishSpace α ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Function.Injective f [PROOFSTEP] obtain ⟨D, hD, Dnonempty, hDC⟩ := exists_perfect_nonempty_of_isClosed_of_not_countable hC hunc [GOAL] case intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α inst✝ : PolishSpace α C : Set α hC : IsClosed C hunc : ¬Set.Countable C this : UpgradedPolishSpace α := upgradePolishSpace α D : Set α hD : Perfect D Dnonempty : Set.Nonempty D hDC : D ⊆ C ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Function.Injective f [PROOFSTEP] obtain ⟨f, hfD, hf⟩ := hD.exists_nat_bool_injection Dnonempty [GOAL] case intro.intro.intro.intro.intro α : Type u_1 inst✝¹ : TopologicalSpace α inst✝ : PolishSpace α C : Set α hC : IsClosed C hunc : ¬Set.Countable C this : UpgradedPolishSpace α := upgradePolishSpace α D : Set α hD : Perfect D Dnonempty : Set.Nonempty D hDC : D ⊆ C f : (ℕ → Bool) → α hfD : range f ⊆ D hf : Continuous f ∧ Function.Injective f ⊢ ∃ f, range f ⊆ C ∧ Continuous f ∧ Function.Injective f [PROOFSTEP] exact ⟨f, hfD.trans hDC, hf⟩
Savor the world-shaking flavor of a big and bold California classic with Michael David Earthquake Petite Sirah. This daring tannin-rich wine blends essences of fresh raspberries with hints of dried black currants for a uniquely refreshing experience. Ideal for a backyard cookout, this wine pairs especially well with barbecue beef, cheeseburgers, and roasted pork.
------------------------------------------------ -- | -- Module : Numeric.Matrix.Integral.SmithNF -- Copyright : (c) Jun Yoshida 2019 -- License : BSD3 -- -- Compute Smith normal form -- ------------------------------------------------ module Numeric.Matrix.Integral.SmithNF where import Control.Monad (when, unless, forM_, guard, join) import Control.Monad.ST import Control.Monad.Loops (whileM_, whileJust_) import Data.List as L import Data.STRef import Numeric.LinearAlgebra as LA import Numeric.LinearAlgebra.Devel --import Numeric.Matrix.Integral.HNFLLL (hnfLLLST) import Numeric.Matrix.Integral.NormalForms (hermiteNFST) -- | Find off-diagonal entries and calculate min of min{row,col} among them findDiagSz :: LA.Matrix LA.Z -> Maybe Int findDiagSz ma = let offdiag = filter (uncurry (/=)) $ LA.find (/=0) ma in L.foldl' bin Nothing offdiag where bin Nothing (i,j) = Just (min i j) bin (Just k) (i,j) = Just $ L.foldl' min k [i,j] -- | Compute "pre-Smith" form with a function calculating Hermite normal forms -- "Pre-Smith" means diag(c_1,c_2,..) with not necessarily c_i|c_{i+1} preSmithNFST :: STMatrix s LA.Z -> STMatrix s LA.Z -> STMatrix s LA.Z -> ST s () preSmithNFST stMatUL stMatA stMatURt = do whileJust_ (liftSTMatrix findDiagSz stMatA) $ \diagSz -> do -- Extract a submatrix containing all the off-diagonal entries. stMatAex <- thawMatrix =<< extractMatrix stMatA (FromRow diagSz) (FromCol diagSz) stMatULex <- thawMatrix =<< extractMatrix stMatUL (FromRow diagSz) AllCols stMatURtex <- thawMatrix =<< extractMatrix stMatURt (FromRow diagSz) AllCols -- Compute the Hermite normal forms of the extracted submatrix and of the transpose of the result. --hnfLLLST stMatULex stMatAex join $ hermiteNFST <$> newMatrix 0 0 0 <*> pure stMatULex <*> pure stMatAex stMatAext <- thawMatrix =<< (LA.tr' <$> freezeMatrix stMatAex) --hnfLLLST stMatURtex stMatAext join $ hermiteNFST <$> newMatrix 0 0 0 <*> pure stMatURtex <*> pure stMatAext -- Write out the result setMatrix stMatA diagSz diagSz =<< (LA.tr' <$> freezeMatrix stMatAext) setMatrix stMatUL diagSz 0 =<< freezeMatrix stMatULex setMatrix stMatURt diagSz 0 =<< freezeMatrix stMatURtex isHeadGCD :: LA.Vector LA.Z -> Bool isHeadGCD = isHeadGCD' . LA.toList where isHeadGCD' [] = True isHeadGCD' (x:xs) = L.foldl' (\b y -> b && rem y x == 0) True xs -- Compute the submatrix containing all the non-zero diagonal entries extractMaxNZDiag :: LA.Matrix LA.Z -> (Int,LA.Matrix LA.Z) extractMaxNZDiag mx = runST $ do dRef <- newSTRef 0 let p d = d < uncurry min (LA.size mx) && (mx!d!d) /= 0 whileM_ (p <$> readSTRef dRef) $ modifySTRef' dRef (+1) d <- readSTRef dRef return (d,LA.subMatrix (0,0) (d,d) mx) -- | Normalize diagonals to form a divisible chain; i.e. c_1 | c_2 | c_3 | ... normalizeST :: STMatrix s LA.Z -> STMatrix s LA.Z -> STMatrix s LA.Z -> ST s () normalizeST stMatUL stMatA stMatURt = do (dlen,matD) <- liftSTMatrix extractMaxNZDiag stMatA stMatD <- thawMatrix matD stMatULex <- thawMatrix =<< extractMatrix stMatUL (RowRange 0 (dlen-1)) AllCols stMatURtex <- thawMatrix =<< extractMatrix stMatURt (RowRange 0 (dlen-1)) AllCols normalizeST' stMatULex stMatD stMatURtex setMatrix stMatA 0 0 =<< freezeMatrix stMatD setMatrix stMatUL 0 0 =<< freezeMatrix stMatULex setMatrix stMatURt 0 0 =<< freezeMatrix stMatURtex where normalizeST' stMatULex stMatD stMatURtex = do b <- (isHeadGCD . 
LA.takeDiag) <$> freezeMatrix stMatD n <- liftSTMatrix LA.rows stMatD unless b $ do forM_ [1..(n-1)] $ \i -> do readMatrix stMatD i i >>= writeMatrix stMatD i 0 rowOper (AXPY 1 i 0 AllCols) stMatURtex preSmithNFST stMatULex stMatD stMatURtex when (n > 1) $ do stMatDex <- thawMatrix =<< extractMatrix stMatD (FromRow 1) (FromCol 1) stMatULexex <- thawMatrix =<< extractMatrix stMatULex (FromRow 1) AllCols stMatURtexex <- thawMatrix =<< extractMatrix stMatURtex (FromRow 1) AllCols normalizeST' stMatULexex stMatDex stMatURtexex setMatrix stMatD 1 1 =<< freezeMatrix stMatDex setMatrix stMatULex 1 0 =<< freezeMatrix stMatULexex setMatrix stMatURtex 1 0 =<< freezeMatrix stMatURtexex -- | Compute Smith normal form together with transform unimodular matrices. smithNF :: LA.Matrix LA.Z -> (LA.Matrix LA.Z, LA.Matrix LA.Z, LA.Matrix LA.Z) smithNF matA = runST $ do stMatA <- thawMatrix matA stMatUL <- thawMatrix $ LA.ident (LA.rows matA) stMatURt <- thawMatrix $ LA.ident (LA.cols matA) preSmithNFST stMatUL stMatA stMatURt normalizeST stMatUL stMatA stMatURt resMatS <- freezeMatrix stMatA resMatUL <- freezeMatrix stMatUL resMatURt <- freezeMatrix stMatURt return (resMatUL, resMatS, LA.tr' resMatURt) -- | For debug. -- Compute "pre-Smith form." preSmithNF :: LA.Matrix LA.Z -> (LA.Matrix LA.Z, LA.Matrix LA.Z, LA.Matrix LA.Z) preSmithNF matA = runST $ do stMatA <- thawMatrix matA stMatUL <- thawMatrix $ LA.ident (LA.rows matA) stMatURt <- thawMatrix $ LA.ident (LA.cols matA) preSmithNFST stMatUL stMatA stMatURt resMatS <- freezeMatrix stMatA resMatUL <- freezeMatrix stMatUL resMatURt <- freezeMatrix stMatURt return (resMatUL, resMatS, LA.tr' resMatURt)
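A minimal usage sketch of `smithNF`, added here for illustration only; it is not part of the original module. It assumes the module above is on the build path and that the returned factors satisfy s == ul · a · ur (the sketch checks that convention at runtime rather than taking it for granted); the example matrix is arbitrary.
module Main where

import Numeric.LinearAlgebra as LA
import Numeric.Matrix.Integral.SmithNF (smithNF)

main :: IO ()
main = do
  -- An arbitrary 3x3 integer matrix; its Smith normal form is expected to be diag(2, 6, 12).
  let a = LA.fromLists [[2, 4, 4], [-6, 6, 12], [10, -4, -16]] :: LA.Matrix LA.Z
      (ul, s, ur) = smithNF a
  -- The diagonal of s should form a divisibility chain c_1 | c_2 | c_3.
  print s
  -- Check the assumed factorisation convention s == ul * a * ur.
  print (LA.toLists s == LA.toLists (ul LA.<> a LA.<> ur))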
(* Title: HOL/IMPP/EvenOdd.thy Author: David von Oheimb, TUM *) section {* Example of mutually recursive procedures verified with Hoare logic *} theory EvenOdd imports Main Misc begin axiomatization Even :: pname and Odd :: pname where Even_neq_Odd: "Even ~= Odd" and Arg_neq_Res: "Arg ~= Res" definition evn :: com where "evn = (IF (%s. s<Arg> = 0) THEN Loc Res:==(%s. 0) ELSE(Loc Res:=CALL Odd(%s. s<Arg> - 1);; Loc Arg:=CALL Odd(%s. s<Arg> - 1);; Loc Res:==(%s. s<Res> * s<Arg>)))" definition odd :: com where "odd = (IF (%s. s<Arg> = 0) THEN Loc Res:==(%s. 1) ELSE(Loc Res:=CALL Even (%s. s<Arg> - 1)))" defs bodies_def: "bodies == [(Even,evn),(Odd,odd)]" definition Z_eq_Arg_plus :: "nat => nat assn" ("Z=Arg+_" [50]50) where "Z=Arg+n = (%Z s. Z = s<Arg>+n)" definition Res_ok :: "nat assn" where "Res_ok = (%Z s. even Z = (s<Res> = 0))" subsection "Arg, Res" declare Arg_neq_Res [simp] Arg_neq_Res [THEN not_sym, simp] declare Even_neq_Odd [simp] Even_neq_Odd [THEN not_sym, simp] lemma Z_eq_Arg_plus_def2: "(Z=Arg+n) Z s = (Z = s<Arg>+n)" apply (unfold Z_eq_Arg_plus_def) apply (rule refl) done lemma Res_ok_def2: "Res_ok Z s = (even Z = (s<Res> = 0))" apply (unfold Res_ok_def) apply (rule refl) done lemmas Arg_Res_simps = Z_eq_Arg_plus_def2 Res_ok_def2 lemma body_Odd [simp]: "body Odd = Some odd" apply (unfold body_def bodies_def) apply auto done lemma body_Even [simp]: "body Even = Some evn" apply (unfold body_def bodies_def) apply auto done subsection "verification" lemma Odd_lemma: "{{Z=Arg+0}. BODY Even .{Res_ok}}|-{Z=Arg+Suc 0}. odd .{Res_ok}" apply (unfold odd_def) apply (rule hoare_derivs.If) apply (rule hoare_derivs.Ass [THEN conseq1]) apply (clarsimp simp: Arg_Res_simps) apply (rule export_s) apply (rule hoare_derivs.Call [THEN conseq1]) apply (rule_tac P = "Z=Arg+Suc (Suc 0) " in conseq12) apply (rule single_asm) apply (auto simp: Arg_Res_simps) done lemma Even_lemma: "{{Z=Arg+1}. BODY Odd .{Res_ok}}|-{Z=Arg+0}. evn .{Res_ok}" apply (unfold evn_def) apply (rule hoare_derivs.If) apply (rule hoare_derivs.Ass [THEN conseq1]) apply (clarsimp simp: Arg_Res_simps) apply (rule hoare_derivs.Comp) apply (rule_tac [2] hoare_derivs.Ass) apply clarsimp apply (rule_tac Q = "%Z s. ?P Z s & Res_ok Z s" in hoare_derivs.Comp) apply (rule export_s) apply (rule_tac I1 = "%Z l. Z = l Arg & 0 < Z" and Q1 = "Res_ok" in Call_invariant [THEN conseq12]) apply (rule single_asm [THEN conseq2]) apply (clarsimp simp: Arg_Res_simps) apply (force simp: Arg_Res_simps) apply (rule export_s) apply (rule_tac I1 = "%Z l. even Z = (l Res = 0) " and Q1 = "%Z s. even Z = (s<Arg> = 0) " in Call_invariant [THEN conseq12]) apply (rule single_asm [THEN conseq2]) apply (clarsimp simp: Arg_Res_simps) apply (force simp: Arg_Res_simps) done lemma Even_ok_N: "{}|-{Z=Arg+0}. BODY Even .{Res_ok}" apply (rule BodyN) apply (simp (no_asm)) apply (rule Even_lemma [THEN hoare_derivs.cut]) apply (rule BodyN) apply (simp (no_asm)) apply (rule Odd_lemma [THEN thin]) apply (simp (no_asm)) done lemma Even_ok_S: "{}|-{Z=Arg+0}. BODY Even .{Res_ok}" apply (rule conseq1) apply (rule_tac Procs = "{Odd, Even}" and pn = "Even" and P = "%pn. Z=Arg+ (if pn = Odd then 1 else 0) " and Q = "%pn. Res_ok" in Body1) apply auto apply (rule hoare_derivs.insert) apply (rule Odd_lemma [THEN thin]) apply (simp (no_asm)) apply (rule Even_lemma [THEN thin]) apply (simp (no_asm)) done end
/- Copyright (c) 2022 Yaël Dillies. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies -/ import group_theory.group_action.defs /-! # Sum instances for additive and multiplicative actions This file defines instances for additive and multiplicative actions on the binary `sum` type. ## See also * `group_theory.group_action.pi` * `group_theory.group_action.prod` * `group_theory.group_action.sigma` -/ variables {M N P α β γ : Type*} namespace sum section has_smul variables [has_smul M α] [has_smul M β] [has_smul N α] [has_smul N β] (a : M) (b : α) (c : β) (x : α ⊕ β) @[to_additive sum.has_vadd] instance : has_smul M (α ⊕ β) := ⟨λ a, sum.map ((•) a) ((•) a)⟩ @[to_additive] lemma smul_def : a • x = x.map ((•) a) ((•) a) := rfl @[simp, to_additive] lemma smul_inl : a • (inl b : α ⊕ β) = inl (a • b) := rfl @[simp, to_additive] lemma smul_inr : a • (inr c : α ⊕ β) = inr (a • c) := rfl @[simp, to_additive] lemma smul_swap : (a • x).swap = a • x.swap := by cases x; refl instance [has_smul M N] [is_scalar_tower M N α] [is_scalar_tower M N β] : is_scalar_tower M N (α ⊕ β) := ⟨λ a b x, by { cases x, exacts [congr_arg inl (smul_assoc _ _ _), congr_arg inr (smul_assoc _ _ _)] }⟩ @[to_additive] instance [smul_comm_class M N α] [smul_comm_class M N β] : smul_comm_class M N (α ⊕ β) := ⟨λ a b x, by { cases x, exacts [congr_arg inl (smul_comm _ _ _), congr_arg inr (smul_comm _ _ _)] }⟩ instance [has_smul Mᵐᵒᵖ α] [has_smul Mᵐᵒᵖ β] [is_central_scalar M α] [is_central_scalar M β] : is_central_scalar M (α ⊕ β) := ⟨λ a x, by { cases x, exacts [congr_arg inl (op_smul_eq_smul _ _), congr_arg inr (op_smul_eq_smul _ _)] }⟩ @[to_additive] instance has_faithful_smul_left [has_faithful_smul M α] : has_faithful_smul M (α ⊕ β) := ⟨λ x y h, eq_of_smul_eq_smul $ λ a : α, by injection h (inl a)⟩ @[to_additive] instance has_faithful_smul_right [has_faithful_smul M β] : has_faithful_smul M (α ⊕ β) := ⟨λ x y h, eq_of_smul_eq_smul $ λ b : β, by injection h (inr b)⟩ end has_smul @[to_additive] instance {m : monoid M} [mul_action M α] [mul_action M β] : mul_action M (α ⊕ β) := { mul_smul := λ a b x, by { cases x, exacts [congr_arg inl (mul_smul _ _ _), congr_arg inr (mul_smul _ _ _)] }, one_smul := λ x, by { cases x, exacts [congr_arg inl (one_smul _ _), congr_arg inr (one_smul _ _)] } } end sum
using FinancialMath using Test @testset "FinancialMath" begin include("core.jl") end
-- ------------------------------------------------------------- [ Recover.idr ] -- Module : Recover.idr -- Copyright : (c) Jan de Muijnck-Hughes -- License : see LICENSE -- --------------------------------------------------------------------- [ EOH ] module Sif.Pattern.Recover import Data.AVL.Dict import Data.DList import GRL.Lang.GLang import XML.DOM import Edda import Edda.Writer.Org import Freyja import Sif.Types import Sif.Pattern.Model import Sif.Pattern.API %access private blocks : Maybe EddaBody -> Maybe String blocks Nothing = Nothing blocks (Just b) = Just $ concatMap block b -- -------------------------------------------------------------------- [ Reqs ] recoverReq : SifBuilder impl -> (d : SifDomain) -> Requirement ty -> REQUIREMENT impl d recoverReq bob c r {ty} = mkRequirement bob c ty (inlines $ name r) (blocks $ Just (desc r)) -- ----------------------------------------------------------------- [ Problem ] recoverProblem : SifBuilder impl -> (d : SifDomain) -> Problem -> PROBLEM impl d recoverProblem bob c p = mkProblem bob c t d rs where t : String t = inlines (name p) d : Maybe String d = blocks (Just (desc p)) rs : REQUIREMENTS impl c rs = mapDList (\r => recoverReq bob c r) (reqs p) -- ------------------------------------------------------------------ [ Affect ] recoverAffect : SifBuilder impl -> (d : SifDomain) -> Affect -> AFFECT impl d recoverAffect bob c a = mkAffect bob c (cval a) r d where r : REQUIREMENT impl c r = recoverReq bob c (req a) d : Maybe String d = blocks (desc a) -- ------------------------------------------------------------------- [ Trait ] recoverTrait : SifBuilder impl -> (d : SifDomain) -> Trait ty -> TRAIT impl d recoverTrait bob c t {ty} = mkTrait bob c ty n d (sval t) as where n : String n = inlines (name t) d : Maybe String d = blocks (Just (desc t)) as : AFFECTS impl c as = map (\a => recoverAffect bob c a) (affects t) -- ---------------------------------------------------------------- [ Property ] recoverProperty : SifBuilder impl -> (d : SifDomain) -> Property -> PROPERTY impl d recoverProperty bob c p = mkProperty bob c n d ts where n : String n = inlines (name p) d : Maybe String d = blocks (Just (desc p)) ts : TRAITS impl c ts = mapDList (\t => recoverTrait bob c t) (traits p) -- ---------------------------------------------------------------- [ Solution ] recoverSolution : SifBuilder impl -> (d : SifDomain) -> Solution -> SOLUTION impl d recoverSolution bob c s = mkSolution bob c n d ps where n : String n = inlines (name s) d : Maybe String d = blocks (Just (desc s)) ps : PROPERTIES impl c ps = map (\p => recoverProperty bob c p) (properties s) -- -------------------------------------------------------------------- [ Main ] export fromFreya : SifBuilder impl -> PatternDoc -> (d ** PATTERN impl d) fromFreya bob doc {impl} = (c ** mkPattern bob c t d p s) where t : String t = inlines (name doc) d : Maybe String d = blocks (Just (summary doc)) c' : Context c' = (context doc) c : SifDomain c = MkDomain (inlines $ name c') (blocks (Just (desc c'))) p : PROBLEM impl c p = recoverProblem bob c (problem doc) s : SOLUTION impl c s = recoverSolution bob c (solution doc) -- --------------------------------------------------------------------- [ EOF ]
State Before: α : Type u inst✝² : CommGroup α inst✝¹ : LT α inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1 a b c d : α ⊢ a * b⁻¹ < c ↔ a < b * c State After: no goals Tactic: rw [← inv_mul_lt_iff_lt_mul, mul_comm]
[STATEMENT] lemma eqButC_sym: assumes "eqButC pap pap1" shows "eqButC pap1 pap" [PROOF STATE] proof (prove) goal (1 subgoal): 1. eqButC pap1 pap [PROOF STEP] apply(cases pap, cases pap1) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>x1 x2 x3 x4 x5 x6 x1a x2a x3a x4a x5a x6a. \<lbrakk>pap = Paper x1 x2 x3 x4 x5 x6; pap1 = Paper x1a x2a x3a x4a x5a x6a\<rbrakk> \<Longrightarrow> eqButC pap1 pap [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: eqButC pap pap1 goal (1 subgoal): 1. \<And>x1 x2 x3 x4 x5 x6 x1a x2a x3a x4a x5a x6a. \<lbrakk>pap = Paper x1 x2 x3 x4 x5 x6; pap1 = Paper x1a x2a x3a x4a x5a x6a\<rbrakk> \<Longrightarrow> eqButC pap1 pap [PROOF STEP] by auto
{-# LANGUAGE FlexibleContexts, NamedFieldPuns #-} module School.Train.SimpleDescentUpdate ( simpleDescentUpdate ) where import Numeric.LinearAlgebra (Container, Vector, add, scale) import School.Train.TrainState (TrainState(..)) import School.Train.UpdateParams (UpdateParams) import School.Types.PingPong (pingPongList, toPingPong) import School.Unit.UnitParams (UnitParams(..)) update :: (Container Vector a, Num a) => a -> UnitParams a -> UnitParams a -> UnitParams a update rate AffineParams { affineBias = bias , affineWeights = weights} AffineParams { affineBias = biasGrad , affineWeights = weightGrad } = AffineParams { affineBias, affineWeights } where affineBias = add bias (scale (-rate) biasGrad) affineWeights = add weights (scale (-rate) weightGrad) update _ _ _ = EmptyParams simpleDescentUpdate :: (Container Vector a, Num a) => UpdateParams a simpleDescentUpdate state@TrainState { paramDerivs , paramList , learningRate } = do newParamList <- toPingPong $ zipWith (update learningRate) (pingPongList paramList) paramDerivs return state { paramList = newParamList }
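The private `update` above applies one plain gradient-descent step per parameter block: new = old + (−rate) · grad. Below is a minimal, self-contained sketch of that same rule on a bare hmatrix vector; `descentStep` and the sample numbers are illustrative additions and not part of the School library.
import Numeric.LinearAlgebra (Vector, add, scale, vector)

-- One descent step on a single parameter vector, mirroring how `update`
-- treats affineBias and affineWeights: params + (-rate) * grads.
descentStep :: Double -> Vector Double -> Vector Double -> Vector Double
descentStep rate params grads = add params (scale (negate rate) grads)

main :: IO ()
main = print (descentStep 0.1 (vector [1, 2, 3]) (vector [10, 0, -10]))
-- With rate 0.1 the resulting vector is [0.0, 2.0, 4.0].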
State Before: α : Type u_1 β : Type ?u.589734 γ : Type ?u.589737 δ : Type ?u.589740 ι : Type ?u.589743 R : Type ?u.589746 R' : Type ?u.589749 m0 : MeasurableSpace α inst✝¹ : MeasurableSpace β inst✝ : MeasurableSpace γ μ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α s✝ s' t✝ : Set α s t : α → Prop hst : s =ᶠ[ae μ] t p : α → Prop ⊢ (∀ᵐ (x : α) ∂Measure.restrict μ s, p x) → ∀ᵐ (x : α) ∂Measure.restrict μ t, p x State After: no goals Tactic: simp [Measure.restrict_congr_set hst]
(* * Copyright 2014, NICTA * * This software may be distributed and modified according to the terms of * the BSD 2-Clause license. Note that NO WARRANTY is provided. * See "LICENSE_BSD2.txt" for details. * * @TAG(NICTA_BSD) *) (*Alternate apply command which displays "used" theorems in refinement step*) theory Apply_Trace_Cmd imports Apply_Trace keywords "apply_trace" :: prf_script begin ML\<open> val _ = Outer_Syntax.command @{command_keyword "apply_trace"} "initial refinement step (unstructured)" (Args.mode "only_names" -- (Scan.option (Parse.position Parse.cartouche)) -- Method.parse >> (fn ((on,query),text) => Toplevel.proofs (Apply_Trace.apply_results {silent_fail = false} (Pretty.writeln ooo (Apply_Trace.pretty_deps on query)) text))); \<close> lemmas [no_trace] = protectI protectD TrueI Eq_TrueI eq_reflection (* Test. *) lemma "(a \<and> b) = (b \<and> a)" apply_trace auto oops (* Test. *) lemma "(a \<and> b) = (b \<and> a)" apply_trace \<open>intro\<close> auto oops (* Local assumptions might mask real facts (or each other). Probably not an issue in practice.*) lemma assumes X: "b = a" assumes Y: "b = a" shows "b = a" apply_trace (rule Y) oops (* If any locale facts are accessible their local variant is assumed to the one that is used. *) locale Apply_Trace_foo = fixes b a assumes X: "b = a" begin lemma shows "b = a" "b = a" apply - apply_trace (rule Apply_Trace_foo.X) prefer 2 apply_trace (rule X) oops end experiment begin text \<open>Example of trace for grouped lemmas\<close> definition ex :: "nat set" where "ex = {1,2,3,4}" lemma v1: "1 \<in> ex" by (simp add: ex_def) lemma v2: "2 \<in> ex" by (simp add: ex_def) lemma v3: "3 \<in> ex" by (simp add: ex_def) text \<open>Group several lemmas in a single one\<close> lemmas vs = v1 v2 v3 lemma "2 \<in> ex" apply_trace (simp add: vs) oops end end
/- Copyright (c) 2021 Yury Kudryashov. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yury Kudryashov -/ import set_theory.cardinal.ordinal /-! # Cardinality of continuum In this file we define `cardinal.continuum` (notation: `𝔠`, localized in `cardinal`) to be `2 ^ ℵ₀`. We also prove some `simp` lemmas about cardinal arithmetic involving `𝔠`. ## Notation - `𝔠` : notation for `cardinal.continuum` in locale `cardinal`. -/ namespace cardinal universes u v open_locale cardinal /-- Cardinality of continuum. -/ def continuum : cardinal.{u} := 2 ^ aleph_0.{u} localized "notation `𝔠` := cardinal.continuum" in cardinal @[simp] lemma two_power_aleph_0 : 2 ^ aleph_0.{u} = continuum.{u} := rfl @[simp] lemma lift_continuum : lift.{v} 𝔠 = 𝔠 := by rw [←two_power_aleph_0, lift_two_power, lift_aleph_0, two_power_aleph_0] /-! ### Inequalities -/ lemma aleph_0_lt_continuum : ℵ₀ < 𝔠 := cantor ℵ₀ lemma aleph_0_le_continuum : ℵ₀ ≤ 𝔠 := aleph_0_lt_continuum.le lemma nat_lt_continuum (n : ℕ) : ↑n < 𝔠 := (nat_lt_aleph_0 n).trans aleph_0_lt_continuum lemma mk_set_nat : #(set ℕ) = 𝔠 := by simp lemma continuum_pos : 0 < 𝔠 := nat_lt_continuum 0 lemma continuum_ne_zero : 𝔠 ≠ 0 := continuum_pos.ne' lemma aleph_one_le_continuum : aleph 1 ≤ 𝔠 := by { rw ←succ_aleph_0, exact order.succ_le_of_lt aleph_0_lt_continuum } @[simp] theorem continuum_to_nat : continuum.to_nat = 0 := to_nat_apply_of_aleph_0_le aleph_0_le_continuum @[simp] theorem continuum_to_enat : continuum.to_enat = ⊤ := to_enat_apply_of_aleph_0_le aleph_0_le_continuum /-! ### Addition -/ @[simp] lemma aleph_0_add_continuum : ℵ₀ + 𝔠 = 𝔠 := add_eq_right aleph_0_le_continuum aleph_0_le_continuum @[simp] lemma continuum_add_aleph_0 : 𝔠 + ℵ₀ = 𝔠 := (add_comm _ _).trans aleph_0_add_continuum @[simp] lemma continuum_add_self : 𝔠 + 𝔠 = 𝔠 := add_eq_right aleph_0_le_continuum le_rfl @[simp] lemma nat_add_continuum (n : ℕ) : ↑n + 𝔠 = 𝔠 := add_eq_right aleph_0_le_continuum (nat_lt_continuum n).le @[simp] lemma continuum_add_nat (n : ℕ) : 𝔠 + n = 𝔠 := (add_comm _ _).trans (nat_add_continuum n) /-! ### Multiplication -/ @[simp] lemma continuum_mul_self : 𝔠 * 𝔠 = 𝔠 := mul_eq_left aleph_0_le_continuum le_rfl continuum_ne_zero @[simp] lemma continuum_mul_aleph_0 : 𝔠 * ℵ₀ = 𝔠 := mul_eq_left aleph_0_le_continuum aleph_0_le_continuum aleph_0_ne_zero @[simp] lemma aleph_0_mul_continuum : ℵ₀ * 𝔠 = 𝔠 := (mul_comm _ _).trans continuum_mul_aleph_0 @[simp] lemma nat_mul_continuum {n : ℕ} (hn : n ≠ 0) : ↑n * 𝔠 = 𝔠 := mul_eq_right aleph_0_le_continuum (nat_lt_continuum n).le (nat.cast_ne_zero.2 hn) @[simp] lemma continuum_mul_nat {n : ℕ} (hn : n ≠ 0) : 𝔠 * n = 𝔠 := (mul_comm _ _).trans (nat_mul_continuum hn) /-! ### Power -/ @[simp] lemma aleph_0_power_aleph_0 : aleph_0.{u} ^ aleph_0.{u} = 𝔠 := power_self_eq le_rfl @[simp] lemma nat_power_aleph_0 {n : ℕ} (hn : 2 ≤ n) : (n ^ aleph_0.{u} : cardinal.{u}) = 𝔠 := nat_power_eq le_rfl hn @[simp] lemma continuum_power_aleph_0 : continuum.{u} ^ aleph_0.{u} = 𝔠 := by rw [←two_power_aleph_0, ←power_mul, mul_eq_left le_rfl le_rfl aleph_0_ne_zero] end cardinal
module SFGReservoirs using DifferentialEquations using SFGAnalysis using StaticArrays using Optim include("sfspectra.jl") include("reservoirs.jl") export model, State end # module
State Before: α : Type u_1 β : Type ?u.17119 inst✝ : PartialOrder α a b c : α h : a ⩿ b ⊢ Icc a b = {a, b} State After: case h α : Type u_1 β : Type ?u.17119 inst✝ : PartialOrder α a b c✝ : α h : a ⩿ b c : α ⊢ c ∈ Icc a b ↔ c ∈ {a, b} Tactic: ext c State Before: case h α : Type u_1 β : Type ?u.17119 inst✝ : PartialOrder α a b c✝ : α h : a ⩿ b c : α ⊢ c ∈ Icc a b ↔ c ∈ {a, b} State After: no goals Tactic: exact h.le_and_le_iff
[STATEMENT] lemma \<oo>\<^sub>P_in_cfl: "\<oo>\<^sub>P \<alpha> \<Longrightarrow> in_cfl \<alpha> []" [PROOF STATE] proof (prove) goal (1 subgoal): 1. local.\<oo>\<^sub>P \<alpha> \<Longrightarrow> local.in_cfl \<alpha> [] [PROOF STEP] by (induct \<alpha> rule: \<oo>\<^sub>P.induct) (auto intro!: in_cfl.intros elim: fBexI[rotated])
[GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 inst✝¹ : DecidableEq ι inst✝ : Zero M i : ι m : M ⊢ toDFinsupp (single i m) = DFinsupp.single i m [PROOFSTEP] ext [GOAL] case h ι : Type u_1 R : Type u_2 M : Type u_3 inst✝¹ : DecidableEq ι inst✝ : Zero M i : ι m : M i✝ : ι ⊢ ↑(toDFinsupp (single i m)) i✝ = ↑(DFinsupp.single i m) i✝ [PROOFSTEP] simp [Finsupp.single_apply, DFinsupp.single_apply] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 inst✝² : DecidableEq ι inst✝¹ : Zero M inst✝ : (m : M) → Decidable (m ≠ 0) f : ι →₀ M ⊢ DFinsupp.support (Finsupp.toDFinsupp f) = f.support [PROOFSTEP] ext [GOAL] case a ι : Type u_1 R : Type u_2 M : Type u_3 inst✝² : DecidableEq ι inst✝¹ : Zero M inst✝ : (m : M) → Decidable (m ≠ 0) f : ι →₀ M a✝ : ι ⊢ a✝ ∈ DFinsupp.support (Finsupp.toDFinsupp f) ↔ a✝ ∈ f.support [PROOFSTEP] simp [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 inst✝² : DecidableEq ι inst✝¹ : Zero M inst✝ : (m : M) → Decidable (m ≠ 0) f : Π₀ (x : ι), M i : ι ⊢ i ∈ support f ↔ ↑f i ≠ 0 [PROOFSTEP] simp only [DFinsupp.mem_support_iff] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 inst✝² : DecidableEq ι inst✝¹ : Zero M inst✝ : (m : M) → Decidable (m ≠ 0) f : Π₀ (x : ι), M ⊢ (toFinsupp f).support = support f [PROOFSTEP] ext [GOAL] case a ι : Type u_1 R : Type u_2 M : Type u_3 inst✝² : DecidableEq ι inst✝¹ : Zero M inst✝ : (m : M) → Decidable (m ≠ 0) f : Π₀ (x : ι), M a✝ : ι ⊢ a✝ ∈ (toFinsupp f).support ↔ a✝ ∈ support f [PROOFSTEP] simp [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 inst✝² : DecidableEq ι inst✝¹ : Zero M inst✝ : (m : M) → Decidable (m ≠ 0) i : ι m : M ⊢ toFinsupp (single i m) = Finsupp.single i m [PROOFSTEP] ext [GOAL] case h ι : Type u_1 R : Type u_2 M : Type u_3 inst✝² : DecidableEq ι inst✝¹ : Zero M inst✝ : (m : M) → Decidable (m ≠ 0) i : ι m : M a✝ : ι ⊢ ↑(toFinsupp (single i m)) a✝ = ↑(Finsupp.single i m) a✝ [PROOFSTEP] simp [Finsupp.single_apply, DFinsupp.single_apply] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 inst✝⁴ : DecidableEq ι inst✝³ : Semiring R inst✝² : AddCommMonoid M inst✝¹ : (m : M) → Decidable (m ≠ 0) inst✝ : Module R M ⊢ ↑(finsuppLequivDFinsupp R) = Finsupp.toDFinsupp [PROOFSTEP] simp only [@LinearEquiv.coe_coe] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 inst✝⁴ : DecidableEq ι inst✝³ : Semiring R inst✝² : AddCommMonoid M inst✝¹ : (m : M) → Decidable (m ≠ 0) inst✝ : Module R M ⊢ ↑(finsuppLequivDFinsupp R) = Finsupp.toDFinsupp [PROOFSTEP] rfl [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : (i : ι) × η i →₀ N i : ι ⊢ i ∈ (splitSupport f).val ∨ split f i = 0 [PROOFSTEP] rw [← Finset.mem_def, mem_splitSupport_iff_nonzero] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : (i : ι) × η i →₀ N i : ι ⊢ split f i ≠ 0 ∨ split f i = 0 [PROOFSTEP] exact (em _).symm [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N ⊢ (i : ι) × η i →₀ N [PROOFSTEP] haveI := Classical.decEq ι [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N this : DecidableEq ι ⊢ (i : ι) × η i →₀ N [PROOFSTEP] haveI := fun i => Classical.decEq (η i →₀ N) [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N this✝ : DecidableEq ι this : (i : ι) → DecidableEq (η i →₀ N) ⊢ (i : ι) × η i →₀ N [PROOFSTEP] refine' onFinset 
(Finset.sigma f.support fun j => (f j).support) (fun ji => f ji.1 ji.2) fun g hg => Finset.mem_sigma.mpr ⟨_, mem_support_iff.mpr hg⟩ [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N this✝ : DecidableEq ι this : (i : ι) → DecidableEq (η i →₀ N) g : (i : ι) × η i hg : (fun ji => ↑(↑f ji.fst) ji.snd) g ≠ 0 ⊢ g.fst ∈ DFinsupp.support f [PROOFSTEP] simp only [Ne.def, DFinsupp.mem_support_toFun] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N this✝ : DecidableEq ι this : (i : ι) → DecidableEq (η i →₀ N) g : (i : ι) × η i hg : (fun ji => ↑(↑f ji.fst) ji.snd) g ≠ 0 ⊢ ¬↑f g.fst = 0 [PROOFSTEP] intro h [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N this✝ : DecidableEq ι this : (i : ι) → DecidableEq (η i →₀ N) g : (i : ι) × η i hg : (fun ji => ↑(↑f ji.fst) ji.snd) g ≠ 0 h : ↑f g.fst = 0 ⊢ False [PROOFSTEP] dsimp at hg [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N this✝ : DecidableEq ι this : (i : ι) → DecidableEq (η i →₀ N) g : (i : ι) × η i hg : ¬↑(↑f g.fst) g.snd = 0 h : ↑f g.fst = 0 ⊢ False [PROOFSTEP] rw [h] at hg [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N this✝ : DecidableEq ι this : (i : ι) → DecidableEq (η i →₀ N) g : (i : ι) × η i hg : ¬↑0 g.snd = 0 h : ↑f g.fst = 0 ⊢ False [PROOFSTEP] simp only [coe_zero, Pi.zero_apply, not_true] at hg [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : (i : ι) × η i →₀ N ⊢ (fun f => onFinset (Finset.sigma (DFinsupp.support f) fun j => (↑f j).support) (fun ji => ↑(↑f ji.fst) ji.snd) (_ : ∀ (g : (i : ι) × η i), (fun ji => ↑(↑f ji.fst) ji.snd) g ≠ 0 → g ∈ Finset.sigma (DFinsupp.support f) fun j => (↑f j).support)) ((fun f => { toFun := split f, support' := Trunc.mk { val := (splitSupport f).val, property := (_ : ∀ (i : ι), i ∈ (splitSupport f).val ∨ split f i = 0) } }) f) = f [PROOFSTEP] ext [GOAL] case h ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : (i : ι) × η i →₀ N a✝ : (i : ι) × η i ⊢ ↑((fun f => onFinset (Finset.sigma (DFinsupp.support f) fun j => (↑f j).support) (fun ji => ↑(↑f ji.fst) ji.snd) (_ : ∀ (g : (i : ι) × η i), (fun ji => ↑(↑f ji.fst) ji.snd) g ≠ 0 → g ∈ Finset.sigma (DFinsupp.support f) fun j => (↑f j).support)) ((fun f => { toFun := split f, support' := Trunc.mk { val := (splitSupport f).val, property := (_ : ∀ (i : ι), i ∈ (splitSupport f).val ∨ split f i = 0) } }) f)) a✝ = ↑f a✝ [PROOFSTEP] simp [split] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ (i : ι), η i →₀ N ⊢ (fun f => { toFun := split f, support' := Trunc.mk { val := (splitSupport f).val, property := (_ : ∀ (i : ι), i ∈ (splitSupport f).val ∨ split f i = 0) } }) ((fun f => onFinset (Finset.sigma (DFinsupp.support f) fun j => (↑f j).support) (fun ji => ↑(↑f ji.fst) ji.snd) (_ : ∀ (g : (i : ι) × η i), (fun ji => ↑(↑f ji.fst) ji.snd) g ≠ 0 → g ∈ Finset.sigma (DFinsupp.support f) fun j => (↑f j).support)) f) = f [PROOFSTEP] ext [GOAL] case h.h ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : Zero N f : Π₀ 
(i : ι), η i →₀ N i✝ : ι a✝ : η i✝ ⊢ ↑(↑((fun f => { toFun := split f, support' := Trunc.mk { val := (splitSupport f).val, property := (_ : ∀ (i : ι), i ∈ (splitSupport f).val ∨ split f i = 0) } }) ((fun f => onFinset (Finset.sigma (DFinsupp.support f) fun j => (↑f j).support) (fun ji => ↑(↑f ji.fst) ji.snd) (_ : ∀ (g : (i : ι) × η i), (fun ji => ↑(↑f ji.fst) ji.snd) g ≠ 0 → g ∈ Finset.sigma (DFinsupp.support f) fun j => (↑f j).support)) f)) i✝) a✝ = ↑(↑f i✝) a✝ [PROOFSTEP] simp [split] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝³ : Semiring R inst✝² : DecidableEq ι inst✝¹ : Zero N inst✝ : (i : ι) → (x : η i →₀ N) → Decidable (x ≠ 0) f : (i : ι) × η i →₀ N ⊢ DFinsupp.support (↑sigmaFinsuppEquivDFinsupp f) = splitSupport f [PROOFSTEP] ext [GOAL] case a ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝³ : Semiring R inst✝² : DecidableEq ι inst✝¹ : Zero N inst✝ : (i : ι) → (x : η i →₀ N) → Decidable (x ≠ 0) f : (i : ι) × η i →₀ N a✝ : ι ⊢ a✝ ∈ DFinsupp.support (↑sigmaFinsuppEquivDFinsupp f) ↔ a✝ ∈ splitSupport f [PROOFSTEP] rw [DFinsupp.mem_support_toFun] [GOAL] case a ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝³ : Semiring R inst✝² : DecidableEq ι inst✝¹ : Zero N inst✝ : (i : ι) → (x : η i →₀ N) → Decidable (x ≠ 0) f : (i : ι) × η i →₀ N a✝ : ι ⊢ ↑(↑sigmaFinsuppEquivDFinsupp f) a✝ ≠ 0 ↔ a✝ ∈ splitSupport f [PROOFSTEP] exact (Finsupp.mem_splitSupport_iff_nonzero _ _).symm [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N a : (i : ι) × η i n : N ⊢ ↑sigmaFinsuppEquivDFinsupp (single a n) = DFinsupp.single a.fst (single a.snd n) [PROOFSTEP] obtain ⟨i, a⟩ := a [GOAL] case mk ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i ⊢ ↑sigmaFinsuppEquivDFinsupp (single { fst := i, snd := a } n) = DFinsupp.single { fst := i, snd := a }.fst (single { fst := i, snd := a }.snd n) [PROOFSTEP] ext j b [GOAL] case mk.h.h ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i j : ι b : η j ⊢ ↑(↑(↑sigmaFinsuppEquivDFinsupp (single { fst := i, snd := a } n)) j) b = ↑(↑(DFinsupp.single { fst := i, snd := a }.fst (single { fst := i, snd := a }.snd n)) j) b [PROOFSTEP] by_cases h : i = j [GOAL] case pos ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i j : ι b : η j h : i = j ⊢ ↑(↑(↑sigmaFinsuppEquivDFinsupp (single { fst := i, snd := a } n)) j) b = ↑(↑(DFinsupp.single { fst := i, snd := a }.fst (single { fst := i, snd := a }.snd n)) j) b [PROOFSTEP] subst h [GOAL] case pos ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a b : η i ⊢ ↑(↑(↑sigmaFinsuppEquivDFinsupp (single { fst := i, snd := a } n)) i) b = ↑(↑(DFinsupp.single { fst := i, snd := a }.fst (single { fst := i, snd := a }.snd n)) i) b [PROOFSTEP] classical simp [split_apply, Finsupp.single_apply] [GOAL] case pos ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a b : η i ⊢ ↑(↑(↑sigmaFinsuppEquivDFinsupp (single { fst := i, snd := a } n)) i) b = ↑(↑(DFinsupp.single { fst := i, snd := a }.fst (single { fst := i, snd := a }.snd 
n)) i) b [PROOFSTEP] simp [split_apply, Finsupp.single_apply] [GOAL] case neg ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i j : ι b : η j h : ¬i = j ⊢ ↑(↑(↑sigmaFinsuppEquivDFinsupp (single { fst := i, snd := a } n)) j) b = ↑(↑(DFinsupp.single { fst := i, snd := a }.fst (single { fst := i, snd := a }.snd n)) j) b [PROOFSTEP] suffices Finsupp.single (⟨i, a⟩ : Σ i, η i) n ⟨j, b⟩ = 0 by simp [split_apply, dif_neg h, this] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i j : ι b : η j h : ¬i = j this : ↑(single { fst := i, snd := a } n) { fst := j, snd := b } = 0 ⊢ ↑(↑(↑sigmaFinsuppEquivDFinsupp (single { fst := i, snd := a } n)) j) b = ↑(↑(DFinsupp.single { fst := i, snd := a }.fst (single { fst := i, snd := a }.snd n)) j) b [PROOFSTEP] simp [split_apply, dif_neg h, this] [GOAL] case neg ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i j : ι b : η j h : ¬i = j ⊢ ↑(single { fst := i, snd := a } n) { fst := j, snd := b } = 0 [PROOFSTEP] have H : (⟨i, a⟩ : Σ i, η i) ≠ ⟨j, b⟩ := by simp [h] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i j : ι b : η j h : ¬i = j ⊢ { fst := i, snd := a } ≠ { fst := j, snd := b } [PROOFSTEP] simp [h] [GOAL] case neg ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i j : ι b : η j h : ¬i = j H : { fst := i, snd := a } ≠ { fst := j, snd := b } ⊢ ↑(single { fst := i, snd := a } n) { fst := j, snd := b } = 0 [PROOFSTEP] classical rw [Finsupp.single_apply, if_neg H] [GOAL] case neg ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝² : Semiring R inst✝¹ : DecidableEq ι inst✝ : Zero N n : N i : ι a : η i j : ι b : η j h : ¬i = j H : { fst := i, snd := a } ≠ { fst := j, snd := b } ⊢ ↑(single { fst := i, snd := a } n) { fst := j, snd := b } = 0 [PROOFSTEP] rw [Finsupp.single_apply, if_neg H] [GOAL] ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : AddZeroClass N f g : (i : ι) × η i →₀ N ⊢ ↑sigmaFinsuppEquivDFinsupp (f + g) = ↑sigmaFinsuppEquivDFinsupp f + ↑sigmaFinsuppEquivDFinsupp g [PROOFSTEP] ext [GOAL] case h.h ι : Type u_1 R : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝¹ : Semiring R inst✝ : AddZeroClass N f g : (i : ι) × η i →₀ N i✝ : ι a✝ : η i✝ ⊢ ↑(↑(↑sigmaFinsuppEquivDFinsupp (f + g)) i✝) a✝ = ↑(↑(↑sigmaFinsuppEquivDFinsupp f + ↑sigmaFinsuppEquivDFinsupp g) i✝) a✝ [PROOFSTEP] rfl [GOAL] ι : Type u_1 R✝ : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝³ : Semiring R✝ R : Type u_6 inst✝² : Monoid R inst✝¹ : AddMonoid N inst✝ : DistribMulAction R N r : R f : (i : ι) × η i →₀ N ⊢ ↑sigmaFinsuppEquivDFinsupp (r • f) = SMul.smul r (↑sigmaFinsuppEquivDFinsupp f) [PROOFSTEP] ext [GOAL] case h.h ι : Type u_1 R✝ : Type u_2 M : Type u_3 η : ι → Type u_4 N : Type u_5 inst✝³ : Semiring R✝ R : Type u_6 inst✝² : Monoid R inst✝¹ : AddMonoid N inst✝ : DistribMulAction R N r : R f : (i : ι) × η i →₀ N i✝ : ι a✝ : η i✝ ⊢ ↑(↑(↑sigmaFinsuppEquivDFinsupp (r • f)) i✝) a✝ = ↑(↑(SMul.smul r (↑sigmaFinsuppEquivDFinsupp f)) i✝) a✝ [PROOFSTEP] rfl
[STATEMENT] lemma [simp]: "length (cspp m S fs) = length fs" [PROOF STATE] proof (prove) goal (1 subgoal): 1. length (cspp m S fs) = length fs [PROOF STEP] by(simp add:cspp_def)
Formatank Ltd recently refurbished two water storage tanks on board a pipe-lay and heavy-lift vessel. The onboard engineers had noted deterioration of the coating and walls of their fresh water hydrophore tanks, and relayed the diagnosis to Formatank. Understanding the importance of a dependable water supply on board an industrial vessel, and the challenges involved in replacing the tanks, Formatank accommodated the refurbishment requirements on a short timescale. The restricted access to the two 6.5 m³ water storage tanks didn't pose an issue for the skilled Formatank team. A complete internal tank wall cleaning process was carried out, removing the rust and grime build-up. Upon completion of the surface preparation, a full tank wall inspection was carried out to determine the most suitable course of action. The Formatank two-coat GRP liner was applied to the entire internal tank wall using the hand-lay technique and WRAS-approved materials. The GRP liner effectively creates a new internal tank wall, suitable for drinking water storage and, in this case, guaranteed for five years. Within 48 hours the water storage tanks were back in shipshape condition, once again providing a dependable source of clean water to the vessel's three-hundred-strong crew.
module Network.HTTP.URL import Data.String.Parser import Generics.Derive import Network.HTTP.Protocol import Data.String import Network.HTTP.Path import Data.So %language ElabReflection public export record Hostname where constructor MkHostname domain : String port : Maybe Bits16 public export record URLCredential where constructor MkURLCredential username : String password : Maybe String public export record URL where constructor MkURL protocol : String credential : Maybe URLCredential host : Hostname path : Path extensions : String %runElab derive "URLCredential" [Generic, Meta, Eq, Show] %runElab derive "Hostname" [Generic, Meta, Eq, Show] %runElab derive "URL" [Generic, Meta, Show] parse_port_number : Parser Bits16 parse_port_number = do n <- natural _ <- eos if n < 65536 then pure (cast n) else fail "port number bigger than 65535" parse_host : Parser Hostname parse_host = do domain <- takeWhile (/= ':') port <- optional (char ':' *> (parse_port_number <|> pure 0)) pure (MkHostname domain port) parse_credential : Parser URLCredential parse_credential = parse_username_password <|> parse_username where parse_username : Parser URLCredential parse_username = do username <- takeWhile (const True) pure (MkURLCredential username Nothing) parse_username_password : Parser URLCredential parse_username_password = do username <- takeUntil ":" password <- takeWhile (const True) pure (MkURLCredential username $ Just password) export parse_url : Parser URL parse_url = do protocol <- takeUntil "://" credential <- optional (takeUntil "@") domain <- takeUntil "/" <|> takeWhile (const True) credential <- case parse parse_credential <$> credential of Just (Right (credential, _)) => pure $ Just credential Just (Left err) => fail err Nothing => pure Nothing case parse parse_host domain of Right (domain_and_port, _) => do path <- takeWhile (\c => (c /= '#') && (c /= '?')) extensions <- takeWhile (const True) let path = fromString ("/" <+> path) pure $ MkURL protocol credential domain_and_port path extensions Left err => fail err export url_from_string : String -> Either String URL url_from_string = map fst . parse parse_url . ltrim public export data URLProof : AsList m -> Type where IsHTTPURL : URLProof ('h' :: 't' :: 't' :: 'p' :: ':' :: '/' :: '/' :: xs) IsHTTPSURL : URLProof ('h' :: 't' :: 't' :: 'p' :: 's' :: ':' :: '/' :: '/' :: xs) export url' : (str : String) -> {auto 0 ok : URLProof (asList str)} -> URL url' string = case url_from_string string of Right x => x Left err => assert_total $ idris_crash err export add : URL -> String -> URL add url' string = case url_from_string string of Right url'' => url'' Left _ => case break (\c => c == '#' || c == '?') string of (path, "") => { path := (url'.path <+> fromString path), extensions := "" } url' (path, extension) => { path := (url'.path <+> fromString path), extensions := extension } url' export parse_hostname : String -> Either String Hostname parse_hostname = map fst . parse parse_host . trim export hostname_string : Hostname -> String hostname_string host = host.domain <+> case host.port of Just x => ":\{show x}"; Nothing => "" export url_port_number : URL -> Maybe Bits16 url_port_number url = url.host.port <|> (protocol_port_number <$> protocol_from_str url.protocol)
%!TEX root = ..\..\dissertation.tex
\section{Co-Development \& Co-Platforming}\label{sec:coDevPltf}
Co-development in systems engineering, product, and production design refers to the simultaneous development or design of two or more systems with some required or anticipated mutual effect on each other, \eg{} a product and the manufacturing system producing it. Co-development, along with the related concept co-\gls{glos:platforming}, has been gaining footing in research on platforms and reconfigurable manufacturing in particular~\parencite{MichaelisJohannesson,ElMaraghy2015407}. The end-goal of co-development of products and manufacturing systems, as outlined by~\textcite{MichaelisJohannesson} and shown in \cref{fig:pltfCoDev}, is to achieve platform-based co-development, wherein aligned instantiations of the platforms are simultaneously created as explicit configurations of products and their corresponding manufacturing system.
\begin{figure}[tb]
\includegraphics[width=\textwidth, trim=2 2 2 2, clip]{mainmatter/introduction/figures/pltfCoDev.pdf}
\caption[Platform-based co-development.]
{Platform-based co-development, with new instantiations of the product and manufacturing system platforms being developed alongside each other to ensure alignment and compatibility. Adapted from \textcite{MichaelisJohannesson}.}\label{fig:pltfCoDev}
\end{figure}
Inconsistencies and a lack of communication with regard to platform development, as well as misalignment between platforms, have proven to be major challenges for manufacturers~\parencite{SorensenAPMS2018}. To address this, and to generally improve the synergy between product and manufacturing system development, various approaches to integrating and aligning the two areas are emerging, such as integrated product and production modelling~\parencite{Michaelis2015203,BRUNOE2018592,BrunoePPModel}, resource modelling and capability matchmaking~\parencite{7750724,JARVENPAA201887,dhunganaMarket}, and set-based concurrent engineering utilising platforms~\parencite{Levandowski2014,Levandowski01092014,LANDAHL201661}. Such approaches provide a formal way to integrate the work of product and production development teams, ensuring their alignment and compatibility by making explicit which product functions and features are needed, which manufacturing capabilities are available, and how these can be matched, thus facilitating the co-development of solutions.
(** * Term definition for Simply Typed Lambda Calculus and de Bruijn manipulation . *) (** * Usual Term syntax .*) Require Import Arith Omega. (** Var syntax: *) Definition Vars := nat. (** Type syntax:*) Inductive Ty: Set := | Base : Ty | Arr : Ty -> Ty -> Ty . (** Term syntax:*) Inductive Term : Set:= | Var : Vars -> Term | App : Term -> Term -> Term | La : Ty -> Term -> Term . Notation "x · y" := (App x y) (at level 15, left associativity) : STLC_scope. Notation "# v" := (Var v) (at level 1) : STLC_scope. Notation "'λ' [ T ] , v " := (La T v) (at level 20, T , v at level 30) : STLC_scope. Notation "A ⇒ B" := (Arr A B) (at level 17, right associativity): STLC_scope. Reserved Notation " t ↑ x # n " (at level 5, x at level 0, left associativity). Delimit Scope STLC_scope with STLC. Open Scope STLC_scope. (** In order to deal with variable bindings and captures, we need a lift function to deal with free and bounded variables. [M ↑ n # m] recursivly add [n] to all variables that are above [m] in [M]. *) Fixpoint lift_rec (n:nat) (k:nat) (T:Term) {struct T} := match T with | # x => if le_gt_dec k x then Var (n+x) else Var x | M · N => App (M ↑ n # k) (N ↑ n # k) | λ [ A ], M => λ [A ], (M ↑ n # (S k)) end where "t ↑ n # k" := (lift_rec n k t) : STLC_scope. Notation " t ↑ n " := (lift_rec n 0 t) (at level 5, n at level 0, left associativity) : STLC_scope. (** Some basic properties of the lift function. That is everything we will ever need to handle de Bruijn indexes *) Lemma inv_lift : forall M N n m , M ↑ n # m = N ↑ n # m -> M = N. Proof. induction M as [ v | F hiF X hiX | T V hiV]; destruct N as [ w | G Y | T' W]; simpl in *; intros n m. - destruct (le_gt_dec m v) as [ h | h]; destruct (le_gt_dec m w) as [ h' | h']; intro heq; now injection heq; clear heq; intro heq; f_equal; omega. - destruct (le_gt_dec m v) as [ h | h]; intro heq; now discriminate. - destruct (le_gt_dec m v) as [ h | h]; intro heq; now discriminate. - destruct (le_gt_dec m w) as [ h | h]; intro heq; now discriminate. - intro heq1; injection heq1; clear heq1; intros heq1 heq2. now rewrite (hiF _ _ _ heq2), (hiX _ _ _ heq1). - intro heq; now discriminate. - destruct (le_gt_dec m w) as [ h | h]; intro heq; now discriminate. - intro heq; now discriminate. - intro heq1; injection heq1; clear heq1; intros heq1 heq2. now rewrite (hiV _ _ _ heq1), heq2. Qed. Lemma lift_rec0 : forall M n, M ↑ 0 # n = M. Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros; simpl. - now destruct (le_gt_dec n v). - now rewrite hiF, hiX. - now rewrite hiV. Qed. Lemma lift0 : forall M, M ↑ 0 = M. Proof. intros; apply lift_rec0. Qed. Lemma liftP1 : forall M i j k, (M ↑ j # i) ↑ k # (j+i) = M ↑ (j+k) # i. Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros i j k; simpl in *. - destruct (le_gt_dec i v) as [ h | h]; simpl. + destruct (le_gt_dec (j+i) (j+v)) as [ h' | h']; simpl; now apply f_equal; omega. + destruct (le_gt_dec (j+i)) as [ h' | h']; [ | reflexivity]. now apply f_equal; omega. - now rewrite hiF, hiX. - rewrite <- hiV. now replace (j + S i) with (S(j + i)). Qed. Lemma liftP2: forall M i j k n, i <= n -> (M ↑ j # i) ↑ k # (j+n) = (M ↑ k # n) ↑ j # i. Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros i j k n hle; simpl in *. - destruct (le_gt_dec i v) as [ h | h]; destruct (le_gt_dec n v) as [ h' | h']; simpl in *; destruct le_gt_dec; destruct le_gt_dec; now apply f_equal; omega. - now rewrite hiF, hiX. - replace (S (j + n)) with (j + S n) by intuition. rewrite hiV; [ reflexivity | now omega]. Qed. 
Lemma liftP3 : forall M i k j n , i <= k -> k <= (i+n) -> (M ↑ n # i) ↑ j # k = M ↑ (j+n) # i. Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros i k j n h1 h2; simpl in *. - destruct (le_gt_dec i v) as [ h | h]; simpl in *; destruct le_gt_dec; apply f_equal; omega. - now rewrite hiF, hiX. - rewrite hiV; [ reflexivity | now omega | now omega]. Qed. Lemma lift_lift : forall M n m, (M ↑ m) ↑ n = M↑ (n+m). Proof. intros. apply liftP3; intuition. Qed. (** We will consider the usual implicit substitution without variable capture (this is where the lift operator comes in handy). [ M [ n ← N ] ] replace the variable [n] in [M] by the term [N]. *) Reserved Notation "t [ x ← u ]" (at level 5, x at level 0, left associativity). Fixpoint subst_rec U T n {struct T} := match T with | # x => match (lt_eq_lt_dec x n) with | inleft (left _) => # x (* v < n *) | inleft (right _) => U ↑ n (* v = n *) | inright _ => # (x - 1) (* v > n *) end | M · N => (M [ n ← U ]) · ( N [ n ← U ]) | λ [ A ], M => λ [ A ], (M [ S n ← U ]) end where " t [ n ← w ] " := (subst_rec w t n) : STLC_scope. Notation " t [ ← w ] " := (subst_rec w t 0) (at level 5) : STLC_scope. (** Some basic properties of the substitution function. Again, we will only need a few functions to deal with indexes. *) Lemma substP1: forall M N i j k , ( M [ j ← N] ) ↑ k # (j+i) = (M ↑ k # (S (j+i))) [ j ← (N ↑ k # i ) ]. Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros N i j k. - simpl (#v [j ← N] ↑ k # (j+i)). change (#v ↑ k # (S (j+i))) with (if le_gt_dec (S (j+i)) v then #(k+v) else #v). destruct (lt_eq_lt_dec v j) as [[ | ] | ]. + destruct (le_gt_dec (S (j+i)) v); [ now omega| ]. simpl. destruct (le_gt_dec (j+i) v) as [ | ]; [ now omega | ]. destruct (lt_eq_lt_dec v j) as [[ | ] | ]; [ reflexivity | now omega| now omega]. + destruct (le_gt_dec (S(j+i)) v) as [ | ]; [ now omega| ]. simpl. destruct (lt_eq_lt_dec v j) as [[] | ]; [now omega | | now omega]. subst; apply liftP2; now omega. + destruct (le_gt_dec (S (j+i)) v). * simpl. destruct (le_gt_dec (j+i) v) as [ | ]. -- destruct (lt_eq_lt_dec) as [ [] | ]. ++ destruct le_gt_dec;now omega. ++ destruct le_gt_dec; now omega. ++ destruct le_gt_dec; [ f_equal; now omega| now omega]. -- destruct le_gt_dec; destruct lt_eq_lt_dec as [ [] | ]; now omega. * simpl. destruct le_gt_dec; destruct lt_eq_lt_dec as [ [] | ]; try now omega. reflexivity. - simpl. rewrite hiF. replace (S(S(j+i))) with (S((S j)+i)) by intuition. now rewrite <- hiX. - simpl. replace (S(S(j+i))) with (S((S j)+i)) by intuition. now rewrite <- hiV. Qed. Lemma substP2: forall M N i j n, i <= n -> (M ↑ j # i ) [ j+n ← N ] = ( M [ n ← N]) ↑ j # i . Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros N i j n hle; simpl in *. - destruct (le_gt_dec i v); destruct (lt_eq_lt_dec v n) as [[] | ]; simpl. + destruct lt_eq_lt_dec as [ [] | ]; destruct le_gt_dec; try now omega. reflexivity. + destruct lt_eq_lt_dec as [ [] | ]; try now omega. now rewrite liftP3; [ | omega | omega]. + destruct lt_eq_lt_dec as [ [] | ]; destruct le_gt_dec; try now omega. now f_equal; omega. + destruct lt_eq_lt_dec as [ [] | ]; destruct le_gt_dec; try now omega. reflexivity. + now omega. + now omega. - now rewrite hiF, hiX. - rewrite <- hiV; [ | now omega]. now replace (S (j + n)) with (j + S n). Qed. Lemma substP3: forall M N i k n, i <= k -> k <= i+n -> (M↑ (S n) # i) [ k← N] = M ↑ n # i. Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros N i k n h1 h2; simpl in *. - destruct (le_gt_dec i v). + unfold subst_rec. 
destruct (lt_eq_lt_dec (S(n+v)) k) as [[] | ]; [ now omega | now omega| now f_equal; omega]. + simpl. destruct (lt_eq_lt_dec v k) as [[] | ]; [ reflexivity | now omega | now omega]. - rewrite hiF, hiX; [ reflexivity | now omega | now omega | now omega | now omega]. - rewrite hiV; [ reflexivity | now omega | now omega]. Qed. Lemma substP4: forall M N P i j, (M [ i← N]) [i+j ← P] = (M [S(i+j) ← P]) [i← N[j← P]]. Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros N P i j; simpl in *. - destruct lt_eq_lt_dec as [ [] | ]; destruct lt_eq_lt_dec as [ [] | ]; simpl. + destruct lt_eq_lt_dec as [ [] | ]; destruct lt_eq_lt_dec as [ [] | ]; [ reflexivity | | | | | | | | ]; now omega. + now omega. + now omega. + destruct lt_eq_lt_dec as [ [] | ]; [ now omega | | now omega ]. now rewrite substP2; [ reflexivity | omega ]. + now omega. + now omega. + destruct lt_eq_lt_dec as [ [] | ]; destruct lt_eq_lt_dec as [ [] | ]; [ | | reflexivity | | | | | | ]; now omega. + destruct lt_eq_lt_dec as [ [] | ]; [ now omega | | now omega]. now rewrite substP3; [ reflexivity | omega | omega ]. + destruct lt_eq_lt_dec as [ [] | ]; destruct lt_eq_lt_dec as [ [] | ]; [ reflexivity | | | | | | | | reflexivity]; now omega. - now rewrite hiF, hiX. - replace (S(S(i+j))) with (S((S i)+ j)) by intuition. now rewrite <- hiV. Qed. Lemma subst_travers : forall M N P n, (M [← N]) [n ← P] = (M [n+1 ← P])[← N[n← P]]. Proof. intros. rewrite plus_comm. change n with (O+n). apply substP4. Qed. (** Tool function usefull when eta-conversion is used, but this is not the case here. *) Lemma expand_term_with_subst : forall M n, (M ↑ 1 # (S n)) [ n ← #0 ] = M. Proof. induction M as [ v | F hiF X hiX | T V hiV]; intros n. - unfold lift_rec. destruct le_gt_dec as [ | ]. + unfold subst_rec. destruct (lt_eq_lt_dec (1+v) n) as [[] | ]; [ now omega | now omega | now f_equal; omega]. + simpl; destruct (lt_eq_lt_dec v n) as [[] | ]; [ reflexivity |now subst; f_equal; omega| now omega]. - now simpl; rewrite hiF, hiX. - now simpl; rewrite hiV. Qed. Reserved Notation " A → B " (at level 80). Inductive Beta : Term -> Term -> Prop := | Beta_head : forall A M N , (λ[A], M)· N → M [← N] | Beta_red1 : forall M M' N , M → M' -> M · N → M'· N | Beta_red2 : forall M N N', N → N' -> M · N → M · N' | Beta_lam : forall A M M', M → M' -> λ[A],M → λ[A ],M' where "M → N" := (Beta M N) : STLC_scope. Lemma Beta_lift: forall M N, M → N -> forall n m, M ↑ n # m → N ↑ n # m. Proof. induction 1 as [ A M N | M M' N hM hi | M N N' hN hi | A M M' hM hi]; intros n m; simpl in *. - change m with (0 + m). rewrite (substP1 M N m 0 n); simpl. now constructor. - now constructor; apply hi. - now constructor; apply hi. - now constructor; apply hi. Qed. Lemma Beta_lift_inv : forall M N n m , M ↑ n # m → N -> exists P, M → P /\ N = P ↑ n # m . Proof. induction M as [ v | u hiU v hiV | A M hi]; intros N n m hred; simpl in *. - now destruct le_gt_dec as [ h | h]; inversion hred. - inversion hred; subst; clear hred. + destruct u as [ | | B N]; simpl in *; [ destruct le_gt_dec; discriminate | discriminate | ]. injection H0; intros; subst; clear H0. exists (N [ ← v]); split; [ now constructor | ]. now change m with (0 + m); rewrite <- substP1. + apply hiU in H2 as (u' & hu' & ->). now exists (u' · v); simpl; split; [ constructor | ]. + apply hiV in H2 as (v' & hv' & ->). now exists (u · v'); simpl; split; [ constructor | ]. - inversion hred; subst; clear hred. apply hi in H2 as (Q & hQ & ->). now exists (λ [A], Q); split; [ constructor | ]. Qed. 
Lemma Beta_subst : forall M N, M → N -> forall n P, M [ n ← P] → N [ n ← P]. Proof. induction 1 as [ A M N | M M' N hred hi| M N N' hred hi| A M M' hred hi]; intros n P; simpl in *. - rewrite subst_travers. replace (n + 1) with (S n) by intuition. now constructor. - now constructor; apply hi. - now constructor; apply hi. - now constructor; apply hi. Qed. Require Import List. (** * Typing Environment for annotated terms . As for Terms, we define contexts of "Annotated" terms, with the very safe function and tools as for the usual one.*) (** Very naive definition of environment : list of term be carefull, the usual written env Γ(x:A) is encoded in A::Γ **) Definition Env := list Ty. (** Some manipulation functions (mostly from Bruno Barras' PTS contrib): - how to find an item in the environment - how to truncate an environment - how to insert a new element (with correct de Bruijn update) - how to substitute something in the environment *) Set Implicit Arguments. Inductive item (A:Type) (x:A): list A ->nat->Prop := | item_hd: forall Γ :list A, (item x (cons x Γ) O) | item_tl: forall (Γ:list A)(n:nat)(y:A), item x Γ n -> item x (cons y Γ) (S n). Hint Constructors item. (** In the list [Γ], the [n]th item is syntacticaly [x]. *) Notation " x ↓ n ∈ Γ " := (item x Γ n) (at level 80, no associativity) : STLC_scope. Lemma fun_item: forall T (A B:T)(Γ:list T)(n:nat), A ↓ n ∈ Γ -> B ↓ n ∈ Γ -> A = B. Proof. intros T A B Γ n;revert T A B Γ. induction n as [ | n hi]; intros T A B Γ h1 h2. - inversion h1; subst; clear h1. now inversion h2; subst; clear h2. - inversion h1; subst; clear h1. inversion h2; subst; clear h2. now apply (hi _ _ _ _ H1 H0). Qed. Inductive trunc (A:Type) : nat->list A ->list A->Prop := | trunc_O: forall (Γ:list A) , (trunc O Γ Γ) | trunc_S: forall (k:nat)(Γ Γ':list A)(x:A), trunc k Γ Γ' -> trunc (S k) (cons x Γ) Γ'. Hint Constructors trunc. Lemma item_trunc: forall (T:Type) (n:nat) (Γ:list T) (t:T), t ↓ n ∈ Γ -> exists Γ' , trunc (S n) Γ Γ'. Proof. intros T; induction n as [ | n hi]; intros Γ t hin. - inversion hin; subst; clear hin. exists Γ0. now repeat constructor. - inversion hin; subst; clear hin. destruct (hi Γ0 t H1) as [Γ' hΓ]. exists Γ'. now repeat constructor. Qed. (** This type describe how do we add an element in an environment: no type checking is done, this is just the mecanic way to do it. *) Inductive ins_in_env (Γ:Env ) (d1:Ty): nat->Env -> Env ->Prop := | ins_O: ins_in_env Γ d1 O Γ (d1::Γ) | ins_S: forall (n:nat)(Δ Δ':Env )(d:Ty), (ins_in_env Γ d1 n Δ Δ') -> ins_in_env Γ d1 (S n) (d::Δ) ( d::Δ' ). Hint Constructors ins_in_env. (** Some lemmas about inserting a new element. They explain how terms in the environment are lifted according to their original position and the position of insertion. *) Lemma ins_item_ge: forall (d':Ty) (n:nat) (Γ Δ Δ':Env), ins_in_env Γ d' n Δ Δ' -> forall (v:nat), n<=v -> forall (d:Ty), d ↓ v ∈ Δ -> d ↓ (S v) ∈ Δ'. Proof. intros d'; induction n as [ | n hi]; intros Γ Δ Δ' hins v hle d hd. - inversion hd; subst; clear hd. + inversion hins; subst; clear hins. now repeat constructor. + inversion hins; subst; clear hins. now repeat constructor. - inversion hd; subst; clear hd; [ now omega | ]. inversion hins; subst; clear hins. constructor. eapply hi. + now apply H4. + now omega. + assumption. Qed. Lemma ins_item_lt: forall (d':Ty)(n:nat)(Γ Δ Δ':Env), ins_in_env Γ d' n Δ Δ' -> forall (v:nat), n > v -> forall (d:Ty), d ↓ v ∈ Δ -> d ↓ v ∈ Δ' . Proof. intros d'; induction n as [ | n hi]; intros Γ Δ Δ' hins v hlt d hd; [ now omega | ]. 
inversion hins; subst; clear hins. destruct v as [ | v]. - now inversion hd; subst; constructor. - inversion hd; subst; clear hd. constructor. eapply hi. + now apply H0. + now omega. + now assumption. Qed. (** This type describe how do we do substitution inside a context. As previously, no type checking is done at this point.*) Inductive sub_in_env (Γ : Env) (T:Ty): nat -> Env -> Env -> Prop := | sub_O : sub_in_env Γ T 0 (T :: Γ) Γ | sub_S : forall Δ Δ' n B, sub_in_env Γ T n Δ Δ' -> sub_in_env Γ T (S n) (B :: Δ) ( B :: Δ'). Hint Constructors sub_in_env. (** Some ins / subst related facts: what happens to term when we do a substitution in a context.*) Lemma nth_sub_sup : forall n Γ Δ Δ' T, sub_in_env Γ T n Δ Δ' -> forall v : nat, n <= v -> forall d , d ↓ (S v) ∈ Δ -> d ↓ v ∈ Δ'. Proof. induction 1 as [ | Δ Δ' n b hΔ hi]; intros v hle d hd. - now inversion hd; subst; clear hd. - inversion hd; subst; clear hd. destruct v as [ | v]; [ now omega | ]. constructor. now apply hi; [ omega |]. Qed. Lemma nth_sub_eq : forall T n Γ Δ Δ', sub_in_env Γ T n Δ Δ' -> forall d , d↓ n ∈ Δ -> T = d. Proof. induction 1 as [ | Δ Δ' n b hΔ hi]; intros d hd. - now inversion hd; subst; clear hd. - inversion hd; subst; clear hd. now apply hi. Qed. Lemma nth_sub_inf : forall T n Γ Δ Δ', sub_in_env Γ T n Δ Δ' -> forall v : nat, n > v -> forall d , d ↓ v ∈ Δ -> d ↓ v ∈ Δ' . Proof. induction 1 as [ | Δ Δ' n b hΔ hi]; intros v hlt d hd; [ now omega | ]. inversion hd; subst; clear hd; [ now constructor | ]. constructor. apply hi; [ now omega |assumption]. Qed. Reserved Notation "Γ ⊢ t : T" (at level 80, t, T at level 30, no associativity) . Inductive typ : Env -> Term -> Ty -> Prop := | cVar : forall Γ A v, A ↓ v ∈ Γ -> Γ ⊢ #v : A | cLa : forall Γ A B M, A::Γ ⊢ M : B -> Γ ⊢ λ[A], M : A ⇒ B | cApp : forall Γ M N A B , Γ ⊢ M : A ⇒ B -> Γ ⊢ N : A -> Γ ⊢ M · N : B where "Γ ⊢ t : T" := (typ Γ t T) : STLC_scope. Hint Constructors typ. (** Weakening Property: if a judgement is valid, we can insert a well-typed term in the context, it will remain valid. This is where the type checking for inserting items in a context is done.*) Theorem weakening: forall Δ M T, Δ ⊢ M : T -> forall Γ A n Δ', ins_in_env Γ A n Δ Δ' -> Δ' ⊢ M ↑ 1 # n : T. Proof. induction 1 as [ Δ V v hin | Δ U V M hM hiM | Δ M N U V hM hiM hN hiN]; intros Γ A n Δ' hins; simpl in *. - destruct le_gt_dec; constructor. + eapply ins_item_ge; [ now apply hins | assumption | assumption]. + eapply ins_item_lt; [ now apply hins | | ]; assumption. - constructor. eapply hiM. constructor. now apply hins. - econstructor. + eapply hiM; now apply hins. + eapply hiN; now apply hins. Qed. Theorem thinning : forall Γ M T A, Γ ⊢ M : T -> A::Γ ⊢ M ↑ 1 : T. Proof. intros. eapply weakening. - now apply H. - now constructor. Qed. Theorem thinning_n : forall n Δ Δ', trunc n Δ Δ' -> forall M T , Δ' ⊢ M : T -> Δ ⊢ M ↑ n : T. Proof. induction n as [ | n hi]; intros Δ Δ' ht M T hM. - inversion ht; subst; clear ht. now rewrite lift0. - inversion ht; subst; clear ht. change (S n) with (1 + n). replace (M ↑ (1+n)) with ((M ↑ n )↑ 1) by (apply lift_lift). apply thinning; trivial. eapply hi. + now apply H0. + assumption. Qed. (** Substitution Property: if a judgment is valid and we replace a variable by a well-typed term of the same type, it will remain valid.*) (* begin hide *) Lemma sub_trunc : forall Δ A n Γ Γ', sub_in_env Δ A n Γ Γ' -> trunc n Γ' Δ. Proof. induction 1; now repeat constructor. Qed. 
(* end hide *) Theorem substitution : forall Γ M T , Γ ⊢ M : T -> forall Δ P A, Δ ⊢ P : A -> forall Γ' n , sub_in_env Δ A n Γ Γ' -> Γ' ⊢ M [ n ←P ] : T. Proof. induction 1 as [ Γ V v hin | Γ U V M hM hiM | Γ M N U V hM hiM hN hiN]; intros Δ P A hP Γ' n hsub; simpl. - destruct lt_eq_lt_dec as [ [] | ]. + constructor. eapply nth_sub_inf; [ now apply hsub | now omega | assumption]. + subst. eapply thinning_n. * eapply sub_trunc. now apply hsub. * replace V with A; [ assumption | ]. eapply nth_sub_eq; [ now apply hsub | assumption]. + constructor. eapply nth_sub_sup; [ now apply hsub | now omega |]. replace (S (v - 1)) with v by now omega. assumption. - econstructor. eapply hiM; [ now apply hP | ]. now constructor. - econstructor. + eapply hiM; [ now apply hP | assumption]. + eapply hiN; [ now apply hP | assumption ]. Qed. Lemma SR : forall Γ M T, Γ ⊢ M : T -> forall N, M → N -> Γ ⊢ N : T. Proof. induction 1 as [ Γ A v hin | Γ A B M hM hiM | Γ M N A B hM hiM hN hiN]; intros P hred. - now inversion hred. - inversion hred; subst; clear hred. constructor. now apply hiM. - inversion hred; subst; clear hred. + inversion hM; subst; clear hM. eapply substitution; [ now apply H1 | now apply hN| now constructor]. + econstructor. * apply hiM; assumption. * assumption. + econstructor. * now apply hM. * apply hiN; assumption. Qed. Definition is_value (t: Term) : Prop := match t with | # v => True | λ [ A ], M => True | _ => False end. Lemma Progress_: forall Γ M T, Γ ⊢ M : T -> Γ = nil -> (exists N, M → N) \/ is_value M. Proof. induction 1 as [ Γ A v hin | Γ A B M hM hiM | Γ M N A B hM hiM hN hiN]; intros heq; simpl; [ now right | now right | ]. left. destruct (hiM heq) as [ [M' hM'] | hm]. - exists (M'· N); now constructor. - destruct (hiN heq) as [ [N' hN'] | hn]. + exists (M · N'); now constructor. + subst; destruct M as [ v | U V | U V]. * inversion hM; subst; clear hM. now inversion H1. * simpl in hm; now elim hm. * exists (V [← N]); now constructor. Qed. Lemma Progress: forall M T, nil ⊢ M : T -> (exists N, M → N) \/ is_value M. Proof. intros M T h; eapply Progress_. - now apply h. - reflexivity. Qed. Inductive subterm : Term -> Term -> Prop := | sbtrm_abs : forall A M, subterm M (λ [A], M) | sbtrm_app_l : forall M N, subterm M (M · N) | sbtrm_app_r : forall M N, subterm N (M · N) . Fixpoint boccur (n:nat) (t:Term) := match t with | # i => if eq_nat_dec n i then true else false | u · v => orb (boccur n u) (boccur n v) | λ [A], m => boccur (S n) m end. Require Import Relations. Definition normal t := forall u, ~ (t → u). Notation SN := (Acc (transp _ Beta)). Lemma commut_Beta_subterm : commut _ subterm (transp _ Beta). Proof. intros M N hsub P hP; unfold transp in *. inversion hsub; subst; clear hsub. - now exists (λ [A], P); constructor. - now exists (P · N0); constructor. - now exists (M0 · P); constructor. Qed. Lemma subterm_SN : forall M , SN M -> forall P, subterm P M -> SN P. Proof. induction 1 as [ M hM hi]; intros P hP. constructor. intros Q HQ. destruct (commut_Beta_subterm hP HQ) as [R h1 h2]. now eapply hi; [ now apply h1 | ]. Qed. Lemma SN_red_SN : forall x y, SN x -> x → y -> SN y. Proof. intros x y h; revert y; induction h as [ x hx hi]; simpl in *. exact hx. Qed. Lemma SN_var : forall n, SN (# n). Proof. intros n; constructor; intros x hx. now inversion hx. Qed. Lemma SN_abs : forall M, SN M -> forall A, SN (λ [A], M). Proof. induction 1 as [ M hM hi]; intros A; simpl in *. constructor; intros N hN. inversion hN; subst; clear hN. now apply hi. Qed. 
Lemma SN_lift : forall n t k, SN t -> SN (t ↑ n # k). Proof. intros n t k h; revert t h n k. induction 1 as [N hN hi]; intros n k. constructor; intros P hP. apply Beta_lift_inv in hP as ( Q & hQ & ->). now apply hi. Qed. Lemma SN_lift_inv : forall M', SN M' -> forall n M k, M' = M ↑ n # k -> SN M. Proof. induction 1 as [ M' hM hi]; intros n M k heq; subst; simpl in *. constructor; intros N hN. apply hi with (y := N ↑ n # k) (n := n) (k := k); [ | reflexivity]. now apply Beta_lift. Qed. Lemma SN_subst_inv_l u m k : SN (subst_rec u m k) -> boccur k m = true -> SN u. Proof. revert u k; induction m as [ v | U hiU V hiV | A U hi]; intros u k hSN hin; simpl in *. - destruct lt_eq_lt_dec as [ [] | ]. + destruct Nat.eq_dec; [ now omega | discriminate]. + destruct Nat.eq_dec; [ | subst; discriminate]. now apply (SN_lift_inv hSN k _ 0). + destruct Nat.eq_dec; [ now omega | discriminate]. - apply Bool.orb_true_iff in hin as [ h | h]. + apply hiU with (k := k); [ | assumption]. eapply subterm_SN; [ now apply hSN | ]. now constructor. + apply hiV with (k := k); [ | assumption]. eapply subterm_SN; [ now apply hSN | ]. now constructor. - apply hi with (k := S k); [ | assumption]. eapply subterm_SN; [ now apply hSN | ]. now constructor. Qed. Lemma SN_subst : forall M T, SN (M [← T]) -> SN M. Proof. intros M T hsub. cut (forall t, SN t -> forall m, t = m [ ← T] -> SN m). - intro h; now apply (h _ hsub). - simple induction 1; intros. apply Acc_intro; intros; subst. apply H1 with (y [ ← T]); [ | reflexivity]. now apply Beta_subst. Qed. Definition neutral M := match M with La _ _ => False | _ => True end. Inductive nf : Term -> Prop := | Nf_var : forall n, nf (# n) | Nf_app : forall u v, neutral u -> nf u -> nf v -> nf (u · v) | Nf_abs : forall t, nf t -> forall A, nf (λ [A], t) . Lemma nf_norm : forall t, nf t -> forall u, ~ (t → u). Proof. induction 1 as [ v | U V hneutral hU hiU hV hiV | N hN hi A]. - now intros u hu; inversion hu. - intros u hu; inversion hu; subst; clear hu; [ now elim hneutral | |]. + now apply (hiU M'). + now apply (hiV N'). - intros u hu; inversion hu; subst; clear hu. now apply (hi M'). Qed. Lemma nf_sound : forall t, normal t -> nf t. Proof. induction t as [ v | U hiU V hiV| A M hi]; intros h; simpl in *. - now constructor. - destruct U as [ u | K L | B M]. + constructor; [ now idtac | |]. * apply hiU. now intros v hv; inversion hv. * apply hiV. intros W hW. now apply (h (#u · W)); constructor. + constructor; [ now idtac | |]. * apply hiU. intros W hW. now apply (h (W · V)); constructor. * apply hiV. intros W hW. now apply (h (K · L · W)); constructor. + now elim (h (M [← V])); constructor. - constructor. apply hi. intros W hW. now apply (h (λ [A], W)); constructor. Qed. Lemma Beta_dec : forall t, {u| t→ u}+{nf t}. Proof. induction t as [ u | U hiU V hiV | A M hi]; simpl in *. - right; now constructor. - destruct hiU as [[u hu] | hu]. + now left; exists (u · V); constructor. + destruct hiV as [[v hv] | hv]. * now left; exists (U · v); constructor. * destruct U as [ w | K L | A M]. -- now right; constructor. -- now right; constructor. -- left; exists (M [← V]); now constructor. - destruct hi as [[N hN] | h]. + left; exists (λ [A], N); now constructor. + now right; constructor. Qed. Inductive Betas: Term -> Term -> Prop := | Betas_refl : forall M, Betas M M | Betas_trans: forall M N P, Beta M N -> Betas N P -> Betas M P. Lemma Betas_Beta: forall M N, Beta M N -> Betas M N. Proof. intros M N h; econstructor. - now apply h. - now constructor. Qed. 
Lemma Betas_Betas: forall M N, Betas M N -> forall P, Betas N P -> Betas M P. Proof. induction 1 as [ M | M N P hMN hi1 hNP hi2]; intros; [ assumption |]. econstructor; [ now apply hMN | ]. now apply hNP. Qed. Lemma Betas_App: forall M N, Betas M N -> forall U V, Betas U V -> Betas (M· U) (N · V). Proof. induction 1 as [ M | M N P hMN hNP hi]; intros U V h. - induction h as [ U | U V P hUV hVP hi]; [ now constructor | ]. eapply Betas_Betas. + apply Betas_Beta; apply Beta_red2; exact hUV. + assumption. - eapply Betas_Betas. + apply Betas_Beta; apply Beta_red1; exact hMN. + now apply hi. Qed. Lemma Betas_Lam: forall M N, Betas M N -> forall A, Betas (λ [A], M) (λ [A], N). Proof. induction 1 as [ M | M N P hMN hNP hi]; intros A; [ now constructor | ]. eapply Betas_Betas. + apply Betas_Beta; apply Beta_lam; now apply hMN. + now apply hi. Qed. Lemma Betas_lift: forall M N, Betas M N -> forall n m, Betas (M ↑ n # m) (N ↑ n # m). Proof. induction 1 as [ M | M N P hMN hNP hi]; intros n m; simpl in *. - now constructor. - econstructor. + apply Beta_lift. now apply hMN. + now apply hi. Qed. Lemma Beta_subst_l : forall P M N, M → N -> forall n, Betas (P [ n ← M]) (P [ n ← N]). Proof. induction P as [ v | U hiU V hiV | A B hi]; intros M N hred n; simpl in *. - destruct lt_eq_lt_dec as [ [] | ]; [ now constructor | | now constructor ]. now apply Betas_lift; apply Betas_Beta. - apply Betas_App; [ now apply hiU | now apply hiV]. - apply Betas_Lam; now apply hi. Qed. (* Strong -> Weak *) Lemma norm : forall t, SN t -> { u | Betas t u /\ nf u}. Proof. induction 1 as [ N hN hi]. destruct (Beta_dec N) as [[ P hP] | hnf]. - destruct (hi P hP) as [Q [h1 h2]]. exists Q; split; [ | assumption]. now apply Betas_trans with (N := P). - exists N; split; [ | assumption]. now constructor. Qed. Fixpoint reducible (ty : Ty) (M : Term) : Prop := match ty with | Base => SN M | T1 ⇒ T2 => forall N, reducible T1 N -> reducible T2 (M · N) end. Lemma CR2: forall T M N, M → N -> reducible T M -> reducible T N. Proof. induction T as [ | T1 h1 T2 h2]; intros M N hred hcand; simpl in *. - now apply hcand. - intros Q hQ; simpl in *. eapply h2. + constructor. now apply hred. + now apply hcand. Qed. Lemma CR2s: forall T M N, Betas M N -> reducible T M -> reducible T N. Proof. intro T. induction 1 as [ M | M N P hMN hNP hi]; intros h; simpl in *; [ assumption | ]. apply hi. now apply CR2 with (M := M). Qed. Lemma acc_preservation_: forall A B (RA : relation A) (RB : relation B) (f : A -> B) a, (forall x y, RA x y -> RB (f x) (f y)) -> forall z, Acc RB z -> z = f a -> Acc RA a. Proof. intros A B RA RB f a h z hacc. revert RA f a h. induction hacc as [x hx hi]; intros RA f a h heq; subst. constructor; intros b hb. eapply hi. - eapply h. now apply hb. - now apply h. - reflexivity. Qed. Lemma acc_preservation: forall A B (RA : relation A) (RB : relation B) (f : A -> B) a, (forall x y, RA x y -> RB (f x) (f y)) -> Acc RB (f a) -> Acc RA a. Proof. intros; eapply acc_preservation_. now apply H. now apply H0. reflexivity. Qed. Lemma Acc_ind':forall (A : Type) (R : relation A) (P : A -> Prop), (forall x, (forall y, R y x -> P y) -> P x) -> forall x, Acc R x -> P x. Proof. intros A R P h x hacc. eapply Acc_ind; [ | now apply hacc]. intros y hy h'. apply h. exact h'. Qed. Lemma CR1_and_CR3: forall T, (forall M, reducible T M -> SN M) /\ (forall M, neutral M -> (forall N, M → N -> reducible T N) -> reducible T M). Proof. induction T as [ | T1 [hi1 hi1'] T2 [hi2 hi2']]; simpl in *; split. - intros M hM; assumption. 
- intros M _ h; constructor; exact h. - intros M hred. assert (h : SN ((fun M => M · #0) M)). + apply hi2, hi2'; [ now idtac | ]. intros N h; apply (CR2 _ h). apply hred, hi1'; [ now idtac | ]. now intros x hh; inversion hh. + apply acc_preservation with (f := fun M => M · #0) (RB := transp _ Beta); [ | assumption]. intros x y hb; now constructor. - intros M hn h N hred. assert (hSN : SN N) by now apply hi1. revert hred. elim hSN using Acc_ind'; clear N hSN. intros N hN hi. apply hi2'; [ now idtac | ]. intros MN hMN; inversion hMN; subst; clear hMN; [ now elim hn | now apply h|]. apply hN; [ assumption | ]. now apply CR2 with (M := N). Qed. Lemma CR1 : forall T M, reducible T M -> SN M. Proof. intros T M; eapply CR1_and_CR3. Qed. Lemma CR3: forall T M, neutral M -> (forall N, M → N -> reducible T N) -> reducible T M. Proof. intros T M; eapply CR1_and_CR3. Qed. Lemma var_reducibility: forall v T, reducible T # v. Proof. intros v T; apply CR3; [ now idtac | ]. now intros u h; inversion h. Qed. Lemma red_sat M P : boccur 0 M = true \/ SN P -> forall T, reducible T (M [ ← P]) -> forall A, reducible T ((λ[A], M)· P). Proof. intros h T hred A. assert (hP : SN P). - destruct h as [ h | h]; [ | exact h]. apply CR1 in hred. now apply SN_subst_inv_l in hred. - clear h; revert M hred. induction hP as [ P _ hiP]; unfold transp in *. intros M hred. generalize hred. cut (SN M). + simple induction 1. clear M hred H; intros M _ hi hred; unfold transp in *. apply CR3; [ now idtac | ]. intros N hbeta. inversion hbeta; subst; clear hbeta; [ assumption | | ]. * inversion H2; subst; clear H2. apply hi; [ assumption | ]. apply CR2 with (M [← P]); [ now apply Beta_subst | assumption]. * apply hiP; [ assumption | ]. apply CR2s with (M [ ← P]); [ | assumption]. now apply Beta_subst_l. + apply SN_subst with P. now apply CR1 with T. Qed. Fixpoint subst_list n l M : Term := match M with | #v => if le_gt_dec n v then lift_rec n 0 (nth (v - n) l (# (v - n - length l))) else #v | M · N => (subst_list n l M) · (subst_list n l N) | λ [A], M => λ [A], (subst_list (S n) l M) end . Lemma subst_list_nil n M : subst_list n nil M = M. Proof. revert n; induction M as [ v | U hiU V hiV | A M hi]; intros n; simpl in *. - destruct le_gt_dec; [ | reflexivity ]. rewrite <- minus_n_O. replace (match v - n with | O | _ => #(v - n) end) with #(v - n) by now destruct (v - n). now simpl; f_equal; omega. - f_equal; [ now rewrite hiU | now rewrite hiV]. - f_equal; now rewrite hi. Qed. Lemma subst_shift_cancel n d c l M : c <= n -> length l + n <= d + c -> subst_list n l (lift_rec d c M) = lift_rec (d - length l) c M. Proof. revert n d c l. induction M as [ v | U hiU V hiV | A M hi]; intros n d c l h1 h2; simpl in *. - destruct le_gt_dec; simpl in *. + destruct le_gt_dec; simpl in *; [ | now omega]. rewrite nth_overflow; [ | now omega]. now simpl; f_equal; omega. + destruct le_gt_dec; simpl in *; [ now omega | reflexivity ]. - f_equal. + now apply hiU. + now apply hiV. - f_equal; apply hi;now omega. Qed. Lemma subst_list_app n k l M : subst_list n k (subst_list (length k + n) l M) = subst_list n (k ++ l) M. Proof. revert M n; induction M as [ v | U hiU V hiV | A M hi]; simpl in *; intros n. - rewrite app_length. destruct (le_gt_dec n v); destruct le_gt_dec; simpl; [ | | now omega| ]. + rewrite app_nth2; [ | now omega]. rewrite subst_shift_cancel; [ | now omega | now omega]. f_equal; [ now omega | ]. f_equal; [ now omega | ]. f_equal; now omega. + destruct le_gt_dec; [ | now omega]. f_equal. rewrite app_nth1; [ | now omega]. 
f_equal; f_equal; now omega. + destruct le_gt_dec; [ now omega | reflexivity ]. - f_equal; [ now apply hiU | now apply hiV]. - f_equal. replace (S (length k + n)) with (length k + S n) by omega. now apply hi. Qed. (* just to test it's ok with subst_rec *) Lemma subst_list_ok: forall M n N, M [ n← N] = subst_list n (N :: nil) M. Proof. induction M as [ v | U hiU V hiV | A M hi]; intros n N; simpl in *. - destruct lt_eq_lt_dec as [ [] | ]. + destruct le_gt_dec; [ now omega | reflexivity]. + destruct le_gt_dec; [ | now omega]; subst. now rewrite <- minus_n_n. + destruct le_gt_dec; [ | now omega ]. case_eq (v - n); [ now omega | ]. intros q heq; subst. replace (match q with | O | _ => # (S q - 1) end ) with # (S q - 1). * simpl; f_equal. rewrite <- minus_n_O. now omega. * now destruct q. - f_equal. + now apply hiU. + now apply hiV. - f_equal; now apply hi. Qed. Definition left_list (A B: Type) (l: list (A * B)) := map fst l. Definition right_list (A B: Type) (l: list (A * B)) := map snd l. Lemma left_list_length: forall A B (l: list (A * B)), length (left_list l) = length l. Proof. intros A B l. unfold left_list; now rewrite map_length. Qed. Lemma reduce_lemma: forall (Δ : list (Term * Ty)) Γ M T, (right_list Δ) ++ Γ ⊢ M : T -> Forall (fun (x: Term * Ty) => let (tm, ty) := x in reducible ty tm) Δ -> reducible T (subst_list 0 (left_list Δ) M). Proof. intros Δ Γ M T; revert M T Γ Δ. induction M as [ v | U hiU V hiV | A M hi]; intros T Γ Δ; simpl in *. - simpl. rewrite <- minus_n_O in *. rewrite lift0, left_list_length. revert v. induction Δ as [ | hd tl hi]; simpl in *; intros v. + replace (reducible T match v with | 0 | _ => # (v - 0) end) with (reducible T #v) by now destruct v. intros _ _; now apply var_reducibility. + intros hty hf. inversion hf; subst; clear hf. destruct hd as [hdT hdTy]; simpl in *. destruct v. * inversion hty; subst; clear hty. now inversion H3; subst; clear H3. * apply hi; [ | assumption ]. inversion hty; subst; clear hty. inversion H3; subst; clear H3. now constructor. - intros hty hf. inversion hty; subst; clear hty. generalize (hiU (A ⇒ T) Γ Δ H2 hf); simpl; intros h. apply h. now apply hiV with Γ. - intros hty hf. inversion hty; subst; clear hty; simpl; intros N hN. apply red_sat; [ right; now apply CR1 with A| ]. rewrite subst_list_ok, subst_list_app; simpl. change (N :: left_list Δ) with (left_list ((N, A) :: Δ)). apply hi with Γ; [ simpl; assumption | ]. now constructor. Qed. Lemma typ_are_SN: forall Γ M T, Γ ⊢ M : T -> SN M. Proof. intros Γ M T hty. assert (h : reducible T (subst_list 0 (left_list nil) M)) by now apply reduce_lemma with Γ. simpl in h. rewrite subst_list_nil in h. now apply CR1 with T. Qed.
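A short, hedged sanity check added after the Coq development above (not part of the original file; it assumes the definitions compile exactly as written): the identity abstraction over the base type is typable in the empty context, and typ_are_SN then yields its strong normalization.

(* Added example (sketch): well-typedness and strong normalization of the identity on Base. *)
Example id_typed : nil ⊢ λ [Base], #0 : Base ⇒ Base.
Proof.
  (* cLa reduces the goal to Base::nil ⊢ #0 : Base, which cVar and item_hd close. *)
  repeat constructor.
Qed.

Example id_SN : SN (λ [Base], #0).
Proof.
  (* Strong normalization follows directly from the main theorem applied to the typing derivation. *)
  exact (typ_are_SN _ _ _ id_typed).
Qed.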
-- Andreas, 2012-03-09 do not solve relevant meta variable by irr. constraint module Issue351a where open import Common.Irrelevance open import Common.Equality data Bool : Set where true false : Bool -- the Boolean b is not(!) constrained by the equation f : (b : Bool) -> squash b ≡ squash true -> Bool f b _ = b test = f _ refl -- meta needs to remain unsolved
function mc_policy_eval!(mdp, V, action_probability::Function;
                         iterations = 100,
                         Neps = 100,
                         verbose = true,
                         max_steps_per_ep = 1000,
                         opt = ADAM(1e-3),
                         batchsize = 32,
                         shuffle = true,
                         rng::AbstractRNG = Random.GLOBAL_RNG,
                         a_and_p = action_and_probability,
                         logger = TBLogger("log/", tb_increment),
                         log_freq = 1,
                         exploration_policy = nothing)
    log = !isnothing(logger)
    loss = (x, y) -> Flux.mse(V(x), y)
    best_avgR = 0
    for iter in 1:iterations
        verbose && println("iteration: ", iter)

        # Set up the exploration policy. If none is provided, use the current value network.
        isnothing(exploration_policy) && (exploration_policy = ISPolicy(V, mdp, action_probability, rng))

        # Sample episodes. S is the states, G is the per-state return (with IS weights),
        # avgR is the average undiscounted return across episodes.
        S, G, avgR = sample_episodes(mdp, exploration_policy, Neps, max_steps = max_steps_per_ep, a_and_p = a_and_p)

        # If the average return is the best seen so far (i.e. more failures found), save the model
        # and record the new best so later iterations are compared against it.
        if log && avgR > best_avgR
            best_avgR = avgR
            @save string(logger.logdir, "/best_model.bson") V
        end

        # Load the data into a DataLoader and train the value network.
        data = Flux.Data.DataLoader(S, G, batchsize = batchsize, shuffle = shuffle)
        Flux.train!(loss, Flux.params(V), data, opt)

        # If we are logging, record the loss and the average return.
        (!log || iter % log_freq != 0) && continue
        with_logger(logger) do
            @info "Training" loss=loss(S, G) undiscounted_return = avgR
            @save string(logger.logdir, "/last_model.bson") V
        end
    end
end
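A hypothetical invocation sketch (added here, not part of the original file): `my_mdp` and `uniform_prob` are placeholders for a concrete POMDPs.jl problem and a nominal action-probability function, whose exact signature is an assumption, and the call assumes `sample_episodes`, `ISPolicy`, and `action_and_probability` from the surrounding package are in scope.

using Flux, POMDPs

# Hypothetical stand-ins -- not part of the original code.
V = Chain(Dense(2, 32, relu), Dense(32, 1))            # value network over (assumed) 2-d state vectors
uniform_prob(mdp, s, a) = 1.0 / length(actions(mdp))   # assumed signature for the nominal action distribution

# `my_mdp` would be a concrete MDP defined elsewhere; `logger = nothing` disables TensorBoard logging.
mc_policy_eval!(my_mdp, V, uniform_prob;
                iterations = 50, Neps = 200,
                opt = ADAM(1e-3), logger = nothing)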
using Test using POMDPs using POMDPTesting using POMDPModelTools import POMDPs: transition, observation, initialstate, updater, states, actions, observations struct TestPOMDP <: POMDP{Bool, Bool, Bool} end updater(problem::TestPOMDP) = DiscreteUpdater(problem) initialstate(::TestPOMDP) = BoolDistribution(0.0) transition(p::TestPOMDP, s, a) = BoolDistribution(0.5) observation(p::TestPOMDP, a, sp) = BoolDistribution(0.5) states(p::TestPOMDP) = (true, false) actions(p::TestPOMDP) = (true, false) observations(p::TestPOMDP) = (true, false) @testset "model" begin m = TestPOMDP() @test has_consistent_initial_distribution(m) @test has_consistent_transition_distributions(m) @test has_consistent_observation_distributions(m) @test has_consistent_distributions(m) end @testset "old model" begin probability_check(TestPOMDP()) end @testset "support mismatch" begin struct SupportMismatchPOMDP <: POMDP{Int, Int, Int} end POMDPs.states(::SupportMismatchPOMDP) = 1:2 POMDPs.actions(::SupportMismatchPOMDP) = 1:2 POMDPs.observations(::SupportMismatchPOMDP) = 1:2 POMDPs.initialstate(::SupportMismatchPOMDP) = Deterministic(3) POMDPs.transition(::SupportMismatchPOMDP, s, a) = SparseCat([1, 2, 3], [1.0, 0.0, 0.1]) POMDPs.observation(::SupportMismatchPOMDP, s, a, sp) = SparseCat([1, 2, 3], [1.0, 0.0, 0.1]) @test !has_consistent_transition_distributions(SupportMismatchPOMDP()) @test !has_consistent_observation_distributions(SupportMismatchPOMDP()) @test !has_consistent_distributions(SupportMismatchPOMDP()) end
module Test.Spec import Specdris.Spec import Data.Functor.Foldable import Data.Vect import Control.Comonad.Cofree -- FIXME morphisms with constraints? particularly re: symmteries -- "safe" compilation using this? -- particularly discrete pseudo-hamiltonian dynamics -- oh man finite symplectic geometry -- TODO computation of pi from http://www.cs.ox.ac.uk/jeremy.gibbons/publications/metamorphisms-mpc.pdf naturals : Nat -> ListF Nat Nat naturals Z = NilF naturals (S n) = Cons (n + 1) n -- This is also an instructive use of cofree comonads! -- Do note that it indexes starting at 0. catalan : Nat -> Nat catalan = dyna coalgebra naturals where coalgebra : ListF Nat (Cofree (ListF Nat) Nat) -> Nat coalgebra NilF = 1 coalgebra (Cons n table) = sum (Prelude.List.zipWith (*) xs (reverse xs)) where xs = take n table take : Nat -> (Cofree (ListF Nat) Nat) -> List Nat take Z _ = [] take (S n) (Co a NilF) = [a] take (S n) (Co a (Cons v as)) = a :: take n as roundedSqrt : Nat -> Nat roundedSqrt = cast . cast {to=Integer} . sqrt . cast toN : Nat -> List Nat toN = reverse . ana naturals isPrime : Nat -> List Nat -> Bool isPrime n ns = all (\a => mod n a /= 0) (filter (<= (roundedSqrt n)) ns) dedup : (Eq a) => List a -> List a dedup = para pseudoalgebra where pseudoalgebra : (Eq a) => ListF a (List a, List a) -> List a pseudoalgebra NilF = [] pseudoalgebra (Cons x (past, xs)) = if elem x past then xs else x :: xs evenOdd : Nat -> Bool evenOdd = mutu odd even where odd : Maybe (Bool, Bool) -> Bool odd Nothing = False odd (Just (_, b)) = b even : Maybe (Bool, Bool) -> Bool even Nothing = True even (Just (_, b)) = b collatzCoalgebra : Int -> Either (List Int) (ListF Int Int) collatzCoalgebra 1 = Left [1] collatzCoalgebra 2 = Left [2, 1] collatzCoalgebra 3 = Left [3, 10, 5, 16, 8, 4, 2, 1] collatzCoalgebra 4 = Left [6, 3, 10, 5, 16, 8, 4, 2, 1] collatzCoalgebra n with (modInt n 2) | 0 = Right $ Cons n (divInt n 2) | _ = Right $ Cons n (3 * n + 1) collatz : Int -> List Int collatz = micro collatzCoalgebra elgotCoalgebra : List a -> Either (List (List a)) (ListF (List a) (List a)) elgotCoalgebra [] = Right NilF elgotCoalgebra (x :: []) = Left ([[x]]) elgotCoalgebra (x :: xs) = Right (Cons (x :: xs) xs) -- fibonacci zygomorphism? zygoPseudoalgebra : ListF Int (Bool, Int) -> Int zygoPseudoalgebra NilF = 0 zygoPseudoalgebra (Cons n (b, x)) = if b then (n+x) else (n-x) zygoAlgebra : ListF Int Bool -> Bool zygoAlgebra NilF = False zygoAlgebra (Cons _ bool) = not bool plusMinus : List Int -> Int plusMinus = zygo zygoAlgebra zygoPseudoalgebra algebra' : ListF (List a) (List a) -> List a algebra' NilF = [] algebra' (Cons x xs) = x ++ xs cataConcat : List (List a) -> List a cataConcat = cata algebra' algebra : ListF (List a) (List (List a)) -> List (List a) algebra NilF = [] algebra (Cons x xs) = x::xs coalgebra : List a -> ListF (List a) (List a) coalgebra (x::xs) = Cons (x::xs) xs coalgebra [] = NilF suffix : List a -> List (List a) suffix = hylo algebra coalgebra . drop 1 export specSuite : IO () specSuite = spec $ do describe "hylo" $ it "should be able to implement the suffix function" $ (suffix . unpack) "ego" `shouldBe` [['g','o'], ['o']] describe "cata" $ it "should be able to implement 'concat'" $ (cataConcat . 
map unpack) [ "I", "am" ] `shouldBe` ['I', 'a', 'm'] describe "zygo" $ it "should be able to implement plusMinus" $ plusMinus [1,2,3] `shouldBe` -4 describe "micro" $ it "should provide a simple way to compute the Collatz sequence associated with a number" $ collatz 12 `shouldBe` [12, 6, 3, 10, 5, 16, 8, 4, 2, 1] describe "mutu" $ it "should be able to do recursion on the natural numbers to check for parity" $ (evenOdd . fromIntegerNat) 10 `shouldBe` True describe "para" $ it "should provide an elegant way to remove duplicates from a list when order doesn't matter" $ dedup [1,1,2,3,4,5,4] `shouldBe` [1,2,3,5,4] describe "dyna" $ it "should do something with catalan numbers" $ catalan 6 `shouldBe` 132 describe "ana" $ it "should give the first n naturals" $ toN 5 `shouldBe` [1,2,3,4,5]
I wrote something yesterday while I was feeling pretty energized. I sat myself down and let the feeling be. At some point I really felt the urge to write, I mean, I scoured my desk for a pencil because I didn't want to miss something. I ended up posting it on a closed group that I am a part of on FB. I posted a reply to show where I was coming from, after seeing the time that I posted it I think I rolled my eyes a bit... "As if!" Still don't know why, and if I add up all the things where I'd say "still don't know why" I could summarize that there's a bigger picture. Something of a positive distraction while nature works at her pace. there's another curious thing that happens when I get worked-up or feel like I am getting pulled down. It's likely that I've mentioned this before. Songs play in my head, very specific songs with lyrical content that (though subjective and absolutely biased ) speak to my exact situation. I'd be worried if the hints were negative, but they are always trying to life me up. I think it's pretty cool and it makes me smile. She said "I'll take you some place where I know it will change" "Arise, remove your earth, shake off your dust, raise yourself, that you may travel in company with the spirits, for your wings are those of a falcon, your gleam is that of a star..." 'It is man's duty not to acquiesce in his merely human state, but rather, in the strength of his contemplation of things divine, to scorn and despise the mortal part which has been attached to him because it was needful that he should keep and tend his lower world. 'If man takes upon him in all its fullness the function assigned to him, that is, the tendence which is his special task, he becomes the means of right order to the Kosmos, and the Kosmos to him.'" I am pasting something Roger wrote to Jan because it fits well with my previous post. I was looking at older posts and found this. I also found one where you mention basically what is happening right now with the US and Syria. Sorry if this moves about in too many directions but I feel the need to post it in this way. Let’s be a little provocative: to me, a grown up human being is someone who naturally relates to the ‘Unus Mundus’ because doing this has become a fully integrated function corresponding to a bilateral or reciprocal need: the need to consciously feel one’s working belonging to the universe, and the need for the universe to be consciously recognized in its living relatedness, Eros. So paradoxically in the culmination of 'amour-propre', self-love, where we in some ways seek nothing but our own good and own well-being, the ego and its will principle become so 'immersed' and unified with the greater field of energy, that they 'vanish' alltogether, because there is nothing egoic that 'stands out' anymore, and the sense of self-ness becomes condensed to its most basic form of simple 'being'. As any natural growth process, Unio corporalis requires time. It also requires conscious acceptance. What I mean by ‘natural relation’ is that the relating process has become integrated, that is to say ‘works’ as a ‘natural function’, another ‘sense’. Of course the personality shift it implies is the fruit of this progressive growth process. On this path paved with abandoned ego desires, the (pavlovian? ) wish for bliss eventually finds its place as something else unfolds. The ‘crystal body’ is not the exclusive treasure of some people considered as ‘holy hermits’. (If they are ‘holy’ it is because they are “whole-y”, and no saint at all by the way). 
It also permeates, filtrates through, and finally gilds the surroundings of the life of ‘simple beings’ as you put it. It is, and incarnates in the acts (not necessarily ‘deeds’) of the individuals concerned. Was reading something of Remo's work tending to the Child archetype. I had to laugh a bit, seems I was setup (life conditions) to have to deal with it. My impression was that out of two images the child appears, yet split, and is the next image to contend with. *I'm putting my own words to that to give a description of what it feels like. "Image" Edit: I had the chance to speak with my parents about being adopted. Many interesting things came from that. In the conversation I explained that this heartbeat that formed me I had no choice to become unfamiliar with regardless of how much love they gave me when I was young. It's not their "fault" that I have things to deal with in that regard, I said. My dad said "you were our chosen son". This morning (in the shower!) I remembered the gift I received from them on my 1st birthday, a drum (a new beat). *edit: just read (today, Monday) that everything in nature is dual - 4. "So maybe Hillman was right. We don’t have to ground the puer out of fear for his extravagances. Wait a while, and life will bring him closer to earth." How's this for a type-o? Lapis Philosophorum: Also known as the ultima materia, aqua permanens (=its libido aspect), rubedo tinctura, filius macrocosmi or philosophorum, quinta essentia, panacea, medicina catholica, rotundrum, elixir vitae, lapis exilis (stone of no worth), everlasting food): the Philosopher's Stone, prized goal of alchemy. According to legend, the Stone, a freed form of the spirit of Mercurius trapped within the prima materia or initially unprocessed raw material, grants immortality, heals all disease, and transforms base metals into gold. Jung saw it as a Self symbol--one compensating Christ--and the goal of individuation. Zeus was heavy-handed at times, as a result I had difficulty in storms because I was afraid to get hit by lightning. Might not be good to carry that with me, or maybe I should just to let it clear. i am unsure about that. Might as well keep the ball rolling. There are a couple of people who helping me with an intense time. I feel pretty blessed in a lot of ways. One song sent me on a search and I ended up here. Call me crazy, go ahead, nothing can take this away from me. I wrote on a FB group page that I've wept what felt like blood on my drums. I just read this page and I guess it makes more sense. *btw, I'm tired of talking about them. this morning I was wondering if individuation was the culmination of Romanticism, it was a totally random thought - I've never really studied Romanticism. I started to search that idea and found some really (really) cool stuff. Hi, I hope everyone who still reads here is doing well. I have an interface that allows me to play into my computer and record the random stuff that I am trying to process. Anyway, the other day I was playing and looking at the guitar strings vibrate. At that time I was playing something I thought was pretty cool, and thought that I should stop the recording and make a note about it. I stopped the recording at the 137th bar (I didn't see the screen, just reached up and pressed the space bar). This is meaningful to me because I felt really good about what I was playing! there's something I wanted to write about on the day it happened but I've let it sit for a while. Just now I felt compelled to write about it. 
I often read Gregory's posts and find some ideas in there that are very similar to things I may have been contemplating at the time, etc. Well, this last one was pretty vivid. I was walking my dog and kind of laughed at a random idea that came up. I was thinking of some kind of peg leg person playing some kind of pirate music. I don't know why but I felt it was comical, and I could visualize it pretty well. Shortly after that day I came across Gregory's post talking about the golden peg leg. Ok, fine, similar theme. Why the hell would I think about it in time with that post? I've looked at it a bunch of ways and I still just have to let it be. Not sure what to make of it, if anything at all. Pascal, I am glad to see you posting about complex and deeply meaningful synchronicities. I have been experiencing lots of them many times a day for several months. Other people I know in person or online are also having visions and encountering new people in their every day lives who are also surprisingly having deep spiritual aspirations recently arising in them. I have been thinking frequently of our deceased blog mate Ann Elliot. She wrote mystical books that are still posted online by her family. I think there is one I read a few years ago... and in a recent dream I heard an inner voice say that I will discover answers to some of my deepest concerns from Ann in a very specific book... I will recognize when I go to see again what it might be. Thank You for your continuing participation all these years in preserving the Ann/Suzanne Blog. I have two wonderful daughters nearby where I live... but you in effect are like a wonderful son at a distance who shares all of our similar devotion to knowing we have a purpose in our lives now and most likely will be in the same afterlife locale together. thanks for a being like a mom at a distance! I can't even describe how much I have learned from you and this forum, it's pretty amazing. For instance, again, Greg's posts. I recently wrote the words PANIK to you in a message about an event I attended (that word was part of a synchronicity). Now I read his latest post and laughed out loud. I suppose I just have to settle in to the fact that it's all connected. *edit: after submitting my post I read it. The image fit so perfectly with the idea of a "mom at a distance"! Today I read the most recent post in Gregory's thread. I swore, I got up, and I walked away from my desk. Why o why, that's all I can say. I don't mention mana for I don't know how long and there it is. I posted on FB this morning on a thread by a Jungian named Craig, it was about Elijah and Mana. I know where I'm stuck. Why? The only song I've ever recorded and put spoken word to is a song I named THESEUS. I had no idea what Theseus was about when I named it. Just read another thread, I wonder if the forum will close? I hope not but it is a possibility. You must feel a little lonely now on the forum. A little, but it’s ok, we have our reasons. For the past couple of years I thought to let it go, but this magical life keeps offering reasons to stay. It’s weird, I just thought to post here because of a song lyric that I am listening to. I debated it because - not sure why. Hi All, Actually it was not us but Remo who started the Ann-Suzanne Blog. He clipped it off from an earlier thread. Of course many of our old faithful posters here and throughout the rest of Unus Mundus no longer post. 
Some reasons have been that they were moving into work situations and would not be making personal experiences and opinions open to be read by folks wandering in who do not become posters. One of the largest reasons for less devoted posting is, that sadly to us who loved them, many have died over several years mostly from old age. Some of those persons were among our most spiritual as well as intellectual posters. A few people in my personal life have asked me about the forum. I trusted them to come and read. They all replied that this is an amazing forum with wonderful posts that are superior to many new age books about the esoteric topics we have created and maintained I suppose for about 10 years now. I call all of this forum, not just Ann-Suzanne, but instead -- THE UNIVERSITY OF THE WORLD SOUL. One person in my family says it has so much astonishing information that the individual comes by once in awhile and reads more pages. However I do not think that any few people I might know are the ones bumping up the number of readers of our present Ann-Suzanne. Just a very few months ago, I commented to two family members that I expected the number of drop-in readers to reach over 200,000 soon. The number is now actually more than that. Every time we two post, there is a kind of surge of readers. I do not know who they are or why they are likely coming back and not just brand new viewers. Once in awhile I go to various entire forum topics and read some of the very oldest posts from when there were mostly then only a few members and then more coming in and joining in for months or years. It is actually a very fast surge within a day or two of new posts. Perhaps somebody may post who and why they come after reading my comment here. Actually every day and every night, I use the belly brain meditation method that Remo taught us all in the beginning of this wonderful gathering place. It has been a blessing to me and many old friends who were here years ago even if they are not with us any more. If there is a real afterlife, Ann and I wrote emails about probably meeting again over there in the dimension suitable to our mutual ideals and devotions. Today I realized that it is good that I don’t want to excomunicate the church from me. Christ is the pure light, Mercurius is its complement. If it takes over the light, then, I suppose it is a false light. Revealing itself in self-discovery. I asked recently “what is the subtrate onto which my thoughts are placed” - it is Mercurius. Not sure where to post this. In the past 36 hours or so I feel like Neo in the matrix. I’m tired from the info. Yesterday morning when I was coming out of sleep I heard a very loud popping sound, like snapping out of something, vacuum-like. Tired, but not exhausted, keeping my limits as best as I can. Art thou noble to receive? Know this: I, Mercurius, have here set down a full, true and infallible account of the Great Work. But I give you fair warning that unless you seek the true philosophical gold and not the gold of the vulgar, unless you heart is fixed with unbending intent on the true Stone of the Philosophers, unless you are steadfast in your quest, abiding by God’s laws in all faith and humility and eschewing all vanity, conceit, falsehood, intemperance, pride, lust and faint-heartedness, read no farther lest I prove fatal to you. For I am the watery venomous serpent who lies buried at the earth’s centre; I am the fiery dragon who flies through the air. I am the one thing necessary for the whole Opus. 
I am the spirit of metals, the fire which does not burn, the water which does not wet the hands. If you find the way to slay me you will find the philosophical mercury of the wise, even the White Stone beloved of the Philosophers. If you find the way to raise me up again, you will find the philosophical sulphur, that is, the Red Stone and Elixir of Life. Obey me and I will be your servant; free me and I will be your friend. Enslave me and I am a dangerous enemy; command me and I will make you mad; give me life and you will die. This will be a shorter reply tonight. I will add more tomorrow. Where I live near Washington DC, we had an alert this afternoon that a serious tornado was coming our way. That ended for some hours what I wanted to write about the topic right above here from my blog mate. If World War Annihilation takes place sometime soon, because of the insanities going on among many nation leaders and religious leaders who literally want to have the "End Times"... even though supposedly predicted in ancient times... come true specifically in our current time. I was very young just out of high school when I started college in Washington D.C. We young people in the Girl's Apartment house were terrified. It was the time of the Cuban Missile Crisis. There was a famous movie called "On the Beach" that portrayed a rather small group of people who had not been blown up like the rest of our world but were gathering together on the open sand areas while waiting in horror that the clouds of radiation would soon come their way and kill them. There was also the grim song that "we will all go together when we go... as there would be no living humans anywhere else on our blasted and poisoned planet. With all the far more powerful nuclear weapons that are horded by several nations, once the first attacks begin, all the other leaders who have their own nukes will join in the launches. THE END OF US ALL. Well, I started reading about history, religion, what happened in earlier centuries and why can't we figure out how to prevent this heartbreaking end of various men, women, and children who were at least sometimes capable of love, kindness, helping others, and at least trying to understand about the other races, religions, and numerous political fanaticisms. I still have hundreds of books that I was reading from around age 15 and trying to figure out if there was any purpose to our lives at all. This week in my Senior Residence, I am seriously trying to get rid of piles of old documents and letters, but I keep on finding and wanting to hold on to the ones that gave me some concepts of faith and caring about people of all backgrounds. Today I got an email that is offering new additions to one type of books that I studied in a Edgar Cayce group where I lived some decades ago. For now I am only pasting in here what the email said just before I was on my way to a safe room to hide from the tornado that of course tends to stir up fears of an abrupt end of our lives while we still want more time with our children and grandchildren. Are you looking for a deeper understanding of your spiritual nature and your place in the universe? Decades ago, a group of ordinary individuals were seeking the same answers to life’s questions that are still sought by many of us today. What the Edgar Cayce readings had to offer was a broader view as seen from the perspective of our higher selves—where there is no division, no separation by religion, only oneness. 
This view incorporates the philosophies of the world into a practical daily application for living that brings hope and profound change into the lives of those who apply it. Hi Suzanne, What I wanted to ask is: have we talked about Carl Rodgers. Maybe Rodgers is one of the many interested in whatever we seem to be getting more deeply involved in. I got fascinated with the Urantia material today that I had heard of before but never taken the time to try and understand its origin or purpose and today why Wieman's ideas would be incorporated in it. If as they say it was written or compiled for a time after the present crises through which civilization is passing then this would suggest that his ideas which were probably too far ahead the more popular 20th C theologians (for instance Tillich and Barth) his Process concepts are right for the universal perspective of the future. Hi Ann, I think we are on to something with Wieman and Carl Rodgers for instance that is at least very important to our own spiritual progress but will probably be helpful to others too. Also at some point, maybe it would be the time to add the Wink material, as the disciples' feeling of Jesus' continuing presence possibly can be tied into the notion of the "communion of saints" in which love and inspiration can still be shared between those of like mind and heart whether still alive or deceased. Offhand, I think Wieman, Russell, and others probably are aware of our efforts. In my last E-mail to you I gave an example about the others who may be helping Lee and Bob in their communications with us. Quote: Who are these many? He (Lee) said that my learning to understand about the often symbolic and synchronistic nature of our communications, which can lead to misunderstandings at times, is an ongoing process, and we will get better at it as we go along. I asked again, who are the many, who are some of them? He replied, do you remember who was always talking about how we are all in an ongoing process in our development? I thought about it... Dana Roblee (the professor who was Lee's best friend) used to say, we are in a process of becoming, the outcome of which we cannot know and which is probably infinite. Yes, Lee replied, Dana is one of the many with me. But, who else I asked? He said, listen carefully now, you will hear who else. I thought I heard a faint tune, but it seemed to me a song without words. I could not think of any words. Since I was alone in the room where I was, I began to imitate the tune softly out loud, lalalala-lala-lalala-lala... well, at that I suddenly burst into tears... and I said, Jack! (my half brother from my father's first marriage who was born in 1919 - 24 years older than I was). He used to whistle that tune when he came over to see our father when I was little. It was "Lili Marlene", the song that German and American soldiers both loved. Jack had been in World War II and accepted German surrenders because he knew German fluently, and later he was a noted professor who had written about the rise of Hitler. In late 1979, I had a dream that I should send a birthday card to my brother Jack, his and my birthdays were one day apart, but he had lived far away all my adult life and I had rarely ever seen him. I sent the card. He wrote back and for the first time mentioned our father's funeral and some other personal things we had never talked about. The next July 4th, Jack died suddenly at age 60, in 1980. So, Lee, said, yes, your brother is another one of the many who are with me. 
Last edited by Suzanne on Sun Jun 10, 2018 8:44 pm, edited 1 time in total. I was searching info on the transcendent function and saw this as I was scrolling. This is something I should have read long ago considering that I post about it here. Hello Everybody, As usual I have been busy with practical matters in my everyday life. Now since I am well settled into my new residence, it is much easier for me to be organizing the esoteric material from my old hand-written notebooks going back to from the 1960s to the late 1990s. My Blog mate Ann Elliott, when she was still alive and posting here for several years, used to exchange private emails with me about personal and family topics, or if either of us was sick or traveling, but we usually did not plan specific topics because that would ruin the "meaningful coincidence" of the Blog and some other topic threads on the rest of the forum. She knew from my telling her by email who Lee was and, as mentioned here before a few times, she received dictation communication from her deceased husband Bob Elliott who had studied to be a Shaman with Michael Harner. At first her husband reported that he knew of Lee's work with others in the Beyond but that he did not know Lee because they were on a different level. This below is a copy of her post from 2008. First for any forum newcomers, I should mention that Ann and Bob had met Lao Russell, widow of Walter Russell, in person when Lao was visiting and lecturing in California near to where Ann and her husband lived. Lao and Ann wrote many letters to one another for quite a number of years. Many of the vibrational sound combined with color healers of today derived part of their inspiration from Dr. Russell who was a classical organist himself and from his scientific charts and psychedelic cosmic paintings -- well before the hippies and other young interdimensional travelers were reporting such experiences widely in our American culture. I live in Virginia only several hours away from the beautiful Swannaoa Palace in Waynesboro Virginia where the Russells lived for several decades. I have been there many times over the years and had talks and walks with Lao Russell in person without being in a group visit. Since Lao passed on, there are some but not many of us older people aged 60 to 90 years old who knew her. Later this year, I am planning to meet up with some of those oldsters and the middle aged and much younger folks who know of and admire the teachings of Lao and Walter. With all the present fears about the possibility of worldwide nuclear war in our time, the Russell 1950s book Atomic Suicide is being read by newer generations. Finally below is the complete post that Ann wrote here in 2008. She is of course now wherever Bob is on the other side or in what ever higher realm they have moved to since then. Quotes from Bob by dictation through Ann who had also been Bob's secretary during his years as a lawyer. START: I have had no direct contact with Suzanne's friend Lee but I am aware of his presence here and the importance of his work and contribution. So much more can be accomplished when there is a conscious and deep love--such as we have—connection . She can and is assisting him and the larger work he is part of. I can tell you very little, both because I know little, understand less, and would not in any event be permitted to. However, she has the ability for direct communication with him and it should be through writing down what he instructs her as to how she can help him. 
Tell her about your work in Scotland in establishing the triangle. It is such a small piece of the puzzle we were given to work on/with. . . . There are a number of things I'd like to write about but I was searching some stuff in my computer just now and realized that I had a copy of Holy Wedding. -what is the deeper significance of synchronicity? Remo says it is BCI. Ok, well, I have a lot of them. Ridiculously meaningful ones. I want to know what to do with them, if that makes any sense. Follow them, yes, but mine are related to things that I do not fully understand. Perhaps I have gained some understanding of the qm subject along the way that I did not have before, but it's not like I'll make a career out of it. Anyway, synchronicity is incarnation in the mind, and bci incarnation in matter.. This might be for future reference. Edit: I guess there are times that I’d like to thank them as well.
#!/usr/bin/env Rscript source("/tmp/class-libs.R") class_name = "Stat 131a" devtools::install_github('DataComputing/DataComputing', ref='d5cebba')
{-# OPTIONS --cubical --safe #-} module Cubical.Data.SumFin.Base where open import Cubical.Foundations.Prelude open import Cubical.Data.Empty using (⊥; ⊥-elim) public open import Cubical.Data.Unit using (tt) renaming (Unit to ⊤) public open import Cubical.Data.Sum using (_⊎_; inl; inr) public open import Cubical.Data.Nat open import Cubical.Relation.Nullary open import Cubical.Relation.Nullary.DecidableEq private variable k : ℕ Fin : ℕ → Type₀ Fin zero = ⊥ Fin (suc n) = ⊤ ⊎ (Fin n) pattern fzero = inl tt pattern fsuc n = inr n finj : Fin k → Fin (suc k) finj {suc k} fzero = fzero finj {suc k} (fsuc n) = fsuc (finj {k} n) toℕ : Fin k → ℕ toℕ {suc k} (inl tt) = zero toℕ {suc k} (inr x) = suc (toℕ {k} x) toℕ-injective : {m n : Fin k} → toℕ m ≡ toℕ n → m ≡ n toℕ-injective {suc k} {fzero} {fzero} _ = refl toℕ-injective {suc k} {fzero} {fsuc x} p = ⊥-elim (znots p) toℕ-injective {suc k} {fsuc m} {fzero} p = ⊥-elim (snotz p) toℕ-injective {suc k} {fsuc m} {fsuc x} p = cong fsuc (toℕ-injective (injSuc p)) -- Thus, Fin k is discrete discreteFin : Discrete (Fin k) discreteFin fj fk with discreteℕ (toℕ fj) (toℕ fk) ... | yes p = yes (toℕ-injective p) ... | no ¬p = no (λ p → ¬p (cong toℕ p)) isSetFin : isSet (Fin k) isSetFin = Discrete→isSet discreteFin
[STATEMENT] lemma add_leaf_awalk_preserv: fixes u v a defines "T' \<equiv> \<lparr>verts = verts T \<union> {v}, arcs = arcs T \<union> {a}, tail = (tail T)(a := u), head = (head T)(a := v)\<rparr>" assumes "a \<notin> arcs T" and "awalk x p y" shows "pre_digraph.awalk T' x p y" [PROOF STATE] proof (prove) goal (1 subgoal): 1. pre_digraph.awalk T' x p y [PROOF STEP] using assms add_leaf_cas_preserv [PROOF STATE] proof (prove) using this: T' \<equiv> \<lparr>verts = verts T \<union> {v}, arcs = arcs T \<union> {a}, tail = (tail T)(a := u), head = (head T)(a := v)\<rparr> a \<notin> arcs T awalk x p y \<lbrakk>?a \<notin> arcs T; set ?p \<subseteq> arcs T; cas ?x ?p ?y\<rbrakk> \<Longrightarrow> pre_digraph.cas \<lparr>verts = verts T \<union> {?v}, arcs = arcs T \<union> {?a}, tail = (tail T)(?a := ?u), head = (head T)(?a := ?v)\<rparr> ?x ?p ?y goal (1 subgoal): 1. pre_digraph.awalk T' x p y [PROOF STEP] unfolding pre_digraph.awalk_def [PROOF STATE] proof (prove) using this: T' \<equiv> \<lparr>verts = verts T \<union> {v}, arcs = arcs T \<union> {a}, tail = (tail T)(a := u), head = (head T)(a := v)\<rparr> a \<notin> arcs T x \<in> verts T \<and> set p \<subseteq> arcs T \<and> cas x p y \<lbrakk>?a \<notin> arcs T; set ?p \<subseteq> arcs T; cas ?x ?p ?y\<rbrakk> \<Longrightarrow> pre_digraph.cas \<lparr>verts = verts T \<union> {?v}, arcs = arcs T \<union> {?a}, tail = (tail T)(?a := ?u), head = (head T)(?a := ?v)\<rparr> ?x ?p ?y goal (1 subgoal): 1. x \<in> verts T' \<and> set p \<subseteq> arcs T' \<and> pre_digraph.cas T' x p y [PROOF STEP] by auto
module ExactLength import EqNat data Vect : Nat -> Type -> Type where Nil : Vect Z ty (::) : ty -> Vect n ty -> Vect (S n) ty exactLength : (len : Nat) -> (input : Vect m a) -> Maybe (Vect len a) exactLength {m} len input = case checkEqNat m len of Nothing => Nothing Just (Same len) => Just input
*.............................................................. !------------------------------------------------------------------------- ! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! !------------------------------------------------------------------------- !BOP ! ! !ROUTINE: ODS_Julian() --- Returns the Julian day ! ! !DESCRIPTION: ! \label{ODS:Julian} ! The routine returns the Julian day number based on the ! calendar date. The algorithm was adopted from Press et al. ! The Julian day number on May 23, 1968 is 2,440,000. The ! year zero is treated as the year 1 since the year 0 does ! not exist. ! ! \bigskip {\bf Reference:} ! \begin{description} ! \item Press, William H., Saul A. Teukolsky, William T. ! Vetterling and Brian P. Flannery, 1992: {\em Numerical ! Recipes in Fortran}, 2nd Ed. Cambridge University ! Press, New York, NY, 963pp. ! \end{description} ! ! !INTERFACE: ! integer function ODS_Julian ( CalDate ) ! ! !INPUT PARAMETERS: implicit NONE integer CalDate ! Calendar date in the format YYYYMMDD ! where YYYY is the year, MM is the ! month and DD is the day. A negative ! number implies that the year is B.C. ! ! !OUTPUT PARAMETERS: ! integer JulDay Julian day number ! ! !SEE ALSO: ! ODS_Min2Cal() - Convert minutes since a given reference to ! "calendar" date and time ! ODS_Cal2Min() - Convert "calendar" date and time to minutes ! since a given reference ! ODS_Time2Cal() - converts ODS "time" attribute to "calendar" ! date and time ! ODS_Cal2Time() - converts "calendar" date and time to ODS ! time attribute ! ODS_CalDat() - calculates the "calendar" date and time ! from the Julian day ! ! !REVISION HISTORY: ! 13Apr1998 C. Redder Original code. Routine was developed to ! create ODS version 2.00 ! 19Nov1999 C. Redder Added a latex label in and moved the ! subroutine statement into the prologue. ! 06Dec1999 C. Redder Corrections to the documentation in the ! prologue. ! 10May2000 C. Redder Updated prologue to include the routines ! ODS_Min2Cal and ODS_Cal2Min in the ! "See Also" list ! !EOP !------------------------------------------------------------------------- * Other variables * --------------- integer Year integer Month integer Day integer iGreg ! Gregorian Calendar adopted Oct 12, 1582 parameter ( iGreg = 15 + 31 * ( 10 + 12 * 1582 ) ) integer JulDay integer jy, jm, ja Year = CalDate / 10000 Month = mod ( CalDate, 10000 ) / 100 Day = mod ( CalDate, 100 ) * Change year 0 to year 1 * ----------------------- if ( Year .eq. 0 ) Year = 1 * Account for the nonexisting year 0 * ---------------------------------- if ( Year .lt. 0 ) Year = Year + 1 if ( Month .gt. 2 ) then jy = Year jm = Month + 1 else jy = Year - 1 jm = Month + 13 endif JulDay = int ( 365.25 * jy ) . + int ( 30.6001 * jm ) . + Day + 1720995 * Test whether to change to Gregorian Celendar * -------------------------------------------- if ( Day + 31 * ( Month + 12 * Year ) .ge. iGreg) then ja = int ( 0.01 * jy ) Julday = JulDay + 2 - ja + int ( 0.25 * ja ) endif ODS_Julian = JulDay return end
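The routine above is self-contained enough to check outside Fortran. Below is a minimal Python transcription (an illustration, not part of the ODS library); it assumes a positive, A.D. calendar date, and per the header comment it returns 2,440,000 for 23 May 1968.

def ods_julian(cal_date):
    # Julian day number from a YYYYMMDD integer (transcription of ODS_Julian above).
    # Assumes a positive (A.D.) date; Fortran truncation and Python floor division
    # differ for negative inputs.
    year = cal_date // 10000
    month = (cal_date % 10000) // 100
    day = cal_date % 100
    if year == 0:
        year = 1                      # the year 0 is treated as the year 1
    if year < 0:
        year += 1                     # account for the nonexistent year 0
    if month > 2:
        jy, jm = year, month + 1
    else:
        jy, jm = year - 1, month + 13
    julday = int(365.25 * jy) + int(30.6001 * jm) + day + 1720995
    igreg = 15 + 31 * (10 + 12 * 1582)   # Gregorian calendar adopted 15 Oct 1582
    if day + 31 * (month + 12 * year) >= igreg:
        ja = int(0.01 * jy)
        julday += 2 - ja + int(0.25 * ja)
    return julday

assert ods_julian(19680523) == 2440000   # reference value quoted in the prologue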
Set Warnings "-notation-overridden". Require Import Category.Theory. Require Import Category.Structure.Monoidal. Require Import Category.Lib. Require Import Category.Structure.Monoidal.Proofs. Require Import Category.Instance.Sets. Require Import Category.Instance.Sets.Cartesian. Require Import Category.Structure.Monoidal.Internal.Product. Require Import Enriched.Category. Require Import Enriched.Functor. Require Import Enriched.Natural.Transformation. Generalizable All Variables. Set Primitive Projections. Set Universe Polymorphism. Unset Transparent Obligations. Section SetEnrichedCategory. Let V := InternalProduct_Monoidal (H:= Sets_Cartesian). Definition SetEnrichedCategory := EnrichedCategory (V := V). Program Definition FromOrdinaryCategory (C : Category) : SetEnrichedCategory := {| eobj := obj[C]; ehom := fun x y => {| carrier:= x ~> y; is_setoid := homset x y|}; eid := fun x => {| morphism:= fun _ => id |}; ecompose := fun x y z => {| morphism:= fun H => let (f, g) := H in (f ∘ g)%morphism |}; |}. Program Definition FromOrdinaryFunctor {C D : Category} (F : (C ⟶ D)%functor_type) : FromOrdinaryCategory C ⟶ FromOrdinaryCategory D := {| efobj := fobj[F]; efmap := fun x y => {| morphism:= fmap[F] |}; |}. Next Obligation. symmetry. apply fmap_comp. Qed. Program Definition FromOrdinaryTransformation {C D : Category} {F G : (C ⟶ D)%functor_type} (a : (F ⟹ G)%transform_type) : FromOrdinaryFunctor F ⟹ FromOrdinaryFunctor G := {| etransform := fun c => {| morphism := fun _ => a c |}; |}. Next Obligation. symmetry. apply naturality. Qed. End SetEnrichedCategory.
Formal statement is: lemma residue_const:"residue (\<lambda>_. c) z = 0" Informal statement is: The residue of a constant function is zero.
[STATEMENT] lemma ket_k_dim: "ket_k k \<in> carrier_vec K" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ket_k k \<in> carrier_vec K [PROOF STEP] unfolding ket_k_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. Matrix.vec K (\<lambda>ka. if ka = k then 1 else 0) \<in> carrier_vec K [PROOF STEP] by auto
(* Title: HOL/MicroJava/DFA/Product.thy Author: Tobias Nipkow Copyright 2000 TUM *) section \<open>Products as Semilattices\<close> theory Product imports Err begin definition le :: "'a ord \<Rightarrow> 'b ord \<Rightarrow> ('a * 'b) ord" where "le rA rB == %(a,b) (a',b'). a <=_rA a' & b <=_rB b'" definition sup :: "'a ebinop \<Rightarrow> 'b ebinop \<Rightarrow> ('a * 'b)ebinop" where "sup f g == %(a1,b1)(a2,b2). Err.sup Pair (a1 +_f a2) (b1 +_g b2)" definition esl :: "'a esl \<Rightarrow> 'b esl \<Rightarrow> ('a * 'b ) esl" where "esl == %(A,rA,fA) (B,rB,fB). (A \<times> B, le rA rB, sup fA fB)" abbreviation lesubprod_sntax :: "'a * 'b \<Rightarrow> 'a ord \<Rightarrow> 'b ord \<Rightarrow> 'a * 'b \<Rightarrow> bool" ("(_ /<='(_,_') _)" [50, 0, 0, 51] 50) where "p <=(rA,rB) q == p <=_(le rA rB) q" lemma unfold_lesub_prod: "p <=(rA,rB) q == le rA rB p q" by (simp add: lesub_def) lemma le_prod_Pair_conv [iff]: "((a1,b1) <=(rA,rB) (a2,b2)) = (a1 <=_rA a2 & b1 <=_rB b2)" by (simp add: lesub_def le_def) lemma less_prod_Pair_conv: "((a1,b1) <_(Product.le rA rB) (a2,b2)) = (a1 <_rA a2 & b1 <=_rB b2 | a1 <=_rA a2 & b1 <_rB b2)" apply (unfold lesssub_def) apply simp apply blast done lemma order_le_prod [iff]: "order(Product.le rA rB) = (order rA & order rB)" apply (unfold Semilat.order_def) apply simp apply meson done lemma acc_le_prodI [intro!]: "\<lbrakk> acc r\<^sub>A; acc r\<^sub>B \<rbrakk> \<Longrightarrow> acc(Product.le r\<^sub>A r\<^sub>B)" apply (unfold acc_def) apply (rule wf_subset) apply (erule wf_lex_prod) apply assumption apply (auto simp add: lesssub_def less_prod_Pair_conv lex_prod_def) done lemma unfold_plussub_lift2: "e1 +_(lift2 f) e2 == lift2 f e1 e2" by (simp add: plussub_def) lemma plus_eq_Err_conv [simp]: assumes "x \<in> A" and "y \<in> A" and "semilat(err A, Err.le r, lift2 f)" shows "(x +_f y = Err) = (\<not>(\<exists>z\<in>A. x <=_r z & y <=_r z))" proof - have plus_le_conv2: "\<And>r f z. \<lbrakk> z \<in> err A; semilat (err A, r, f); OK x \<in> err A; OK y \<in> err A; OK x +_f OK y <=_r z\<rbrakk> \<Longrightarrow> OK x <=_r z \<and> OK y <=_r z" by (rule Semilat.plus_le_conv [OF Semilat.intro, THEN iffD1]) from assms show ?thesis apply (rule_tac iffI) apply clarify apply (drule OK_le_err_OK [THEN iffD2]) apply (drule OK_le_err_OK [THEN iffD2]) apply (drule Semilat.lub [OF Semilat.intro, of _ _ _ "OK x" _ "OK y"]) apply assumption apply assumption apply simp apply simp apply simp apply simp apply (case_tac "x +_f y") apply assumption apply (rename_tac "z") apply (subgoal_tac "OK z \<in> err A") apply (frule plus_le_conv2) apply assumption apply simp apply blast apply simp apply (blast dest: Semilat.orderI [OF Semilat.intro] order_refl) apply blast apply (erule subst) apply (unfold semilat_def err_def closed_def) apply simp done qed lemma err_semilat_Product_esl: "\<And>L1 L2. 
\<lbrakk> err_semilat L1; err_semilat L2 \<rbrakk> \<Longrightarrow> err_semilat(Product.esl L1 L2)" apply (unfold esl_def Err.sl_def) apply (simp (no_asm_simp) only: split_tupled_all) apply simp apply (simp (no_asm) only: semilat_Def) apply (simp (no_asm_simp) only: Semilat.closedI [OF Semilat.intro] closed_lift2_sup) apply (simp (no_asm) only: unfold_lesub_err Err.le_def unfold_plussub_lift2 sup_def) apply (auto elim: semilat_le_err_OK1 semilat_le_err_OK2 simp add: lift2_def split: err.split) apply (blast dest: Semilat.orderI [OF Semilat.intro]) apply (blast dest: Semilat.orderI [OF Semilat.intro]) apply (rule OK_le_err_OK [THEN iffD1]) apply (erule subst, subst OK_lift2_OK [symmetric], rule Semilat.lub [OF Semilat.intro]) apply simp apply simp apply simp apply simp apply simp apply simp apply (rule OK_le_err_OK [THEN iffD1]) apply (erule subst, subst OK_lift2_OK [symmetric], rule Semilat.lub [OF Semilat.intro]) apply simp apply simp apply simp apply simp apply simp apply simp done end
\chapter{Datasets and running conditions} \label{chap:prod:data} The data used for this measurement were collected at the beginning of \runtwo\ of the \ac{LHC}, during a special 15-day `early measurements' period. The number of bunches in the accelerator was gradually increased from 50 at the beginning to 482 bunches at the end of the period, with the number colliding at IP8 increasing from 36 to 397 bunches. In addition, the minimum bunch spacing was set to \SI{50}{\nano\second}, as in \runone, rather than the nominal \runtwo\ spacing of \SI{25}{\nano\second}. These steps were taken primarily for machine safety, as the \ac{LHC} began operating at a new centre-of-mass energy of \SI{13}{\TeV}. The combined dataset used for the measurement corresponds to an integrated luminosity of \begin{equation} \intlumi = \SI{\xsectotlumi}{\per\pico\barn}. \label{eqn:prod:xsectotlumi} \end{equation} The integrated luminosity measurement is performed using a beam-gas imaging technique~\cite{LHCb-PAPER-2014-047}. For reference, the total integrated luminosity collected in \runone\ at \lhcb\ was \SI{3}{\per\femto\barn}, whilst that for the \sqrtseq{7}\ \lhcb\ open charm production measurement was \SI{15}{\per\nb}. All data were taken with the \lhcb\ dipole magnet in the `down' polarity, and were processed via the Turbo stream data flow, as described in \cref{chap:intro:lhcb:detector}. A matching set of simulated \ac{MC} events is also used in the analysis, which is described in \cref{chap:prod:data:mc}. The selection of events and charm candidates, both in the trigger and offline, will be described in \cref{chap:prod:sel}. \section{Simulated data samples} \label{chap:prod:data:mc} This analysis uses simulated samples of \DstToDzpi\ with \DzToKpi, \DpToKpipi, and \DspToKKpi\ decays. The \DstToDzpi\ sample is sufficient for studying both \DstToDzpi\ and (untagged) \DzToKpi\ efficiencies, as the effect of the soft pion is parameterised in \PDzero\ \pT\ and \rapidity. In addition, the \DspToKKpi\ sample is sufficient for studying \DspTophipi\ decays, given that the kaon pair is required to be within the same mass window as applied to the data. For each decay mode, samples of \num{2.5} million events are generated, with an additional \num{1} million events generated where the signal charm hadron is required to have $\pT > \SI{10}{\GeVc}$. To save computing resources, the signal decays are required to be within the \lhcb\ geometric acceptance after \evtgen\ has run. This requires all charged final-state particles to have a positive $z$ component of their three-momentum and to satisfy \begin{equation} 10 < \theta < \SI{400}{\milli\radian}, \label{eqn:prod:data:lhcb_acceptance} \end{equation} where $\theta$ is the polar angle. As this cut is not \SI{100}{\percent} efficient in some \pTy\ bins, its efficiency must be assessed, as described in \cref{chap:prod:effs:acc}, and so dedicated, generator-only datasets are also produced. These data are not processed beyond the \evtgen\ step. In addition to the usual truth-matching procedure discussed in \cref{chap:intro:lhcb:simulation}, a prompt/secondary flag is computed for each candidate charm vertex, based on the true lifetime of each particle preceding it in the true decay chain. 
If the lifetime of any particle in the true ancestry of the charm candidate exceeds a threshold of \SI{0.1}{\femto\second}, the candidate is flagged as secondary.\footnotemark\ In what follows, the \ac{MC} data have been filtered such that only truth-matched, prompt charm hadron candidates remain, unless stated otherwise. \footnotetext{% The order of magnitude of the lifetimes of the ground-state charm and beauty hadrons is between $0.1$ and \SI{1}{\pico\second}. } \section{Crossing angle correction} \label{chap:prod:data:crossing_angle} Due to the non-zero crossing angles of the proton beams, the \pp\ collision frame is boosted with respect to the laboratory frame. As this measurement is made in bins of charm hadron transverse momentum and rapidity measured in the \pp\ collision rest frame, a correction is applied to the \pT\ and \rapidity\ measured in the laboratory frame. Example \pT\ and \rapidity\ distributions as measured in the laboratory and \pp\ centre-of-mass frames are given for the simulated \DstToDzpi\ dataset in \cref{fig:prod:data:com_boost}. Whenever charm hadron \pT\ or \rapidity\ are mentioned, it is the \pp\ rest frame quantities that are used, unless stated otherwise. \begin{figure}% \begin{subfigure}[b]{0.5\textwidth} \centering \includegraphics[width=\textwidth]{production/data/D0ToKpi_MC_PT} \caption{\pT} \label{fig:prod:data:com_boost:pt} \end{subfigure} \begin{subfigure}[b]{0.5\textwidth} \centering \includegraphics[width=\textwidth]{production/data/D0ToKpi_MC_Y} \caption{\rapidity} \label{fig:prod:data:com_boost:y} \end{subfigure} \caption{% Distributions of \PDzero\ \pT~(\subref*{fig:prod:data:com_boost:pt}) and \rapidity~(\subref*{fig:prod:data:com_boost:y}) as measured in the laboratory frame (black) and in the proton-proton centre-of-mass~(CoM) frame (red), in the simulated \DstToDzpi, with \DzToKpi, dataset. } \label{fig:prod:data:com_boost} \end{figure}
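As a minimal illustration of the quantities involved (an added sketch, not text from the thesis): the binning variables are computed from the charm-hadron four-momentum after it has been transformed into the \pp\ centre-of-mass frame,
\begin{equation}
  p^{\mu} \longrightarrow p'^{\mu} = \Lambda^{\mu}_{\ \nu}\, p^{\nu},
  \qquad
  \pT = \sqrt{p_x'^{2} + p_y'^{2}},
  \qquad
  \rapidity = \frac{1}{2}\ln\frac{E' + p_z'}{E' - p_z'},
\end{equation}
where $\Lambda$ denotes the Lorentz transformation determined by the beam crossing angles at the interaction point.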
Base.:(==)(x::InfExtendedTime, y::InfExtendedTime) = (isfinite(x) && isfinite(y)) ? x.finitevalue == y.finitevalue : x.flag == y.flag Base.:(==)(x::Infinite, y::T) where {T<:InfExtendedTime} = T(x) == y Base.:(==)(x::T, y::Infinite) where {T<:InfExtendedTime} = x == T(y) Base.hash(x::InfExtendedTime, h::UInt) = isfinite(x) ? hash(x.finitevalue, h) : hash(x.flag, h) function Base.isless(x::InfExtendedTime, y::InfExtendedTime) if isinf(x) return isneginf(x) && !isneginf(y) elseif isinf(y) return isposinf(y) else return x.finitevalue < y.finitevalue end end Base.isless(x::Infinite, y::T) where {T<:InfExtendedTime} = isless(T(x), y) Base.isless(x::T, y::Infinite) where {T<:InfExtendedTime} = isless(x, T(y)) Base.:≤(x::Infinite, y::T) where {T<:InfExtendedTime} = T(x) ≤ y Base.:≤(x::T, y::Infinite) where {T<:InfExtendedTime} = x ≤ T(y)
A function $f$ is continuous at $a$ if and only if for every sequence $x_n$ converging to $a$, the sequence $f(x_n)$ converges to $f(a)$.
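For clarity, a LaTeX rendering of this statement (added here as a sketch; the corresponding formal library statement is not reproduced):
\[
  f \text{ is continuous at } a
  \iff
  \forall (x_n)_{n \in \mathbb{N}},\quad x_n \to a \implies f(x_n) \to f(a).
\]
Continuity always implies the sequential condition; the converse direction uses that the domain is a metric (or, more generally, first-countable) space.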
State Before: α : Type u x y : FreeRing α ⊢ ↑(x - y) = ↑x - ↑y State After: no goals Tactic: rw [castFreeCommRing, map_sub]
data Vect : Nat -> Type -> Type where Nil : Vect Z a (::) : a -> Vect k a -> Vect (S k) a %name Vect xs, ys, zs elem : Eq a => (value : a) -> (xs : Vect n a) -> Bool elem value [] = False elem value (x :: xs) = case value == x of False => elem value xs True => True
-- --------------------------------------------------------------- [ Types.idr ] -- Module : Types.idr -- Copyright : (c) Jan de Muijnck-Hughes -- License : see LICENSE -- --------------------------------------------------------------------- [ EOH ] module Frigg.Types import Sif.Types import Sif.Pattern import Readability import Readability.Process.XML public export data PEvalResult : Type where ResSif : EvalResult -> PEvalResult ResRead : ReadResult -> PEvalResult ResWTA : Float -> PEvalResult Show PEvalResult where show (ResSif res) = show res show (ResRead res) = show res show (ResWTA res) = show res -- --------------------------------------------------------------------- [ EOF ]
from skimage.draw import polygon as draw_polygon, circle as draw_circle, ellipse as draw_ellipse
import numpy as np


class Shape(object):
    def __init__(self, name='unnamed', type=None, img_sz=None):
        self.name = name
        self.type = type
        self.shape_info = {}
        self.color_info = {}
        self.img_sz = img_sz
        self.index = None
        self.overlay = False

    def get_img_sz(self):
        return self.img_sz

    def get_index(self):
        return self.index

    def get_standard_rotation(self, rotation):
        if not (rotation >= -180 and rotation <= 180):
            raise ValueError("the rotation should be between -180 and 180")
        standard_rotation = rotation / 180. * np.pi
        return standard_rotation

    def get_shape_info(self):
        return self.shape_info

    def get_color_info(self):
        return self.color_info

    def get_numpy(self, data):
        if isinstance(data, np.ndarray):
            return data
        else:
            return np.array(data)

    def get_standard_coord(self, pos, syn=True, standard=1.):
        assert all([p >= -1 and p <= 1 for p in pos])
        pos = self.get_numpy(pos)
        if syn:
            assert len(self.img_sz) == len(pos)
            img_sz = self.get_numpy(self.img_sz)
            standard_pos = img_sz * (0.5 * pos + 0.5)
        else:
            standard_pos = standard * (0.5 * pos + 0.5)
        return standard_pos

    def get_standard_length(self, length, syn=True, standard=1.):
        length = self.get_numpy(length)
        if syn:
            assert len(self.img_sz) == len(length)
            img_sz = self.get_numpy(self.img_sz)
            standard_length = img_sz / 2. * length
        else:
            standard_length = standard / 2 * length
        return standard_length
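A brief usage sketch of the class above (the name, type, and image size below are made-up illustration values, not taken from the original code):

shape = Shape(name='demo', type='ellipse', img_sz=[128, 128])

# Normalised coordinates in [-1, 1] map to pixel coordinates:
# (0, 0) is the image centre, (1, 1) the far corner.
centre_px = shape.get_standard_coord([0.0, 0.0])    # array([64., 64.])
corner_px = shape.get_standard_coord([1.0, 1.0])    # array([128., 128.])

# Normalised lengths are scaled by half the image size.
radii_px = shape.get_standard_length([0.5, 0.25])   # array([32., 16.])

# Rotations are given in degrees in [-180, 180] and converted to radians.
theta = shape.get_standard_rotation(90)             # pi / 2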
\documentclass{lib/styles/default-style}

\begin{document}
\pagestyle{no-numbered}

\unnumberedSection{\textbf{SET}}

\subsection*{\textbf{Description}}

Set is used to simplify code writing when building tables and lists.

First, initialise a named set with the \verb+\initSet{setName}+ command.

Let's create a 'TestSet' set and write some data.

\initSet{TestSet}

\subsection*{\textbf{Write}}

Write operation: \verb+\addVariableToSet{setName}{data}+

Let's push the value 'First index data'.

\addVariableToSet{TestSet}{First index data}

\subsection*{\textbf{Read}}

Now you can access the data using the \verb+\getVariableFromSet{setName}{index}+ command.

So, the value in TestSet[0] = \getVariableFromSet{TestSet}{0}

\end{document}
/- Copyright (c) 2021 Yaël Dillies, Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Yaël Dillies, Bhavik Mehta -/ import algebra.big_operators.order import data.nat.basic /-! # Equitable functions This file defines equitable functions. A function `f` is equitable on a set `s` if `f a₁ ≤ f a₂ + 1` for all `a₁, a₂ ∈ s`. This is mostly useful when the codomain of `f` is `ℕ` or `ℤ` (or more generally a successor order). ## TODO `ℕ` can be replaced by any `succ_order` + `conditionally_complete_monoid`, but we don't have the latter yet. -/ open_locale big_operators variables {α β : Type*} namespace set /-- A set is equitable if no element value is more than one bigger than another. -/ def equitable_on [has_le β] [has_add β] [has_one β] (s : set α) (f : α → β) : Prop := ∀ ⦃a₁ a₂⦄, a₁ ∈ s → a₂ ∈ s → f a₁ ≤ f a₂ + 1 @[simp] lemma equitable_on_empty [has_le β] [has_add β] [has_one β] (f : α → β) : equitable_on ∅ f := λ a _ ha, (set.not_mem_empty _ ha).elim lemma equitable_on_iff_exists_le_le_add_one {s : set α} {f : α → ℕ} : s.equitable_on f ↔ ∃ b, ∀ a ∈ s, b ≤ f a ∧ f a ≤ b + 1 := begin refine ⟨_, λ ⟨b, hb⟩ x y hx hy, (hb x hx).2.trans (add_le_add_right (hb y hy).1 _)⟩, obtain rfl | ⟨x, hx⟩ := s.eq_empty_or_nonempty, { simp }, intros hs, by_cases h : ∀ y ∈ s, f x ≤ f y, { exact ⟨f x, λ y hy, ⟨h _ hy, hs hy hx⟩⟩ }, push_neg at h, obtain ⟨w, hw, hwx⟩ := h, refine ⟨f w, λ y hy, ⟨nat.le_of_succ_le_succ _, hs hy hw⟩⟩, rw (nat.succ_le_of_lt hwx).antisymm (hs hx hw), exact hs hx hy, end lemma equitable_on_iff_exists_image_subset_Icc {s : set α} {f : α → ℕ} : s.equitable_on f ↔ ∃ b, f '' s ⊆ Icc b (b + 1) := by simpa only [image_subset_iff] using equitable_on_iff_exists_le_le_add_one lemma equitable_on_iff_exists_eq_eq_add_one {s : set α} {f : α → ℕ} : s.equitable_on f ↔ ∃ b, ∀ a ∈ s, f a = b ∨ f a = b + 1 := by simp_rw [equitable_on_iff_exists_le_le_add_one, nat.le_and_le_add_one_iff] section ordered_semiring variables [ordered_semiring β] lemma subsingleton.equitable_on {s : set α} (hs : s.subsingleton) (f : α → β) : s.equitable_on f := λ i j hi hj, by { rw hs hi hj, exact le_add_of_nonneg_right zero_le_one } lemma equitable_on_singleton (a : α) (f : α → β) : set.equitable_on {a} f := set.subsingleton_singleton.equitable_on f end ordered_semiring end set open set namespace finset lemma equitable_on_iff {s : finset α} {f : α → ℕ} : equitable_on (s : set α) f ↔ ∀ a ∈ s, f a = (∑ i in s, f i) / s.card ∨ f a = (∑ i in s, f i) / s.card + 1 := by simp_rw [equitable_on_iff_le_le_add_one, nat.le_and_le_add_one_iff] end finset
||| Position and geometry of Hexagonal grid system ||| https://www.redblobgames.com/grids/hexagons/ module Bautzen.Pos import Data.Nat import Data.Nat.DivMod import Data.Nat.Parity import Data.ZZ import public Data.ZZ.Extra import Data.Maybe.Extra -- Positions & Map ||| A position/hex of the game board encoded as a pair of `Nat` ||| with bounds public export data Pos : Type where Hex : (col : Nat) -> (row : Nat) -> { auto cbound : LTE col 22 } -> { auto rbound : LTE row 12 } -> Pos public export Eq Pos where (==) (Hex col row) (Hex col' row') = col == col' && row == row' public export Show Pos where show (Hex c r) = show2Digits c ++ show2Digits r where show2Digits : Nat -> String show2Digits n = if n < 9 then "0" ++ show (n + 1) else show (n + 1) public export Ord Pos where compare (Hex col row) (Hex col' row') = case compare col col' of LT => LT EQ => compare row row' GT => GT ||| Cube coordinates. ||| Cube coordinates stem from the observation a 2-D hexagonal grid is equivalent ||| to a diagonal "slice" of a 3-D cubic grid. Using cubic coordinates makes it ||| much easier to compute geometric values. ||| We only store the `x` (column) and `z` (depth) coordinates instead ||| of a triple as the `y` dimension can be simply recovered as `-x -z`. public export data Cube : Type where MkCube : (x : ZZ) -> (z : ZZ) -> Cube ||| Compute the L1 distance between 2 `Cube`s ||| see [Red Blob Games](https://www.redblobgames.com/grids/hexagons/#distances-cube) page ||| for details on the (pretty cool) algorithm. public export cubeDistance : Cube -> Cube -> Nat cubeDistance (MkCube x z) (MkCube x' z') = let y = negate x - z y' = negate x' - z' in max (max (absZ (x - x')) (absZ (y - y'))) (absZ (z - z')) public export posToCube : Pos -> Cube posToCube (Hex col row) = let x = cast col sign = if odd col then 1 else 0 z = cast row - divZZNZ (x - sign) 2 {z = PosSIsNotZ } in MkCube x z public export distance : Pos -> Pos -> Nat distance x y = let c1 = posToCube x c2 = posToCube y in cubeDistance c1 c2 public export data Mvmt : Type where Dec : Mvmt Neut : Mvmt Inc : Mvmt public export succNotLTEZ : Not (LTE (S m) Z) succNotLTEZ LTEZero impossible public export fromLTESucc : LTE (S m) (S n) -> LTE m n fromLTESucc (LTESucc x) = x public export lteSuccR : LTE n m -> LTE n (S m) lteSuccR LTEZero = LTEZero lteSuccR (LTESucc x) = LTESucc (lteSuccR x) public export lteSuccL : LTE (S n) m -> LTE n m lteSuccL (LTESucc x) = lteSuccR x public export isLte : (m, n : Nat) -> Dec (LTE m n) isLte Z n = Yes LTEZero isLte (S k) Z = No succNotLTEZ isLte (S k) (S j) = case isLte k j of No contra => No (contra . 
fromLTESucc) Yes prf => Yes (LTESucc prf) public export shiftPos : (x : Nat) -> {bound : Nat} -> (prf : LTE x bound) -> Mvmt -> Maybe (n : Nat ** LTE n bound) shiftPos Z prf Dec = Nothing shiftPos (S k) prf Dec = Just (k ** lteSuccL prf) shiftPos x prf Neut = Just (x ** prf) shiftPos x prf Inc {bound} with (isLte (S x) bound) shiftPos x prf Inc | (Yes y) = Just (S x ** y) shiftPos x prf Inc | (No contra) = Nothing public export makePos : (pos : Pos) -> (Mvmt, Mvmt) -> Maybe Pos makePos (Hex col row {cbound} {rbound} ) (a, b) = do (c' ** p1) <- shiftPos col cbound a (r' ** p2) <- shiftPos row rbound b pure $ Hex c' r' {cbound = p1} {rbound = p2} public export oddShifts : List (Mvmt, Mvmt) oddShifts = [ (Dec, Neut) , (Neut, Dec) , (Inc, Neut) , (Inc, Inc) , (Neut, Inc) , (Dec, Inc) ] public export evenShifts : List (Mvmt, Mvmt) evenShifts = [ (Dec, Dec) , (Neut, Dec) , (Inc, Dec) , (Inc, Neut) , (Neut, Inc) , (Dec, Neut) ] ||| Compute the neighbours of a given position ||| There are at most 6 neighbours, with side and corner hexes having of ||| course less. public export neighbours : (pos : Pos) -> List Pos neighbours (Hex col row) with (col `divMod` (S Z)) neighbours (Hex col@(Z + (q * (S(S Z)))) row) | (MkDivMod q Z remainderSmall) = catMaybes $ map (makePos (Hex col row)) evenShifts neighbours (Hex col@((S Z) + (q * (S(S Z)))) row) | (MkDivMod q (S Z) remainderSmall) = catMaybes $ map (makePos (Hex col row)) oddShifts neighbours (Hex ((S (S r)) + (q * (S(S Z)))) _) | (MkDivMod q (S (S r)) LTEZero) impossible neighbours (Hex ((S (S r)) + (q * (S(S Z)))) _) | (MkDivMod q (S (S r)) (LTESucc lte)) = absurd $ succNotLTEZ (fromLTESucc lte) namespace PosTest neighbours1_test : (neighbours (Hex 3 3) = [ Hex 2 3, Hex 3 2, Hex 4 3, Hex 4 4, Hex 3 4, Hex 2 4] ) neighbours1_test = Refl neighbours_test : (neighbours (Hex 2 2) = [ Hex 1 1, Hex 2 1, Hex 3 1, Hex 3 2, Hex 2 3, Hex 1 2] ) neighbours_test = Refl -- distance_to_odd_neighbours_is_1 : map (distance (Hex 3 2)) (neighbours (Hex 3 2)) = [ 1, 1, 1, 1, 1, 1 ] -- distance_to_odd_neighbours_is_1 = Refl -- distance_to_even_neighbours_is_1 : map (distance (Hex 2 2)) (neighbours (Hex 2 2)) = [ 1, 1, 1, 1, 1, 1 ] -- distance_to_even_neighbours_is_1 = Refl -- distance_test : distance (Hex 3 2) (Hex 4 4) = 2 -- distance_test = Refl
If $S$ is contractible and $T$ is path-connected, then any two continuous maps from $S$ to $T$ are homotopic.
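A brief proof sketch in LaTeX (added for clarity; not part of the original statement): writing $c_x$ for the constant map at $x$, contractibility of $S$ gives $\mathrm{id}_S \simeq c_{s_0}$ for some $s_0 \in S$, so for continuous $f, g \colon S \to T$
\[
  f = f \circ \mathrm{id}_S \simeq f \circ c_{s_0} = c_{f(s_0)},
  \qquad
  g = g \circ \mathrm{id}_S \simeq g \circ c_{s_0} = c_{g(s_0)},
\]
and any path from $f(s_0)$ to $g(s_0)$ in the path-connected space $T$ yields a homotopy $c_{f(s_0)} \simeq c_{g(s_0)}$; composing these homotopies gives $f \simeq g$.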
(** * Homotopy theory of simplicial sets. Vladimir Voevodsky started on Nov. 22, 2014 (with Alexander Vishik) *) (* Preamble *) Require Import UniMath.MoreFoundations.Tactics. Require Export UniMath.Combinatorics.FiniteSets. (* Require Export UniMath.Combinatorics.OrderedSets. *) Require Export UniMath.CategoryTheory.Core.Categories. Require Export UniMath.CategoryTheory.Core.Functors. Require Export UniMath.CategoryTheory.FunctorCategory. Require Export UniMath.CategoryTheory.categories.HSET.Core. Require Export UniMath.CategoryTheory.categories.HSET.Univalence. Require Export UniMath.CategoryTheory.opp_precat. (* To upstream files *) (* The pre-category data for the category Delta *) Local Open Scope stn. Definition monfunstn ( n m : nat ) : UU := ∑ f : ⟦ n ⟧ -> ⟦ m ⟧, ∏ (x y: ⟦n⟧), x ≤ y -> f x ≤ f y. Definition make_monfunstn { m n : nat } f is := (f,,is) : monfunstn m n. Definition monfunstnpr1 {n m : nat} : monfunstn n m -> ⟦ n ⟧ -> ⟦ m ⟧ := pr1. Lemma monfunstnpr1_isInjective {m n} (f g : monfunstn m n) : monfunstnpr1 f = monfunstnpr1 g -> f = g. Proof. intros e. apply subtypePath. { intros h. apply impred; intro i. apply impred; intro j. apply impred; intro l. apply propproperty. } exact e. Defined. Coercion monfunstnpr1 : monfunstn >-> Funclass . Lemma isasetmonfunstn n m : isaset ( monfunstn n m ) . Proof. intros . apply ( isofhleveltotal2 2 ) . { apply impred. intro t. apply isasetstn. } intro f. apply impred; intro i. apply impred; intro j. apply impred; intro l. apply isasetaprop, propproperty. Defined. Definition monfunstnid n : monfunstn n n := make_monfunstn (idfun _) (λ x y is, is). Definition monfunstncomp { n m k : nat } ( f : monfunstn n m ) ( g : monfunstn m k ) : monfunstn n k . Proof. intros . exists ( g ∘ f ) . intros i j l. unfold funcomp. apply ( pr2 g ). apply ( pr2 f ) . assumption. Defined. Definition precatDelta : precategory . Proof. use tpair. { use tpair. { exists nat. intros m n. exact (monfunstn (S m) (S n)). } { split. { intros m. apply monfunstnid. } { intros l m n f g. exact (monfunstncomp f g). } } } apply is_precategory_one_assoc_to_two. simpl. split. { simpl. split. { intros m n f. now apply monfunstnpr1_isInjective. } { intros m n f. now apply monfunstnpr1_isInjective. } } { simpl. intros m n o p f g h. now apply monfunstnpr1_isInjective. } Defined. Local Open Scope cat. Definition has_homsets_precatDelta : has_homsets precatDelta. Proof. intros a b. cbn. apply isasetmonfunstn. Qed. Definition catDelta : category := make_category precatDelta has_homsets_precatDelta. Definition sSet := functor_category catDelta^op category_HSET. (* V.V. with Sasha Vishik, Nov. 23, 2014 *) (* End of file *)
Load LFindLoad. From lfind Require Import LFind. From QuickChick Require Import QuickChick. From adtind Require Import goal21. Derive Show for lst. Derive Arbitrary for lst. Instance Dec_Eq_lst : Dec_Eq lst. Proof. dec_eq. Qed. Lemma conj1synthconj2 : forall (lv0 : lst), (@eq lst (append Nil lv0) lv0). Admitted. QuickChick conj1synthconj2.
[GOAL] M : Type u_1 inst✝¹ : Zero M l : List M inst✝ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ n : ℕ ⊢ n ∈ Finset.filter (fun i => getD l i 0 ≠ 0) (Finset.range (length l)) ↔ (fun i => getD l i 0) n ≠ 0 [PROOFSTEP] simp only [Ne.def, Finset.mem_filter, Finset.mem_range, and_iff_right_iff_imp] [GOAL] M : Type u_1 inst✝¹ : Zero M l : List M inst✝ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ n : ℕ ⊢ ¬getD l n 0 = 0 → n < length l [PROOFSTEP] contrapose! [GOAL] M : Type u_1 inst✝¹ : Zero M l : List M inst✝ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ n : ℕ ⊢ length l ≤ n → getD l n 0 = 0 [PROOFSTEP] exact getD_eq_default _ _ [GOAL] M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ inst✝ : DecidablePred fun i => getD [] i 0 ≠ 0 ⊢ toFinsupp [] = 0 [PROOFSTEP] ext [GOAL] case h M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ inst✝ : DecidablePred fun i => getD [] i 0 ≠ 0 a✝ : ℕ ⊢ ↑(toFinsupp []) a✝ = ↑0 a✝ [PROOFSTEP] simp [GOAL] M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ x : M inst✝ : DecidablePred fun x_1 => getD [x] x_1 0 ≠ 0 ⊢ toFinsupp [x] = Finsupp.single 0 x [PROOFSTEP] ext ⟨_ | i⟩ [GOAL] case h.zero M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ x : M inst✝ : DecidablePred fun x_1 => getD [x] x_1 0 ≠ 0 ⊢ ↑(toFinsupp [x]) Nat.zero = ↑(Finsupp.single 0 x) Nat.zero [PROOFSTEP] simp [Finsupp.single_apply, (Nat.zero_lt_succ _).ne] [GOAL] case h.succ M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ x : M inst✝ : DecidablePred fun x_1 => getD [x] x_1 0 ≠ 0 n✝ : ℕ ⊢ ↑(toFinsupp [x]) (Nat.succ n✝) = ↑(Finsupp.single 0 x) (Nat.succ n✝) [PROOFSTEP] simp [Finsupp.single_apply, (Nat.zero_lt_succ _).ne] [GOAL] M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 ⊢ toFinsupp (l₁ ++ l₂) = toFinsupp l₁ + Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂) [PROOFSTEP] ext n [GOAL] case h M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 n : ℕ ⊢ ↑(toFinsupp (l₁ ++ l₂)) n = ↑(toFinsupp l₁ + Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) n [PROOFSTEP] simp only [toFinsupp_apply, Finsupp.add_apply] [GOAL] case h M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 n : ℕ ⊢ getD (l₁ ++ l₂) n 0 = getD l₁ n 0 + ↑(Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) n [PROOFSTEP] cases lt_or_le n l₁.length with | inl h => rw [getD_append _ _ _ _ h, Finsupp.embDomain_notin_range, add_zero] rintro ⟨k, rfl : length l₁ + k = n⟩ exact h.not_le (self_le_add_right _ _) | inr h => rcases exists_add_of_le h with ⟨k, rfl⟩ rw [getD_append_right _ _ _ _ h, add_tsub_cancel_left, getD_eq_default _ _ h, zero_add] exact Eq.symm 
(Finsupp.embDomain_apply _ _ _) [GOAL] case h M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 n : ℕ x✝ : n < length l₁ ∨ length l₁ ≤ n ⊢ getD (l₁ ++ l₂) n 0 = getD l₁ n 0 + ↑(Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) n [PROOFSTEP] cases lt_or_le n l₁.length with | inl h => rw [getD_append _ _ _ _ h, Finsupp.embDomain_notin_range, add_zero] rintro ⟨k, rfl : length l₁ + k = n⟩ exact h.not_le (self_le_add_right _ _) | inr h => rcases exists_add_of_le h with ⟨k, rfl⟩ rw [getD_append_right _ _ _ _ h, add_tsub_cancel_left, getD_eq_default _ _ h, zero_add] exact Eq.symm (Finsupp.embDomain_apply _ _ _) [GOAL] case h.inl M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 n : ℕ h : n < length l₁ ⊢ getD (l₁ ++ l₂) n 0 = getD l₁ n 0 + ↑(Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) n [PROOFSTEP] | inl h => rw [getD_append _ _ _ _ h, Finsupp.embDomain_notin_range, add_zero] rintro ⟨k, rfl : length l₁ + k = n⟩ exact h.not_le (self_le_add_right _ _) [GOAL] case h.inl M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 n : ℕ h : n < length l₁ ⊢ getD (l₁ ++ l₂) n 0 = getD l₁ n 0 + ↑(Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) n [PROOFSTEP] rw [getD_append _ _ _ _ h, Finsupp.embDomain_notin_range, add_zero] [GOAL] case h.inl.h M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 n : ℕ h : n < length l₁ ⊢ ¬n ∈ Set.range ↑(addLeftEmbedding (length l₁)) [PROOFSTEP] rintro ⟨k, rfl : length l₁ + k = n⟩ [GOAL] case h.inl.h.intro M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 k : ℕ h : length l₁ + k < length l₁ ⊢ False [PROOFSTEP] exact h.not_le (self_le_add_right _ _) [GOAL] case h.inr M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 n : ℕ h : length l₁ ≤ n ⊢ getD (l₁ ++ l₂) n 0 = getD l₁ n 0 + ↑(Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) n [PROOFSTEP] | inr h => rcases exists_add_of_le h with ⟨k, rfl⟩ rw [getD_append_right _ _ _ _ h, add_tsub_cancel_left, getD_eq_default _ _ h, zero_add] exact Eq.symm (Finsupp.embDomain_apply _ _ _) [GOAL] case h.inr M : Type u_1 inst✝⁵ : Zero 
M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 n : ℕ h : length l₁ ≤ n ⊢ getD (l₁ ++ l₂) n 0 = getD l₁ n 0 + ↑(Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) n [PROOFSTEP] rcases exists_add_of_le h with ⟨k, rfl⟩ [GOAL] case h.inr.intro M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 k : ℕ h : length l₁ ≤ length l₁ + k ⊢ getD (l₁ ++ l₂) (length l₁ + k) 0 = getD l₁ (length l₁ + k) 0 + ↑(Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) (length l₁ + k) [PROOFSTEP] rw [getD_append_right _ _ _ _ h, add_tsub_cancel_left, getD_eq_default _ _ h, zero_add] [GOAL] case h.inr.intro M : Type u_1 inst✝⁵ : Zero M l : List M inst✝⁴ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝³ : AddZeroClass R l₁ l₂ : List R inst✝² : DecidablePred fun x => getD (l₁ ++ l₂) x 0 ≠ 0 inst✝¹ : DecidablePred fun x => getD l₁ x 0 ≠ 0 inst✝ : DecidablePred fun x => getD l₂ x 0 ≠ 0 k : ℕ h : length l₁ ≤ length l₁ + k ⊢ getD l₂ k 0 = ↑(Finsupp.embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)) (length l₁ + k) [PROOFSTEP] exact Eq.symm (Finsupp.embDomain_apply _ _ _) [GOAL] M : Type u_1 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun x_1 => getD (x :: xs) x_1 0 ≠ 0 inst✝ : DecidablePred fun x => getD xs x 0 ≠ 0 ⊢ toFinsupp (x :: xs) = Finsupp.single 0 x + Finsupp.embDomain { toFun := Nat.succ, inj' := Nat.succ_injective } (toFinsupp xs) [PROOFSTEP] classical convert toFinsupp_append [x] xs using 3 · exact (toFinsupp_singleton x).symm · ext n exact add_comm n 1 [GOAL] M : Type u_1 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun x_1 => getD (x :: xs) x_1 0 ≠ 0 inst✝ : DecidablePred fun x => getD xs x 0 ≠ 0 ⊢ toFinsupp (x :: xs) = Finsupp.single 0 x + Finsupp.embDomain { toFun := Nat.succ, inj' := Nat.succ_injective } (toFinsupp xs) [PROOFSTEP] convert toFinsupp_append [x] xs using 3 [GOAL] case h.e'_3.h.e'_5 M : Type u_1 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun x_1 => getD (x :: xs) x_1 0 ≠ 0 inst✝ : DecidablePred fun x => getD xs x 0 ≠ 0 ⊢ Finsupp.single 0 x = toFinsupp [x] [PROOFSTEP] exact (toFinsupp_singleton x).symm [GOAL] case h.e'_3.h.e'_6.h.e'_5 M : Type u_1 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun x_1 => getD (x :: xs) x_1 0 ≠ 0 inst✝ : DecidablePred fun x => getD xs x 0 ≠ 0 ⊢ { toFun := Nat.succ, inj' := Nat.succ_injective } = addLeftEmbedding (length [x]) [PROOFSTEP] ext n [GOAL] case h.e'_3.h.e'_6.h.e'_5.h M : Type u_1 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n✝ : ℕ R : Type u_2 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun x_1 => getD (x :: xs) x_1 0 ≠ 0 inst✝ : DecidablePred fun x => 
getD xs x 0 ≠ 0 n : ℕ ⊢ ↑{ toFun := Nat.succ, inj' := Nat.succ_injective } n = ↑(addLeftEmbedding (length [x])) n [PROOFSTEP] exact add_comm n 1 [GOAL] M : Type u_1 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun i => getD (xs ++ [x]) i 0 ≠ 0 inst✝ : DecidablePred fun i => getD xs i 0 ≠ 0 ⊢ toFinsupp (xs ++ [x]) = toFinsupp xs + Finsupp.single (length xs) x [PROOFSTEP] classical rw [toFinsupp_append, toFinsupp_singleton, Finsupp.embDomain_single, addLeftEmbedding_apply, add_zero] [GOAL] M : Type u_1 inst✝⁴ : Zero M l : List M inst✝³ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝² : AddZeroClass R x : R xs : List R inst✝¹ : DecidablePred fun i => getD (xs ++ [x]) i 0 ≠ 0 inst✝ : DecidablePred fun i => getD xs i 0 ≠ 0 ⊢ toFinsupp (xs ++ [x]) = toFinsupp xs + Finsupp.single (length xs) x [PROOFSTEP] rw [toFinsupp_append, toFinsupp_singleton, Finsupp.embDomain_single, addLeftEmbedding_apply, add_zero] [GOAL] M : Type u_1 inst✝³ : Zero M l✝ : List M inst✝² : DecidablePred fun x => getD l✝ x 0 ≠ 0 n : ℕ R : Type u_2 inst✝¹ : AddMonoid R l : List R inst✝ : DecidablePred fun x => getD l x 0 ≠ 0 ⊢ toFinsupp l = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum l)) [PROOFSTEP] revert l [GOAL] M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝ : AddMonoid R ⊢ ∀ (l : List R) [inst : DecidablePred fun x => getD l x 0 ≠ 0], toFinsupp l = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum l)) [PROOFSTEP] intro l [GOAL] M : Type u_1 inst✝² : Zero M l✝ : List M inst✝¹ : DecidablePred fun x => getD l✝ x 0 ≠ 0 n : ℕ R : Type u_2 inst✝ : AddMonoid R l : List R ⊢ ∀ [inst : DecidablePred fun x => getD l x 0 ≠ 0], toFinsupp l = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum l)) [PROOFSTEP] induction l using List.reverseRecOn with | H0 => exact toFinsupp_nil | H1 x xs ih => classical simp [toFinsupp_concat_eq_toFinsupp_add_single, enum_append, ih] [GOAL] M : Type u_1 inst✝² : Zero M l✝ : List M inst✝¹ : DecidablePred fun x => getD l✝ x 0 ≠ 0 n : ℕ R : Type u_2 inst✝ : AddMonoid R l : List R ⊢ ∀ [inst : DecidablePred fun x => getD l x 0 ≠ 0], toFinsupp l = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum l)) [PROOFSTEP] induction l using List.reverseRecOn with | H0 => exact toFinsupp_nil | H1 x xs ih => classical simp [toFinsupp_concat_eq_toFinsupp_add_single, enum_append, ih] [GOAL] case H0 M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝ : AddMonoid R ⊢ ∀ [inst : DecidablePred fun x => getD [] x 0 ≠ 0], toFinsupp [] = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum [])) [PROOFSTEP] | H0 => exact toFinsupp_nil [GOAL] case H0 M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝ : AddMonoid R ⊢ ∀ [inst : DecidablePred fun x => getD [] x 0 ≠ 0], toFinsupp [] = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum [])) [PROOFSTEP] exact toFinsupp_nil [GOAL] case H1 M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝ : AddMonoid R x : List R xs : R ih : ∀ [inst : DecidablePred fun x_1 => getD x x_1 0 ≠ 0], toFinsupp x = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum x)) ⊢ ∀ [inst : DecidablePred fun x_1 => getD (x ++ [xs]) x_1 0 ≠ 0], toFinsupp (x ++ [xs]) = sum (map (fun nr => Finsupp.single nr.fst 
nr.snd) (enum (x ++ [xs]))) [PROOFSTEP] | H1 x xs ih => classical simp [toFinsupp_concat_eq_toFinsupp_add_single, enum_append, ih] [GOAL] case H1 M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝ : AddMonoid R x : List R xs : R ih : ∀ [inst : DecidablePred fun x_1 => getD x x_1 0 ≠ 0], toFinsupp x = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum x)) ⊢ ∀ [inst : DecidablePred fun x_1 => getD (x ++ [xs]) x_1 0 ≠ 0], toFinsupp (x ++ [xs]) = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum (x ++ [xs]))) [PROOFSTEP] classical simp [toFinsupp_concat_eq_toFinsupp_add_single, enum_append, ih] [GOAL] case H1 M : Type u_1 inst✝² : Zero M l : List M inst✝¹ : DecidablePred fun x => getD l x 0 ≠ 0 n : ℕ R : Type u_2 inst✝ : AddMonoid R x : List R xs : R ih : ∀ [inst : DecidablePred fun x_1 => getD x x_1 0 ≠ 0], toFinsupp x = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum x)) ⊢ ∀ [inst : DecidablePred fun x_1 => getD (x ++ [xs]) x_1 0 ≠ 0], toFinsupp (x ++ [xs]) = sum (map (fun nr => Finsupp.single nr.fst nr.snd) (enum (x ++ [xs]))) [PROOFSTEP] simp [toFinsupp_concat_eq_toFinsupp_add_single, enum_append, ih]
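The goal dumps above all concern `List.toFinsupp`, which views a list as a finitely supported function `ℕ → M` sending `i` to the `i`-th entry and everything past the end to `0`. A small Python model of that correspondence, shown only to make the `append`/`single` identities readable (dicts of index ↦ nonzero entry stand in for `Finsupp`; `emb_shift` is an analogue of `embDomain (addLeftEmbedding _)`):

```python
from functools import reduce

def to_finsupp(xs):
    """The list viewed as a finitely supported function: index -> nonzero entry."""
    return {i: v for i, v in enumerate(xs) if v != 0}

def add(f, g):
    out = {}
    for k in set(f) | set(g):
        v = f.get(k, 0) + g.get(k, 0)
        if v != 0:
            out[k] = v
    return out

def emb_shift(f, offset):
    """Analogue of embDomain (addLeftEmbedding offset): reindex i -> offset + i."""
    return {offset + i: v for i, v in f.items()}

def single(i, v):
    return {i: v} if v != 0 else {}

l1, l2 = [0, 3, 0, 5], [7, 0, 2]
# toFinsupp (l₁ ++ l₂) = toFinsupp l₁ + embDomain (addLeftEmbedding (length l₁)) (toFinsupp l₂)
assert to_finsupp(l1 + l2) == add(to_finsupp(l1), emb_shift(to_finsupp(l2), len(l1)))
# toFinsupp l = sum of Finsupp.single over the enumerated list
assert to_finsupp(l1) == reduce(add, (single(i, x) for i, x in enumerate(l1)), {})
```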
\input{setup/preamble.tex}% package inclusion and set up of the document \input{setup/hyphenations.tex}% \input{setup/macros.tex}% my new macros \begin{document} \DeclareGraphicsExtensions{.png,.jpg,.pdf, eps.} % \def\bLabel{\renewcommand\@biblabel[1]{[##1]}} % \def\citeP[#1]{[\hyperref[ch:listOfPublications]{#1}]} \def\citeP[#1]{[\ref{paper:#1}]} \def\ctxt{\text{c}} %connection subscript (text) \def\stxt{\text{s}} %string subscript (text) \def\ptxt{\text{p}} %plate subscript (text) \def\mtxt{\text{m}} %mass subscript (text) \def\itxt{\text{i}} %point of 'interest' subscript (text) \def\Btxt{\text{B}} %bow subscript (text) \def\etxt{\text{e}} %excitation subscript (text) \def\rtxt{\text{r}} %lip reed subscript (text) \def\ttxt{\text{t}} %tube subscript (text) \def\sgn{\text{sgn}} \def\sm{\text{sm}} %string-mass interaction tromba \def\mp{\text{mp}} %mass-plate interaction tromba \def\Nfrac{\mathcal{N}} \def\flip{\leftarrow} \def\Ucal{\mathbfcal{U}} % states \def\uln{u_l^n} \def\wln{w_l^n} \def\wmn{w_m^n} \def\un{u^n} \def\ulmn{u_{l,m}^n} \def\ulm{u_{l,m}} \def\uqn{u_q^n} \def\wlmn{w_{l,m}^n} \def\zlmn{z_{l,m}^n} \def\ubr{u_\text{br}} \def\zbr{z_\text{br}} \def\qln{q_l^n} \def\ulun{u_{l_u}^n} \def\ulcn{u_{l_\ctxt}^n} \def\wlwn{w_{l_w}^n} \def\wmcn{w_{m_\ctxt}^n} \def\wmn{w_m^n} \def\Psiln{\Psi_l^n} \def\Psinp{\Psi_l^{n+1}} \def\Psinm{\Psi_l^{n-1}} \def\Psilp{\Psi_{l+1}^n} \def\Psilm{\Psi_{l-1}^n} % bold symbols (state vectors and matrices) \def\u{\mathbf{u}} \def\w{\mathbf{w}} \def\I{\mathbf{I}} \def\A{\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}} \def\Q{\mathbf{Q}} \def\U{\mathbf{U}} \def\J{\mathbf{J}} \def\i{\mathbf{i}} \def\j{\mathbf{j}} % interpolators \def\Iu{I_{l, u}(x_\ctxt)} \def\Iw{I_{m, w}(\chi_\ctxt)} \def\Ju{J_{l, u}(x_\ctxt)} \def\Jw{J_{m, w}(\chi_\ctxt)} \def\Iq{I_q(\chi_\ctxt)} \def\Ilm{I_{l,m}(x_\ctxt)} \def\uStack{\boldsymbol{u}} % mathfraks \def\H{\mathfrak{H}} \def\h{\mathfrak{h}} \def\t{\mathfrak{t}} \def\v{\mathfrak{v}} \def\q{\mathfrak{q}} \def\b{\mathfrak{b}} \def\p{\mathfrak{p}} % continuous operators \def\ptt{\partial_t^2} \def\pxx{\partial_x^2} \def\pxxx{\partial_x^3} \def\pxxxx{\partial_x^4} \def\pyy{\partial_y^2} \def\pt{\partial_t} \def\px{\partial_x} \def\py{\partial_y} % discrete operators \def\dtt{\delta_{tt}} \def\dxx{\delta_{xx}} \def\dxxx{\delta_{xxx}} \def\dxxxx{\delta_{xxxx}} \def\dcc{\delta_{\chi\chi}} \def\dcccc{\delta_{\chi\chi\chi\chi}} \def\dtd{\delta_{t\cdot}} \def\dtp{\delta_{t+}} \def\dtm{\delta_{t-}} \def\dxd{\delta_{x\cdot}} \def\dxp{\delta_{x+}} \def\dxm{\delta_{x-}} \def\dyd{\delta_{y\cdot}} \def\dyp{\delta_{y+}} \def\dym{\delta_{y-}} \def\mtt{\mu_{tt}} \def\mtd{\mu_{t\cdot}} \def\mtp{\mu_{t+}} \def\mtm{\mu_{t-}} \def\mxx{\mu_{xx}} \def\mxd{\mu_{x\cdot}} \def\mxp{\mu_{x+}} \def\mxm{\mu_{x-}} \def\dDelta{\delta_{\Delta}} % \def\dDbox{\delta_{\Delta\boxplus}} \def\dyy{\delta_{yy}} % matrix operators \def\Dxx{\mathbf{D}_{xx}} \def\Dxxxx{\mathbf{D}_{xxxx}} \def\DDeltamat{\mathbf{D}_\Delta} \def\DDeltaDelta{\mathbf{D}_{\Delta\Delta}} \def\Dyy{\mathbf{D}_{yy}} % often-used variables \def\sz{\sigma_{0}} \def\so{\sigma_{1}} \def\vrel{v_\text{rel}} \def\Sbar{\bar{S}} \def\Sm{S_{l-1/2}} \def\Sp{S_{l+1/2}} \def\szX[#1]{\sigma_{0,{#1}}} \def\soX[#1]{\sigma_{1,{#1}}} \def\fs{f_\text{s}} \def\el{\epsilon_\text{l}} \def\er{\epsilon_\text{r}} % mathcals \def\D{\mathcal{D}} \def\L{\mathcal{L}} \def\OO{\mathcal{O}} \def\S{\mathcal{S}} % flooring ceiling \def\floor[#1]{\left\lfloor #1 \right\rfloor} \def\ceil[#1]{\left\lceil #1 \right\rceil} \def\ansatz{\ 
\overset{\mathcal{A}}{\Longrightarrow}\ } % other \def\qaq{\quad \text{and} \quad} \def\qwiq{\quad \text{with} \quad} \def\qwhq{\quad \text{where} \quad} \def\mystrut{\rule[-.2\baselineskip]{0pt}{\baselineskip}} \def\th{\textsuperscript{th} } \def\thOrder{\textsuperscript{th}-order } \def\boldPhi{\boldsymbol{\phi}} \def\boldPsi{\boldsymbol{\Psi}} \def\eig{\text{eig}} %frontmatter \frontmatter \pagestyle{empty} %disable headers and footers \pagenumbering{roman} %use roman page numbering in the frontmatter \input{frontmatter/frontpage} \input{frontmatter/colophon} \input{frontmatter/cv} % \input{frontmatter/acknowledgements} \input{frontmatter/abstract} \input{frontmatter/preface} \input{frontmatter/listOfPublications} \input{frontmatter/listOfSymbols.tex} \input{frontmatter/listOfAbbreviations.tex} \cleardoublepage \pdfbookmark[0]{Contents}{label:contents} \pagestyle{fancy} %enable headers and footers again \tableofcontents % \listoffigures % \listoftables \listoftodos \cleardoublepage \def\makeAlph[#1]{\symbol{\numexpr96+#1}} %mainmatter \mainmatter \part{Introduction}\label{part:introduction} \input{introduction/introduction} % \part{Resonators}\label{part:resonators} % \input{resonators/resonators} % \part{Exciters}\label{part:exciters} % \input{exciters/exciters} % \part{Interactions}\label{part:interactions} % \input{interactions/interactions} % % \part{Real-Time Implementation and Control}\label{part:realtime} % % \input{realtime/realtime} % \part{Contributions}\label{part:contributions} % \input{contributions/contributions} % % \part{Complete Instruments}\label{part:instrument} % % \input{instrument/instrument} % \part{Conclusions and Perspectives}\label{part:conclusion} % \input{conclusion/conclusion.tex} \pagebreak % \nocite{*} %only if you want all references in there \todo{check whether all references are used} \makeatletter \renewenvironment{thebibliography}[1] {\chapter*{\bibname}% \@mkboth{\MakeUppercase\bibname}{\MakeUppercase\bibname}% \list{\@biblabel{\@arabic\c@enumiv}}% {\settowidth\labelwidth{\@biblabel{#1}}% \leftmargin\labelwidth \advance\leftmargin\labelsep \@openbib@code \usecounter{enumiv}% \let\p@enumiv\@empty \renewcommand\theenumiv{\@arabic\c@enumiv}}% \sloppy \clubpenalty4000 \@clubpenalty \clubpenalty \widowpenalty4000% \sfcode`\.\@m} {\def\@noitemerr {\@latex@warning{Empty `thebibliography' environment}}% \endlist} \makeatother {\bibliographystyle{IEEEtran}\bibliography{bib/mybib}} \addcontentsline{toc}{chapter}{References}\todo{check whether to sort references or not} %backmatter \newif\ifapp \apptrue % uncomment to include appendices \newif\ifpapers % \paperstrue % uncomment to include papers \ifapp \renewcommand\thesection{\thechapter.\arabic{section}} \begin{subappendices} \begin{appendices} \renewcommand\appendixname{Paper} \setcounter{chapter}{0} % reset stuff \renewcommand\thesection{\arabic{section}} \part{Papers}\label{part:papers} % \pagestyle{plain} \titleformat{%command \chapter }[%shape display% ]{%format \normalfont\huge }{%label \begin{center}\color{aaublue}\chaptertitlename\ \thechapter\end{center} }{%style 1cm }{%code before title \thispagestyle{empty}\begin{center}\Large }[%code after title \end{center} ] \input{papers/papers.tex} \pagestyle{fancy} % \input{papers/paperA/paperA.tex} \end{appendices} \begin{appendices} \renewcommand\appendixname{Appendix} \renewcommand\thesection{\thechapter.\arabic{section}}
\renewcommand{\theequation}{\thechapter.\arabic{equation}} \setcounter{chapter}{0} % reset stuff \part{Appendix} \label{part:appendix} \input{appendices/appendices} \end{appendices} \end{subappendices} \fi % \titleformat{%command % \chapter % }[%shape % display% % ]{%format % \normalfont\huge % }{%label % \begin{center}\color{aaublue}\chaptertitlename\ \thechapter\end{center} % }{%style % 1cm % }{%code before title % \thispagestyle{empty}\begin{center}\Large % [%code after title % ] % \end{center}} \pagebreak \pagestyle{empty} \justify \todo{format the blurb}Digital versions of musical instruments have been created for several decades, and for good reason! They are more compact, more easy to maintain, and less difficult to play than their real-life counterparts. One way to digitise an instrument is to record it and play back the samples, but this does not capture the entire range of expression of the real instrument. Simulating an instrument based on its physics, including its geometry and material properties, is much more flexible to player control. Although it requires more computational power to generate the sound in real time, the simulation could possibly go beyond what is physically possible. A violin growing into a cello, bowing your trumpet, your imagination is the limit... % \includepaper{papers/paperA/paperA} % \includepaper{papers/paperB/paperB} \end{document}
(* Title: Computing Square Roots using the Babylonian Method Author: René Thiemann <[email protected]> Maintainer: René Thiemann License: LGPL *) (* Copyright 2009-2014 René Thiemann This file is part of IsaFoR/CeTA. IsaFoR/CeTA is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. IsaFoR/CeTA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with IsaFoR/CeTA. If not, see <http://www.gnu.org/licenses/>. *) theory Sqrt_Babylonian imports Sqrt_Babylonian_Auxiliary NthRoot_Impl begin section \<open>Executable algorithms for square roots\<close> text \<open> This theory provides executable algorithms for computing square-roots of numbers which are all based on the Babylonian method (which is also known as Heron's method or Newton's method). For integers / naturals / rationals precise algorithms are given, i.e., here $sqrt\ x$ delivers a list of all integers / naturals / rationals $y$ where $y^2 = x$. To this end, the Babylonian method has been adapted by using integer-divisions. In addition to the precise algorithms, we also provide approximation algorithms. One works for arbitrary linear ordered fields, where some number $y$ is computed such that @{term "abs(y^2 - x) < \<epsilon>"}. Moreover, for the naturals, integers, and rationals we provide algorithms to compute @{term "floor (sqrt x)"} and @{term "ceiling (sqrt x)"} which are all based on the underlying algorithm that is used to compute the precise square-roots on integers, if these exist. The major motivation for developing the precise algorithms was given by \ceta{} \cite{CeTA}, a tool for certifiying termination proofs. Here, non-linear equations of the form $(a_1x_1 + \dots a_nx_n)^2 = p$ had to be solved over the integers, where $p$ is a concrete polynomial. For example, for the equation $(ax + by)^2 = 4x^2 - 12xy + 9y^2$ one easily figures out that $a^2 = 4, b^2 = 9$, and $ab = -6$, which results in a possible solution $a = \sqrt 4 = 2, b = - \sqrt 9 = -3$. \<close> subsection \<open>The Babylonian method\<close> text \<open> The Babylonian method for computing $\sqrt n$ iteratively computes \[ x_{i+1} = \frac{\frac n{x_i} + x_i}2 \] until $x_i^2 \approx n$. Note that if $x_0^2 \geq n$, then for all $i$ we have both $x_i^2 \geq n$ and $x_i \geq x_{i+1}$. \<close> subsection \<open>The Babylonian method using integer division\<close> text \<open> First, the algorithm is developed for the non-negative integers. Here, the division operation $\frac xy$ is replaced by @{term "x div y = \<lfloor>of_int x / of_int y\<rfloor>"}. Note that replacing @{term "\<lfloor>of_int x / of_int y\<rfloor>"} by @{term "\<lceil>of_int x / of_int y\<rceil>"} would lead to non-termination in the following algorithm. We explicititly develop the algorithm on the integers and not on the naturals, as the calculations on the integers have been much easier. For example, $y - x + x = y$ on the integers, which would require the side-condition $y \geq x$ for the naturals. These conditions will make the reasoning much more tedious---as we have experienced in an earlier state of this development where everything was based on naturals. 
Since the elements $x_0, x_1, x_2,\dots$ are monotone decreasing, in the main algorithm we abort as soon as $x_i^2 \leq n$.\<close> text \<open>\textbf{Since in the meantime, all of these algorithms have been generalized to arbitrary $p$-th roots in @{theory Sqrt_Babylonian.NthRoot_Impl}, we just instantiate the general algorithms by $p = 2$ and then provide specialized code equations which are more efficient than the general purpose algorithms.}\<close> definition sqrt_int_main' :: "int \<Rightarrow> int \<Rightarrow> int \<times> bool" where [simp]: "sqrt_int_main' x n = root_int_main' 1 1 2 x n" lemma sqrt_int_main'_code[code]: "sqrt_int_main' x n = (let x2 = x * x in if x2 \<le> n then (x, x2 = n) else sqrt_int_main' ((n div x + x) div 2) n)" using root_int_main'.simps[of 1 1 2 x n] unfolding Let_def by auto definition sqrt_int_main :: "int \<Rightarrow> int \<times> bool" where [simp]: "sqrt_int_main x = root_int_main 2 x" lemma sqrt_int_main_code[code]: "sqrt_int_main x = sqrt_int_main' (start_value x 2) x" by (simp add: root_int_main_def Let_def) definition sqrt_int :: "int \<Rightarrow> int list" where "sqrt_int x = root_int 2 x" lemma sqrt_int_code[code]: "sqrt_int x = (if x < 0 then [] else case sqrt_int_main x of (y,True) \<Rightarrow> if y = 0 then [0] else [y,-y] | _ \<Rightarrow> [])" proof - interpret fixed_root 2 1 by (unfold_locales, auto) obtain b y where res: "root_int_main 2 x = (b,y)" by force show ?thesis unfolding sqrt_int_def root_int_def Let_def using root_int_main[OF _ res] using res by simp qed lemma sqrt_int[simp]: "set (sqrt_int x) = {y. y * y = x}" unfolding sqrt_int_def by (simp add: power2_eq_square) lemma sqrt_int_pos: assumes res: "sqrt_int x = Cons s ms" shows "s \<ge> 0" proof - note res = res[unfolded sqrt_int_code Let_def, simplified] from res have x0: "x \<ge> 0" by (cases ?thesis, auto) obtain ss b where call: "sqrt_int_main x = (ss,b)" by force from res[unfolded call] x0 have "ss = s" by (cases b, cases "ss = 0", auto) from root_int_main(1)[OF x0 call[unfolded this sqrt_int_main_def]] show ?thesis . 
qed definition [simp]: "sqrt_int_floor_pos x = root_int_floor_pos 2 x" lemma sqrt_int_floor_pos_code[code]: "sqrt_int_floor_pos x = fst (sqrt_int_main x)" by (simp add: root_int_floor_pos_def) lemma sqrt_int_floor_pos: assumes x: "x \<ge> 0" shows "sqrt_int_floor_pos x = \<lfloor> sqrt (of_int x) \<rfloor>" using root_int_floor_pos[OF x, of 2] by (simp add: sqrt_def) definition [simp]: "sqrt_int_ceiling_pos x = root_int_ceiling_pos 2 x" lemma sqrt_int_ceiling_pos_code[code]: "sqrt_int_ceiling_pos x = (case sqrt_int_main x of (y,b) \<Rightarrow> if b then y else y + 1)" by (simp add: root_int_ceiling_pos_def) lemma sqrt_int_ceiling_pos: assumes x: "x \<ge> 0" shows "sqrt_int_ceiling_pos x = \<lceil> sqrt (of_int x) \<rceil>" using root_int_ceiling_pos[OF x, of 2] by (simp add: sqrt_def) definition "sqrt_int_floor x = root_int_floor 2 x" lemma sqrt_int_floor_code[code]: "sqrt_int_floor x = (if x \<ge> 0 then sqrt_int_floor_pos x else - sqrt_int_ceiling_pos (- x))" unfolding sqrt_int_floor_def root_int_floor_def by simp lemma sqrt_int_floor[simp]: "sqrt_int_floor x = \<lfloor> sqrt (of_int x) \<rfloor>" by (simp add: sqrt_int_floor_def sqrt_def) definition "sqrt_int_ceiling x = root_int_ceiling 2 x" lemma sqrt_int_ceiling_code[code]: "sqrt_int_ceiling x = (if x \<ge> 0 then sqrt_int_ceiling_pos x else - sqrt_int_floor_pos (- x))" unfolding sqrt_int_ceiling_def root_int_ceiling_def by simp lemma sqrt_int_ceiling[simp]: "sqrt_int_ceiling x = \<lceil> sqrt (of_int x) \<rceil>" by (simp add: sqrt_int_ceiling_def sqrt_def) lemma sqrt_int_ceiling_bound: "0 \<le> x \<Longrightarrow> x \<le> (sqrt_int_ceiling x)^2" unfolding sqrt_int_ceiling using le_of_int_ceiling sqrt_le_D by (metis of_int_power_le_of_int_cancel_iff) subsection \<open>Square roots for the naturals\<close> definition sqrt_nat :: "nat \<Rightarrow> nat list" where "sqrt_nat x = root_nat 2 x" lemma sqrt_nat_code[code]: "sqrt_nat x \<equiv> map nat (take 1 (sqrt_int (int x)))" unfolding sqrt_nat_def root_nat_def sqrt_int_def by simp lemma sqrt_nat[simp]: "set (sqrt_nat x) = { y. y * y = x}" unfolding sqrt_nat_def using root_nat[of 2 x] by (simp add: power2_eq_square) definition sqrt_nat_floor :: "nat \<Rightarrow> int" where "sqrt_nat_floor x = root_nat_floor 2 x" lemma sqrt_nat_floor_code[code]: "sqrt_nat_floor x = sqrt_int_floor_pos (int x)" unfolding sqrt_nat_floor_def root_nat_floor_def by simp lemma sqrt_nat_floor[simp]: "sqrt_nat_floor x = \<lfloor> sqrt (real x) \<rfloor>" unfolding sqrt_nat_floor_def by (simp add: sqrt_def) definition sqrt_nat_ceiling :: "nat \<Rightarrow> int" where "sqrt_nat_ceiling x = root_nat_ceiling 2 x" lemma sqrt_nat_ceiling_code[code]: "sqrt_nat_ceiling x = sqrt_int_ceiling_pos (int x)" unfolding sqrt_nat_ceiling_def root_nat_ceiling_def by simp lemma sqrt_nat_ceiling[simp]: "sqrt_nat_ceiling x = \<lceil> sqrt (real x) \<rceil>" unfolding sqrt_nat_ceiling_def by (simp add: sqrt_def) subsection \<open>Square roots for the rationals\<close> definition sqrt_rat :: "rat \<Rightarrow> rat list" where "sqrt_rat x = root_rat 2 x" lemma sqrt_rat_code[code]: "sqrt_rat x = (case quotient_of x of (z,n) \<Rightarrow> (case sqrt_int n of [] \<Rightarrow> [] | sn # xs \<Rightarrow> map (\<lambda> sz. of_int sz / of_int sn) (sqrt_int z)))" proof - obtain z n where q: "quotient_of x = (z,n)" by force show ?thesis unfolding sqrt_rat_def root_rat_def q split sqrt_int_def by (cases "root_int 2 n", auto) qed lemma sqrt_rat[simp]: "set (sqrt_rat x) = { y. 
y * y = x}" unfolding sqrt_rat_def using root_rat[of 2 x] by (simp add: power2_eq_square) lemma sqrt_rat_pos: assumes sqrt: "sqrt_rat x = Cons s ms" shows "s \<ge> 0" proof - obtain z n where q: "quotient_of x = (z,n)" by force note sqrt = sqrt[unfolded sqrt_rat_code q, simplified] let ?sz = "sqrt_int z" let ?sn = "sqrt_int n" from q have n: "n > 0" by (rule quotient_of_denom_pos) from sqrt obtain sz mz where sz: "?sz = sz # mz" by (cases ?sn, auto) from sqrt obtain sn mn where sn: "?sn = sn # mn" by (cases ?sn, auto) from sqrt_int_pos[OF sz] sqrt_int_pos[OF sn] have pos: "0 \<le> sz" "0 \<le> sn" by auto from sqrt sz sn have s: "s = of_int sz / of_int sn" by auto show ?thesis unfolding s using pos by (metis of_int_0_le_iff zero_le_divide_iff) qed definition sqrt_rat_floor :: "rat \<Rightarrow> int" where "sqrt_rat_floor x = root_rat_floor 2 x" lemma sqrt_rat_floor_code[code]: "sqrt_rat_floor x = (case quotient_of x of (a,b) \<Rightarrow> sqrt_int_floor (a * b) div b)" unfolding sqrt_rat_floor_def root_rat_floor_def by (simp add: sqrt_def) lemma sqrt_rat_floor[simp]: "sqrt_rat_floor x = \<lfloor> sqrt (of_rat x) \<rfloor>" unfolding sqrt_rat_floor_def by (simp add: sqrt_def) definition sqrt_rat_ceiling :: "rat \<Rightarrow> int" where "sqrt_rat_ceiling x = root_rat_ceiling 2 x" lemma sqrt_rat_ceiling_code[code]: "sqrt_rat_ceiling x = - (sqrt_rat_floor (-x))" unfolding sqrt_rat_ceiling_def sqrt_rat_floor_def root_rat_ceiling_def by simp lemma sqrt_rat_ceiling: "sqrt_rat_ceiling x = \<lceil> sqrt (of_rat x) \<rceil>" unfolding sqrt_rat_ceiling_def by (simp add: sqrt_def) lemma sqr_rat_of_int: assumes x: "x * x = rat_of_int i" shows "\<exists> j :: int. j * j = i" proof - from x have mem: "x \<in> set (sqrt_rat (rat_of_int i))" by simp from x have "rat_of_int i \<ge> 0" by (metis zero_le_square) hence *: "quotient_of (rat_of_int i) = (i,1)" by (metis quotient_of_int) have 1: "sqrt_int 1 = [1,-1]" by code_simp from mem sqrt_rat_code * split 1 have x: "x \<in> rat_of_int ` {y. y * y = i}" by auto thus ?thesis by auto qed subsection \<open>Approximating square roots\<close> text \<open> The difference to the previous algorithms is that now we abort, once the distance is below $\epsilon$. Moreover, here we use standard division and not integer division. This part is not yet generalized by @{theory Sqrt_Babylonian.NthRoot_Impl}. We first provide the executable version without guard @{term "x > 0"} as partial function, and afterwards prove termination and soundness for a similar algorithm that is defined within the upcoming locale. \<close> partial_function (tailrec) sqrt_approx_main_impl :: "'a :: linordered_field \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a" where [code]: "sqrt_approx_main_impl \<epsilon> n x = (if x * x - n < \<epsilon> then x else sqrt_approx_main_impl \<epsilon> n ((n / x + x) / 2))" text \<open>We setup a locale where we ensure that we have standard assumptions: positive $\epsilon$ and positive $n$. We require sort @{term floor_ceiling}, since @{term "\<lfloor> x \<rfloor>"} is used for the termination argument.\<close> locale sqrt_approximation = fixes \<epsilon> :: "'a :: {linordered_field,floor_ceiling}" and n :: 'a assumes \<epsilon> : "\<epsilon> > 0" and n: "n > 0" begin function sqrt_approx_main :: "'a \<Rightarrow> 'a" where "sqrt_approx_main x = (if x > 0 then (if x * x - n < \<epsilon> then x else sqrt_approx_main ((n / x + x) / 2)) else 0)" by pat_completeness auto text \<open>Termination essentially is a proof of convergence. 
Here, one complication is the fact that the limit is not always defined. E.g., if @{typ "'a"} is @{typ rat} then there is no square root of 2. Therefore, the error-rate $\frac x{\sqrt n} - 1$ is not expressible. Instead we use the expression $\frac{x^2}n - 1$ as error-rate which does not require any square-root operation.\<close> termination proof - define er where "er x = (x * x / n - 1)" for x define c where "c = 2 * n / \<epsilon>" define m where "m x = nat \<lfloor> c * er x \<rfloor>" for x have c: "c > 0" unfolding c_def using n \<epsilon> by auto show ?thesis proof show "wf (measures [m])" by simp next fix x assume x: "0 < x" and xe: "\<not> x * x - n < \<epsilon>" define y where "y = (n / x + x) / 2" show "((n / x + x) / 2,x) \<in> measures [m]" unfolding y_def[symmetric] proof (rule measures_less) from n have inv_n: "1 / n > 0" by auto from xe have "x * x - n \<ge> \<epsilon>" by simp from this[unfolded mult_le_cancel_left_pos[OF inv_n, of \<epsilon>, symmetric]] have erxen: "er x \<ge> \<epsilon> / n" unfolding er_def using n by (simp add: field_simps) have en: "\<epsilon> / n > 0" and ne: "n / \<epsilon> > 0" using \<epsilon> n by auto from en erxen have erx: "er x > 0" by linarith have pos: "er x * 4 + er x * (er x * 4) > 0" using erx by (auto intro: add_pos_nonneg) have "er y = 1 / 4 * (n / (x * x) - 2 + x * x / n)" unfolding er_def y_def using x n by (simp add: field_simps) also have "\<dots> = 1 / 4 * er x * er x / (1 + er x)" unfolding er_def using x n by (simp add: field_simps) finally have "er y = 1 / 4 * er x * er x / (1 + er x)" . also have "\<dots> < 1 / 4 * (1 + er x) * er x / (1 + er x)" using erx erx pos by (auto simp: field_simps) also have "\<dots> = er x / 4" using erx by (simp add: field_simps) finally have er_y_x: "er y \<le> er x / 4" by linarith from erxen have "c * er x \<ge> 2" unfolding c_def mult_le_cancel_left_pos[OF ne, of _ "er x", symmetric] using n \<epsilon> by (auto simp: field_simps) hence pos: "\<lfloor>c * er x\<rfloor> > 0" "\<lfloor>c * er x\<rfloor> \<ge> 2" by auto show "m y < m x" unfolding m_def nat_mono_iff[OF pos(1)] proof - have "\<lfloor>c * er y\<rfloor> \<le> \<lfloor>c * (er x / 4)\<rfloor>" by (rule floor_mono, unfold mult_le_cancel_left_pos[OF c], rule er_y_x) also have "\<dots> < \<lfloor>c * er x / 4 + 1\<rfloor>" by auto also have "\<dots> \<le> \<lfloor>c * er x\<rfloor>" by (rule floor_mono, insert pos(2), simp add: field_simps) finally show "\<lfloor>c * er y\<rfloor> < \<lfloor>c * er x\<rfloor>" . 
qed qed qed qed text \<open>Once termination is proven, it is easy to show equivalence of @{const sqrt_approx_main_impl} and @{const sqrt_approx_main}.\<close> lemma sqrt_approx_main_impl: "x > 0 \<Longrightarrow> sqrt_approx_main_impl \<epsilon> n x = sqrt_approx_main x" proof (induct x rule: sqrt_approx_main.induct) case (1 x) hence x: "x > 0" by auto hence nx: "0 < (n / x + x) / 2" using n by (auto intro: pos_add_strict) note simps = sqrt_approx_main_impl.simps[of _ _ x] sqrt_approx_main.simps[of x] show ?case proof (cases "x * x - n < \<epsilon>") case True thus ?thesis unfolding simps using x by auto next case False show ?thesis using 1(1)[OF x False nx] unfolding simps using x False by auto qed qed text \<open>Also soundness is not complicated.\<close> lemma sqrt_approx_main_sound: assumes x: "x > 0" and xx: "x * x > n" shows "sqrt_approx_main x * sqrt_approx_main x > n \<and> sqrt_approx_main x * sqrt_approx_main x - n < \<epsilon>" using assms proof (induct x rule: sqrt_approx_main.induct) case (1 x) from 1 have x: "x > 0" "(x > 0) = True" by auto note simp = sqrt_approx_main.simps[of x, unfolded x if_True] show ?case proof (cases "x * x - n < \<epsilon>") case True with 1 show ?thesis unfolding simp by simp next case False let ?y = "(n / x + x) / 2" from False simp have simp: "sqrt_approx_main x = sqrt_approx_main ?y" by simp from n x have y: "?y > 0" by (auto intro: pos_add_strict) note IH = 1(1)[OF x(1) False y] from x have x4: "4 * x * x > 0" by (auto intro: mult_sign_intros) show ?thesis unfolding simp proof (rule IH) show "n < ?y * ?y" unfolding mult_less_cancel_left_pos[OF x4, of n, symmetric] proof - have id: "4 * x * x * (?y * ?y) = 4 * x * x * n + (n - x * x) * (n - x * x)" using x(1) by (simp add: field_simps) from 1(3) have "x * x - n > 0" by auto from mult_pos_pos[OF this this] show "4 * x * x * n < 4 * x * x * (?y * ?y)" unfolding id by (simp add: field_simps) qed qed qed qed end text \<open>It remains to assemble everything into one algorithm.\<close> definition sqrt_approx :: "'a :: {linordered_field,floor_ceiling} \<Rightarrow> 'a \<Rightarrow> 'a" where "sqrt_approx \<epsilon> x \<equiv> if \<epsilon> > 0 then (if x = 0 then 0 else let xpos = abs x in sqrt_approx_main_impl \<epsilon> xpos (xpos + 1)) else 0" lemma sqrt_approx: assumes \<epsilon>: "\<epsilon> > 0" shows "\<bar>sqrt_approx \<epsilon> x * sqrt_approx \<epsilon> x - \<bar>x\<bar>\<bar> < \<epsilon>" proof (cases "x = 0") case True with \<epsilon> show ?thesis unfolding sqrt_approx_def by auto next case False let ?x = "\<bar>x\<bar>" let ?sqrti = "sqrt_approx_main_impl \<epsilon> ?x (?x + 1)" let ?sqrt = "sqrt_approximation.sqrt_approx_main \<epsilon> ?x (?x + 1)" define sqrt where "sqrt = ?sqrt" from False have x: "?x > 0" "?x + 1 > 0" by auto interpret sqrt_approximation \<epsilon> ?x by (unfold_locales, insert x \<epsilon>, auto) from False \<epsilon> have "sqrt_approx \<epsilon> x = ?sqrti" unfolding sqrt_approx_def by (simp add: Let_def) also have "?sqrti = ?sqrt" by (rule sqrt_approx_main_impl, auto) finally have id: "sqrt_approx \<epsilon> x = sqrt" unfolding sqrt_def . have sqrt: "sqrt * sqrt > ?x \<and> sqrt * sqrt - ?x < \<epsilon>" unfolding sqrt_def by (rule sqrt_approx_main_sound[OF x(2)], insert x mult_pos_pos[OF x(1) x(1)], auto simp: field_simps) show ?thesis unfolding id using sqrt by auto qed subsection \<open>Some tests\<close> text \<open>Testing executabity and show that sqrt 2 is irrational\<close> lemma "\<not> (\<exists> i :: rat. 
i * i = 2)" proof - have "set (sqrt_rat 2) = {}" by eval thus ?thesis by simp qed text \<open>Testing speed\<close> lemma "\<not> (\<exists> i :: int. i * i = 1234567890123456789012345678901234567890)" proof - have "set (sqrt_int 1234567890123456789012345678901234567890) = {}" by eval thus ?thesis by simp qed text \<open>The following test\<close> value "let \<epsilon> = 1 / 100000000 :: rat; s = sqrt_approx \<epsilon> 2 in (s, s * s - 2, \<bar>s * s - 2\<bar> < \<epsilon>)" text \<open>results in (1.4142135623731116, 4.738200762148612e-14, True).\<close> end
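The theory above is easier to follow next to a concrete run of the iteration it verifies. Below is a minimal Python sketch (not part of the Isabelle development, and with a deliberately crude start value where the theory's `start_value` is sharper) of the integer-division Babylonian loop behind `sqrt_int_main'`: keep replacing `x` by `(n div x + x) div 2` while `x * x > n`, then report the floor of the square root and whether `n` was a perfect square, matching the abort condition described in the theory text.

```python
def sqrt_int_main(n: int, x: int):
    """Sketch of sqrt_int_main': assumes 0 <= n <= x * x."""
    while x * x > n:
        x = (n // x + x) // 2   # integer division, as with 'div' in the theory
    return x, x * x == n        # (floor of sqrt n, "n is a perfect square")

def isqrt_floor(n: int) -> int:
    # crude start value with start * start >= n; start_value in the theory does better
    return sqrt_int_main(n, n + 1)[0]

# Property checks on small inputs, plus one easily verified large case.
for n in range(200):
    s, exact = sqrt_int_main(n, n + 1)
    assert s * s <= n < (s + 1) ** 2
    assert exact == any(k * k == n for k in range(n + 1))
assert isqrt_floor(10**20 + 1) == 10**10
```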
[STATEMENT] lemma le_mask_shiftl_le_mask: "s = m + n \<Longrightarrow> x \<le> mask n \<Longrightarrow> x << m \<le> mask s" for x :: \<open>'a::len word\<close> [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>s = m + n; x \<le> mask n\<rbrakk> \<Longrightarrow> x << m \<le> mask s [PROOF STEP] by (simp add: le_mask_iff shiftl_shiftr3)
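Stated over plain integers, the lemma says that anything fitting in `n` bits still fits in `m + n` bits after a left shift by `m`. A small exhaustive Python check of that inequality (unbounded integers only, so it illustrates the arithmetic fact rather than the `'a::len word` type):

```python
def mask(k: int) -> int:
    return (1 << k) - 1          # the all-ones value of width k

def check(m: int, n: int) -> bool:
    s = m + n
    return all((x << m) <= mask(s) for x in range(mask(n) + 1))

assert all(check(m, n) for m in range(6) for n in range(6))
```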
# Model-Based Multi-Compartment Constrained Spherical Deconvolution Constrained Spherical Deconvolution (CSD) *(Tournier et al. 2007)* is one of the most proven ways to estimate Fiber Orientation Distributions (FODs) that describe the orientation of the white matter tissue. In short, CSD's *single-shell* formulation states that any composition of oriented white matter (dispersion/crossings) can be described as the spherical convolution of a *positive* probability density on the sphere and a convolution kernel $K$ that describes one parallel axon bundle. With some abuse of notation: $$ \begin{align} \begin{aligned} E_{\textrm{CSD}}= \overbrace{\operatorname{FOD}(\textbf{c})}^{\textrm{Fiber Distribution}}\,*_{\mathbb{S}^2}\,\overbrace{K(\cdot)}^{\textrm{Convolution Kernel}}\quad\textrm{subject to}\quad \operatorname{FOD}(\textbf{c}) > 0. \end{aligned} \end{align}$$ Here, the FOD is described in terms of a truncated even spherical harmonics series $FOD=\sum_{l=0}^{lmax}\sum_{m=-l}^l\textbf{c}_{lm}Y_{lm}$, with $l$ and $m$ describing the order and moment of the spherical harmonic *(Descoteaux et al. 2006)*. Furthermore, $*_{\mathbb{S}^2}$ describes a spherical convolution on the $\mathbb{S}^2$ sphere, and $K$ describes the *rotational* harmonics describing a single axon bundle. The kernel $K$ is typically estimated from the data *(Tournier et al. 2007, Tax et al. 2014)*, and CSD is therefore considered a *model-free* approach. The multi-shell implementation of CSD is straight-forward in that the spherical convolution is just performed per acquisition shell. Estimating multi-shell CSD with multiple convolution kernels has also successfully been explored in Multi-Tissue CSD (MT-CSD) *(Jeurissen et al. 2014*). As with `MultiCompartmentModel` and `MultiCompartmentSphericalMeanModel`, Dmipy allows the user to create a Multi-Compartment CSD (MC-CSD) model using `MultiCompartmentSphericalHarmonicsModel`. If all parameters of the convolution kernel models are fixed, it is possible to simultaneously fit the the FOD and volume fractions of **one** anisotropic model (e.g. a Stick, Zeppelin or possibly distributed Cylinder) and $N$ isotropic compartments (Gaussian Ball or Spheres). $$ \begin{align} \begin{aligned} E_{\textrm{MC-CSD}}= vf_1 \times \overbrace{\operatorname{FOD}(\textbf{c})}^{\textrm{Fiber Distribution}}\,*_{\mathbb{S}^2}\,\overbrace{K_{\boldsymbol{\mu}}(\cdot)}^{\textrm{Anisotropic Kernel}}+\sum_{i=2}^{N+1}vf_i\times\overbrace{K_{iso}(\cdot)}^{\textrm{Isotropic Kernel}}\quad\textrm{subject to}\quad \operatorname{FOD}(\textbf{c}) > 0, \sum_{i=1}^Nvf_i=1. \end{aligned} \end{align}$$ NOTE: Dmipy's MC-CSD is NOT the same as Jeurissen et al.'s MT-CSD because at this point we are not including T2 differences between the compartment models. We will explore this in a later example. ## Multi-Shell Single-Compartment CSD As our first example we will reproduce the standard CSD formulation, using only a single Zeppelin kernel describing a white matter bundle. Notice that in this model representation we must define the maximum spherical harmonics order at which to truncate the FOD. ```python from dmipy.core.modeling_framework import MultiCompartmentSphericalHarmonicsModel from dmipy.signal_models import gaussian_models zeppelin = gaussian_models.G2Zeppelin() csd_mod = MultiCompartmentSphericalHarmonicsModel([zeppelin], sh_order=8) ``` ### Response Function Estimation Before fitting the FOD, we must first fix the parameters of the Zeppelin convolution kernel. First, we load our example slice of HCP data. 
```python from dmipy.data import saved_data scheme_hcp, data_hcp = saved_data.wu_minn_hcp_coronal_slice() ``` This data slice originates from Subject 100307 of the Human Connectome Project, WU-Minn Consortium (Principal Investigators: David Van Essen and Kamil Ugurbil; 1U54MH091657) funded by the 16 NIH Institutes and Centers that support the NIH Blueprint for Neuroscience Research; and by the McDonnell Center for Systems Neuroscience at Washington University. ```python import matplotlib.pyplot as plt import matplotlib.patches as patches %matplotlib inline fig, ax = plt.subplots(1) ax.imshow(data_hcp[:, 0, :, 0].T, origin=True) rect = patches.Rectangle((70,70),20,20,linewidth=1,edgecolor='r',facecolor='none') ax.add_patch(rect) ax.set_axis_off() ax.set_title('HCP coronal slice B0 with ROI'); ``` We gratefully make use of Dipy's automatic response function estimation. We convert Dmipy's acquisition scheme to a Dipy gradient table using `gtab_dmipy2dipy`. ```python from dmipy.core.acquisition_scheme import gtab_dmipy2dipy from dipy.reconst.csdeconv import auto_response gtab = gtab_dmipy2dipy(scheme_hcp) response, ratio = auto_response(gtab, data_hcp, roi_radius=10, fa_thr=0.7) lambdas = response[0] lambdas ``` array([0.00130588, 0.00023963, 0.00023963]) Notice that Dipy's response provides the Zeppelin's eigenvalues in mm$^2$/s, so we must first put them in SI units. ```python lambda_par = lambdas[0] * 1e-6 lambda_perp = lambdas[1] * 1e-6 ``` We then fix the parameters of the kernel. ```python csd_mod.set_fixed_parameter('G2Zeppelin_1_lambda_par', lambda_par) csd_mod.set_fixed_parameter('G2Zeppelin_1_lambda_perp', lambda_perp) ``` ### Fitting CSD to HCP data and visualizing features We are now ready to fit the CSD model as per usual. Using the default `solver='csd'` option will let the algorithm decide which optimizer to use - if volume fractions need to be estimated it will use `csd_cvxpy`, and otherwise the basic CSD `csd_tournier07` optimizer proposed by *(Tournier et al. 2007)*. Using the `csd` option will also automatically select whether to enforce that the FOD integrates to one over the sphere (i.e. it is a proper distribution). If a non-voxel-varying convolution kernel is given, it will set `unity_constraint=False`, otherwise it will be set to `True`. ```python csd_fit = csd_mod.fit( acquisition_scheme=scheme_hcp, data=data_hcp, mask=data_hcp[..., 0]>0, solver='csd') ``` Parallel processing turned off for tournier07 optimizer because it does not improve fitting speed. Setup Tournier07 FOD optimizer in 1.45701694489 seconds Fitting of 8181 voxels complete in 16.890802145 seconds. Average of 0.00206463783706 seconds per voxel. The `csd_fit` instance now constains the spherical harmonics of the FOD for every fitted voxel. For the used spherical harmonics order of $8$ that means we have estimated a total of 45 coefficients: ```python csd_fit.fitted_parameters_vector.shape ``` (145, 1, 145, 45) We have can estimate 2 metric that quantify FOD anisotropy: - The anisotropy index (AI) following *(Jespersen et al. 2007)*, ranging from 1 for completely anistropic, to zero being isotropic. - The norm of the laplacian of the spherical harmonics of the FOD, explored in e.g. *(Descoteaux et al. 2006)*, which is zero for isotropic FODs and increasingly positive for more anistropic FODs. It is relevant to not that AI is dependent on the value of the spherical mean of the FOD, but the Laplacian-norm is not. 
```python ai = csd_fit.anisotropy_index() lb = csd_fit.norm_of_laplacian_fod() fig, axs = plt.subplots(1, 2, figsize=[10, 5]) axs = axs.ravel() for counter, (name, values) in enumerate(zip(['Anisotropy Index', 'Norm of FOD Laplacian'], [ai, lb])): cf = axs[counter].imshow(values.squeeze().T, origin=True, interpolation='nearest') axs[counter].set_title(name) axs[counter].set_axis_off() fig.colorbar(cf, ax=axs[counter], shrink=0.8) ``` As you can see, the AI and laplacian-norm anisotropy metrics have different contrasts: - AI shows well the constrast between white and grey matter, but not too much between different white matter areas. - The laplacian-norm emphasizes anisotropic/coherent white matter areas, but does not show as clear the grey/white matter areas. ### Visualizing Fiber Orientation Distributions As before, we can again visualize the FOD estimated using CSD. ```python from dipy.data import get_sphere from dipy.viz.actor import slicer from dipy.viz import fvtk import numpy as np import matplotlib.image as mpimg sphere = get_sphere(name='symmetric724') fods = csd_fit.fod(sphere.vertices) affine = np.eye(4) affine[0,3] = -10 affine[1,3] = -10 ai_im = slicer(ai[70:90,0, 70:90, None], interpolation='nearest', affine=affine, opacity=0.7) ren = fvtk.ren() fod_spheres = fvtk.sphere_funcs(fods[70:90,:, 70:90], sphere, scale=1., norm=False) fod_spheres.RotateX(90) fod_spheres.RotateZ(180) fod_spheres.RotateY(180) fvtk.add(ren, fod_spheres) fvtk.add(ren, ai_im) fvtk.record(ren=ren, size=[700, 700]) img = mpimg.imread('dipy.png') plt.figure(figsize=[10, 10]) plt.imshow(img[100:-97, 100:-85]) plt.title('CSD FODs with Anistropy Index background', fontsize=20) plt.axis('off'); ``` Notice that in white matter the FODs are nicely shaped and we have a high AI, but in non-white matter they are very noisy and prominent. This is because the response function that we provided does not resemble the data in these areas, and we therefore get a non-sensical result, which is especially detrimental in partial-volumed voxels at the edge of the ventricles. In the following Multi-Compartment CSD example we will address this. ## Multi-Shell Multi-Compartment CSD Dmipy allows to estimate multiple convolution kernels at the same time, i.e., multi-compartment CSD. The only limitation is that we only allow for one *anisotropic* kernel, like the Zeppelin before, and any number of isotropic compartments, like a Gaussian Ball or Sphere, as long as all the kernel parameters are fixed. For this example, let us insert both a Zeppelin and a Ball into the MultiCompartmentSphericalHarmonicsModel to fit the white matter and CSF, respectively. ```python ball = gaussian_models.G1Ball() mod_mc_csd = MultiCompartmentSphericalHarmonicsModel( [zeppelin, ball], sh_order=8) mod_mc_csd.parameter_names ``` ['G2Zeppelin_1_lambda_perp', 'G1Ball_1_lambda_iso', 'G2Zeppelin_1_lambda_par', 'partial_volume_0', 'partial_volume_1', 'sh_coeff'] Notice that we must now fix both the Zeppelin's and the Ball's diffusivities. For now, let us fix the Ball's diffusivity to free-water and keep the Zeppelin's diffusivities as before. ```python mod_mc_csd.set_fixed_parameter('G2Zeppelin_1_lambda_par', lambda_par) mod_mc_csd.set_fixed_parameter('G2Zeppelin_1_lambda_perp', lambda_perp) mod_mc_csd.set_fixed_parameter('G1Ball_1_lambda_iso', 3e-9) ``` Since we use multiple kernels, the algorithm now chooses the 'csd_cvxpy' optimizer to solve the CSD problem. 
It can estimate the volume fractions of one anisotropic kernel and several isotropic kernels at the same time, but it is also significantly slower than the simpler 'csd_tournier07' optimizer. Estimating volume fractions in this framework will automatically set `unity_constraint=True` since the volume fractions of white matter and CSD should add up to one. ```python fit_mc_csd = mod_mc_csd.fit( acquisition_scheme=scheme_hcp, data=data_hcp, mask=data_hcp[..., 0]>0, solver='csd') ``` Setup CVXPY FOD optimizer in 0.00613498687744 seconds Using parallel processing with 8 workers. Fitting of 8181 voxels complete in 126.349025011 seconds. Average of 0.0154442030328 seconds per voxel. This time, we can also visualize the estimated volume fractions of the different kernel models. ```python fitted_parameters = fit_mc_csd.fitted_parameters fig, axs = plt.subplots(1, 2, figsize=[10, 5]) axs = axs.ravel() counter = 0 for name, values in fitted_parameters.items(): if values.squeeze().ndim != 2: continue cf = axs[counter].imshow(values.squeeze().T, origin=True, interpolation='nearest') axs[counter].set_title(name) axs[counter].set_axis_off() fig.colorbar(cf, ax=axs[counter], shrink=0.8) counter += 1 ``` Here, partial_volume_0 corresponds to the Zeppelin representing white matter. partial_volume_1 corresponds to the Ball representing CSF. Notice it is not perfect and the Ball fraction is also significant in white matter . This is because 1) we did not include TE differences for white matter and CSF as *(Jeurissen et al. 2014)*, and 2) because we did not estimate the diffusivity of the CSF from the data. Still when we visualize the FODs we can see that a simple approach like this already helped. ```python fods = fit_mc_csd.fod(sphere.vertices) vf0_im = slicer(fitted_parameters['partial_volume_0'][70:90,0, 70:90, None], interpolation='nearest', affine=affine, opacity=0.7) ren = fvtk.ren() fod_spheres = fvtk.sphere_funcs(fods[70:90,:, 70:90], sphere, scale=1., norm=False) fod_spheres.RotateX(90) fod_spheres.RotateZ(180) fod_spheres.RotateY(180) fvtk.add(ren, fod_spheres) fvtk.add(ren, vf0_im) fvtk.record(ren=ren, size=[700, 700]) img = mpimg.imread('dipy.png') plt.figure(figsize=[10, 10]) plt.imshow(img[100:-97, 100:-85]) plt.title('MC-CSD FODs with WM volume fraction background', fontsize=20) plt.axis('off'); ``` Despite our very simplistic implementation, notice how the FODs at the edge of ventricles are now less noisy and properly aligned, and the CSF FODs are nearly invisible because they are scaled with the volume fraction. In this example we illustrated Dmipy's model-based multi-compartment CSD implementation. In a next example we will illustrate a proper model-free implementation of Multi-Tissue CSD implementation by *(Jeurissen et al. 2014)*. ## References - Tournier, J-Donald, Fernando Calamante, and Alan Connelly. "Robust determination of the fibre orientation distribution in diffusion MRI: non-negativity constrained super-resolved spherical deconvolution." Neuroimage 35.4 (2007): 1459-1472. - Descoteaux, Maxime, et al. "Regularized, fast, and robust analytical Q‐ball imaging." Magnetic resonance in medicine 58.3 (2007): 497-510. - Tax, Chantal MW, et al. "Recursive calibration of the fiber response function for spherical deconvolution of diffusion MRI data." Neuroimage 86 (2014): 67-80. - Jespersen, Sune N., et al. "Modeling dendrite density from magnetic resonance diffusion measurements." Neuroimage 34.4 (2007): 1473-1486. - Jeurissen, Ben, et al. 
"Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data." NeuroImage 103 (2014): 411-426.
/- Copyright (c) 2020 David Wärn. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: David Wärn -/ import logic.encodable.basic import order.atoms /-! # Order ideals, cofinal sets, and the Rasiowa–Sikorski lemma ## Main definitions Throughout this file, `P` is at least a preorder, but some sections require more structure, such as a bottom element, a top element, or a join-semilattice structure. - `order.ideal P`: the type of nonempty, upward directed, and downward closed subsets of `P`. Dual to the notion of a filter on a preorder. - `order.is_ideal P`: a predicate for when a `set P` is an ideal. - `order.ideal.principal p`: the principal ideal generated by `p : P`. - `order.ideal.is_proper P`: a predicate for proper ideals. Dual to the notion of a proper filter. - `order.ideal.is_maximal`: a predicate for maximal ideals. Dual to the notion of an ultrafilter. - `ideal_Inter_nonempty P`: a predicate for when the intersection of all ideals of `P` is nonempty. - `order.cofinal P`: the type of subsets of `P` containing arbitrarily large elements. Dual to the notion of 'dense set' used in forcing. - `order.ideal_of_cofinals p 𝒟`, where `p : P`, and `𝒟` is a countable family of cofinal subsets of P: an ideal in `P` which contains `p` and intersects every set in `𝒟`. (This a form of the Rasiowa–Sikorski lemma.) ## References - <https://en.wikipedia.org/wiki/Ideal_(order_theory)> - <https://en.wikipedia.org/wiki/Cofinal_(mathematics)> - <https://en.wikipedia.org/wiki/Rasiowa%E2%80%93Sikorski_lemma> Note that for the Rasiowa–Sikorski lemma, Wikipedia uses the opposite ordering on `P`, in line with most presentations of forcing. ## TODO `order.ideal.ideal_Inter_nonempty` is a complicated way to say that `P` has a bottom element. It should be replaced by this clearer condition, which could be called strong directedness and which is a Prop version of `order_bot`. ## Tags ideal, cofinal, dense, countable, generic -/ open function namespace order variables {P : Type*} /-- An ideal on an order `P` is a subset of `P` that is - nonempty - upward directed (any pair of elements in the ideal has an upper bound in the ideal) - downward closed (any element less than an element of the ideal is in the ideal). -/ structure ideal (P) [has_le P] := (carrier : set P) (nonempty : carrier.nonempty) (directed : directed_on (≤) carrier) (mem_of_le : ∀ {x y : P}, x ≤ y → y ∈ carrier → x ∈ carrier) /-- A subset of a preorder `P` is an ideal if it is - nonempty - upward directed (any pair of elements in the ideal has an upper bound in the ideal) - downward closed (any element less than an element of the ideal is in the ideal). -/ @[mk_iff] structure is_ideal {P} [has_le P] (I : set P) : Prop := (nonempty : I.nonempty) (directed : directed_on (≤) I) (mem_of_le : ∀ {x y : P}, x ≤ y → y ∈ I → x ∈ I) attribute [protected] ideal.nonempty ideal.directed is_ideal.nonempty is_ideal.directed /-- Create an element of type `order.ideal` from a set satisfying the predicate `order.is_ideal`. -/ def is_ideal.to_ideal [has_le P] {I : set P} (h : is_ideal I) : ideal P := ⟨I, h.1, h.2, h.3⟩ namespace ideal section has_le variables [has_le P] {I J : ideal P} {x y : P} /-- An ideal of `P` can be viewed as a subset of `P`. -/ instance : has_coe (ideal P) (set P) := ⟨carrier⟩ /-- For the notation `x ∈ I`. -/ instance : has_mem P (ideal P) := ⟨λ x I, x ∈ (I : set P)⟩ @[simp] lemma mem_coe : x ∈ (I : set P) ↔ x ∈ I := iff_of_eq rfl /-- Two ideals are equal when their underlying sets are equal. 
-/ @[ext] lemma ext : ∀ {I J : ideal P}, (I : set P) = J → I = J | ⟨_, _, _, _⟩ ⟨_, _, _, _⟩ rfl := rfl lemma coe_injective : injective (coe : ideal P → set P) := λ _ _, ext @[simp, norm_cast] lemma coe_inj : (I : set P) = J ↔ I = J := ⟨by ext, congr_arg _⟩ lemma ext_iff : I = J ↔ (I : set P) = J := coe_inj.symm protected lemma is_ideal (I : ideal P) : is_ideal (I : set P) := ⟨I.2, I.3, I.4⟩ /-- The partial ordering by subset inclusion, inherited from `set P`. -/ instance : partial_order (ideal P) := partial_order.lift coe coe_injective @[trans] lemma mem_of_mem_of_le : x ∈ I → I ≤ J → x ∈ J := @set.mem_of_mem_of_subset P x I J /-- A proper ideal is one that is not the whole set. Note that the whole set might not be an ideal. -/ @[mk_iff] class is_proper (I : ideal P) : Prop := (ne_univ : (I : set P) ≠ set.univ) lemma is_proper_of_not_mem {I : ideal P} {p : P} (nmem : p ∉ I) : is_proper I := ⟨λ hp, begin change p ∉ ↑I at nmem, rw hp at nmem, exact nmem (set.mem_univ p), end⟩ /-- An ideal is maximal if it is maximal in the collection of proper ideals. Note that `is_coatom` is less general because ideals only have a top element when `P` is directed and nonempty. -/ @[mk_iff] class is_maximal (I : ideal P) extends is_proper I : Prop := (maximal_proper : ∀ ⦃J : ideal P⦄, I < J → (J : set P) = set.univ) variable (P) /-- An order `P` has the `ideal_Inter_nonempty` property if the intersection of all ideals is nonempty. Most importantly, the ideals of a `semilattice_sup` with this property form a complete lattice. TODO: This is equivalent to the existence of a bottom element and shouldn't be specialized to ideals. -/ class ideal_Inter_nonempty : Prop := (Inter_nonempty : (⋂ (I : ideal P), (I : set P)).nonempty) variable {P} lemma Inter_nonempty [ideal_Inter_nonempty P] : (⋂ (I : ideal P), (I : set P)).nonempty := ideal_Inter_nonempty.Inter_nonempty lemma ideal_Inter_nonempty.exists_all_mem [ideal_Inter_nonempty P] : ∃ a : P, ∀ I : ideal P, a ∈ I := begin change ∃ (a : P), ∀ (I : ideal P), a ∈ (I : set P), rw ← set.nonempty_Inter, exact Inter_nonempty, end lemma ideal_Inter_nonempty_of_exists_all_mem (h : ∃ a : P, ∀ I : ideal P, a ∈ I) : ideal_Inter_nonempty P := { Inter_nonempty := by rwa set.nonempty_Inter } lemma ideal_Inter_nonempty_iff : ideal_Inter_nonempty P ↔ ∃ a : P, ∀ I : ideal P, a ∈ I := ⟨λ _, by exactI ideal_Inter_nonempty.exists_all_mem, ideal_Inter_nonempty_of_exists_all_mem⟩ lemma inter_nonempty [is_directed P (swap (≤))] (I J : ideal P) : (I ∩ J : set P).nonempty := begin obtain ⟨a, ha⟩ := I.nonempty, obtain ⟨b, hb⟩ := J.nonempty, obtain ⟨c, hac, hbc⟩ := directed_of (swap (≤)) a b, exact ⟨c, I.mem_of_le hac ha, J.mem_of_le hbc hb⟩, end end has_le section preorder variables [preorder P] {I J : ideal P} {x y : P} /-- The smallest ideal containing a given element. -/ def principal (p : P) : ideal P := { carrier := { x | x ≤ p }, nonempty := ⟨p, le_rfl⟩, directed := λ x hx y hy, ⟨p, le_rfl, hx, hy⟩, mem_of_le := λ x y hxy hy, le_trans hxy hy, } instance [inhabited P] : inhabited (ideal P) := ⟨ideal.principal default⟩ @[simp] lemma principal_le_iff : principal x ≤ I ↔ x ∈ I := ⟨λ (h : ∀ {y}, y ≤ x → y ∈ I), h (le_refl x), λ h_mem y (h_le : y ≤ x), I.mem_of_le h_le h_mem⟩ @[simp] lemma mem_principal : x ∈ principal y ↔ x ≤ y := iff.rfl lemma mem_compl_of_ge {x y : P} : x ≤ y → x ∈ (I : set P)ᶜ → y ∈ (I : set P)ᶜ := λ h, mt (I.mem_of_le h) end preorder section order_bot /-- A specific witness of `I.nonempty` when `P` has a bottom element. 
-/ @[simp] lemma bot_mem [has_le P] [order_bot P] {I : ideal P} : ⊥ ∈ I := I.mem_of_le bot_le I.nonempty.some_mem variables [preorder P] [order_bot P] {I : ideal P} /-- There is a bottom ideal when `P` has a bottom element. -/ instance : order_bot (ideal P) := { bot := principal ⊥, bot_le := by simp } @[priority 100] instance order_bot.ideal_Inter_nonempty : ideal_Inter_nonempty P := by { rw ideal_Inter_nonempty_iff, exact ⟨⊥, λ I, bot_mem⟩ } end order_bot section directed variables [has_le P] [is_directed P (≤)] [nonempty P] {I : ideal P} /-- In a directed and nonempty order, the top ideal of a is `set.univ`. -/ instance : order_top (ideal P) := { top := { carrier := set.univ, nonempty := set.univ_nonempty, directed := directed_on_univ, mem_of_le := λ _ _ _ _, trivial }, le_top := λ I, le_top } @[simp] lemma coe_top : ((⊤ : ideal P) : set P) = set.univ := rfl lemma is_proper_of_ne_top (ne_top : I ≠ ⊤) : is_proper I := ⟨λ h, ne_top $ ext h⟩ lemma is_proper.ne_top (hI : is_proper I) : I ≠ ⊤ := begin intro h, rw [ext_iff, coe_top] at h, apply hI.ne_univ, assumption, end lemma _root_.is_coatom.is_proper (hI : is_coatom I) : is_proper I := is_proper_of_ne_top hI.1 lemma is_proper_iff_ne_top : is_proper I ↔ I ≠ ⊤ := ⟨λ h, h.ne_top, λ h, is_proper_of_ne_top h⟩ lemma is_maximal.is_coatom (h : is_maximal I) : is_coatom I := ⟨is_maximal.to_is_proper.ne_top, λ _ _, by { rw [ext_iff, coe_top], exact is_maximal.maximal_proper ‹_› }⟩ lemma is_maximal.is_coatom' [is_maximal I] : is_coatom I := is_maximal.is_coatom ‹_› lemma _root_.is_coatom.is_maximal (hI : is_coatom I) : is_maximal I := { maximal_proper := λ _ _, by simp [hI.2 _ ‹_›], ..is_coatom.is_proper ‹_› } lemma is_maximal_iff_is_coatom : is_maximal I ↔ is_coatom I := ⟨λ h, h.is_coatom, λ h, h.is_maximal⟩ end directed section order_top variables [has_le P] [order_top P] {I : ideal P} lemma top_of_top_mem (hI : ⊤ ∈ I) : I = ⊤ := by { ext, exact iff_of_true (I.mem_of_le le_top hI) trivial } lemma is_proper.top_not_mem (hI : is_proper I) : ⊤ ∉ I := λ h, hI.ne_top $ top_of_top_mem h end order_top section semilattice_sup variables [semilattice_sup P] {x y : P} {I : ideal P} /-- A specific witness of `I.directed` when `P` has joins. -/ lemma sup_mem (x y ∈ I) : x ⊔ y ∈ I := let ⟨z, h_mem, hx, hy⟩ := I.directed x ‹_› y ‹_› in I.mem_of_le (sup_le hx hy) h_mem @[simp] lemma sup_mem_iff : x ⊔ y ∈ I ↔ x ∈ I ∧ y ∈ I := ⟨λ h, ⟨I.mem_of_le le_sup_left h, I.mem_of_le le_sup_right h⟩, λ h, sup_mem x h.left y h.right⟩ end semilattice_sup section semilattice_sup_directed variables [semilattice_sup P] [is_directed P (swap (≤))] {x : P} {I J K : ideal P} /-- The infimum of two ideals of a co-directed order is their intersection. -/ instance : has_inf (ideal P) := ⟨λ I J, { carrier := I ∩ J, nonempty := inter_nonempty I J, directed := λ x ⟨_, _⟩ y ⟨_, _⟩, ⟨x ⊔ y, ⟨sup_mem x ‹_› y ‹_›, sup_mem x ‹_› y ‹_›⟩, by simp⟩, mem_of_le := λ x y h ⟨_, _⟩, ⟨mem_of_le I h ‹_›, mem_of_le J h ‹_›⟩ }⟩ /-- The supremum of two ideals of a co-directed order is the union of the down sets of the pointwise supremum of `I` and `J`. -/ instance : has_sup (ideal P) := ⟨λ I J, { carrier := {x | ∃ (i ∈ I) (j ∈ J), x ≤ i ⊔ j}, nonempty := by { cases inter_nonempty I J, exact ⟨w, w, h.1, w, h.2, le_sup_left⟩ }, directed := λ x ⟨xi, _, xj, _, _⟩ y ⟨yi, _, yj, _, _⟩, ⟨x ⊔ y, ⟨xi ⊔ yi, sup_mem xi ‹_› yi ‹_›, xj ⊔ yj, sup_mem xj ‹_› yj ‹_›, sup_le (calc x ≤ xi ⊔ xj : ‹_› ... ≤ (xi ⊔ yi) ⊔ (xj ⊔ yj) : sup_le_sup le_sup_left le_sup_left) (calc y ≤ yi ⊔ yj : ‹_› ... 
≤ (xi ⊔ yi) ⊔ (xj ⊔ yj) : sup_le_sup le_sup_right le_sup_right)⟩, le_sup_left, le_sup_right⟩, mem_of_le := λ x y _ ⟨yi, _, yj, _, _⟩, ⟨yi, ‹_›, yj, ‹_›, le_trans ‹x ≤ y› ‹_›⟩ }⟩ instance : lattice (ideal P) := { sup := (⊔), le_sup_left := λ I J (i ∈ I), by { cases J.nonempty, exact ⟨i, ‹_›, w, ‹_›, le_sup_left⟩ }, le_sup_right := λ I J (j ∈ J), by { cases I.nonempty, exact ⟨w, ‹_›, j, ‹_›, le_sup_right⟩ }, sup_le := λ I J K hIK hJK a ⟨i, hi, j, hj, ha⟩, K.mem_of_le ha $ sup_mem i (mem_of_mem_of_le hi hIK) j (mem_of_mem_of_le hj hJK), inf := (⊓), inf_le_left := λ I J, set.inter_subset_left I J, inf_le_right := λ I J, set.inter_subset_right I J, le_inf := λ I J K, set.subset_inter, .. ideal.partial_order } @[simp] lemma mem_inf : x ∈ I ⊓ J ↔ x ∈ I ∧ x ∈ J := iff.rfl @[simp] lemma mem_sup : x ∈ I ⊔ J ↔ ∃ (i ∈ I) (j ∈ J), x ≤ i ⊔ j := iff.rfl lemma lt_sup_principal_of_not_mem (hx : x ∉ I) : I < I ⊔ principal x := le_sup_left.lt_of_ne $ λ h, hx $ by simpa only [left_eq_sup, principal_le_iff] using h end semilattice_sup_directed section ideal_Inter_nonempty variables [preorder P] [ideal_Inter_nonempty P] @[priority 100] instance ideal_Inter_nonempty.to_directed_ge : is_directed P (swap (≤)) := ⟨λ a b, begin obtain ⟨c, hc⟩ : ∃ a, ∀ I : ideal P, a ∈ I := ideal_Inter_nonempty.exists_all_mem, exact ⟨c, hc (principal a), hc (principal b)⟩, end⟩ variables {α β γ : Type*} {ι : Sort*} lemma ideal_Inter_nonempty.all_Inter_nonempty {f : ι → ideal P} : (⋂ x, (f x : set P)).nonempty := begin obtain ⟨a, ha⟩ : ∃ a : P, ∀ I : ideal P, a ∈ I := ideal_Inter_nonempty.exists_all_mem, exact ⟨a, by simp [ha]⟩ end lemma ideal_Inter_nonempty.all_bInter_nonempty {f : α → ideal P} {s : set α} : (⋂ x ∈ s, (f x : set P)).nonempty := begin obtain ⟨a, ha⟩ : ∃ a : P, ∀ I : ideal P, a ∈ I := ideal_Inter_nonempty.exists_all_mem, exact ⟨a, by simp [ha]⟩ end end ideal_Inter_nonempty section semilattice_sup_ideal_Inter_nonempty variables [semilattice_sup P] [ideal_Inter_nonempty P] {x : P} {I J K : ideal P} instance : has_Inf (ideal P) := { Inf := λ s, { carrier := ⋂ (I ∈ s), (I : set P), nonempty := ideal_Inter_nonempty.all_bInter_nonempty, directed := λ x hx y hy, ⟨x ⊔ y, ⟨λ S ⟨I, hS⟩, begin simp only [←hS, sup_mem_iff, mem_coe, set.mem_Inter], intro hI, rw set.mem_Inter₂ at *, exact ⟨hx _ hI, hy _ hI⟩ end, le_sup_left, le_sup_right⟩⟩, mem_of_le := λ x y hxy hy, begin rw set.mem_Inter₂ at *, exact λ I hI, mem_of_le I ‹_› (hy I hI) end } } variables {s : set (ideal P)} @[simp] lemma mem_Inf : x ∈ Inf s ↔ ∀ I ∈ s, x ∈ I := by { change x ∈ (⋂ (I ∈ s), (I : set P)) ↔ ∀ I ∈ s, x ∈ I, simp } @[simp] lemma coe_Inf : ↑(Inf s) = ⋂ (I ∈ s), (I : set P) := rfl lemma Inf_le (hI : I ∈ s) : Inf s ≤ I := λ _ hx, hx I ⟨I, by simp [hI]⟩ lemma le_Inf (h : ∀ J ∈ s, I ≤ J) : I ≤ Inf s := λ _ _, by { simp only [mem_coe, coe_Inf, set.mem_Inter], tauto } lemma is_glb_Inf : is_glb s (Inf s) := ⟨λ _, Inf_le, λ _, le_Inf⟩ instance : complete_lattice (ideal P) := { ..ideal.lattice, ..complete_lattice_of_Inf (ideal P) (λ _, @is_glb_Inf _ _ _ _) } end semilattice_sup_ideal_Inter_nonempty section distrib_lattice variables [distrib_lattice P] variables {I J : ideal P} lemma eq_sup_of_le_sup {x i j: P} (hi : i ∈ I) (hj : j ∈ J) (hx : x ≤ i ⊔ j) : ∃ (i' ∈ I) (j' ∈ J), x = i' ⊔ j' := begin refine ⟨x ⊓ i, I.mem_of_le inf_le_right hi, x ⊓ j, J.mem_of_le inf_le_right hj, _⟩, calc x = x ⊓ (i ⊔ j) : left_eq_inf.mpr hx ... 
= (x ⊓ i) ⊔ (x ⊓ j) : inf_sup_left, end lemma coe_sup_eq : ↑(I ⊔ J) = {x | ∃ i ∈ I, ∃ j ∈ J, x = i ⊔ j} := begin ext, rw [mem_coe, mem_sup], exact ⟨λ ⟨_, _, _, _, _⟩, eq_sup_of_le_sup ‹_› ‹_› ‹_›, λ ⟨i, _, j, _, _⟩, ⟨i, ‹_›, j, ‹_›, le_of_eq ‹_›⟩⟩ end end distrib_lattice section boolean_algebra variables [boolean_algebra P] {x : P} {I : ideal P} lemma is_proper.not_mem_of_compl_mem (hI : is_proper I) (hxc : xᶜ ∈ I) : x ∉ I := begin intro hx, apply hI.top_not_mem, have ht : x ⊔ xᶜ ∈ I := sup_mem _ ‹_› _ ‹_›, rwa sup_compl_eq_top at ht, end lemma is_proper.not_mem_or_compl_not_mem (hI : is_proper I) : x ∉ I ∨ xᶜ ∉ I := have h : xᶜ ∈ I → x ∉ I := hI.not_mem_of_compl_mem, by tauto end boolean_algebra end ideal /-- For a preorder `P`, `cofinal P` is the type of subsets of `P` containing arbitrarily large elements. They are the dense sets in the topology whose open sets are terminal segments. -/ structure cofinal (P) [preorder P] := (carrier : set P) (mem_gt : ∀ x : P, ∃ y ∈ carrier, x ≤ y) namespace cofinal variables [preorder P] instance : inhabited (cofinal P) := ⟨{ carrier := set.univ, mem_gt := λ x, ⟨x, trivial, le_rfl⟩ }⟩ instance : has_mem P (cofinal P) := ⟨λ x D, x ∈ D.carrier⟩ variables (D : cofinal P) (x : P) /-- A (noncomputable) element of a cofinal set lying above a given element. -/ noncomputable def above : P := classical.some $ D.mem_gt x lemma above_mem : D.above x ∈ D := exists.elim (classical.some_spec $ D.mem_gt x) $ λ a _, a lemma le_above : x ≤ D.above x := exists.elim (classical.some_spec $ D.mem_gt x) $ λ _ b, b end cofinal section ideal_of_cofinals variables [preorder P] (p : P) {ι : Type*} [encodable ι] (𝒟 : ι → cofinal P) /-- Given a starting point, and a countable family of cofinal sets, this is an increasing sequence that intersects each cofinal set. -/ noncomputable def sequence_of_cofinals : ℕ → P | 0 := p | (n+1) := match encodable.decode ι n with | none := sequence_of_cofinals n | some i := (𝒟 i).above (sequence_of_cofinals n) end lemma sequence_of_cofinals.monotone : monotone (sequence_of_cofinals p 𝒟) := by { apply monotone_nat_of_le_succ, intros n, dunfold sequence_of_cofinals, cases encodable.decode ι n, { refl }, { apply cofinal.le_above }, } lemma sequence_of_cofinals.encode_mem (i : ι) : sequence_of_cofinals p 𝒟 (encodable.encode i + 1) ∈ 𝒟 i := by { dunfold sequence_of_cofinals, rw encodable.encodek, apply cofinal.above_mem, } /-- Given an element `p : P` and a family `𝒟` of cofinal subsets of a preorder `P`, indexed by a countable type, `ideal_of_cofinals p 𝒟` is an ideal in `P` which - contains `p`, according to `mem_ideal_of_cofinals p 𝒟`, and - intersects every set in `𝒟`, according to `cofinal_meets_ideal_of_cofinals p 𝒟`. This proves the Rasiowa–Sikorski lemma. -/ def ideal_of_cofinals : ideal P := { carrier := { x : P | ∃ n, x ≤ sequence_of_cofinals p 𝒟 n }, nonempty := ⟨p, 0, le_rfl⟩, directed := λ x ⟨n, hn⟩ y ⟨m, hm⟩, ⟨_, ⟨max n m, le_rfl⟩, le_trans hn $ sequence_of_cofinals.monotone p 𝒟 (le_max_left _ _), le_trans hm $ sequence_of_cofinals.monotone p 𝒟 (le_max_right _ _) ⟩, mem_of_le := λ x y hxy ⟨n, hn⟩, ⟨n, le_trans hxy hn⟩, } lemma mem_ideal_of_cofinals : p ∈ ideal_of_cofinals p 𝒟 := ⟨0, le_rfl⟩ /-- `ideal_of_cofinals p 𝒟` is `𝒟`-generic. -/ lemma cofinal_meets_ideal_of_cofinals (i : ι) : ∃ x : P, x ∈ 𝒟 i ∧ x ∈ ideal_of_cofinals p 𝒟 := ⟨_, sequence_of_cofinals.encode_mem p 𝒟 i, _, le_rfl⟩ end ideal_of_cofinals end order
REBOL [
	System: "REBOL [R3] Language Interpreter and Run-time Environment"
	Title: "Extension datatypes"
	Rights: {
		Copyright 2012 REBOL Technologies
		REBOL is a trademark of REBOL Technologies
	}
	License: {
		Licensed under the Apache License, Version 2.0
		See: http://www.apache.org/licenses/LICENSE-2.0
	}
	Purpose: {
		Used to build C enums and definitions for extensions.
	}
]

end         0   0
unset       *   null
none        *   null
handle      *   ptr
logic       4   32
integer     *   64
decimal     *   64
percent     *   64
char        10  32
pair        *   64
tuple       *   64
time        *   64
date        *   date
word        16  sym
set-word    *   sym
get-word    *   sym
lit-word    *   sym
refinement  *   sym
issue       *   sym
string      24  ser
file        *   ser
email       *   ser
url         *   ser
tag         *   ser
block       32  ser
paren       *   ser
path        *   ser
set-path    *   ser
get-path    *   ser
lit-path    *   ser
binary      40  ser
bitset      *   ser
vector      *   ser
image       *   image
gob         47  ser
object      48  ptr
module      *   ptr
# Simplicial constructions for a proof of the Hurewicz theorem `is_element/K/hurewicz` := (A::set) -> (d::posint) -> proc(x) if not(`is_element/prime_simplex`(A)(x)) then return false; fi; if nops(select(a -> x[a]>0,A)) > d then return false; fi; return true; end: `random_element/K/hurewicz` := (A::set) -> (d::posint) -> proc() local k,i,B,x,a; k := rand(1..min(nops(A),d))(); B := {}; for i from 1 to k do B := {op(B),random_element_of(A minus B)}; od: x := `random_element/prime_simplex`(B)(); for a in A minus B do x[a] := 0; od; return eval(x); end: `list_elements/K_hurewicz` := NULL: `count_elements/K_hurewicz` := NULL: ###################################################################### `is_element/L/hurewicz` := (A::set) -> (d::posint) -> proc(tx) local t,x; if not(type(tx,list)) and nops(tx) = 2 then return false; fi; t,x := op(tx); if not(type(t,realcons) and is(t >= 0) and is(t <= 1)) then return false; fi; if not(`is_element/prime_simplex`(A)(x)) then return false; fi; if t = 1 then return true; fi; if `is_element/K/hurewicz`(A)(d)(x) then return true; fi; return false; end: `random_element/L/hurewicz` := (A::set) -> (d::posint) -> proc() if rand(1..2)() = 1 then return [1,`random_element/prime_simplex`(A)()]; else return [rand(0..12)()/12,`random_element/K/hurewicz`(A)(d)()]; fi; end: `list_elements/L_hurewicz` := NULL: `count_elements/L_hurewicz` := NULL: ###################################################################### `u1/hurewicz` := (A::set) -> (d::posint) -> proc(x) local JJ,J,j; if nops(A) <= d then return 0; fi; JJ := combinat[choose](A,nops(A) - d); return min(seq(max(seq(x[j],j in J)),J in JJ)); end: `w1/hurewicz` := (A::set) -> (d::posint) -> proc(tx) local t,x,u1; t,x := op(tx); u1 := `u1/hurewicz`(A)(d)(x); return min(u1+(1+t)/2,1); end: `f1/hurewicz` := (A::set) -> (d::posint) -> proc(tx) local t,x,y,a; t,x := op(tx); y := table(): for a in A do y[a] := max(0,x[a]-(1-t)/2,2*x[a]-1); od: return eval(y); end: `h1/hurewicz` := (A::set) -> (d::posint) -> proc(tx) return [`w1/hurewicz`(A)(d)(tx),`f1/hurewicz`(A)(d)(tx)]; end:
Everyone likes to be liked. This is a simple, human truth: As mammals, we are driven by a desire to bond with others, feel accepted, and be acknowledged for a job well done. In fact, research from the University of Iowa shows that feeling accepted is more motivating than even financial incentives and improves our productivity and performance.

But there’s a difference between wanting and needing others’ approval. If, for example, you find yourself obsessing over the number of “likes” you get on social media or forever waiting for your supervisor or coworkers to notice that you’ve done something well, you might be caught in a toxic cycle of approval addiction.

Approval addiction is a constant need for acceptance and approval that drives your daily actions to the detriment of your own needs, says Lynn Taylor, author of Tame Your Terrible Office Tyrant: How to Manage Childish Boss Behavior & Thrive in Your Job.

Approval addicts usually wind up worn out and disappointed, adds psychologist Linda Sapadin, author of Master Your Fears: How to Triumph Over Your Worries and Get On With Your Life. They spend so much energy seeking positive feedback that they have less energy to take care of themselves or work toward their meaningful goals. And, Taylor says, constantly seeking approval can cause people to become workaholics, suffer health problems, and make choices that result in less-than-ideal consequences.

The first step is to build awareness and adjust your priorities accordingly. To begin, ask yourself some tough questions. What are your primary goals? What do you want to do? Then evaluate how you spend your time: Are you neglecting your dreams by doing too much for others? With this awareness, you can deliberately choose activities that support your own needs, Taylor says, and step out of the people-pleasing cycle.

Use the insights gleaned from your personal audit to establish some ground rules and boundaries for yourself. Spend more time on the things that leave you feeling positive and focused on your dreams, needs, and values. Say “yes” to the tasks and activities that align with those goals—and don’t be afraid to say, “No, but thanks for thinking of me,” to those that don’t, Sapadin adds. These guidelines will help you manage your time and energy in a way that leaves plenty left over to help others if you choose.

“If your objective is to set your own standards and appreciate your accomplishments and self-worth, you can be 100 percent successful,” Sapadin says. “You can set realistic goals and monitor them based on your own criteria.” And that goes a long way to building not only a sense of self-worth, but a sense of self-efficacy (the belief that you are capable of solving a task or problem), resilience, and even purpose. You can have that right now—and you don’t need anyone’s approval to get it.

Polly Campbell has been speaking and writing about psychology, resilience, and wellness topics for more than 20 years. She is the author of three books: How to Live an Awesome Life: How to Live Well. Do Good. Be Happy; Imperfect Spirituality: Extraordinary Enlightenment for Ordinary People; and How to Reach Enlightenment. Tweet her @PLCampbell.
[STATEMENT]
lemma rep_in_Hom:
assumes "arr f"
shows "rep f \<in> Hom (DOM f) (COD f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. rep f \<in> Hom (DOM f) (COD f)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
arr f

goal (1 subgoal):
 1. rep f \<in> Hom (DOM f) (COD f)
[PROOF STEP]
by simp
Formal statement is: lemma LIMSEQ_inverse_real_of_nat_add_minus: "(\<lambda>n. r + -inverse (real (Suc n))) \<longlonglongrightarrow> r" Informal statement is: The sequence $r - \frac{1}{n+1}$ converges to $r$.
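For readers who want to see why the informal statement holds, here is the standard epsilon-N argument sketched in LaTeX; it is only an illustration of the claim above and is not part of the original formalisation.

```latex
Let $\varepsilon > 0$ and choose $N \in \mathbb{N}$ with $N > 1/\varepsilon$.
Then for every $n \ge N$,
\[
  \left| \left(r - \tfrac{1}{n+1}\right) - r \right|
  = \frac{1}{n+1} \le \frac{1}{N+1} < \varepsilon ,
\]
so the sequence $\left(r - \tfrac{1}{n+1}\right)_{n \in \mathbb{N}}$ converges to $r$.
```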
function OutlineExtractor(arg0::Vector3D, arg1::Vector3D)
    return OutlineExtractor((Vector3D, Vector3D), arg0, arg1)
end

function get_outline(obj::OutlineExtractor, arg0::PolyhedronsSet)
    return jcall(obj, "getOutline", Vector{Vector{Vector2D}}, (PolyhedronsSet,), arg0)
end
@testset "Aggregates" begin
    d = [Dict(:a => 2), Dict(:a => 4), Dict(:a => 6)]

    s = Dict(:sum => Sum(:a), :count => Count(), :avg => Avg(:a))

    for x in d
        fit!(s, x)
    end

    @test value(s[:avg]) == 4.0
    @test value(s[:count]) == 3
    @test value(s[:sum]) == 12.0
end
(* * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only *) theory ArchInterrupt_AI imports "../Interrupt_AI" begin context Arch begin global_naming RISCV64 primrec arch_irq_control_inv_valid_real :: "arch_irq_control_invocation \<Rightarrow> 'a::state_ext state \<Rightarrow> bool" where "arch_irq_control_inv_valid_real (RISCVIRQControlInvocation irq dest_slot src_slot trigger) = (cte_wp_at ((=) cap.NullCap) dest_slot and cte_wp_at ((=) cap.IRQControlCap) src_slot and ex_cte_cap_wp_to is_cnode_cap dest_slot and real_cte_at dest_slot and K (irq \<le> maxIRQ \<and> irq \<noteq> irqInvalid))" defs arch_irq_control_inv_valid_def: "arch_irq_control_inv_valid \<equiv> arch_irq_control_inv_valid_real" named_theorems Interrupt_AI_asms lemma (* decode_irq_control_invocation_inv *)[Interrupt_AI_asms]: "\<lbrace>P\<rbrace> decode_irq_control_invocation label args slot caps \<lbrace>\<lambda>rv. P\<rbrace>" apply (simp add: decode_irq_control_invocation_def Let_def arch_check_irq_def arch_decode_irq_control_invocation_def whenE_def, safe) apply (wp | simp)+ done lemma decode_irq_control_valid [Interrupt_AI_asms]: "\<lbrace>\<lambda>s. invs s \<and> (\<forall>cap \<in> set caps. s \<turnstile> cap) \<and> (\<forall>cap \<in> set caps. is_cnode_cap cap \<longrightarrow> (\<forall>r \<in> cte_refs cap (interrupt_irq_node s). ex_cte_cap_wp_to is_cnode_cap r s)) \<and> cte_wp_at ((=) cap.IRQControlCap) slot s\<rbrace> decode_irq_control_invocation label args slot caps \<lbrace>irq_control_inv_valid\<rbrace>,-" apply (simp add: decode_irq_control_invocation_def Let_def split_def whenE_def arch_check_irq_def arch_decode_irq_control_invocation_def split del: if_split cong: if_cong) apply (wpsimp wp: ensure_empty_stronger simp: cte_wp_at_eq_simp arch_irq_control_inv_valid_def | wp (once) hoare_drop_imps)+ apply (clarsimp simp: linorder_not_less word_le_nat_alt unat_ucast maxIRQ_def) apply (cases caps; clarsimp simp: cte_wp_at_eq_simp) apply (intro conjI impI; clarsimp) apply (drule ucast_ucast_mask_eq) apply (subst and_mask_eq_iff_le_mask) apply (simp add: mask_def word_le_nat_alt) apply fast done lemma get_irq_slot_different_ARCH[Interrupt_AI_asms]: "\<lbrace>\<lambda>s. valid_global_refs s \<and> ex_cte_cap_wp_to is_cnode_cap ptr s\<rbrace> get_irq_slot irq \<lbrace>\<lambda>rv s. rv \<noteq> ptr\<rbrace>" apply (simp add: get_irq_slot_def) apply wp apply (clarsimp simp: valid_global_refs_def valid_refs_def ex_cte_cap_wp_to_def) apply (elim allE, erule notE, erule cte_wp_at_weakenE) apply (clarsimp simp: global_refs_def is_cap_simps cap_range_def) done lemma is_derived_use_interrupt_ARCH[Interrupt_AI_asms]: "(is_ntfn_cap cap \<and> interrupt_derived cap cap') \<longrightarrow> (is_derived m p cap cap')" apply (clarsimp simp: is_cap_simps) apply (clarsimp simp: interrupt_derived_def is_derived_def) apply (clarsimp simp: cap_master_cap_def split: cap.split_asm) apply (simp add: is_cap_simps is_pt_cap_def vs_cap_ref_def) done lemma maskInterrupt_invs_ARCH[Interrupt_AI_asms]: "\<lbrace>invs and (\<lambda>s. \<not>b \<longrightarrow> interrupt_states s irq \<noteq> IRQInactive)\<rbrace> do_machine_op (maskInterrupt b irq) \<lbrace>\<lambda>rv. 
invs\<rbrace>" apply (simp add: do_machine_op_def split_def maskInterrupt_def) apply wp apply (clarsimp simp: in_monad invs_def valid_state_def all_invs_but_valid_irq_states_for_def valid_irq_states_but_def valid_irq_masks_but_def valid_machine_state_def cur_tcb_def valid_irq_states_def valid_irq_masks_def) done lemma no_cap_to_obj_with_diff_IRQHandler_ARCH[Interrupt_AI_asms]: "no_cap_to_obj_with_diff_ref (IRQHandlerCap irq) S = \<top>" by (rule ext, simp add: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state obj_ref_none_no_asid) lemma (* set_irq_state_valid_cap *)[Interrupt_AI_asms]: "\<lbrace>valid_cap cap\<rbrace> set_irq_state IRQSignal irq \<lbrace>\<lambda>rv. valid_cap cap\<rbrace>" apply (clarsimp simp: set_irq_state_def) apply (wp do_machine_op_valid_cap) apply (auto simp: valid_cap_def valid_untyped_def split: cap.splits option.splits arch_cap.splits split del: if_split) done crunch valid_global_refs[Interrupt_AI_asms]: set_irq_state "valid_global_refs" lemma invoke_irq_handler_invs'[Interrupt_AI_asms]: assumes dmo_ex_inv[wp]: "\<And>f. \<lbrace>invs and ex_inv\<rbrace> do_machine_op f \<lbrace>\<lambda>rv::unit. ex_inv\<rbrace>" assumes cap_insert_ex_inv[wp]: "\<And>cap src dest. \<lbrace>ex_inv and invs and K (src \<noteq> dest)\<rbrace> cap_insert cap src dest \<lbrace>\<lambda>_.ex_inv\<rbrace>" assumes cap_delete_one_ex_inv[wp]: "\<And>cap. \<lbrace>ex_inv and invs\<rbrace> cap_delete_one cap \<lbrace>\<lambda>_.ex_inv\<rbrace>" shows "\<lbrace>invs and ex_inv and irq_handler_inv_valid i\<rbrace> invoke_irq_handler i \<lbrace>\<lambda>rv s. invs s \<and> ex_inv s\<rbrace>" proof - have cap_insert_invs_ex_invs[wp]: "\<And>cap src dest. \<lbrace>ex_inv and (invs and cte_wp_at (\<lambda>c. c = NullCap) dest and valid_cap cap and tcb_cap_valid cap dest and ex_cte_cap_wp_to (appropriate_cte_cap cap) dest and (\<lambda>s. \<forall>r\<in>obj_refs cap. \<forall>p'. dest \<noteq> p' \<and> cte_wp_at (\<lambda>cap'. r \<in> obj_refs cap') p' s \<longrightarrow> cte_wp_at (Not \<circ> is_zombie) p' s \<and> \<not> is_zombie cap) and (\<lambda>s. cte_wp_at (is_derived (cdt s) src cap) src s) and (\<lambda>s. cte_wp_at (\<lambda>cap'. \<forall>irq\<in>cap_irqs cap - cap_irqs cap'. irq_issued irq s) src s) and (\<lambda>s. \<forall>t R. cap = ReplyCap t False R \<longrightarrow> st_tcb_at awaiting_reply t s \<and> \<not> has_reply_cap t s) and K (\<not> is_master_reply_cap cap))\<rbrace> cap_insert cap src dest \<lbrace>\<lambda>rv s. invs s \<and> ex_inv s\<rbrace>" apply wp apply (auto simp: cte_wp_at_caps_of_state) done show ?thesis apply (cases i, simp_all) apply (wp maskInterrupt_invs_ARCH) apply simp+ apply (rename_tac irq cap prod) apply (rule hoare_pre) apply (wp valid_cap_typ [OF cap_delete_one_typ_at]) apply (strengthen real_cte_tcb_valid) apply (wp real_cte_at_typ_valid [OF cap_delete_one_typ_at]) apply (rule_tac Q="\<lambda>rv s. 
is_ntfn_cap cap \<and> invs s \<and> cte_wp_at (is_derived (cdt s) prod cap) prod s" in hoare_post_imp) apply (clarsimp simp: is_cap_simps is_derived_def cte_wp_at_caps_of_state) apply (simp split: if_split_asm) apply (simp add: cap_master_cap_def split: cap.split_asm) apply (drule cte_wp_valid_cap [OF caps_of_state_cteD] | clarsimp)+ apply (clarsimp simp: cap_master_cap_simps valid_cap_def obj_at_def is_ntfn is_tcb is_cap_table split: option.split_asm dest!:cap_master_cap_eqDs) apply (wp cap_delete_one_still_derived) apply simp apply (wp get_irq_slot_ex_cte get_irq_slot_different_ARCH hoare_drop_imps) apply (clarsimp simp: valid_state_def invs_def appropriate_cte_cap_def is_cap_simps) apply (erule cte_wp_at_weakenE, simp add: is_derived_use_interrupt_ARCH) apply (wp| simp add: )+ done qed lemma (* invoke_irq_control_invs *) [Interrupt_AI_asms]: "\<lbrace>invs and irq_control_inv_valid i\<rbrace> invoke_irq_control i \<lbrace>\<lambda>rv. invs\<rbrace>" apply (cases i, simp_all) apply (wp cap_insert_simple_invs | simp add: IRQHandler_valid is_cap_simps no_cap_to_obj_with_diff_IRQHandler_ARCH | strengthen real_cte_tcb_valid)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_simple_cap_def is_cap_simps is_pt_cap_def safe_parent_for_def is_simple_cap_arch_def ex_cte_cap_to_cnode_always_appropriate_strg) apply (rename_tac irq_control, case_tac irq_control) apply (simp add: arch_irq_control_inv_valid_def) apply (wp cap_insert_simple_invs | simp add: IRQHandler_valid is_cap_simps no_cap_to_obj_with_diff_IRQHandler_ARCH | strengthen real_cte_tcb_valid)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_simple_cap_def is_simple_cap_arch_def is_cap_simps is_pt_cap_def safe_parent_for_def ex_cte_cap_to_cnode_always_appropriate_strg) done crunch device_state_inv[wp]: resetTimer "\<lambda>ms. P (device_state ms)" lemma resetTimer_invs_ARCH[Interrupt_AI_asms]: "\<lbrace>invs\<rbrace> do_machine_op resetTimer \<lbrace>\<lambda>_. invs\<rbrace>" apply (wp dmo_invs) apply safe apply (drule_tac Q="%_ b. underlying_memory b p = underlying_memory m p" in use_valid) apply (simp add: resetTimer_def machine_op_lift_def machine_rest_lift_def split_def) apply wp apply (clarsimp+)[2] apply(erule use_valid, wp no_irq_resetTimer no_irq, assumption) done lemma empty_fail_ackInterrupt_ARCH[Interrupt_AI_asms]: "empty_fail (ackInterrupt irq)" by (wp | simp add: ackInterrupt_def)+ lemma empty_fail_maskInterrupt_ARCH[Interrupt_AI_asms]: "empty_fail (maskInterrupt f irq)" by (wp | simp add: maskInterrupt_def)+ lemma dmo_st_tcb_cur[wp]: "\<lbrace>\<lambda>s. st_tcb_at P (cur_thread s) s\<rbrace> do_machine_op f \<lbrace>\<lambda>rv s. st_tcb_at P (cur_thread s) s\<rbrace>" by (rule hoare_lift_Pf[where f=cur_thread]; wp) lemma dmo_ex_nonz_cap_to[wp]: "\<lbrace>\<lambda>s. ex_nonz_cap_to (cur_thread s) s\<rbrace> do_machine_op f \<lbrace>\<lambda>rv s. ex_nonz_cap_to (cur_thread s) s\<rbrace>" by (rule hoare_lift_Pf[where f=cur_thread]; wp) lemma conj_imp_strg: "P \<Longrightarrow> (A \<longrightarrow> P) \<and> (B \<longrightarrow> P)" by simp lemma runnable_eq: "runnable st = (st = Running \<or> st = Restart)" by (cases st; simp) lemma halted_eq: "halted st = (st = Inactive \<or> st = IdleThreadState)" by (cases st; simp) lemma handle_reserved_irq_invs[wp]: "\<lbrace>invs\<rbrace> handle_reserved_irq irq \<lbrace>\<lambda>_. 
invs\<rbrace>" unfolding handle_reserved_irq_def by (wpsimp simp: non_kernel_IRQs_def) lemma (* handle_interrupt_invs *) [Interrupt_AI_asms]: "\<lbrace>invs\<rbrace> handle_interrupt irq \<lbrace>\<lambda>_. invs\<rbrace>" apply (simp add: handle_interrupt_def) apply (rule conjI; rule impI) apply (simp add: do_machine_op_bind empty_fail_ackInterrupt_ARCH empty_fail_maskInterrupt_ARCH) apply (wp dmo_maskInterrupt_invs maskInterrupt_invs_ARCH dmo_ackInterrupt send_signal_interrupt_states | wpc | simp)+ apply (wp get_cap_wp send_signal_interrupt_states ) apply (rule_tac Q="\<lambda>rv. invs and (\<lambda>s. st = interrupt_states s irq)" in hoare_post_imp) apply (clarsimp simp: ex_nonz_cap_to_def invs_valid_objs) apply (intro allI exI, erule cte_wp_at_weakenE) apply (clarsimp simp: is_cap_simps) apply (wpsimp wp: hoare_drop_imps resetTimer_invs_ARCH simp: get_irq_state_def | rule conjI)+ done lemma sts_arch_irq_control_inv_valid[wp, Interrupt_AI_asms]: "\<lbrace>arch_irq_control_inv_valid i\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. arch_irq_control_inv_valid i\<rbrace>" apply (simp add: arch_irq_control_inv_valid_def) apply (cases i, simp) apply (wpsimp wp: ex_cte_cap_to_pres simp: cap_table_at_typ) done end interpretation Interrupt_AI?: Interrupt_AI proof goal_cases interpret Arch . case 1 show ?case by (intro_locales; (unfold_locales, simp_all add: Interrupt_AI_asms)?) qed end
lemma lmeasurable_cbox [iff]: "cbox a b \<in> lmeasurable" and lmeasurable_box [iff]: "box a b \<in> lmeasurable"
lemma LIMSEQ_offset: "(\<lambda>n. f (n + k)) \<longlonglongrightarrow> a \<Longrightarrow> f \<longlonglongrightarrow> a"
-- (Pre)additive categories {-# OPTIONS --safe #-} module Cubical.Categories.Additive.Base where open import Cubical.Algebra.AbGroup.Base open import Cubical.Categories.Category.Base open import Cubical.Categories.Limits.Initial open import Cubical.Categories.Limits.Terminal open import Cubical.Foundations.Prelude private variable ℓ ℓ' : Level -- Preadditive categories module _ (C : Category ℓ ℓ') where open Category C record PreaddCategoryStr : Type (ℓ-max ℓ (ℓ-suc ℓ')) where field homAbStr : (x y : ob) → AbGroupStr Hom[ x , y ] -- Polymorphic abelian group operations 0h = λ {x} {y} → AbGroupStr.0g (homAbStr x y) -_ = λ {x} {y} → AbGroupStr.-_ (homAbStr x y) _+_ = λ {x} {y} → AbGroupStr._+_ (homAbStr x y) _-_ : ∀ {x y} (f g : Hom[ x , y ]) → Hom[ x , y ] f - g = f + (- g) infixr 7 _+_ infixl 7.5 _-_ infix 8 -_ field ⋆distl+ : {x y z : ob} → (f : Hom[ x , y ]) → (g g' : Hom[ y , z ]) → f ⋆ (g + g') ≡ (f ⋆ g) + (f ⋆ g') ⋆distr+ : {x y z : ob} → (f f' : Hom[ x , y ]) → (g : Hom[ y , z ]) → (f + f') ⋆ g ≡ (f ⋆ g) + (f' ⋆ g) record PreaddCategory (ℓ ℓ' : Level) : Type (ℓ-suc (ℓ-max ℓ ℓ')) where field cat : Category ℓ ℓ' preadd : PreaddCategoryStr cat open Category cat public open PreaddCategoryStr preadd public -- Additive categories module _ (C : PreaddCategory ℓ ℓ') where open PreaddCategory C -- Zero object record ZeroObject : Type (ℓ-max ℓ ℓ') where field z : ob zInit : isInitial cat z zTerm : isTerminal cat z -- Biproducts record IsBiproduct {x y x⊕y : ob} (i₁ : Hom[ x , x⊕y ]) (i₂ : Hom[ y , x⊕y ]) (π₁ : Hom[ x⊕y , x ]) (π₂ : Hom[ x⊕y , y ]) : Type (ℓ-max ℓ ℓ') where field i₁⋆π₁ : i₁ ⋆ π₁ ≡ id i₁⋆π₂ : i₁ ⋆ π₂ ≡ 0h i₂⋆π₁ : i₂ ⋆ π₁ ≡ 0h i₂⋆π₂ : i₂ ⋆ π₂ ≡ id ∑π⋆i : π₁ ⋆ i₁ + π₂ ⋆ i₂ ≡ id record Biproduct (x y : ob) : Type (ℓ-max ℓ ℓ') where field x⊕y : ob i₁ : Hom[ x , x⊕y ] i₂ : Hom[ y , x⊕y ] π₁ : Hom[ x⊕y , x ] π₂ : Hom[ x⊕y , y ] isBipr : IsBiproduct i₁ i₂ π₁ π₂ open IsBiproduct isBipr public -- Additive categories record AdditiveCategoryStr : Type (ℓ-max ℓ (ℓ-suc ℓ')) where field zero : ZeroObject biprod : ∀ x y → Biproduct x y -- Biproduct notation open Biproduct _⊕_ = λ (x y : ob) → biprod x y .x⊕y infixr 6 _⊕_ record AdditiveCategory (ℓ ℓ' : Level) : Type (ℓ-suc (ℓ-max ℓ ℓ')) where field preaddcat : PreaddCategory ℓ ℓ' addit : AdditiveCategoryStr preaddcat open PreaddCategory preaddcat public open AdditiveCategoryStr addit public
import cv2
import numpy as np
import time

captured_video = cv2.VideoCapture(0)

currentFrame = 0

# Define the codec and create VideoWriter object
# uncomment to save output as video:
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('output.avi',fourcc, 20, (640,480))

time.sleep(3)

background = 0
for i in range(25):
    returned_val, background = captured_video.read()
background = np.flip(background, axis=1)

while captured_video.isOpened():
    returned_val, image = captured_video.read()
    if not returned_val:
        break
    image = np.flip(image, axis=1)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # hsv for green colour
    lower_color = np.array([60, 60, 60])
    upper_color = np.array([75, 255, 255])
    mask_1 = cv2.inRange(hsv, lower_color, upper_color)

    # hsv for green colour
    lower_color = np.array([75, 60, 60])
    upper_color = np.array([90, 255, 255])
    mask_2 = cv2.inRange(hsv, lower_color, upper_color)

    mask_1 += mask_2

    mask_1 = cv2.morphologyEx(mask_1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8), iterations=2)
    mask_1 = cv2.dilate(mask_1, np.ones((3, 3), np.uint8), iterations=1)
    mask_2 = cv2.bitwise_not(mask_1)

    result_1 = cv2.bitwise_and(background, background, mask=mask_1)
    result_2 = cv2.bitwise_and(image, image, mask=mask_2)

    output = cv2.addWeighted(result_1, 1, result_2, 1, 0)

    # Uncomment to save output as video:
    # out.write(output)

    cv2.imshow('Make Invisible', output)
    key = cv2.waitKey(10)
    if key == 27:
        break

cv2.destroyAllWindows()
captured_video.release()
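The two cv2.inRange calls above jointly select hue values from 60 to 90, the green band of OpenCV's 0-179 hue scale, and the morphological opening plus dilation clean up the mask before it is used to paste the background over the cloak. As a small hypothetical variation (the helper name cloak_mask and the red-cloak bounds below are illustrative assumptions, not part of the original script), the colour selection could be factored out so that switching to a different cloak colour only requires new HSV bounds:

import cv2
import numpy as np


def cloak_mask(hsv, bounds):
    """Combine one binary mask per (lower, upper) HSV bound pair, then clean it up."""
    mask = np.zeros(hsv.shape[:2], dtype=np.uint8)
    for lower, upper in bounds:
        mask |= cv2.inRange(hsv, np.array(lower), np.array(upper))
    kernel = np.ones((3, 3), np.uint8)
    # Same clean-up as in the script above: opening, then dilation.
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
    return cv2.dilate(mask, kernel, iterations=1)


# Example: a red cloak needs two hue bands because red wraps around hue 0/179.
red_bounds = [([0, 120, 70], [10, 255, 255]), ([170, 120, 70], [179, 255, 255])]
# mask_1 = cloak_mask(hsv, red_bounds)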
State Before: α : Type u_1 β : Type ?u.330764 γ : Type ?u.330767 f : α → ℝ≥0 hf : Summable f i : α ⊢ (∑' (x : α), f x) = f i + ∑' (x : α), if x = i then 0 else f x State After: α : Type u_1 β : Type ?u.330764 γ : Type ?u.330767 f : α → ℝ≥0 hf : Summable f i i' : α ⊢ update (fun x => f x) i 0 i' ≤ f i' Tactic: refine' tsum_eq_add_tsum_ite' i (NNReal.summable_of_le (fun i' => _) hf) State Before: α : Type u_1 β : Type ?u.330764 γ : Type ?u.330767 f : α → ℝ≥0 hf : Summable f i i' : α ⊢ update (fun x => f x) i 0 i' ≤ f i' State After: α : Type u_1 β : Type ?u.330764 γ : Type ?u.330767 f : α → ℝ≥0 hf : Summable f i i' : α ⊢ (if i' = i then 0 else f i') ≤ f i' Tactic: rw [Function.update_apply] State Before: α : Type u_1 β : Type ?u.330764 γ : Type ?u.330767 f : α → ℝ≥0 hf : Summable f i i' : α ⊢ (if i' = i then 0 else f i') ≤ f i' State After: no goals Tactic: split_ifs <;> simp only [zero_le', le_rfl]
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.

Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.

Lemma conj22synthconj1 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus (plus lv0 lv1) (Succ lv2)) (plus lv1 (plus (Succ lv2) lv0))).
Admitted.

QuickChick conj22synthconj1.
function pde = elli3DcircIntf2(am,ap,bm,bp,r,x0,y0,z0,a11,a12,a) %% USAGE: polynomial solution for Poisson equation % Last Modified: 02/21/2020 by Xu Zhang %% PDE Structure pde = struct('intf',@intf,... 'exactu1',@exactu1,'exactu2',@exactu2,'exactu3',@exactu3,... 'um1',@um1,'um2',@um2,'um3',@um3,'up1',@up1,'up2',@up2,'up3',@up3,... 'Dxu',@Dxu,'Dxum',@Dxum,'Dxup',@Dxup,'Dyu',@Dyu,... 'Dyum',@Dyum,'Dyup',@Dyup,'Dzu',@Dzu,'Dzum',@Dzum,'Dzup',@Dzup,... 'f1',@f1,'f2',@f2,'f3',@f3,... 'fm1',@fm1,'fm2',@fm2,'fm3',@fm3,... 'fp1',@fp1,'fp2',@fp2,'fp3',@fp3,... 'A',@A,'Am',@Am,'Ap',@Ap,'one',@one,... 'B',@B,'Bm',@Bm,'Bp',@Bp); pde.am = am; pde.ap = ap; pde.bm = bm; pde.bp = bp; %% interface function function u = intf(x,y,z) u = ((x-x0).^2 + (y-y0).^2 + (z-z0).^2).^(1/2)/r-1; end %% exact solution function u = exactu1(x,y,z) u = um1(x,y,z); id = intf(x,y,z) > 0; u(id) = up1(x(id),y(id),z(id)); end function u = exactu2(x,y,z) u = um2(x,y,z); id = intf(x,y,z) > 0; u(id) = up2(x(id),y(id),z(id)); end function u = exactu3(x,y,z) u = um3(x,y,z); id = intf(x,y,z) > 0; u(id) = up3(x(id),y(id),z(id)); end function u = um1(x,y,z) u1 = uker1(x,y,z).*(x-x0); u2 = uker2(x,y,z); u = (u1 + u2)/bm; end function u = um2(x,y,z) u1 = uker1(x,y,z).*(y-y0); u2 = uker2(x,y,z); u = (u1 + u2)/bm; end function u = um3(x,y,z) u1 = uker1(x,y,z).*(z-z0); u2 = uker2(x,y,z); u = (u1 + u2)/bm; end function u = up1(x,y,z) u1 = uker1(x,y,z).*(x-x0); u2 = uker2(x,y,z); u = (u1 + u2)/bp; end function u = up2(x,y,z) u1 = uker1(x,y,z).*(y-y0); u2 = uker2(x,y,z); u = (u1 + u2)/bp; end function u = up3(x,y,z) u1 = uker1(x,y,z).*(z-z0); u2 = uker2(x,y,z); u = (u1 + u2)/bp; end function u = uker1(x,y,z) u = nthroot((x-x0).^2 + (y-y0).^2 + (z-z0).^2 - r^2,a11); u = u.^a12; end function u = uker2(x,y,z) u = ((x-x0).^2 + (y-y0).^2 + (z-z0).^2 - r^2).^a; end %% Boundary Function function u = gD1(x,y,z) u = exactu1(x,y,z); end function u = gD2(x,y,z) u = exactu2(x,y,z); end function u = gD3(x,y,z) u = exactu3(x,y,z); end %% Derivative of the exact solution function u = Dxu(x,y,z) u = Dxum(x,y,z); id = intf(x,y,z) > 0; u(id) = Dxup(x(id),y(id),z(id)); end function u = Dyu(x,y,z) u = Dyum(x,y,z); id = intf(x,y,z) > 0; u(id) = Dyup(x(id),y(id),z(id)); end function u = Dzu(x,y,z) u = Dzum(x,y,z); id = intf(x,y,z) > 0; u(id) = Dzup(x(id),y(id),z(id)); end function u = Dxum(x,y,z) u = (Duker(x,y,z).*(y-y0)*2 - Duker(x,y,z).*(z-z0)*2)/bm; end function u = Dyum(x,y,z) u = (Duker(x,y,z).*(z-z0)*2 - Duker(x,y,z).*(x-x0)*2)/bm; end function u = Dzum(x,y,z) u = (Duker(x,y,z).*(x-x0)*2 - Duker(x,y,z).*(y-y0)*2)/bm; end function u = Dxup(x,y,z) u = (Duker(x,y,z).*(y-y0)*2 - Duker(x,y,z).*(z-z0)*2)/bp; end function u = Dyup(x,y,z) u = (Duker(x,y,z).*(z-z0)*2 - Duker(x,y,z).*(x-x0)*2)/bp; end function u = Dzup(x,y,z) u = (Duker(x,y,z).*(x-x0)*2 - Duker(x,y,z).*(y-y0)*2)/bp; end function u = Duker(x,y,z) u = a*((x-x0).^2 + (y-y0).^2 + (z-z0).^2 - r^2).^(a-1); end %% right hand side function function u = f1(x,y,z) u = fm1(x,y,z); id = intf(x,y,z) > 0; u(id) = fp1(x(id),y(id),z(id)); end function u = f2(x,y,z) u = fm2(x,y,z); id = intf(x,y,z) > 0; u(id) = fp2(x(id),y(id),z(id)); end function u = f3(x,y,z) u = fm3(x,y,z); id = intf(x,y,z) > 0; u(id) = fp3(x(id),y(id),z(id)); end function u = fm1(x,y,z) xh = x-x0; yh = y-y0; zh = z-z0; u = (-4*Duker(x,y,z) + DDuker(x,y,z).*(xh.*(yh+zh)-yh.^2-zh.^2)) + bm*um1(x,y,z); end function u = fm2(x,y,z) xh = x-x0; yh = y-y0; zh = z-z0; u = (-4*Duker(x,y,z) + DDuker(x,y,z).*(yh.*(xh+zh)-xh.^2-zh.^2)) + bm*um2(x,y,z); end 
function u = fm3(x,y,z) xh = x-x0; yh = y-y0; zh = z-z0; u = (-4*Duker(x,y,z) + DDuker(x,y,z).*(zh.*(xh+yh)-xh.^2-yh.^2)) + bm*um3(x,y,z); end function u = fp1(x,y,z) xh = x-x0; yh = y-y0; zh = z-z0; u = (-4*Duker(x,y,z) + DDuker(x,y,z).*(xh.*(yh+zh)-yh.^2-zh.^2)) + bp*up1(x,y,z); end function u = fp2(x,y,z) xh = x-x0; yh = y-y0; zh = z-z0; u = (-4*Duker(x,y,z) + DDuker(x,y,z).*(yh.*(xh+zh)-xh.^2-zh.^2)) + bp*up2(x,y,z); end function u = fp3(x,y,z) xh = x-x0; yh = y-y0; zh = z-z0; u = (-4*Duker(x,y,z) + DDuker(x,y,z).*(zh.*(xh+yh)-xh.^2-yh.^2)) + bp*up3(x,y,z); end function u = DDuker(x,y,z) u = a*(a-1)*((x-x0).^2 + (y-y0).^2 + (z-z0).^2 - r^2).^(a-2); end %% Diffusion coefficient function function u = A(x,y,z) u = Am(x,y,z); id = intf(x,y,z) > 0; u(id) = Ap(x(id),y(id),z(id)); end function u = Am(x,y,z) u = am*ones(size(x)); end function u = Ap(x,y,z) u = ap*ones(size(x)); end %% Mass coefficient function function u = B(x,y,z) u = Bm(x,y,z); id = intf(x,y,z) > 0; u(id) = Bp(x(id),y(id),z(id)); end function u = Bm(x,y,z) u = bm*ones(size(x)); end function u = Bp(x,y,z) u = bp*ones(size(x)); end %% Other function function u = one(x,y,z) u = ones(size(x)); end end
The rectangular casemate was pierced with eight narrow gun ports, one each at the bow and stern and three along each side. Each gun port was protected by an armored shutter made of two layers of iron riveted together and allowed the guns to elevate only to a maximum of +5 to +7°. Atlanta was armed with single-banded, 7-inch (178 mm) Brooke rifles on pivot mounts at the bow and stern. The middle gun port on each side was occupied by a single-banded, 6.4-inch (163 mm) Brooke rifle. The 17-caliber, seven-inch guns weighed about 15,000 pounds (6,800 kg) and fired 80-pound (36 kg) armor-piercing "bolts" and 110-pound (50 kg) explosive shells. The equivalent statistics for the 18.5-caliber, 6.4-inch gun were 9,110 pounds (4,130 kg) with 80-pound bolts and 64-pound (29 kg) shells. Atlanta was also armed with a 20-foot (6.1 m) solid iron ram that was reinforced by a series of vertical steel bars. In front of the ram was a spar torpedo that carried 50 pounds (23 kg) of black powder on a wooden pole connected to an iron lever that could be raised or lowered by means of pulleys.