Datasets:
AI4M
/

text
stringlengths
0
3.34M
{- This Source Code Form is subject to the terms of the Mozilla Public
 - License, v. 2.0. If a copy of the MPL was not distributed with this
 - file, You can obtain one at http://mozilla.org/MPL/2.0/. -}

module Main

import Todo
import TodoHtml
import Event
import Http
import Record
import FerryJS
import FerryJS.Util
import Sql
import Sql.JS
import Html as H
import Effects
import Data.String
import Debug.Error
import Debug.Trace

-- To use the error function
%language ElabReflection

-- Event stream of incoming HTTP request/response pairs.
Server : Type
Server = Event Multiple (Request, Response)

-- The only message the program reacts to: a new HTTP request arrived.
data Msg = NewRequest (Request, Response)

-- Program state: the message callback plus the open database connection.
State : Type
State = (Callback Msg, DBConnection)

-- Open a DB connection from command-line arguments.
-- Expects exactly: <prog> <script> <db> <user> <password>; returns Nothing otherwise.
connection : JS_IO (Maybe DBConnection)
connection = do
  args <- getArgs
  (case args of
    -- Todo: better error handling
    [_, _, db, name, pass] =>
      Just <$> newConnection {user=name} {database=db} {password=pass}
    _ => pure Nothing)

-- Next state function

-- Discard the result of a JS_IO action.
ignore : JS_IO a -> JS_IO ()
ignore = map (const ())

-- Render the given todos as HTML and write them to the response.
respondWithTodos : Response -> List Todo -> JS_IO ()
respondWithTodos res ts = do
  str <- showTodos ts
  write res str

-- A route handles one request/response pair and yields the next state.
Route : Type
Route = State -> (Request, Response) -> JS_IO State

-- GET /: list all todos from the database.
returnTodos : Route
returnTodos st@(cb, conn) (req, res) = do
  result <- runSelectQuery selectAll conn
  execute $ map (respondWithTodos res) (waitSelectResult result)
  pure st

-- Respond with a plain message and the given HTTP status code.
respondMessageCode : String -> Nat -> Route
respondMessageCode msg code st (req, res) = do
  setStatusCode res code
  write res msg
  pure st

-- Respond with a plain message and status 200.
respondMessage : String -> Route
respondMessage msg = respondMessageCode msg 200

-- Todo: set 404 header
notFound : Route
notFound = respondMessageCode "Not found" 404

-- Render the edit form for exactly one todo; the id column is a private
-- key, so a multi-row result indicates a programming error.
respondWithForm : Response -> List Todo -> JS_IO ()
respondWithForm res [t] = do
  page <- withinContent [todoForm t]
  write res page
respondWithForm res [] = write res "Could not find Todo with given id"
respondWithForm res _ =
  error "Todo error: should not find multiple values for private key 'id'"

-- Look up the todo with the given id and show its edit form.
displayForm : Int -> Route
displayForm id st@(cb, conn) (req, res) = do
  result <- runSelectQuery (selectWhereId id) conn
  execute $ map (respondWithForm res) (waitSelectResult result)
  pure st

-- GET /edit?id=<n>: show the edit form for an existing todo.
editTodo : Route
editTodo st@(cb, conn) (req, res) =
  let url = getUrl req in
  let search = getQueryAs {sch=[("id", String)]} url in
  case search of
    Nothing => notFound st (req, res)
    Just rec =>
      let id = parseInteger (rec .. "id") {a=Int} in
      case id of
        Just id => displayForm id st (req, res)
        Nothing => respondMessage "Error converting id to Int" st (req, res)

-- GET /new: show an empty form for creating a todo.
newTodo : Route
newTodo st@(cb, conn) (req, res) = do
  page <- withinContent [emptyForm]
  write res page
  pure st

-- Insert the todo when it has no id yet, otherwise update the existing row.
upsertTodo : Todo -> DBConnection -> JS_IO (Event Single Int)
upsertTodo t conn =
  case t .. "id" of
    Nothing => waitRowCountResult <$> runInsertQuery (insertTodo t) conn
    Just id => waitRowCountResult <$> runUpdateQuery (updateTodo t) conn

-- Parse an optional query-string id into an optional Int.
convertId : Maybe String -> Maybe Int
convertId = join . map (parseInteger {a=Int})

-- Schema of a todo as it arrives in the query string (all text fields;
-- "done" is present iff the checkbox was ticked).
stringTodoSchema : Schema
stringTodoSchema = [("id", Maybe String), ("name", String), ("done", Maybe String)]

requestToStringTodo : Request -> Maybe (Record Main.stringTodoSchema)
requestToStringTodo req = getQueryAs {sch=stringTodoSchema} (getUrl req)

-- Convert the string-typed query record into a Todo: parse "id" to Int
-- and collapse "done" to a Bool via isJust.
requestToTodo : Request -> Maybe Todo
requestToTodo req =
  (\rec => Record.update {se=S (S Z)} "done" isJust
             (Record.update {se=Z} "id" convertId rec))
    <$> requestToStringTodo req

-- GET /save?…: upsert the todo encoded in the query string, then list all.
saveTodo : Route
saveTodo st@(cb, conn) (req, res) =
  case requestToTodo req of
    Nothing => notFound st (req, res)
    Just todo => do
      ev <- upsertTodo todo conn
      doAfter ev (returnTodos st (req, res))
      pure st

-- GET /delete?id=<n>: remove the todo with the given id, then list all.
deleteTodo : Route
deleteTodo st@(cb, conn) (req, res) =
  let maybeId = do
        rec <- getQueryAs {sch=[("id", Maybe String)]} (getUrl req)
        id <- rec .. "id"
        parseInteger {a=Int} id
  in case maybeId of
       Just id => do
         ev <- runDeleteQuery (removeTodoWithId id) conn
         doAfter (waitRowCountResult ev) (returnTodos st (req, res))
         pure st
       Nothing => notFound st (req, res)

-- A router maps a URL to the route that should handle it, if any.
Router : Type
Router = Url -> Maybe Route

-- Match a route against an exact URL path.
pathRouter : List String -> Route -> Router
pathRouter s route url =
  if getPath url == s
    then Just route
    else Nothing

-- Try each router in order, returning the first match.
tryAll : List Router -> Router
tryAll [] _ = Nothing
tryAll (hd::tail) url = (hd url) <|> tryAll tail url

router : Router
router = tryAll [
  pathRouter [] returnTodos,
  pathRouter ["edit"] editTodo,
  pathRouter ["save"] saveTodo,
  pathRouter ["new"] newTodo,
  pathRouter ["delete"] deleteTodo]

-- State-machine step: on start, connect to the DB and begin listening on
-- port 3001; on each request, dispatch through the router (404 otherwise).
computeState : ProgramMsg State Msg -> JS_IO (Maybe (State))
computeState (ProgramStart cb) = do
  maybeConn <- connection
  (case maybeConn of
    Nothing =>
      putStrLn' "Please pass database name, username and password as command line arguments"
        *> pure Nothing
    Just conn =>
      (let ev = map NewRequest $ Http.listen (httpServer 3001)
       in listen ev cb)
        *> putStrLn' "Listening on localhost:3001"
        *> pure (Just (cb, conn)))
computeState (ProgramNext st (NewRequest (req, res))) =
  Just <$> case router . getUrl $ req of
             Just route => route st (req, res)
             Nothing => notFound st (req, res)

main : JS_IO ()
main = run computeState
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e511m187_22limbs.Synthesis.

(* TODO : change this to field once field isomorphism happens *)

(* Synthesized carry operation for the 32-bit Solinas representation of
   2^511 - 187 with 22 limbs: maps a loosely-bounded field element to a
   tightly-bounded one, with the proof obligation that both denote the
   same field value under the phiBW maps. *)
Definition carry :
  { carry : feBW_loose -> feBW_tight
  | forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
  (* Profile the tactic run; the whole term is built by synthesize_carry. *)
  Set Ltac Profiling.
  Time synthesize_carry ().
  Show Ltac Profile.
Time Defined.

(* Surface any axioms the synthesis relied on. *)
Print Assumptions carry.
lemma tendsto_add_filterlim_at_infinity: fixes c :: "'b::real_normed_vector" and F :: "'a filter" assumes "(f \<longlongrightarrow> c) F" and "filterlim g at_infinity F" shows "filterlim (\<lambda>x. f x + g x) at_infinity F"
! This file is part of toml-f.
!
! Copyright (C) 2019-2020 Sebastian Ehlert
!
! Licensed under either of Apache License, Version 2.0 or MIT license
! at your option; you may not use this file except in compliance with
! the License.
!
! Unless required by applicable law or agreed to in writing, software
! distributed under the License is distributed on an "AS IS" BASIS,
! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
! See the License for the specific language governing permissions and
! limitations under the License.

!> Implementation of the TOML array data type.
module tomlf_type_array
   use tomlf_error, only : toml_stat
   use tomlf_type_value, only : toml_value, toml_visitor
   use tomlf_structure, only : toml_ordered, new_ordered
   implicit none
   private

   public :: toml_array, new_array, new, len


   !> TOML array
   type, extends(toml_value) :: toml_array

      !> Is an inline array rather than an array of tables
      logical :: inline = .true.

      !> Storage unit for TOML values of this array
      class(toml_ordered), allocatable :: list

   contains

      !> Get the TOML value at a given index
      procedure :: get

      !> Append value to array
      procedure :: push_back

      !> Release allocation hold by TOML array
      procedure :: destroy

   end type toml_array


   !> Create standard constructor
   interface toml_array
      module procedure :: new_array_func
   end interface toml_array


   !> Overloaded constructor for TOML values
   interface new
      module procedure :: new_array
   end interface


   !> Overload len function
   interface len
      module procedure :: get_len
   end interface


contains


!> Constructor to create a new TOML array and allocate the internal storage
subroutine new_array(self)

   !> Instance of the TOML array
   type(toml_array), intent(out) :: self

   ! Allocate and initialize the ordered storage backing this array
   call new_ordered(self%list)

end subroutine new_array


!> Default constructor for TOML array type
function new_array_func() result(self)

   !> Instance of the TOML array
   type(toml_array) :: self

   ! Delegate to the subroutine constructor
   call new_array(self)

end function new_array_func


!> Get number of TOML values in the array
pure function get_len(self) result(length)

   !> Instance of the TOML array
   class(toml_array), intent(in) :: self

   !> Current length of the array
   integer :: length

   length = self%list%get_len()

end function get_len


!> Get the TOML value at the respective index
subroutine get(self, idx, ptr)

   !> Instance of the TOML array
   class(toml_array), intent(inout) :: self

   !> Index to the TOML value
   integer, intent(in) :: idx

   !> Pointer to the TOML value
   class(toml_value), pointer, intent(out) :: ptr

   call self%list%get(idx, ptr)

end subroutine get


!> Push back a TOML value to the array
subroutine push_back(self, val, stat)

   !> Instance of the TOML array
   class(toml_array), intent(inout) :: self

   !> TOML value to append to array
   class(toml_value), allocatable, intent(inout) :: val

   !> Status of operation
   integer, intent(out) :: stat

   ! Array elements must be anonymous: a value that still carries a key
   ! belongs in a table, so reject it as a fatal error
   if (allocated(val%key)) then
      stat = toml_stat%fatal
      return
   end if

   call self%list%push_back(val)

   stat = toml_stat%success

end subroutine push_back


!> Deconstructor to cleanup allocations (optional)
subroutine destroy(self)

   !> Instance of the TOML array
   class(toml_array), intent(inout) :: self

   ! Release the key inherited from toml_value, if any
   if (allocated(self%key)) then
      deallocate(self%key)
   end if

   ! Recursively destroy the stored values before freeing the storage
   if (allocated(self%list)) then
      call self%list%destroy
      deallocate(self%list)
   end if

end subroutine destroy


end module tomlf_type_array
% !TeX spellcheck = en_US \chapter{Implementation}\label{chap:imp} This chapter provides information about the implementation of the prototype and its elements, whose behavior was described in chapter~\ref{chap:conarch}. %The prototype of the developed framework was implemented and will be described in this chapter. The application can resolve external references presented by package installation commands. It handles the Bash language with the apt-get package manager and the Ansible configuration management tool with the apt package manager. The developed software can operate in two modes: One node for one package and Sets of packages.\\ The Java language was chosen for the implementation because of its simplicity and strength. To compile the framework, a Java Development Kit version 1.8 or above is needed. If the software is already compiled, a Java Runtime Environment version 1.8 or above must be installed in order to execute the application. Additionally, the apt-get package manager, which functions only in Unix-like operating systems, must be installed to download packages and identify dependencies between them. If a user wants to download packages for a specific architecture, the package manager must be set up to access this architecture's repository. % %In this language, the elements are represented by classes. %Java uses additional kind of packages which describe third-party modules and make programming easier. %The used Java packages will be mentioned here and the necessary license will be listed in the "NOTICE.txt" file in the source code's root folder. \section{Global Elements} This section describes the elements used throughout the whole framework's execution. The ZIP handler provides functionality to operate on ZIP archives, the CSAR handler provides an interface to interact with a CSAR, and Utils helps to solve problems common to many other elements. \subsection*{Zip Handler} This is a small element with straightforward functionality.
It serves to pack and unpack ZIP archives which are used by the TOSCA standard to pack applications. It was decided to use the $java$.$utils$.$zip$ package for this task. The functions for archiving and unarchiving are called $zipIt$ and $unZipIt$ respectively. \subsection*{CSAR Handler} This element provides an interface to access the content of a CSAR and stores information about files associated with it. The mostly used data are: the name of a temporary extraction folder, the list of files from the input CSAR, the meta-file entry, and the architecture of the target platform. All this data are encapsulated into the CSAR handler. The set of public functions allowing to operate with this element is available. \begin{itemize} \item $unpack$ and $pack$ functions are used to extract the CSAR into the temporary folder and pack the folder to the output CSAR. These functions use the $ZIP$~$handler$. \item $getFiles$ returns the list with files presented by the input CSAR. \item $getFolder$ returns the path to the folder which the CSAR was extracted to. \item $getArchitecture$ returns the chosen architecture of the target platform. \item $addFileToMeta$ adds information about the new file to the meta-data. \end{itemize} Here is an example usage of the element. When the CSAR handler extracts the input CSAR to the temporary extraction folder during the $unpack$'s call, it saves the folder's name. Then other elements can use the $getFolder$ function to get this name and access the data. \subsection*{Utils} This class provides the $createFile$, $getPathLength$, and $correctName$ methods used by many other elements. The main purpose of these functions is to make the code cleaner. \\ Using the $createFile$ function other elements of the framework can create a file with the given content. The $getPathLength$ method returns the deep of the given file's path what is very useful for creating references between files. %\\ OpenTOSCA uses some limitations to names of TOSCA nodes. 
Those names can't contain slashes, dots, etc. The function $correctName$ can be used to obtain an acceptable name from the given string. \section{References Resolver} This is the main module which starts by framework startup and is executed into three stages: preprocessing, processing and finishing which were described in section \ref{subs:RR}. In this section, some implementation aspects will be presented briefly. \subsection*{Preprocessing} At the preprocessing stage, the CSAR is unpacked, common \gls{tosca} definitions are generated and internal dependencies trees are built. % %\subsubsection*{Unpacking} As the first step, a user interface is provided to get the names of the input CSAR, output CSAR, mode of operation and the architecture of the final platform. To unpack the CSAR the function $unpack$ from the CSAR handler is used.\\ % %\subsubsection*{Generating TOSCA Definitions} The $javax$.$xml$.$bind$ package was chosen for creating the common TOSCA definition. This Java package allows to generate $Descriptions$ - Java classes describing an XML documents which store TOSCA definitions. The following $Descriptions$ where created: \begin{itemize} \item $DependsOn$ and $PreDependsOn$ defines Relationship Types which determine dependencies %(Described in the section \nameref{subs:reltype}) between packages.% (described in the section \nameref{subs:dep}). \item $Package$ $Artifact$ describes a deployment Artifact Type for package installation data. \item $Script$ $Artifact$ specify an implementation Artifact Type for a script installing a package. \item $Ansible$ $Playbook$ represent a implementation Artifact Type for a package installation via an Ansible playbook. \end{itemize} An example of $Description$ of the Script Artifact can be found in listing~\ref{lst:scripttype}. %%Each description is represented by a separate Java class. 
\\ %\\ %\subsubsection*{Build internal dependencies trees}\label{subs:imp_findintref} %Internal dependencies are mainly used by the \nameref{subs:archtop}. %Therefore, these two modules were combined within the one Java class named $Topology$~$Handler$. To build internal dependencies trees the topology handler described in section~\ref{sec:imptophan} was used. \begin{Listing} \caption{Java class containing the $Description$ for the Script Artifact Type definition} \label{lst:scripttype} \begin{lstlisting} public class RR_ScriptArtifactType { @XmlRootElement(name = "tosca:Definitions") @XmlAccessorType(XmlAccessType.PUBLIC_MEMBER) public static class Definitions { @XmlElement(name = "tosca:ArtifactType", required = true) public ArtifactType artifactType; @XmlAttribute(name = "xmlns:tosca", required = true) public static final String tosca="http://docs.oasis-open.org/tosca/ns/2011/12"; @XmlAttribute(name = "xmlns:winery", required = true) public static final String winery = "http://www.opentosca.org/winery/extensions/tosca/2013/02/12"; @XmlAttribute(name = "xmlns:ns0", required = true) public static final String ns0="http://www.eclipse.org/winery/model/selfservice"; @XmlAttribute(name = "id", required = true) public static final String id="winery-defs-for_tbt-RR_ScriptArtifact"; @XmlAttribute(name = "targetNamespace", required = true) public static final String targetNamespace = "http://docs.oasis-open.org/tosca/ns/2011/12/ToscaBaseTypes"; public Definitions() { artifactType = new ArtifactType(); } public static class ArtifactType { @XmlAttribute(name = "name", required = true) public static final String name = "RR_ScriptArtifact"; @XmlAttribute(name = "targetNamespace", required = true) public static final String targetNamespace = "http://docs.oasis-open.org/tosca/ns/2011/12/ToscaBaseTypes"; ArtifactType() {} } } } \end{lstlisting} \end{Listing} \subsection*{Processing} During this stage, all language modules listed in the framework are started. 
For the references resolver element that is only two following strings of code, but they start the main functionality of the framework. \begin{lstlisting} \\All languages are stored in the "languages" variable for (Language language : languages) language.proceed(); \end{lstlisting} The language modules check all files presented in the input CSAR. The list of these files is stored in the CSAR handler, a pointer to which the modules became, store and translate to the supported package manager modules during their instantiation. This system allows the modules to access the CSAR's content at any time. %Since the language modules are stored in $language$ variable, this simple stage can be presented by the listing~\ref{lst:start_lang}. %\begin{Listing} %\caption{The processing stage} %\label{lst:start_lang} %\end{Listing} \subsection*{Finishing} When all external references will be resolved, the framework can enter its last stage. At this stage, the changed data should be packed into the output \gls{csar}, whose name was entered during the preprocessing stage. The function $pack$ from the CSAR handler is used. After this operation, one receives a more encapsulated CSAR which implements the requirements, presented in section~\ref{chap:req}, and has lower level of access to the Internet. \section{Language Modules} This section will describe the implemented language modules. %implementation of %TODO %For this purpose serve \nameref{subs:archlm} and \nameref{subs:archpmm}. Since the framework is initially oriented to easy extensibility, an abstract model for the modules will be defined in such way, that new modules can be added by implementing this model. %This abstract model serves to implement requirement~\ref{req:expand}. The realization of the Bash and Ansible modules will be provided at the end of the section. % which responds to requirement~\ref{req:handledif}. 
\subsection*{Language Model} To specify the common functionality and behavior of different language modules, the language model is used. In Java, this model is described by an abstract class. The abstract class $Language$ is presented in listing~\ref{lst:langabst}. The common variables for all language modules are the name of the language, the list with package manager modules, and the extensions of files. The common functions are presented below. \begin{itemize} \item $getName$ returns the name of this language. \item $getExtensions$ returns the list of file extensions for this language. \item $proceed$ checks all original files. Files written in the language should be transferred to every supported package manager module. \item $getNodeName$ uses a package name to generate the name for a Node Type, which will install the package using this language. \item $createTOSCA\_Node$ creates the definitions for a TOSCA node. Since the created TOSCA nodes must install packages using the same language as the original node, all languages must provide the method for the creation such definitions. \end{itemize} The $createTOSCA\_Node$ function must be implemented in two variants. The first option is to accept a single package as an argument, which is needed for the One node for one package mode. The second implementation is needed for the Sets of packages mode where a lot of packages can by installed by one node. In second variant, this function must take a set of packages as an argument. New language modules must be inherited from the language model and then can be added to the framework. 
\begin{Listing} \caption{Abstract language model} \label{lst:langabst} \begin{lstlisting} public abstract class Language { // List of package managers supported by language protected List<PacketManager> packetManagers; // Extensions for this language protected List<String> extensions; // Language Name protected String Name; // To access package topology protected Control_references cr; // List with already created packages protected List <String> created_packages; /** Generate node name for specific packages * @param packet * @param source * @return */ public abstract String getNodeName(String packet, String source); /** Generate Node for TOSCA Topology * @param packet * @param source * @return * @throws IOException * @throws JAXBException */ public abstract String createTOSCA_Node(String packet, String source) throws IOException, JAXBException; public abstract String createTOSCA_Node(List<String> packages, String source) throws IOException, JAXBException; } \end{lstlisting} \end{Listing} \subsection*{Bash Module Implementation} The processing of the popular Bash language was implemented in the prototype. The Bash module should accept only files written in the Bash language. All the files presented in Artifact Templates with a Bash script type will be accepted. Additionally the module will accept files not presented in Artifact Templates at all, but possessing specific signs. %Some signs inherent to all Bash scripts can be used to choose artifacts not represented in Artifact Templates. These signs can be the file extensions (".sh" or ".bash") and the first line ("\#!/bin/bash"). Each file which contains those signs will be passed to supported package managers modules, in our case to the $apt$-$get$ module described later. \\ The Bash module must provide a capability for the given packages to create a definitions of package nodes which use Bash to install the packages. 
Such a Bash package node is defined by Package Type, Implementation, Package Artifact and Script Artifact. Package Type is a Node Type with the "$install$" operation and a name received from the $getNodeName$ function. Implementation is a Node Type Implementation which refers the Package Artifact and the Script Artifact to implement the operation. Package Artifact and the Script Artifact are Artifact Templates referencing the installation data and a Bash installation script respectively. The installation script contains the Bash header and an installation command, like "$dpkg$ -$i$ \textbf{installation\_data}". The topology handler will instantiate the package node by defining a Node Template. Those definitions and the installation script are created by the $createTOSCA\_Node$ function. %To avoid creating of the same nodes, the names of created nodes are stored in the $created\_packages$ list. %Then the node name is generated using $getNodeName$ and TOSCA definitions for this name are created. \subsection*{Ansible Implementation} Ansible configuration management tool was added to validate the extensibility of the framework. Since Ansible playbooks are often packed into archives, it may be necessary to unpack them first and then to analyze the content. Thus, the files are either immediately transferred to the package manager modules, or they are unzipped first. %Listing~\ref{lst:ansible_proceed} represents those operations. To filter Ansible files not represented by Artifact Templates, the ".$yml$" extension is used, because Ansible playbooks don't contain any specific header.\\ Creation of an Ansible \gls{tosca} node for a package is a complicated operation, because one must define the configuration of the created Ansible playbook and to archive the configuration, playbook and installation data. As the first step, the original files should be analyzed to determine the Ansible configuration (the set of options like a user name or a proxy server). 
If the implemented analyzer is unable to find all necessary options, a user interface will be provided to fulfill any missing parameters. After that, the Ansible playbook installing the package and the configuration file describing the playbook will be created in a temporary folder. After addition of installation data into the folder, it can be packed to a zip archive. This archive is an implementation artifact, which the Artifact Template should be created for. A Node Type with an "$install$" operation % and a name built from the name of the package should be defined. And finally, a Node Type Implementation linking the operation and the Artifact Templates should be generated. A Node Template will be added by the topology handler. %\begin{Listing} % \caption{Ansible proceeding} % \label{lst:ansible_proceed} %\begin{lstlisting} %public void proceed() % throws FileNotFoundException, IOException, JAXBException { % if (ch == null) % throw new NullPointerException(); % for (String f : ch.getFiles()) % for (String suf : extensions) % if (f.toLowerCase().endsWith(suf.toLowerCase())) { % if(ch.getResolving() == CSAR_handler.Resolving.Single){ % if (suf.equals(".zip")) % createTOSCA_Node(proceedZIP(f),f); % else % createTOSCA_Node(proceed(f, f),f); % else{ % if (suf.equals(".zip")) % proceedZIP(f); % else % proceed(f, f); % } % } %} % %public List<String> proceed(String filename, String source) % throws FileNotFoundException, IOException, JAXBException { % List<String> packages = new LinkedList<String>(); % for (PackageManager pm : packetManagers) % packages.addAll(pm.proceed(filename, source)); % List<String> templist = new LinkedList<String>(); % for(String temp:packages) % templist.add(Utils.correctName(temp)); % return templist; %} %\end{lstlisting} %\end{Listing} \section{Package Manager Modules} In this section, package manager modules will be specified. The main task of this modules is to identify external references, delete them and call the package handler. 
%Those modules satisfy the requirements~\ref{req:identify} and~\ref{req:delete}. An abstract model will be defined to make the extensibility easier. The apt-get module for Bash and an apt module for Ansible will be implemented. \subsection*{Package Manager Model} The model is described by an abstract class. It contains only one function $proceed$ that finds and eliminates external references, as well as passes the found package names to the package handler and return the list of all required packages. %\begin{Listing} % \caption{Abstract package manager model} % \label{lst:pmabst} %\begin{lstlisting} %public abstract class PacketManager { % % // Name of manager % static public String Name; % % protected Language language; % % protected Control_references cr; % % /** % * Proceed given file with different source (like archive) % * % * @param filename % * @throws IOException % * @throws JAXBException % */ % public abstract List<String> proceed(String filename, String source) % throws FileNotFoundException, IOException, JAXBException; %} %\end{lstlisting} %\end{Listing} \subsection*{Apt-get for Bash} The apt-get package manager module is a simple line-by-line file parser which searches for the lines starting with the "$apt$-$get$ $install$" string, comments them out and passes the command's arguments to the package handler's public function $getPackage$. %The code can be found in the listing~\ref{lst:bash_apt_parse}. \subsection*{Apt for Ansible} Since Ansible package installation commands which use the $apt$ package manager can be written in many different ways, then the processing will be more difficult than a simple line parser. Therefore, to handle Ansible playbook a state machine and regular expressions from the $java$.$util$.$regex$ package are used. \section{Package Handler} Package handler provides an interface for an interaction with the package manager of the operating system. 
It allows to $download$ $packages$, to $determine$ $the$ $type$ $of$ $dependencies$ between them and to $obtain$ $the$ $list$ $with$ $dependent$ $packages$ for the given package. \subsection*{Download Packages} The download operation is performed using the recursive function $getPackage$. %Download of installation data will satisfy the requirement~\ref{req:adddata}. % defined in the listing \ref{lst:getpack}. %\begin{Listing} %\caption{The $getPackage$ definition} %\label{lst:getpack} %\begin{lstlisting} %/** %* Download package and check its dependency %* %* @param language, language name %* @param packet, package name %* @param listed, list with already included packages %* @param source, name of package or file depending on the package %* @param sourcefile, name of original file contained external reference. %* @throws JAXBException %* @throws IOException %*/ %public void getPacket(Language language, String packet, List<String> listed, String source, String sourcefile) %\end{lstlisting} %\end{Listing} The arguments of the function are described shortly. $language$ is a reference to the language module which has accepted the original artifact. $packagename$ is a name of the package. $listed$ holds a list with already downloaded packages. It is not necessary to download them again, but new dependencies must be created. $source$ defines the parent element of the package. It will be the original artifact file for the root package, and the depending package for other packages. $sourcefile$ is a name of the original artifact. \\ %This name will be used by the $language$ to generate package node and by topology handler to create the dependency. The command $apt$-$get$ $download$ \textbf{packagename} is used for download the package. If the process fails, a user input is provided to solve the problem. A user will be able to rename the package, ignore it or even break the processing. 
If dependent packages are available, the function calls itself recursively for each dependent package. After these operations, a dependency tree for the $packagename$ will be downloaded.\\ In the One node for one package mode, the function calls the language's function $createTOSCA\_Node$ using the $language$ variable to create the TOSCA node for the package. Then it calls the topology handler's functions $addDependencyToPacket$ or $addDependencyToArtifact$ to update the topology. \subsection*{Obtain List with Dependent Packages} To obtain the dependent packages for the given package the $getDependensies$ function was developed. It takes a \emph{packagename} as an argument and uses the command $apt$-$cache$ $depends$ \emph{packagename} to build a list with dependencies for the \emph{package}. The $apt$-$cache$ command is a part of the $apt$-$get$ package manager and uses a packages database to print the dependencies. The output is parsed to find strings like "Depends: \emph{dependent\_package}". These dependent packages are combined into a list and returned. %An example output was presented in the section \ref{subs:dep}. \subsection*{Determine Type of Dependency} To determine the type of dependency between two packages the $getDependencyType$ function is used. It takes the names of the source package and the target package and uses the $apt$-$cache$ $depends$ command to get the type. It can be a $Depends$, $preDepends$ or $noDepends$ dependency. \section{Topology Handling}\label{sec:imptophan} The topology handler serves to update the TOSCA topology. It builds the internal dependencies trees during the preprocessing stage. The trees are used to find the right places for definitions of Node Templates and Dependency Templates. \subsection*{Building Internal Dependencies Trees} At the preprocessing stage, this element analyzes all original definitions and constructs internal dependencies trees. %, as was described in the section~\ref{subs:analyse}.
To read those definitions from the XML files the package $org$.$w3c$.$dom$ was used.\\ As the first step, all definitions of Artifact Templates are analyzed and pairs consist of an Artifact Template's ID and an artifact itself are built. Then each Node Type Implementation will be read and Node Types and Artifact Template's IDs found. Now each artifact has a set with Node Types where it is used. After the analysis of Service Templates, analog sets of Node Templates for each artifact will be created. In addition, for each Node Template one should keep the Service Templates, where this Node Template was defined. \subsection*{Updating Service Templates} To update Service Templates two functions are provided. \begin{itemize} \item $addDependencyToPackage(sourcePackage, targetPackage, dependencyType)$ generates a dependency between two package nodes. \item $addDependencyToArtifact(sourceArtifact, targetPackage)$ generates a dependency between an original node and a package node. \end{itemize} Both functions find all Node Templates which use the given $sourcePackae$ or $sourceArtifact$. Besides, they find Service Templates where the Node Templates are defined. The search is done with the help of the internal dependencies trees. For each found Node Template a package node for installation of the $targetPackage$ package should be instantiated by creating a new Node Template. Then the dependency between the found Node Template and the new Node Template is created by defining a Relationship Template. %The Relationship Template references both Node Templates. The type of dependency is the value of the $dependencyType$ for the $addDependencyToPackage$ function and the $preDependsOn$ for $addDependencyToArtifact$. %\\ To update the existing TOSCA definition the $org$.$w3c$.$dom$ and $org$.$xml$.$sax$ packages are used. The definition of a new Node Template for the given $topology$ and $package$ is presented in listing~\ref{lst:newnodetemp}. 
%Together with definitions from the preprocessing stage and definitions made by languages this satisfy the requirement~\ref{req:represent}. \begin{Listing} \caption{Creating of a new Node Template} \label{lst:newnodetemp} \begin{lstlisting} Element template = document.createElement("tosca_ns:NodeTemplate"); template.setAttribute("xmlns:RRnt", RR_NodeType.Definitions.NodeType.targetNamespace); template.setAttribute("id", getID(package)); template.setAttribute("name", package); template.setAttribute("type", "RRnt:" + RR_NodeType.getTypeName(package)); topology.appendChild(template); \end{lstlisting} \end{Listing}
theory practise
  imports Main
begin

(* Sum of the natural numbers 0..n, defined by primitive recursion. *)
fun sum_upTo :: "nat \<Rightarrow> nat" where
  "sum_upTo 0 = 0" |
  "sum_upTo (Suc n) = (Suc n) + (sum_upTo n)"

value "sum_upTo 5"

(* Gauss's closed form for the sum 0..n; a direct induction suffices. *)
theorem gaussumme: "sum_upTo n = (n * (n + 1)) div 2"
  apply(induction n)
  apply(auto)
  done

(* Tail-recursive list reversal with an accumulator. *)
fun itrev :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
  "itrev [] xs = xs" |
  "itrev (x # xs) ys = itrev xs (x # ys)"

(* Generalised correctness lemma: the accumulator ys must be quantified
   (arbitrary) for the induction to go through. *)
lemma abc: "itrev xs ys = rev xs @ ys"
  apply (induction xs arbitrary: ys)
  apply (auto)
  done

(* Specialisation to the empty accumulator.  The stray interactive
   `sledgehammer` invocation that preceded this proof has been removed:
   it is a proof-search diagnostic, not part of a finished proof script. *)
lemma "itrev xs [] = rev xs"
  by (simp add: abc)

end
Formal statement is: lemma decseq_def: "decseq X \<longleftrightarrow> (\<forall>m. \<forall>n\<ge>m. X n \<le> X m)" Informal statement is: A sequence $X$ is decreasing if and only if for all $m$ and $n \geq m$, we have $X_n \leq X_m$.
Pricing and specification for the all-new Abarth 124 spider have been announced ahead of the stunning new car’s arrival to the UK this summer. What’s more, it’s good news too; with a starting price of £29,565 OTR for the 1.4 TJet 170PS Manual, the new two-seater roadster represents outstanding value for money. The new Abarth 124 spider marks the return of the ‘spider’ badge, and is the natural heir of the original model launched in 1972. The 2016 car incorporates performance, technology and iconic Italian style as the key factors of a very successful line-up. Equipped with a powerful four-cylinder 1.4-litre MultiAir Turbo engine, the Abarth 124 spider delivers 170PS and 250Nm of torque. 0-62mph is dealt with in 6.8 seconds and the top speed is 143 mph. Furthermore, as the sound of the engine really matters in an Abarth, the Record Monza exhaust comes as standard, generating a unique growl which only adds to the top-down driving experience. The spider is available with a six-speed manual or the automatic Sequenziale Sportivo gearbox. The manual is characterised by quick and accurate shifting with a short-throw, direct lever, while the Sequenziale Sportivo gearbox, with steering wheel-mounted paddle shift, exploits all of the engine’s torque. You get a choice of five very Italian-sounding colours: special solid “Turini 1975 White” and “Costa Brava 1972 Red”, metallic “Isola d’Elba 1974 Blue”, “Portogallo 1974 Grey” and “San Marino 1972 Black”. If you want the musical ambiance to match that of the exhausts, then the optional Bose premium sound system with nine speakers is a must. It even includes dual headrest speakers on both seats and a subwoofer. A satellite navigation system is also optional.
[STATEMENT] lemma change_loopI: "LOOP X INV G = LOOP X INV I" [PROOF STATE] proof (prove) goal (1 subgoal): 1. LOOP X INV G = LOOP X INV I [PROOF STEP] unfolding loopi_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. kstar X = kstar X [PROOF STEP] by simp
[STATEMENT] lemma T_subprob[simp]: "T p S0 \<in> space (subprob_algebra S)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. T p S0 \<in> space (subprob_algebra (stream_space (count_space UNIV))) [PROOF STEP] by (metis prob_space.M_in_subprob prob_space_T sets_T subprob_algebra_cong)
[STATEMENT] lemma ortho_a_scalprod0: assumes "z1 \<noteq> 0" and "z2 \<noteq> 0" shows "\<angle>a z1 z2 = pi/2 \<longleftrightarrow> scalprod z1 z2 = 0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<angle>a z1 z2 = pi / 2) = (scalprod z1 z2 = 0) [PROOF STEP] unfolding ang_vec_a_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. (acute_ang (\<angle>c z1 z2) = pi / 2) = (scalprod z1 z2 = 0) [PROOF STEP] using assms ortho_c_scalprod0[of z1 z2] [PROOF STATE] proof (prove) using this: z1 \<noteq> 0 z2 \<noteq> 0 \<lbrakk>z1 \<noteq> 0; z2 \<noteq> 0\<rbrakk> \<Longrightarrow> (\<angle>c z1 z2 = pi / 2) = (scalprod z1 z2 = 0) goal (1 subgoal): 1. (acute_ang (\<angle>c z1 z2) = pi / 2) = (scalprod z1 z2 = 0) [PROOF STEP] by auto
{-# OPTIONS --without-K --safe #-} open import Categories.Category module Categories.Category.Cartesian.Properties {o ℓ e} (C : Category o ℓ e) where open import Level using (_⊔_) open import Function using (_$_) open import Data.Nat using (ℕ; zero; suc) open import Data.Product using (Σ; _,_; proj₁) renaming (_×_ to _&_) open import Data.Product.Properties open import Data.List as List open import Data.List.Relation.Unary.Any as Any using (here; there) open import Data.List.Relation.Unary.Any.Properties open import Data.List.Membership.Propositional open import Data.Vec as Vec using (Vec; []; _∷_) open import Data.Vec.Relation.Unary.Any as AnyV using (here; there) open import Data.Vec.Relation.Unary.Any.Properties open import Data.Vec.Membership.Propositional renaming (_∈_ to _∈ᵥ_) open import Relation.Binary using (Rel) open import Relation.Binary.PropositionalEquality as ≡ using (refl; _≡_) import Data.List.Membership.Propositional.Properties as ∈ₚ import Data.Vec.Membership.Propositional.Properties as ∈ᵥₚ open import Categories.Category.Cartesian C open import Categories.Diagram.Pullback C open import Categories.Diagram.Equalizer C open import Categories.Morphism.Reasoning C private open Category C open HomReasoning variable A B X Y : Obj f g : A ⇒ B -- all binary products and pullbacks implies equalizers module _ (prods : BinaryProducts) (pullbacks : ∀ {A B X} (f : A ⇒ X) (g : B ⇒ X) → Pullback f g) where open BinaryProducts prods open HomReasoning prods×pullbacks⇒equalizers : Equalizer f g prods×pullbacks⇒equalizers {f = f} {g = g} = record { arr = pb′.p₁ ; equality = begin f ∘ pb′.p₁ ≈⟨ refl⟩∘⟨ helper₁ ⟩ f ∘ pb.p₁ ∘ pb′.p₂ ≈⟨ pullˡ pb.commute ⟩ (g ∘ pb.p₂) ∘ pb′.p₂ ≈˘⟨ pushʳ helper₂ ⟩ g ∘ pb′.p₁ ∎ ; equalize = λ {_ i} eq → pb′.universal $ begin ⟨ id , id ⟩ ∘ i ≈⟨ ⟨⟩∘ ⟩ ⟨ id ∘ i , id ∘ i ⟩ ≈⟨ ⟨⟩-cong₂ identityˡ identityˡ ⟩ ⟨ i , i ⟩ ≈˘⟨ ⟨⟩-cong₂ pb.p₁∘universal≈h₁ pb.p₂∘universal≈h₂ ⟩ ⟨ pb.p₁ ∘ pb.universal eq , pb.p₂ ∘ pb.universal eq ⟩ ≈˘⟨ ⟨⟩∘ ⟩ h ∘ 
pb.universal eq ∎ ; universal = ⟺ pb′.p₁∘universal≈h₁ ; unique = λ eq → pb′.unique (⟺ eq) (pb.unique (pullˡ (⟺ helper₁) ○ ⟺ eq) (pullˡ (⟺ helper₂) ○ ⟺ eq)) } where pb : Pullback f g pb = pullbacks _ _ module pb = Pullback pb h = ⟨ pb.p₁ , pb.p₂ ⟩ pb′ : Pullback ⟨ id , id ⟩ h pb′ = pullbacks _ _ module pb′ = Pullback pb′ helper₁ : pb′.p₁ ≈ pb.p₁ ∘ pb′.p₂ helper₁ = begin pb′.p₁ ≈˘⟨ cancelˡ project₁ ⟩ π₁ ∘ ⟨ id , id ⟩ ∘ pb′.p₁ ≈⟨ refl⟩∘⟨ pb′.commute ⟩ π₁ ∘ h ∘ pb′.p₂ ≈⟨ pullˡ project₁ ⟩ pb.p₁ ∘ pb′.p₂ ∎ helper₂ : pb′.p₁ ≈ pb.p₂ ∘ pb′.p₂ helper₂ = begin pb′.p₁ ≈˘⟨ cancelˡ project₂ ⟩ π₂ ∘ ⟨ id , id ⟩ ∘ pb′.p₁ ≈⟨ refl⟩∘⟨ pb′.commute ⟩ π₂ ∘ h ∘ pb′.p₂ ≈⟨ pullˡ project₂ ⟩ pb.p₂ ∘ pb′.p₂ ∎ module Prods (car : Cartesian) where open Cartesian car -- for lists prod : List Obj → Obj prod objs = foldr _×_ ⊤ objs π[_] : ∀ {x xs} → x ∈ xs → prod xs ⇒ x π[ here refl ] = π₁ π[ there x∈xs ] = π[ x∈xs ] ∘ π₂ data _⇒_* : Obj → List Obj → Set (o ⊔ ℓ) where _~[] : ∀ x → x ⇒ [] * _∷_ : ∀ {x y ys} → x ⇒ y → x ⇒ ys * → x ⇒ y ∷ ys * ⟨_⟩* : ∀ {x ys} (fs : x ⇒ ys *) → x ⇒ prod ys ⟨ x ~[] ⟩* = ! 
⟨ f ∷ fs ⟩* = ⟨ f , ⟨ fs ⟩* ⟩ ∈⇒mor : ∀ {x y ys} (fs : x ⇒ ys *) (y∈ys : y ∈ ys) → x ⇒ y ∈⇒mor (x ~[]) () ∈⇒mor (f ∷ fs) (here refl) = f ∈⇒mor (f ∷ fs) (there y∈ys) = ∈⇒mor fs y∈ys project* : ∀ {x y ys} (fs : x ⇒ ys *) (y∈ys : y ∈ ys) → π[ y∈ys ] ∘ ⟨ fs ⟩* ≈ ∈⇒mor fs y∈ys project* (x ~[]) () project* (f ∷ fs) (here refl) = project₁ project* (f ∷ fs) (there y∈ys) = pullʳ project₂ ○ project* fs y∈ys uniqueness* : ∀ {x ys} {g h : x ⇒ prod ys} → (∀ {y} (y∈ys : y ∈ ys) → π[ y∈ys ] ∘ g ≈ π[ y∈ys ] ∘ h) → g ≈ h uniqueness* {x} {[]} uni = !-unique₂ uniqueness* {x} {y ∷ ys} uni = unique′ (uni (here ≡.refl)) (uniqueness* λ y∈ys → sym-assoc ○ uni (there y∈ys) ○ assoc) module _ {a} {A : Set a} (f : A → Obj) where uniqueness*′ : ∀ {x ys} {g h : x ⇒ prod (map f ys)} → (∀ {y} (y∈ys : y ∈ ys) → π[ ∈ₚ.∈-map⁺ f y∈ys ] ∘ g ≈ π[ ∈ₚ.∈-map⁺ f y∈ys ] ∘ h) → g ≈ h uniqueness*′ {x} {[]} uni = !-unique₂ uniqueness*′ {x} {y ∷ ys} uni = unique′ (uni (here ≡.refl)) (uniqueness*′ λ y∈ys → sym-assoc ○ uni (there y∈ys) ○ assoc) module _ {x} (g : ∀ a → x ⇒ f a) where build-mors : (l : List A) → x ⇒ map f l * build-mors [] = _ ~[] build-mors (y ∷ l) = g y ∷ build-mors l build-proj≡ : ∀ {a l} (a∈l : a ∈ l) → g a ≡ ∈⇒mor (build-mors l) (∈ₚ.∈-map⁺ f a∈l) build-proj≡ (here refl) = ≡.refl build-proj≡ (there a∈l) = build-proj≡ a∈l build-proj : ∀ {a l} (a∈l : a ∈ l) → g a ≈ π[ ∈ₚ.∈-map⁺ f a∈l ] ∘ ⟨ build-mors l ⟩* build-proj {_} {l} a∈l = reflexive (build-proj≡ a∈l) ○ ⟺ (project* (build-mors l) _) build-⟨⟩*∘ : ∀ {x y} (g : ∀ a → x ⇒ f a) (h : y ⇒ x) → ∀ l → ⟨ build-mors g l ⟩* ∘ h ≈ ⟨ build-mors (λ a → g a ∘ h) l ⟩* build-⟨⟩*∘ g h [] = !-unique₂ build-⟨⟩*∘ g h (x ∷ l) = begin ⟨ build-mors g (x ∷ l) ⟩* ∘ h ≈⟨ ⟨⟩∘ ⟩ ⟨ g x ∘ h , ⟨ build-mors g l ⟩* ∘ h ⟩ ≈⟨ ⟨⟩-congˡ (build-⟨⟩*∘ g h l) ⟩ ⟨ g x ∘ h , ⟨ build-mors (λ a → g a ∘ h) l ⟩* ⟩ ∎ build-uniqueness* : ∀ {x} {g h : ∀ a → x ⇒ f a} → (∀ a → g a ≈ h a) → ∀ l → ⟨ build-mors g l ⟩* ≈ ⟨ build-mors h l ⟩* build-uniqueness* {x} {g} {h} uni [] = Equiv.refl 
build-uniqueness* {x} {g} {h} uni (y ∷ l) = ⟨⟩-cong₂ (uni y) (build-uniqueness* uni l) -- for vectors prodᵥ : ∀ {n} → Vec Obj (suc n) → Obj prodᵥ v = Vec.foldr₁ _×_ v π[_]ᵥ : ∀ {n x} {xs : Vec Obj (suc n)} → x ∈ᵥ xs → prodᵥ xs ⇒ x π[_]ᵥ {.0} {.x} {x ∷ []} (here refl) = id π[_]ᵥ {.(suc _)} {.x} {x ∷ y ∷ xs} (here refl) = π₁ π[_]ᵥ {.(suc _)} {x} {_ ∷ y ∷ xs} (there x∈xs) = π[ x∈xs ]ᵥ ∘ π₂ data [_]_⇒ᵥ_* : ∀ n → Obj → Vec Obj n → Set (o ⊔ ℓ) where _~[] : ∀ x → [ 0 ] x ⇒ᵥ [] * _∷_ : ∀ {x y n} {ys : Vec Obj n} → x ⇒ y → [ n ] x ⇒ᵥ ys * → [ suc n ] x ⇒ᵥ y ∷ ys * ⟨_⟩ᵥ* : ∀ {n x ys} (fs : [ suc n ] x ⇒ᵥ ys *) → x ⇒ prodᵥ ys ⟨ f ∷ (x ~[]) ⟩ᵥ* = f ⟨ f ∷ (g ∷ fs) ⟩ᵥ* = ⟨ f , ⟨ g ∷ fs ⟩ᵥ* ⟩ ∈⇒morᵥ : ∀ {n x y ys} (fs : [ n ] x ⇒ᵥ ys *) (y∈ys : y ∈ᵥ ys) → x ⇒ y ∈⇒morᵥ (x ~[]) () ∈⇒morᵥ (f ∷ fs) (here refl) = f ∈⇒morᵥ (f ∷ fs) (there y∈ys) = ∈⇒morᵥ fs y∈ys projectᵥ* : ∀ {n x y ys} (fs : [ suc n ] x ⇒ᵥ ys *) (y∈ys : y ∈ᵥ ys) → π[ y∈ys ]ᵥ ∘ ⟨ fs ⟩ᵥ* ≈ ∈⇒morᵥ fs y∈ys projectᵥ* (f ∷ (x ~[])) (here ≡.refl) = identityˡ projectᵥ* (f ∷ g ∷ fs) (here ≡.refl) = project₁ projectᵥ* (f ∷ g ∷ fs) (there y∈ys) = pullʳ project₂ ○ projectᵥ* (g ∷ fs) y∈ys uniquenessᵥ* : ∀ {x n ys} {g h : x ⇒ prodᵥ {n} ys} → (∀ {y} (y∈ys : y ∈ᵥ ys) → π[ y∈ys ]ᵥ ∘ g ≈ π[ y∈ys ]ᵥ ∘ h) → g ≈ h uniquenessᵥ* {x} {.0} {y ∷ []} uni = ⟺ identityˡ ○ uni (here ≡.refl) ○ identityˡ uniquenessᵥ* {x} {.(suc _)} {y ∷ z ∷ ys} uni = unique′ (uni (here ≡.refl)) (uniquenessᵥ* (λ y∈ys → sym-assoc ○ uni (there y∈ys) ○ assoc)) module _ {a} {A : Set a} (f : A → Obj) where uniquenessᵥ*′ : ∀ {x n ys} {g h : x ⇒ prodᵥ {n} (Vec.map f ys)} → (∀ {y} (y∈ys : y ∈ᵥ ys) → π[ ∈ᵥₚ.∈-map⁺ f y∈ys ]ᵥ ∘ g ≈ π[ ∈ᵥₚ.∈-map⁺ f y∈ys ]ᵥ ∘ h) → g ≈ h uniquenessᵥ*′ {x} {.0} {y ∷ []} uni = ⟺ identityˡ ○ uni (here ≡.refl) ○ identityˡ uniquenessᵥ*′ {x} {.(suc _)} {y ∷ z ∷ ys} uni = unique′ (uni (here ≡.refl)) (uniquenessᵥ*′ (λ y∈ys → sym-assoc ○ uni (there y∈ys) ○ assoc)) module _ {x} (g : ∀ a → x ⇒ f a) where buildᵥ-mors : ∀ {n} (l : Vec A n) → [ n ] x ⇒ᵥ Vec.map 
f l * buildᵥ-mors [] = _ ~[] buildᵥ-mors (y ∷ []) = g y ∷ _ ~[] buildᵥ-mors (y ∷ z ∷ l) = g y ∷ buildᵥ-mors (z ∷ l) buildᵥ-proj≡ : ∀ {a n} {l : Vec A n} (a∈l : a ∈ᵥ l) → g a ≡ ∈⇒morᵥ (buildᵥ-mors l) (∈ᵥₚ.∈-map⁺ f a∈l) buildᵥ-proj≡ {_} {_} {y ∷ []} (here refl) = ≡.refl buildᵥ-proj≡ {_} {_} {y ∷ z ∷ l} (here refl) = ≡.refl buildᵥ-proj≡ {_} {_} {y ∷ z ∷ l} (there a∈l) = buildᵥ-proj≡ a∈l buildᵥ-proj : ∀ {a n} {l : Vec A (suc n)} (a∈l : a ∈ᵥ l) → g a ≈ π[ ∈ᵥₚ.∈-map⁺ f a∈l ]ᵥ ∘ ⟨ buildᵥ-mors l ⟩ᵥ* buildᵥ-proj {_} {_} {l} a∈l = reflexive (buildᵥ-proj≡ a∈l) ○ ⟺ (projectᵥ* (buildᵥ-mors l) _) buildᵥ-⟨⟩*∘ : ∀ {x y} (g : ∀ a → x ⇒ f a) (h : y ⇒ x) → ∀ {n} (l : Vec A (suc n)) → ⟨ buildᵥ-mors g l ⟩ᵥ* ∘ h ≈ ⟨ buildᵥ-mors (λ a → g a ∘ h) l ⟩ᵥ* buildᵥ-⟨⟩*∘ g h (x ∷ []) = Equiv.refl buildᵥ-⟨⟩*∘ g h (x ∷ y ∷ []) = ⟨⟩∘ buildᵥ-⟨⟩*∘ g h (x ∷ y ∷ z ∷ l) = begin ⟨ g x , ⟨ buildᵥ-mors g (y ∷ z ∷ l) ⟩ᵥ* ⟩ ∘ h ≈⟨ ⟨⟩∘ ⟩ ⟨ g x ∘ h , ⟨ buildᵥ-mors g (y ∷ z ∷ l) ⟩ᵥ* ∘ h ⟩ ≈⟨ ⟨⟩-congˡ (buildᵥ-⟨⟩*∘ g h (y ∷ z ∷ l)) ⟩ ⟨ g x ∘ h , ⟨ buildᵥ-mors (λ a₁ → g a₁ ∘ h) (y ∷ z ∷ l) ⟩ᵥ* ⟩ ∎ buildᵥ-uniqueness* : ∀ {x} {g h : ∀ a → x ⇒ f a} → (∀ a → g a ≈ h a) → ∀ {n} (l : Vec A (suc n)) → ⟨ buildᵥ-mors g l ⟩ᵥ* ≈ ⟨ buildᵥ-mors h l ⟩ᵥ* buildᵥ-uniqueness* {x} {g} {h} uni (y ∷ []) = uni y buildᵥ-uniqueness* {x} {g} {h} uni (y ∷ z ∷ []) = ⟨⟩-cong₂ (uni y) (uni z) buildᵥ-uniqueness* {x} {g} {h} uni (y ∷ z ∷ w ∷ l) = ⟨⟩-cong₂ (uni y) (buildᵥ-uniqueness* uni (z ∷ w ∷ l))
The real part of the quotient of two complex numbers is equal to the quotient of the real parts of the two complex numbers.
(* Copyright (C) 2017 M.A.L. Marques
              2019 Susi Lehtola

 This Source Code Form is subject to the terms of the Mozilla Public
 License, v. 2.0. If a copy of the MPL was not distributed with this
 file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)

(* type: gga_exc *)
(* prefix:
  gga_c_am05_params *params;

  assert(p->params != NULL);
  params = (gga_c_am05_params * )(p->params);
*)

(* AM05 GGA correlation energy functional (gga_c_am05).
   Built on top of the Perdew-Wang 92 LDA correlation, which is pulled in
   below with its "modified" parameter set and provides f_pw(rs, z). *)
$define lda_c_pw_params
$define lda_c_pw_modified_params
$include "lda_c_pw.mpl"

(* Index function of the reduced gradient s: equals 1 at s = 0 and decays
   towards 0 as params_a_alpha*s^2 grows. *)
XX := s -> 1/(1 + params_a_alpha*s^2):

(* Mixing function: interpolates between the full LDA correlation
   (XX -> 1, slowly varying density) and the fraction params_a_gamma of it
   (XX -> 0, large gradients). *)
ff := s -> XX(s) + params_a_gamma*(1 - XX(s)):

(* Energy per particle: the PW92 LDA correlation scaled per spin channel by
   ff evaluated at that channel's reduced gradient (X2S*xs0 / X2S*xs1),
   weighted by the spin occupations (1 +- z)/2.  The total reduced
   gradient xt is unused here. *)
f := (rs, z, xt, xs0, xs1) -> f_pw(rs, z)*(
  + opz_pow_n( z,1)/2 * ff(X2S*xs0)
  + opz_pow_n(-z,1)/2 * ff(X2S*xs1)
):
#include <boost/python.hpp>
#include <boost/multi_array.hpp>
#include <boost/foreach.hpp>
#include <thread>
#include <vector>
#include <iostream>
#include <array>
#include <string>
#include "shared/path_creator.hpp"
#include "shared/tdmap.hpp"
#include "gui.cpp"
#include "shared/sizes.h"
#include "shared/sprite.hpp"
#include <stdlib.h>
#include <time.h>
#include "shared/spritesheet.hpp"
#include "shared/player.hpp"
#include <pthread.h>

// Core game engine for the tower-defence game, exported to Python as
// `libtd.Core`.  Owns the map and the GUI; drives one game tick per
// `game_loop` iteration.
class TDGamecore
{
    private:
        Player player;                        // money / kills / score bookkeeping
        int level_number;                     // difficulty tier, kills / 10
        TDMap * map;                          // owned: board holding towers and sprites
        GUI * gui;                            // owned: rendering front-end
        Spritesheet zombie;
        Spritesheet projectile;
        std::vector<Path> paths;              // paths sprites walk along
        std::vector<Coordinate> tower_cycles; // tower animation frames
        std::vector<Coordinate> cycles;       // zombie walk animation frames
        std::vector<Coordinate> death_cycles; // zombie death animation frames
        Range simple_range;

        // Populate the animation-frame coordinate tables.
        void set_up()
        {
            for(int i = 10; i < 30; i++)
                death_cycles.push_back(Coordinate(i, 5));
            tower_cycles = {Coordinate(1, 1), Coordinate(0, 1), Coordinate(2, 0),
                            Coordinate(3, 2), Coordinate(0, 3), Coordinate(1, 3)};
            for(int i = 5; i < 13; i++)
                cycles.push_back(Coordinate(i, 5));
        }

        // Build a new tower at `location`; the caller takes ownership.
        Tower * tower_generator(Coordinate location)
        {
            return new Tower(simple_range, location, "tower.png",
                             &projectile, tower_cycles);
        }

        // Build a new zombie sprite on the first path; the caller takes ownership.
        Sprite * sprite_generator(int health = 100)
        {
            return new Sprite(paths.at(0), &zombie, cycles, death_cycles, health);
        }

    public:
        int get_money() { return player.money; }
        int get_kills() { return player.kills; }
        int get_score() { return player.score; }

        // Try to place a tower at (x, y).  Returns false (charging nothing)
        // when the player cannot afford it or the map rejects the placement.
        bool make_tower_request(int x, int y)
        {
            Tower * tower = tower_generator(Coordinate(x, y));
            if(tower->get_cost() > player.money)
            {
                delete tower;
                return false;
            }
            // NOTE(review): if add_tower returns false the tower is not freed
            // here; confirm whether TDMap takes ownership on failure.
            bool placed = map->add_tower(tower);
            if(placed)
                player.money -= tower->get_cost();
            return placed;
        }

        TDGamecore(int width = DEFAULT_WIDTH, int height = DEFAULT_HEIGHT) :
            // level_number was previously left uninitialized and read in
            // update_sprites() before any kill could set it (UB).
            level_number(0),
            zombie("zombie.png", Coordinate(128, 128), 8),
            projectile("projectile.png", Coordinate(64, 64), 2),
            simple_range(6)
        {
            srand(time(NULL));
            Path path(NUM_ROWS, NUM_COLS);
            paths.push_back(path);
            map = new TDMap(NUM_ROWS, NUM_COLS, paths);
            set_up();
            map->add_sprite(sprite_generator());
            gui = new GUI(NUM_ROWS, NUM_COLS, paths, map);
        }

        // Release the owned GUI and map (both were previously leaked).
        ~TDGamecore()
        {
            delete gui;
            delete map;
        }

        // Advance every sprite one step, score kills/escapes, and spawn
        // up to one new sprite whose health scales with the level.
        void update_sprites()
        {
            BOOST_FOREACH(Sprite * sprite, map->get_sprites())
            {
                if(!sprite->is_dead())
                {
                    sprite->move_to_next_position();
                    if(sprite->is_out_of_map())
                    {
                        map->remove_sprite(sprite);
                        player.score -= 1;
                    }
                }
                else
                {
                    // NOTE(review): a dead sprite is only removed at the top
                    // of the next game_loop tick, so it is rewarded exactly
                    // once per tick it stays dead — confirm this is intended.
                    player.money += 50;
                    player.score += 1;
                    player.kills++;
                    level_number = player.kills / 10;
                }
            }
            int range = (rand() % 2);
            while(--range >= 0)
                map->add_sprite(sprite_generator(100 + level_number * 50));
        }

        // Run `number_of_times` game ticks: cull dead sprites, retarget
        // towers, redraw the GUI, then move/spawn sprites.
        void game_loop(int number_of_times = 1)
        {
            while(--number_of_times >= 0)
            {
                BOOST_FOREACH(Sprite * sprite, map->get_sprites())
                {
                    if(sprite->is_dead())
                    {
                        map->remove_sprite(sprite);
                        continue;
                    }
                    bool sprite_attacked_flag = false;
                    BOOST_FOREACH(Tower * tower, map->get_towers())
                    {
                        if(tower->can_attack(sprite->get_coordinate()))
                        {
                            tower->set_attacking(sprite);
                            sprite_attacked_flag = true;
                        }
                    }
                    if(!sprite_attacked_flag)
                        sprite->set_not_attacked();
                }
                gui->Update();
                map->update_towers();
                update_sprites();
            }
        }
};

using namespace boost::python;

BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(loop_overloads, game_loop, 0, 1);

BOOST_PYTHON_MODULE(libtd)
{
    class_<TDGamecore>("Core", init<>())
        .def("loop", &TDGamecore::game_loop,
             loop_overloads(args("number_of_times")))
        .def("tower", &TDGamecore::make_tower_request)
        // def_readonly requires a pointer-to-DATA-member and does not compile
        // with getter member functions; expose them as read-only properties.
        .add_property("money", &TDGamecore::get_money)
        .add_property("kills", &TDGamecore::get_kills)
        .add_property("score", &TDGamecore::get_score);
}
[GOAL] α✝ β✝ : Type u inst✝ : (P : Prop) → Decidable P α β : Type ?u.384 ⊢ Functor.mapConst = Functor.map ∘ const β [PROOFSTEP] simp only [Functor.mapConst, Functor.map] [GOAL] α✝ β✝ : Type u inst✝ : (P : Prop) → Decidable P α β γ : Type u_1 f : α → β → γ s : Finset α t : Finset β ⊢ image₂ f s t = Seq.seq (f <$> s) fun x => t [PROOFSTEP] ext [GOAL] case a α✝ β✝ : Type u inst✝ : (P : Prop) → Decidable P α β γ : Type u_1 f : α → β → γ s : Finset α t : Finset β a✝ : γ ⊢ a✝ ∈ image₂ f s t ↔ a✝ ∈ Seq.seq (f <$> s) fun x => t [PROOFSTEP] simp [mem_sup] [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ⊢ (SeqLeft.seqLeft s fun x => t) = Seq.seq (const β✝ <$> s) fun x => t [PROOFSTEP] rw [seq_def, fmap_def, seqLeft_def] [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ⊢ (if t = ∅ then ∅ else s) = sup (image (const β✝) s) fun f => image f t [PROOFSTEP] obtain rfl | ht := t.eq_empty_or_nonempty [GOAL] case inl α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ ⊢ (if ∅ = ∅ then ∅ else s) = sup (image (const β✝) s) fun f => image f ∅ [PROOFSTEP] simp_rw [image_empty, if_true] [GOAL] case inl α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ ⊢ ∅ = sup (image (const β✝) s) fun f => ∅ [PROOFSTEP] exact (sup_bot _).symm [GOAL] case inr α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ht : Finset.Nonempty t ⊢ (if t = ∅ then ∅ else s) = sup (image (const β✝) s) fun f => image f t [PROOFSTEP] ext a [GOAL] case inr.a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : 
Finset β✝ ht : Finset.Nonempty t a : α✝ ⊢ (a ∈ if t = ∅ then ∅ else s) ↔ a ∈ sup (image (const β✝) s) fun f => image f t [PROOFSTEP] rw [if_neg ht.ne_empty, mem_sup] [GOAL] case inr.a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ht : Finset.Nonempty t a : α✝ ⊢ a ∈ s ↔ ∃ v, v ∈ image (const β✝) s ∧ a ∈ image v t [PROOFSTEP] refine' ⟨fun ha => ⟨const _ a, mem_image_of_mem _ ha, mem_image_const_self.2 ht⟩, _⟩ [GOAL] case inr.a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ht : Finset.Nonempty t a : α✝ ⊢ (∃ v, v ∈ image (const β✝) s ∧ a ∈ image v t) → a ∈ s [PROOFSTEP] rintro ⟨f, hf, ha⟩ [GOAL] case inr.a.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ht : Finset.Nonempty t a : α✝ f : β✝ → α✝ hf : f ∈ image (const β✝) s ha : a ∈ image f t ⊢ a ∈ s [PROOFSTEP] rw [mem_image] at hf ha [GOAL] case inr.a.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ht : Finset.Nonempty t a : α✝ f : β✝ → α✝ hf : ∃ a, a ∈ s ∧ const β✝ a = f ha : ∃ a_1, a_1 ∈ t ∧ f a_1 = a ⊢ a ∈ s [PROOFSTEP] obtain ⟨b, hb, rfl⟩ := hf [GOAL] case inr.a.intro.intro.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ht : Finset.Nonempty t a b : α✝ hb : b ∈ s ha : ∃ a_1, a_1 ∈ t ∧ const β✝ b a_1 = a ⊢ a ∈ s [PROOFSTEP] obtain ⟨_, _, rfl⟩ := ha [GOAL] case inr.a.intro.intro.intro.intro.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ht : Finset.Nonempty t b : α✝ hb : b ∈ s w✝ : β✝ left✝ : w✝ ∈ t ⊢ const β✝ b w✝ ∈ 
s [PROOFSTEP] exact hb [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ⊢ (SeqRight.seqRight s fun x => t) = Seq.seq (const α✝ id <$> s) fun x => t [PROOFSTEP] rw [seq_def, fmap_def, seqRight_def] [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ ⊢ (if s = ∅ then ∅ else t) = sup (image (const α✝ id) s) fun f => image f t [PROOFSTEP] obtain rfl | hs := s.eq_empty_or_nonempty [GOAL] case inl α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 t : Finset β✝ ⊢ (if ∅ = ∅ then ∅ else t) = sup (image (const α✝ id) ∅) fun f => image f t [PROOFSTEP] rw [if_pos rfl, image_empty, sup_empty, bot_eq_empty] [GOAL] case inr α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s ⊢ (if s = ∅ then ∅ else t) = sup (image (const α✝ id) s) fun f => image f t [PROOFSTEP] ext a [GOAL] case inr.a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s a : β✝ ⊢ (a ∈ if s = ∅ then ∅ else t) ↔ a ∈ sup (image (const α✝ id) s) fun f => image f t [PROOFSTEP] rw [if_neg hs.ne_empty, mem_sup] [GOAL] case inr.a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s a : β✝ ⊢ a ∈ t ↔ ∃ v, v ∈ image (const α✝ id) s ∧ a ∈ image v t [PROOFSTEP] refine' ⟨fun ha => ⟨id, mem_image_const_self.2 hs, by rwa [image_id]⟩, _⟩ [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s a : β✝ ha : a ∈ t ⊢ a ∈ image id t 
[PROOFSTEP] rwa [image_id] [GOAL] case inr.a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s a : β✝ ⊢ (∃ v, v ∈ image (const α✝ id) s ∧ a ∈ image v t) → a ∈ t [PROOFSTEP] rintro ⟨f, hf, ha⟩ [GOAL] case inr.a.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s a : β✝ f : β✝ → β✝ hf : f ∈ image (const α✝ id) s ha : a ∈ image f t ⊢ a ∈ t [PROOFSTEP] rw [mem_image] at hf ha [GOAL] case inr.a.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s a : β✝ f : β✝ → β✝ hf : ∃ a, a ∈ s ∧ const α✝ id a = f ha : ∃ a_1, a_1 ∈ t ∧ f a_1 = a ⊢ a ∈ t [PROOFSTEP] obtain ⟨b, hb, rfl⟩ := ha [GOAL] case inr.a.intro.intro.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s f : β✝ → β✝ hf : ∃ a, a ∈ s ∧ const α✝ id a = f b : β✝ hb : b ∈ t ⊢ f b ∈ t [PROOFSTEP] obtain ⟨_, _, rfl⟩ := hf [GOAL] case inr.a.intro.intro.intro.intro.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 s : Finset α✝ t : Finset β✝ hs : Finset.Nonempty s b : β✝ hb : b ∈ t w✝ : α✝ left✝ : w✝ ∈ s ⊢ const α✝ id w✝ b ∈ t [PROOFSTEP] exact hb [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ : Type ?u.13031 f : α✝ → β✝ s : Finset α✝ ⊢ (Seq.seq (pure f) fun x => s) = f <$> s [PROOFSTEP] simp only [pure_def, seq_def, sup_singleton, fmap_def] [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ γ✝ : Type ?u.13031 s : Finset α✝ t : Finset (α✝ → β✝) u : Finset (β✝ → γ✝) ⊢ 
(Seq.seq u fun x => Seq.seq t fun x => s) = Seq.seq (Seq.seq (comp <$> u) fun x => t) fun x => s [PROOFSTEP] ext a [GOAL] case a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ γ✝ : Type ?u.13031 s : Finset α✝ t : Finset (α✝ → β✝) u : Finset (β✝ → γ✝) a : γ✝ ⊢ (a ∈ Seq.seq u fun x => Seq.seq t fun x => s) ↔ a ∈ Seq.seq (Seq.seq (comp <$> u) fun x => t) fun x => s [PROOFSTEP] simp_rw [seq_def, fmap_def] [GOAL] case a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ γ✝ : Type ?u.13031 s : Finset α✝ t : Finset (α✝ → β✝) u : Finset (β✝ → γ✝) a : γ✝ ⊢ (a ∈ sup u fun f => image f (sup t fun f => image f s)) ↔ a ∈ sup (sup (image comp u) fun f => image f t) fun f => image f s [PROOFSTEP] simp only [exists_prop, mem_sup, mem_image] [GOAL] case a α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ γ✝ : Type ?u.13031 s : Finset α✝ t : Finset (α✝ → β✝) u : Finset (β✝ → γ✝) a : γ✝ ⊢ (∃ v, v ∈ u ∧ ∃ a_1, (∃ v, v ∈ t ∧ ∃ a, a ∈ s ∧ v a = a_1) ∧ v a_1 = a) ↔ ∃ v, (∃ v_1, (∃ a, a ∈ u ∧ comp a = v_1) ∧ ∃ a, a ∈ t ∧ v_1 a = v) ∧ ∃ a_1, a_1 ∈ s ∧ v a_1 = a [PROOFSTEP] constructor [GOAL] case a.mp α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ γ✝ : Type ?u.13031 s : Finset α✝ t : Finset (α✝ → β✝) u : Finset (β✝ → γ✝) a : γ✝ ⊢ (∃ v, v ∈ u ∧ ∃ a_1, (∃ v, v ∈ t ∧ ∃ a, a ∈ s ∧ v a = a_1) ∧ v a_1 = a) → ∃ v, (∃ v_1, (∃ a, a ∈ u ∧ comp a = v_1) ∧ ∃ a, a ∈ t ∧ v_1 a = v) ∧ ∃ a_2, a_2 ∈ s ∧ v a_2 = a [PROOFSTEP] rintro ⟨g, hg, b, ⟨f, hf, a, ha, rfl⟩, rfl⟩ [GOAL] case a.mp.intro.intro.intro.intro.intro.intro.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ γ✝ : Type ?u.13031 s : Finset α✝ t : Finset (α✝ → β✝) u : Finset (β✝ → γ✝) g : β✝ → γ✝ hg : g ∈ u f : α✝ → β✝ hf : f ∈ t a : α✝ ha : a ∈ s ⊢ ∃ v, (∃ v_1, (∃ a, a ∈ u ∧ comp a = v_1) ∧ ∃ a, a ∈ 
t ∧ v_1 a = v) ∧ ∃ a_1, a_1 ∈ s ∧ v a_1 = g (f a) [PROOFSTEP] exact ⟨g ∘ f, ⟨comp g, ⟨g, hg, rfl⟩, f, hf, rfl⟩, a, ha, rfl⟩ [GOAL] case a.mpr α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ γ✝ : Type ?u.13031 s : Finset α✝ t : Finset (α✝ → β✝) u : Finset (β✝ → γ✝) a : γ✝ ⊢ (∃ v, (∃ v_1, (∃ a, a ∈ u ∧ comp a = v_1) ∧ ∃ a, a ∈ t ∧ v_1 a = v) ∧ ∃ a_1, a_1 ∈ s ∧ v a_1 = a) → ∃ v, v ∈ u ∧ ∃ a_2, (∃ v, v ∈ t ∧ ∃ a, a ∈ s ∧ v a = a_2) ∧ v a_2 = a [PROOFSTEP] rintro ⟨c, ⟨_, ⟨g, hg, rfl⟩, f, hf, rfl⟩, a, ha, rfl⟩ [GOAL] case a.mpr.intro.intro.intro.intro.intro.intro.intro.intro.intro.intro α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulFunctor Finset := lawfulFunctor α✝ β✝ γ✝ : Type ?u.13031 s : Finset α✝ t : Finset (α✝ → β✝) u : Finset (β✝ → γ✝) g : β✝ → γ✝ hg : g ∈ u f : α✝ → β✝ hf : f ∈ t a : α✝ ha : a ∈ s ⊢ ∃ v, v ∈ u ∧ ∃ a_1, (∃ v, v ∈ t ∧ ∃ a, a ∈ s ∧ v a = a_1) ∧ v a_1 = (g ∘ f) a [PROOFSTEP] exact ⟨g, hg, f a, ⟨f, hf, a, ha, rfl⟩, rfl⟩ [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulApplicative Finset := lawfulApplicative α✝ β✝ : Type ?u.21428 s : Finset α✝ t : Finset β✝ ⊢ (Seq.seq (Prod.mk <$> s) fun x => t) = Seq.seq ((fun b a => (a, b)) <$> t) fun x => s [PROOFSTEP] simp_rw [seq_def, fmap_def, sup_image, sup_eq_biUnion] [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulApplicative Finset := lawfulApplicative α✝ β✝ : Type ?u.21428 s : Finset α✝ t : Finset β✝ ⊢ Finset.biUnion s ((fun f => image f t) ∘ Prod.mk) = Finset.biUnion t ((fun f => image f s) ∘ fun b a => (a, b)) [PROOFSTEP] change (s.biUnion fun a => t.image fun b => (a, b)) = t.biUnion fun b => s.image fun a => (a, b) [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulApplicative Finset := lawfulApplicative α✝ β✝ : Type ?u.21428 s : Finset α✝ t : Finset β✝ ⊢ (Finset.biUnion s fun a => image (fun b => (a, b)) t) = Finset.biUnion t fun b => image (fun a => (a, b)) s [PROOFSTEP] trans s ×ˢ t <;> [rw 
[product_eq_biUnion]; rw [product_eq_biUnion_right]] [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulApplicative Finset := lawfulApplicative α✝ β✝ : Type ?u.21428 s : Finset α✝ t : Finset β✝ ⊢ (Finset.biUnion s fun a => image (fun b => (a, b)) t) = Finset.biUnion t fun b => image (fun a => (a, b)) s [PROOFSTEP] trans s ×ˢ t [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulApplicative Finset := lawfulApplicative α✝ β✝ : Type ?u.23242 s : Finset α✝ t : Finset β✝ ⊢ (Finset.biUnion s fun a => image (fun b => (a, b)) t) = s ×ˢ t [PROOFSTEP] rw [product_eq_biUnion] [GOAL] α β : Type u inst✝ : (P : Prop) → Decidable P src✝ : LawfulApplicative Finset := lawfulApplicative α✝ β✝ : Type ?u.23242 s : Finset α✝ t : Finset β✝ ⊢ s ×ˢ t = Finset.biUnion t fun b => image (fun a => (a, b)) s [PROOFSTEP] rw [product_eq_biUnion_right] [GOAL] inst✝ : (P : Prop) → Decidable P src✝ : LawfulApplicative Finset := lawfulApplicative α✝ β✝ γ✝ : Type ?u.24298 s : Finset α✝ f : α✝ → Finset β✝ g : β✝ → Finset γ✝ ⊢ s >>= f >>= g = s >>= fun x => f x >>= g [PROOFSTEP] simp only [bind, ← sup_biUnion, sup_eq_biUnion, biUnion_biUnion] [GOAL] α β γ : Type u F G : Type u → Type u inst✝⁴ : Applicative F inst✝³ : Applicative G inst✝² : CommApplicative F inst✝¹ : CommApplicative G inst✝ : DecidableEq α s : Finset α ⊢ traverse pure s = s [PROOFSTEP] rw [traverse, Multiset.id_traverse] [GOAL] α β γ : Type u F G : Type u → Type u inst✝⁴ : Applicative F inst✝³ : Applicative G inst✝² : CommApplicative F inst✝¹ : CommApplicative G inst✝ : DecidableEq α s : Finset α ⊢ Multiset.toFinset <$> s.val = s [PROOFSTEP] exact s.val_toFinset [GOAL] α β γ : Type u F G : Type u → Type u inst✝³ : Applicative F inst✝² : Applicative G inst✝¹ : CommApplicative F inst✝ : CommApplicative G g : α → G β h : β → γ s : Finset α ⊢ Functor.map h <$> traverse g s = traverse (Functor.map h ∘ g) s [PROOFSTEP] unfold traverse [GOAL] α β γ : Type u F G : Type u → Type u inst✝³ : Applicative F inst✝² : 
Applicative G inst✝¹ : CommApplicative F inst✝ : CommApplicative G g : α → G β h : β → γ s : Finset α ⊢ Functor.map h <$> Multiset.toFinset <$> Multiset.traverse g s.val = Multiset.toFinset <$> Multiset.traverse (Functor.map h ∘ g) s.val [PROOFSTEP] simp only [map_comp_coe, functor_norm] [GOAL] α β γ : Type u F G : Type u → Type u inst✝³ : Applicative F inst✝² : Applicative G inst✝¹ : CommApplicative F inst✝ : CommApplicative G g : α → G β h : β → γ s : Finset α ⊢ (Multiset.toFinset ∘ Functor.map h) <$> Multiset.traverse g s.val = Multiset.toFinset <$> Multiset.traverse (Functor.map h ∘ g) s.val [PROOFSTEP] rw [LawfulFunctor.comp_map, Multiset.map_traverse]
The real part of a sum of complex numbers is the sum of the real parts.
The real part of a sum of complex numbers is the sum of the real parts.
The real part of a sum of complex numbers is the sum of the real parts.
The second entry , a " commercial , yet heavy " album called Addicted , was released in November 2009 and features lead vocals from Townsend and Dutch singer Anneke van Giersbergen . Brian " Beav " Waddell was recruited from the Devin Townsend Band to play bass .
[STATEMENT] lemma rem_implicit_pres_ops_execute: assumes "(\<And>op. op \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> is_standard_operator op)" and "s \<in> ast_problem.valid_states prob" shows "ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s [PROOF STEP] proof- [PROOF STATE] proof (state) goal (1 subgoal): 1. ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s [PROOF STEP] have "(n,ps,es,c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> (filter (ast_problem.eff_enabled prob s) es) = es" for n ps es c [PROOF STATE] proof (prove) goal (1 subgoal): 1. (n, ps, es, c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled prob s) es = es [PROOF STEP] using assms(2) [PROOF STATE] proof (prove) using this: s \<in> ast_problem.valid_states prob goal (1 subgoal): 1. (n, ps, es, c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled prob s) es = es [PROOF STEP] by (auto simp add: ast_problem.std_eff_enabled dest!: assms(1)) [PROOF STATE] proof (state) this: (?n, ?ps, ?es, ?c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled prob s) ?es = ?es goal (1 subgoal): 1. ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s [PROOF STEP] moreover [PROOF STATE] proof (state) this: (?n, ?ps, ?es, ?c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled prob s) ?es = ?es goal (1 subgoal): 1. 
ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s [PROOF STEP] have "(n,ps,es,c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> (filter (ast_problem.eff_enabled (rem_implicit_pres_ops prob) s) (map rem_effect_implicit_pres es)) = map rem_effect_implicit_pres es" for n ps es c [PROOF STATE] proof (prove) goal (1 subgoal): 1. (n, ps, es, c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled (rem_implicit_pres_ops prob) s) (map rem_effect_implicit_pres es) = map rem_effect_implicit_pres es [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: ?op \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> is_standard_operator ?op s \<in> ast_problem.valid_states prob goal (1 subgoal): 1. (n, ps, es, c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled (rem_implicit_pres_ops prob) s) (map rem_effect_implicit_pres es) = map rem_effect_implicit_pres es [PROOF STEP] by (fastforce simp add: ast_problem.std_eff_enabled rem_implicit_pres_ops_valid_states dest!: is_standard_operator_rem_implicit_pres_ops' dest: in_rem_implicit_pres_\<delta>) [PROOF STATE] proof (state) this: (?n, ?ps, ?es, ?c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled (rem_implicit_pres_ops prob) s) (map rem_effect_implicit_pres ?es) = map rem_effect_implicit_pres ?es goal (1 subgoal): 1. ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s [PROOF STEP] moreover [PROOF STATE] proof (state) this: (?n, ?ps, ?es, ?c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled (rem_implicit_pres_ops prob) s) (map rem_effect_implicit_pres ?es) = map rem_effect_implicit_pres ?es goal (1 subgoal): 1. ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s [PROOF STEP] have "map_of (map ((\<lambda>(_,x,_,v). 
(x,v)) o rem_effect_implicit_pres) effs) = map_of (map (\<lambda>(_,x,_,v). (x,v)) effs)" for effs [PROOF STATE] proof (prove) goal (1 subgoal): 1. map_of (map ((\<lambda>(uu_, x, uu_, v). (x, v)) \<circ> rem_effect_implicit_pres) effs) = map_of (map (\<lambda>(uu_, x, uu_, v). (x, v)) effs) [PROOF STEP] by (induction effs) auto [PROOF STATE] proof (state) this: map_of (map ((\<lambda>(uu_, x, uu_, v). (x, v)) \<circ> rem_effect_implicit_pres) ?effs) = map_of (map (\<lambda>(uu_, x, uu_, v). (x, v)) ?effs) goal (1 subgoal): 1. ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: (?n, ?ps, ?es, ?c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled prob s) ?es = ?es (?n, ?ps, ?es, ?c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled (rem_implicit_pres_ops prob) s) (map rem_effect_implicit_pres ?es) = map rem_effect_implicit_pres ?es map_of (map ((\<lambda>(uu_, x, uu_, v). (x, v)) \<circ> rem_effect_implicit_pres) ?effs) = map_of (map (\<lambda>(uu_, x, uu_, v). (x, v)) ?effs) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: (?n, ?ps, ?es, ?c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled prob s) ?es = ?es (?n, ?ps, ?es, ?c) \<in> set (ast_problem.ast\<delta> prob) \<Longrightarrow> filter (ast_problem.eff_enabled (rem_implicit_pres_ops prob) s) (map rem_effect_implicit_pres ?es) = map rem_effect_implicit_pres ?es map_of (map ((\<lambda>(uu_, x, uu_, v). (x, v)) \<circ> rem_effect_implicit_pres) ?effs) = map_of (map (\<lambda>(uu_, x, uu_, v). (x, v)) ?effs) goal (1 subgoal): 1. 
ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s [PROOF STEP] by(auto simp add: ast_problem.execute_def rem_implicit_pres_ops_lookup_op_Some_1' split: option.splits dest: rem_implicit_pres_ops_lookup_op_Some_2' ast_problem.lookup_Some_in\<delta>) [PROOF STATE] proof (state) this: ast_problem.execute (rem_implicit_pres_ops prob) name s = ast_problem.execute prob name s goal: No subgoals! [PROOF STEP] qed
module Data.Num.Core where open import Data.Num.Digit public open import Data.Nat open import Data.Nat.Properties open import Data.Nat.Properties.Simple open import Data.Nat.Properties.Extra open import Data.Fin as Fin using (Fin; fromℕ≤; inject≤; toℕ; zero; suc; #_) open import Data.Fin.Properties as FinProps using (bounded; toℕ-fromℕ≤) open import Function open import Relation.Binary.PropositionalEquality open import Relation.Nullary open import Relation.Nullary.Negation open import Relation.Nullary.Decidable open import Relation.Binary open import Function.Equality using (_⟶_) open ≡-Reasoning open ≤-Reasoning renaming (begin_ to start_; _∎ to _□; _≡⟨_⟩_ to _≈⟨_⟩_) open DecTotalOrder decTotalOrder using (reflexive) renaming (refl to ≤-refl) ------------------------------------------------------------------------ -- Numbers ------------------------------------------------------------------------ infixr 5 _∷_ data Numeral : ℕ → ℕ → ℕ → Set where _∙ : ∀ {b d o} → Digit d → Numeral b d o _∷_ : ∀ {b d o} → Digit d → Numeral b d o → Numeral b d o MMXVI : Numeral 10 10 0 MMXVI = # 6 ∷ # 1 ∷ # 0 ∷ (# 2) ∙ ------------------------------------------------------------------------ -- Converting from Numeral to ℕ ------------------------------------------------------------------------ ⟦_⟧ : ∀ {b d o} → (xs : Numeral b d o) → ℕ ⟦_⟧ {_} {_} {o} (x ∙) = Digit-toℕ x o ⟦_⟧ {b} {_} {o} (x ∷ xs) = Digit-toℕ x o + ⟦ xs ⟧ * b Num-lower-bound : ∀ {b d o} → (xs : Numeral b (suc d) o) → ⟦ xs ⟧ ≥ o Num-lower-bound {_} {_} {o} (x ∙) = Digit-lower-bound o x Num-lower-bound {b} {d} {o} (x ∷ xs) = start o ≤⟨ m≤m+n o (⟦ xs ⟧ * b) ⟩ o + ⟦ xs ⟧ * b ≤⟨ +n-mono (⟦ xs ⟧ * b) (Digit-lower-bound o x) ⟩ Digit-toℕ x o + ⟦ xs ⟧ * b □ -- the least significant digit lsd : ∀ {b d o} → (xs : Numeral b d o) → Digit d lsd (x ∙ ) = x lsd (x ∷ xs) = x lsd-toℕ : ∀ {b d o} → (xs : Numeral b d o) → Digit-toℕ (lsd xs) o ≤ ⟦ xs ⟧ lsd-toℕ (x ∙) = ≤-refl lsd-toℕ {b} {d} {o} (x ∷ xs) = m≤m+n (Digit-toℕ x o) (⟦ xs ⟧ * 
b) ------------------------------------------------------------------------ -- View of Num ------------------------------------------------------------------------ data NumView : ℕ → ℕ → ℕ → Set where NullBase : ∀ d o → NumView 0 (suc d) o NoDigits : ∀ b o → NumView b 0 o AllZeros : ∀ b → NumView (suc b) 1 0 Proper : ∀ b d o → (proper : suc d + o ≥ 2) → NumView (suc b) (suc d) o numView : ∀ b d o → NumView b d o numView b zero o = NoDigits b o numView zero (suc d) o = NullBase d o numView (suc b) (suc zero) zero = AllZeros b numView (suc b) (suc zero) (suc o) = Proper b zero (suc o) (s≤s (s≤s z≤n)) numView (suc b) (suc (suc d)) o = Proper b (suc d) o (s≤s (s≤s z≤n)) ------------------------------------------------------------------------ -- Properties of Num ------------------------------------------------------------------------ NoDigits-explode : ∀ {b o a} {Whatever : Set a} → (xs : Numeral b 0 o) → Whatever NoDigits-explode (() ∙ ) NoDigits-explode (() ∷ xs) toℕ-NullBase : ∀ {d o} → (x : Digit d) → (xs : Numeral 0 d o) → ⟦ x ∷ xs ⟧ ≡ Digit-toℕ x o toℕ-NullBase {d} {o} x xs = begin Digit-toℕ x o + ⟦ xs ⟧ * 0 ≡⟨ cong (λ w → Digit-toℕ x o + w) (*-right-zero ⟦ xs ⟧) ⟩ Digit-toℕ x o + 0 ≡⟨ +-right-identity (Digit-toℕ x o) ⟩ Digit-toℕ x o ∎ toℕ-AllZeros : ∀ {b} → (xs : Numeral b 1 0) → ⟦ xs ⟧ ≡ 0 toℕ-AllZeros (zero ∙ ) = refl toℕ-AllZeros (suc () ∙ ) toℕ-AllZeros {b} (zero ∷ xs) = cong (λ w → w * b) (toℕ-AllZeros xs) toℕ-AllZeros (suc () ∷ xs) n∷-mono-strict : ∀ {b d o} → (x : Fin d) (xs : Numeral (suc b) d o) → (y : Fin d) (ys : Numeral (suc b) d o) → Digit-toℕ x o ≡ Digit-toℕ y o → ⟦ xs ⟧ < ⟦ ys ⟧ → ⟦ x ∷ xs ⟧ < ⟦ y ∷ ys ⟧ n∷-mono-strict {b} {d} {o} x xs y ys ⟦x⟧≡⟦y⟧ ⟦xs⟧<⟦ys⟧ = start suc (Digit-toℕ x o) + ⟦ xs ⟧ * suc b ≈⟨ sym (+-suc (Digit-toℕ x o) (⟦ xs ⟧ * suc b)) ⟩ Digit-toℕ x o + suc (⟦ xs ⟧ * suc b) ≤⟨ n+-mono (Digit-toℕ x o) (s≤s (n≤m+n b (⟦ xs ⟧ * suc b))) ⟩ Digit-toℕ x o + (suc ⟦ xs ⟧) * suc b ≤⟨ (reflexive ⟦x⟧≡⟦y⟧) +-mono (*n-mono (suc b) ⟦xs⟧<⟦ys⟧) ⟩ 
Digit-toℕ y o + ⟦ ys ⟧ * suc b □ ∷ns-mono-strict : ∀ {b d o} → (x : Fin d) (xs : Numeral b d o) → (y : Fin d) (ys : Numeral b d o) → ⟦ xs ⟧ ≡ ⟦ ys ⟧ → Digit-toℕ x o < Digit-toℕ y o → ⟦ x ∷ xs ⟧ < ⟦ y ∷ ys ⟧ ∷ns-mono-strict {b} {d} {o} x xs y ys ⟦xs⟧≡⟦ys⟧ ⟦x⟧<⟦y⟧ = start suc ⟦ x ∷ xs ⟧ ≤⟨ ⟦x⟧<⟦y⟧ +-mono *n-mono b (reflexive ⟦xs⟧≡⟦ys⟧) ⟩ ⟦ y ∷ ys ⟧ □ tail-mono-strict : ∀ {b d o} → (x : Fin d) (xs : Numeral b d o) → (y : Fin d) (ys : Numeral b d o) → Greatest x → ⟦ x ∷ xs ⟧ < ⟦ y ∷ ys ⟧ → ⟦ xs ⟧ < ⟦ ys ⟧ tail-mono-strict {b} {_} {o} x xs y ys greatest p = *n-mono-strict-inverse b ⟦∷xs⟧<⟦∷ys⟧ where ⟦x⟧≥⟦y⟧ : Digit-toℕ x o ≥ Digit-toℕ y o ⟦x⟧≥⟦y⟧ = greatest-of-all o x y greatest ⟦∷xs⟧<⟦∷ys⟧ : ⟦ xs ⟧ * b < ⟦ ys ⟧ * b ⟦∷xs⟧<⟦∷ys⟧ = +-mono-contra ⟦x⟧≥⟦y⟧ p tail-mono-strict-Null : ∀ {b d o} → (x : Fin d) → (y : Fin d) (ys : Numeral b d o) → Greatest x → ⟦ _∙ {b} {d} {o} x ⟧ < ⟦ y ∷ ys ⟧ → 0 < ⟦ ys ⟧ tail-mono-strict-Null {b} {_} {o} x y ys greatest p = *n-mono-strict-inverse b ⟦∷∙⟧<⟦∷ys⟧ where ⟦x⟧≥⟦y⟧ : Digit-toℕ x o ≥ Digit-toℕ y o ⟦x⟧≥⟦y⟧ = greatest-of-all o x y greatest ⟦∷∙⟧<⟦∷ys⟧ : 0 < ⟦ ys ⟧ * b ⟦∷∙⟧<⟦∷ys⟧ = +-mono-contra ⟦x⟧≥⟦y⟧ $ start suc (Digit-toℕ x o) + 0 ≈⟨ +-right-identity (suc (Digit-toℕ x o)) ⟩ suc (Digit-toℕ x o) ≤⟨ p ⟩ ⟦ y ∷ ys ⟧ □
State Before: α : Type ?u.42064 β : Type ?u.42067 K : Type u_1 inst✝ : DivisionRing K a✝ b✝ c d a b : K ha : a ≠ 0 hb : b ≠ 0 ⊢ a⁻¹ - b⁻¹ = a⁻¹ * (b - a) * b⁻¹ State After: no goals Tactic: rw [mul_sub, sub_mul, mul_inv_cancel_right₀ hb, inv_mul_cancel ha, one_mul]
AdderType : (numType : Type) -> (argc : Nat) -> Type AdderType numType Z = numType AdderType numType (S k) = (next : numType) -> AdderType numType k adder : Num numType => (argc : Nat) -> (acc : numType) -> AdderType numType argc adder Z acc = acc adder (S k) acc = \next => adder k (acc + next)
The norm of the difference of two vectors is less than or equal to the sum of their norms.
##### Copyright 2020 The OpenFermion Developers ```python #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # FQE vs OpenFermion vs Cirq: Diagonal Coulomb Operators <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://quantumai.google/openfermion/fqe/tutorials/diagonal_coulomb_evolution">View on QuantumAI</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/quantumlib/OpenFermion-FQE/blob/master/docs/tutorials/diagonal_coulomb_evolution.ipynb">Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/quantumlib/OpenFermion-FQE/blob/master/docs/tutorials/diagonal_coulomb_evolution.ipynb">View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/OpenFermion-FQE/docs/tutorials/diagonal_coulomb_evolution.ipynb">Download notebook</a> </td> </table> Special routines are available for evolving under a diagonal Coulomb operator. This notebook describes how to use these built in routines and how they work. 
```python try: import fqe except ImportError: !pip install fqe --quiet ``` ```python from itertools import product import fqe from fqe.hamiltonians.diagonal_coulomb import DiagonalCoulomb import numpy as np import openfermion as of from scipy.linalg import expm ``` ```python #Utility function def uncompress_tei(tei_mat, notation='chemistry'): """ uncompress chemist notation integrals tei_tensor[i, k, j, l] = tei_mat[(i, j), (k, l)] [1, 1, 2, 2] = [1, 1, 2, 2] = [1, 1, 2, 2] = [1, 1, 2, 2] [i, j, k, l] = [k, l, i, j] = [j, i, l, k]* = [l, k, j, i]* For real we also have swap of i <> j and k <> l [j, i, k, l] = [l, k, i, j] = [i, j, l, k] = [k, l, j, i] tei_mat[(i, j), (k, l)] = int dr1 int dr2 phi_i(dr1) phi_j(dr1) O(r12) phi_k(dr1) phi_l(dr1) Physics notation is the notation that is used in FQE. Args: tei_mat: compressed two electron integral matrix Returns: uncompressed 4-electron integral tensor. No antisymmetry. """ if notation not in ['chemistry', 'physics']: return ValueError("notation can be [chemistry, physics]") norbs = int(0.5 * (np.sqrt(8 * tei_mat.shape[0] + 1) - 1)) basis = {} cnt = 0 for i, j in product(range(norbs), repeat=2): if i >= j: basis[(i, j)] = cnt cnt += 1 tei_tensor = np.zeros((norbs, norbs, norbs, norbs)) for i, j, k, l in product(range(norbs), repeat=4): if i >= j and k >= l: tei_tensor[i, j, k, l] = tei_mat[basis[(i, j)], basis[(k, l)]] tei_tensor[k, l, i, j] = tei_mat[basis[(i, j)], basis[(k, l)]] tei_tensor[j, i, l, k] = tei_mat[basis[(i, j)], basis[(k, l)]] tei_tensor[l, k, j, i] = tei_mat[basis[(i, j)], basis[(k, l)]] tei_tensor[j, i, k, l] = tei_mat[basis[(i, j)], basis[(k, l)]] tei_tensor[l, k, i, j] = tei_mat[basis[(i, j)], basis[(k, l)]] tei_tensor[i, j, l, k] = tei_mat[basis[(i, j)], basis[(k, l)]] tei_tensor[k, l, j, i] = tei_mat[basis[(i, j)], basis[(k, l)]] if notation == 'chemistry': return tei_tensor elif notation == 'physics': return np.asarray(tei_tensor.transpose(0, 2, 1, 3), order='C') return tei_tensor ``` The first 
example we will perform is diagonal Coulomb evolution on the Hartree-Fock state. The diagonal Coulomb operator is defined as \begin{align} V = \sum_{\alpha, \beta \in \{\uparrow, \downarrow\}}\sum_{p,q} V_{pq,pq}n_{p,\alpha}n_{q,\beta} \end{align} The number of free parpameters are $\mathcal{O}(N^{2})$ where $N$ is the rank of the spatial basis. The `DiagonalCoulomb` Hamiltonian takes either a generic 4-index tensor or the $N \times N$ matrix defining $V$. If the 4-index tensor is given the $N \times N$ matrix is constructed along with the diagonal correction. If the goal is to just evolve under $V$ it is recommended the user input the $N \times N$ matrix directly. All the terms in $V$ commute and thus we can evolve under $V$ exactly by counting the accumulated phase on each bitstring. To start out let's define a Hartree-Fock wavefunction for 4-orbitals and 2-electrons $S_{z} =0$. ```python norbs = 4 tedim = norbs * (norbs + 1) // 2 if (norbs // 2) % 2 == 0: n_elec = norbs // 2 else: n_elec = (norbs // 2) + 1 sz = 0 fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]]) fci_data = fqe_wfn.sector((n_elec, sz)) fci_graph = fci_data.get_fcigraph() hf_wf = np.zeros((fci_data.lena(), fci_data.lenb()), dtype=np.complex128) hf_wf[0, 0] = 1 # right most bit is zero orbital. fqe_wfn.set_wfn(strategy='from_data', raw_data={(n_elec, sz): hf_wf}) fqe_wfn.print_wfn() ``` Now we can define a random 2-electron operator $V$. To define $V$ we need a $4 \times 4$ matrix. 
We will generate this matrix by making a full random two-electron integral matrix and then just take the diagonal elements ```python tei_compressed = np.random.randn(tedim**2).reshape((tedim, tedim)) tei_compressed = 0.5 * (tei_compressed + tei_compressed.T) tei_tensor = uncompress_tei(tei_compressed, notation='physics') diagonal_coulomb = of.FermionOperator() diagonal_coulomb_mat = np.zeros((norbs, norbs)) for i, j in product(range(norbs), repeat=2): diagonal_coulomb_mat[i, j] = tei_tensor[i, j, i, j] for sigma, tau in product(range(2), repeat=2): diagonal_coulomb += of.FermionOperator( ((2 * i + sigma, 1), (2 * i + sigma, 0), (2 * j + tau, 1), (2 * j + tau, 0)), coefficient=diagonal_coulomb_mat[i, j]) dc_ham = DiagonalCoulomb(diagonal_coulomb_mat) ``` Evolution under $V$ can be computed by looking at each bitstring, seeing if $n_{p\alpha}n_{q\beta}$ is non-zero and then phasing that string by $V_{pq}$. For the Hartree-Fock state we can easily calculate this phase accumulation. The alpha and beta bitstrings are "0001" and "0001". ```python alpha_occs = [list(range(fci_graph.nalpha()))] beta_occs = [list(range(fci_graph.nbeta()))] occs = alpha_occs[0] + beta_occs[0] diag_ele = 0. for ind in occs: for jnd in occs: diag_ele += diagonal_coulomb_mat[ind, jnd] evolved_phase = np.exp(-1j * diag_ele) print(evolved_phase) # evolve FQE wavefunction evolved_hf_wfn = fqe_wfn.time_evolve(1, dc_ham) # check they the accumulated phase is equivalent! assert np.isclose(evolved_hf_wfn.get_coeff((n_elec, sz))[0, 0], evolved_phase) ``` We can now try this out for more than 2 electrons. Let's reinitialize a wavefunction on 6-orbitals with 4-electrons $S_{z} = 0$ to a random state. 
```python norbs = 6 tedim = norbs * (norbs + 1) // 2 if (norbs // 2) % 2 == 0: n_elec = norbs // 2 else: n_elec = (norbs // 2) + 1 sz = 0 fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]]) fqe_wfn.set_wfn(strategy='random') inital_coeffs = fqe_wfn.get_coeff((n_elec, sz)).copy() print("Random initial wavefunction") fqe_wfn.print_wfn() ``` We need to build our Diagoanl Coulomb operator For this bigger system. ```python tei_compressed = np.random.randn(tedim**2).reshape((tedim, tedim)) tei_compressed = 0.5 * (tei_compressed + tei_compressed.T) tei_tensor = uncompress_tei(tei_compressed, notation='physics') diagonal_coulomb = of.FermionOperator() diagonal_coulomb_mat = np.zeros((norbs, norbs)) for i, j in product(range(norbs), repeat=2): diagonal_coulomb_mat[i, j] = tei_tensor[i, j, i, j] for sigma, tau in product(range(2), repeat=2): diagonal_coulomb += of.FermionOperator( ((2 * i + sigma, 1), (2 * i + sigma, 0), (2 * j + tau, 1), (2 * j + tau, 0)), coefficient=diagonal_coulomb_mat[i, j]) dc_ham = DiagonalCoulomb(diagonal_coulomb_mat) ``` Now we can convert our wavefunction to a cirq wavefunction, evolve under the diagonal_coulomb operator we constructed and then compare the outputs. ```python cirq_wfn = fqe.to_cirq(fqe_wfn).reshape((-1, 1)) final_cirq_wfn = expm(-1j * of.get_sparse_operator(diagonal_coulomb)) @ cirq_wfn # recover a fqe wavefunction from_cirq_wfn = fqe.from_cirq(final_cirq_wfn.flatten(), 1.0E-8) ``` ```python fqe_wfn = fqe_wfn.time_evolve(1, dc_ham) print("Evolved wavefunction") fqe_wfn.print_wfn() ``` ```python print("From Cirq Evolution") from_cirq_wfn.print_wfn() assert np.allclose(from_cirq_wfn.get_coeff((n_elec, sz)), fqe_wfn.get_coeff((n_elec, sz))) print("Wavefunctions are equivalent") ``` Finally, we can compare against evolving each term of $V$ individually. 
```python fqe_wfn = fqe.Wavefunction([[n_elec, sz, norbs]]) fqe_wfn.set_wfn(strategy='from_data', raw_data={(n_elec, sz): inital_coeffs}) for term, coeff in diagonal_coulomb.terms.items(): op = of.FermionOperator(term, coefficient=coeff) fqe_wfn = fqe_wfn.time_evolve(1, op) assert np.allclose(from_cirq_wfn.get_coeff((n_elec, sz)), fqe_wfn.get_coeff((n_elec, sz))) print("Individual term evolution is equivalent") ``` ```python ```
#!/usr/bin/env julia using DocOpt using DataFrames using RCall using Glob import BioBenchmarks args = docopt("Usage: runbenchmarks.jl <revision>...") revisions = args["<revision>"] results = DataFrame() for revision in revisions filepath = BioBenchmarks.run(revision) r = readtable(filepath) r[:revision] = filepath[9:end-4] results = vcat(results, r) end # 64-bit integers may overflow in R for f in [:elapsed, :gc_time, :allocated] results[f] = map(Float64, results[f]) end serial = maximum(vcat(0, [parse(Int, splitext(fn)[1][12:end]) for fn in glob("benchmarks_*.png")])) plotfile = string("benchmarks_", serial + 1, ".png") @rput plotfile R""" library(ggplot2) ggplot($(results), aes(x=revision, y=elapsed, color=revision)) + geom_boxplot() + geom_point(position=position_jitterdodge()) + facet_wrap(~name, scales="free") + theme(axis.text.x=element_blank()) + expand_limits(y=0) ggsave(plotfile) """
Formal statement is: lemma hol_pal_lem3: assumes S: "convex S" "open S" and abc: "a \<in> S" "b \<in> S" "c \<in> S" and "d \<noteq> 0" and lek: "d \<bullet> a \<le> k" and holf1: "f holomorphic_on {z. z \<in> S \<and> d \<bullet> z < k}" and holf2: "f holomorphic_on {z. z \<in> S \<and> k < d \<bullet> z}" and contf: "continuous_on S f" shows "contour_integral (linepath a b) f + contour_integral (linepath b c) f + contour_integral (linepath c a) f = 0" Informal statement is: If $f$ is a continuous function on a convex open set $S$ and $f$ is holomorphic on the two open sets $S_1$ and $S_2$ where $S = S_1 \cup S_2$, then the contour integral of $f$ along the boundary of $S$ is zero.
function A = metric_05 ( p ) %*****************************************************************************80 % %% METRIC_05 evaluates metric #5 at any point. % % Discussion: % % This routine evaluates the matrix that determines the metric % at a point. % % It is not diagonal. % % Licensing: % % This code is distributed under the GNU LGPL license. % % Modified: % % 25 May 2006 % % Author: % % John Burkardt % % Parameters: % % Input, real P(2), the point at which the metric matrix is to % be evaluated. % % Output, real A[2,2], the metric matrix. % A = [ 2.0, 3.0; 3.0, 5.0 ]; return end
import IIT.PropInversion import IIT.ClarifyIndices /- mututal inductive Con : Type | nil : Con | ext : (Γ : Con) → Ty Γ → Con inductive Ty : Con → Types | base : (Γ : Con) → Ty Γ | wk : (Γ : Con) → (A B : Ty Γ) → Ty (ext Γ A) end -/ mutual inductive Conₑ : Type | nilₑ : Conₑ | extₑ : Conₑ → Tyₑ → Conₑ inductive Tyₑ : Type | baseₑ : Conₑ → Tyₑ | wkₑ : Conₑ → Tyₑ → Tyₑ → Tyₑ end open Conₑ Tyₑ mutual inductive Con_w : Conₑ → Prop | nil_w : Con_w nilₑ | ext_w : ∀ {Γ}, Con_w Γ → ∀ {A}, Ty_w Γ A → Con_w (extₑ Γ A) inductive Ty_w : Conₑ → Tyₑ → Prop | base_w : ∀ {Γ}, Con_w Γ → Ty_w Γ (baseₑ Γ) | wk_w : ∀ {Γ}, Con_w Γ → ∀ {A}, Ty_w Γ A → ∀ {B}, Ty_w Γ B → Ty_w (extₑ Γ A) (wkₑ Γ A B) end open Con_w Ty_w def Con := PSigma Con_w def Ty := fun (Γ : Con) => PSigma (Ty_w Γ.1) def nil : Con := ⟨nilₑ, nil_w⟩ def ext (Γ : Con) (A : Ty Γ) : Con := ⟨extₑ Γ.1 A.1, ext_w Γ.2 A.2⟩ def base (Γ : Con) : Ty Γ := ⟨baseₑ Γ.1, base_w Γ.2⟩ def wk (Γ : Con) (A B : Ty Γ) : Ty (ext Γ A) := ⟨wkₑ Γ.1 A.1 B.1, wk_w Γ.2 A.2 B.2⟩ section variable (Conₘ : Con → Type) (Tyₘ : ∀ {Γ}, Conₘ Γ → Ty Γ → Type) (nilₘ : Conₘ nil) (extₘ : ∀ {Γ} (Γₘ : Conₘ Γ) {A}, Tyₘ Γₘ A → Conₘ (ext Γ A)) (baseₘ : ∀ {Γ} (Γₘ : Conₘ Γ), Tyₘ Γₘ (base Γ)) (wkₘ : ∀ {Γ} (Γₘ : Conₘ Γ) {A} (Aₘ : Tyₘ Γₘ A) {B} (Bₘ : Tyₘ Γₘ B), Tyₘ (extₘ Γₘ Aₘ) (wk Γ A B)) mutual inductive Conᵣ : (Γ : Con) → Conₘ Γ → Type | nilᵣ : Conᵣ nil nilₘ | extᵣ : ∀ {Γ} {Γₘ : Conₘ Γ}, Conᵣ Γ Γₘ → ∀ {A} {Aₘ : Tyₘ Γₘ A}, Tyᵣ Γₘ A Aₘ → Conᵣ (ext Γ A) (extₘ Γₘ Aₘ) inductive Tyᵣ : {Γ : Con} → (Γₘ : Conₘ Γ) → (A : Ty Γ) → Tyₘ Γₘ A → Type | baseᵣ: ∀ {Γ} {Γₘ : Conₘ Γ}, Conᵣ Γ Γₘ → Tyᵣ Γₘ (base Γ) (baseₘ Γₘ) | wkᵣ: ∀ {Γ} {Γₘ : Conₘ Γ}, Conᵣ Γ Γₘ → ∀ {A} {Aₘ : Tyₘ Γₘ A}, Tyᵣ Γₘ A Aₘ → ∀ {B} {Bₘ : Tyₘ Γₘ B}, Tyᵣ Γₘ B Bₘ → Tyᵣ (extₘ Γₘ Aₘ) (wk Γ A B) (wkₘ Γₘ Aₘ Bₘ) end open Conᵣ Tyᵣ noncomputable def Con_tot (Γ : Con) : PSigma (Conᵣ Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ) := by cases Γ with | mk Γₑ Γ_w => ?_ apply Conₑ.recOn Γₑ (motive_1 := fun Γₑ => ∀ Γ_w, PSigma (Conᵣ Conₘ Tyₘ nilₘ 
extₘ baseₘ wkₘ ⟨Γₑ, Γ_w⟩)) (motive_2 := fun Aₑ => ∀ {Γ Γₘ} (Γᵣ : Conᵣ Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ Γₘ) A_w, PSigma (Tyᵣ Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γₘ ⟨Aₑ, A_w⟩)) · intro Γ_w exact PSigma.mk nilₘ nilᵣ · intro Δₑ Aₑ Δ_ih A_ih ctor_w inversion ctor_w with Δ_w A_w cases Δ_ih Δ_w with | mk Δₘ Δᵣ => ?_ cases A_ih Δᵣ A_w with | mk Aₘ Aᵣ => ?_ exact PSigma.mk (extₘ Δₘ Aₘ) (extᵣ Δᵣ Aᵣ) · intro Γₑ Γ_ih Δ Δₘ Δᵣ ctor_w cases Δ with | mk Δₑ Δ_w => ?_ simp only at ctor_w clarifyIndices ctor_w exact PSigma.mk (baseₘ Δₘ) (baseᵣ Δᵣ) · intro Δₑ Aₑ Bₑ Δ_ih A_ih B_ih Δ' Δ'ₘ Δ'ᵣ ctor_w cases Δ' with | mk Δ'ₑ Δ_w => ?_ simp only at ctor_w clarifyIndices ctor_w inversion ctor_w with Δ_w A_w B_w cases Δ'ᵣ with | @extᵣ Γ' Γ'ₘ Γ'ᵣ A' A'ₘ A'ᵣ => ?_ cases B_ih Γ'ᵣ B_w with | mk Bₘ Bᵣ => ?_ exact PSigma.mk (wkₘ Γ'ₘ A'ₘ Bₘ) (wkᵣ Γ'ᵣ A'ᵣ Bᵣ) noncomputable def Ty_tot (Γ : Con) (A : Ty Γ) : PSigma (Tyᵣ Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ (Con_tot Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ).1 A) := by cases Γ with | mk Γₑ Γ_w => ?_ cases A with | mk Aₑ A_w => ?_ apply Tyₑ.recOn Aₑ (motive_1 := fun Γₑ => ∀ Γ_w, PSigma (Conᵣ Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ ⟨Γₑ, Γ_w⟩)) (motive_2 := fun Aₑ => ∀ {Γ Γₘ} (Γᵣ : Conᵣ Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ Γₘ) A_w, PSigma (Tyᵣ Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γₘ ⟨Aₑ, A_w⟩)) · intro Γ_w exact PSigma.mk nilₘ nilᵣ · intro Δₑ Aₑ Δ_ih A_ih ctor_w inversion ctor_w with Δ_w A_w cases Δ_ih Δ_w with | mk Δₘ Δᵣ => ?_ cases A_ih Δᵣ A_w with | mk Aₘ Aᵣ => ?_ exact PSigma.mk (extₘ Δₘ Aₘ) (extᵣ Δᵣ Aᵣ) · intro Γₑ Γ_ih Δ Δₘ Δᵣ ctor_w cases Δ with | mk Δₑ Δ_w => ?_ simp only at ctor_w clarifyIndices ctor_w exact PSigma.mk (baseₘ Δₘ) (baseᵣ Δᵣ) · intro Δₑ Aₑ Bₑ Δ_ih A_ih B_ih Δ' Δ'ₘ Δ'ᵣ ctor_w cases Δ' with | mk Δ'ₑ Δ_w => ?_ simp only at ctor_w clarifyIndices ctor_w inversion ctor_w with Δ_w A_w B_w cases Δ'ᵣ with | @extᵣ Γ' Γ'ₘ Γ'ᵣ A' A'ₘ A'ᵣ => ?_ cases B_ih Γ'ᵣ B_w with | mk Bₘ Bᵣ => ?_ exact PSigma.mk (wkₘ Γ'ₘ A'ₘ Bₘ) (wkᵣ Γ'ᵣ A'ᵣ Bᵣ) · exact (Con_tot Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ ⟨Γₑ, Γ_w⟩).2 
noncomputable def Con.rec (Γ : Con) : Conₘ Γ := (Con_tot Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ).1 noncomputable def Ty.rec (Γ : Con) (A : Ty Γ) : Tyₘ (Con.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ) A := (Ty_tot Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ A).1 theorem nil_beta : Con.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ nil = nilₘ := rfl theorem ext_beta (Γ : Con) (A : Ty Γ) : Con.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ (ext Γ A) = extₘ (Con.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ) (Ty.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ A) := rfl theorem base_beta (Γ : Con) : Ty.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ (base Γ) = baseₘ (Con.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ) := rfl theorem wk_beta (Γ : Con) (A : Ty Γ) (B : Ty Γ) : Ty.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ (ext Γ A) (wk Γ A B) = wkₘ (Con.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ) (Ty.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ A) (Ty.rec Conₘ Tyₘ nilₘ extₘ baseₘ wkₘ Γ B) := rfl end
(* Title: HOL/Auth/n_germanSymIndex_lemma_on_inv__18.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_germanSymIndex Protocol Case Study*} theory n_germanSymIndex_lemma_on_inv__18 imports n_germanSymIndex_base begin section{*All lemmas on causal relation between inv__18 and some rule r*} lemma n_SendInv__part__0Vsinv__18: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__0 i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__18 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)))) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Empty))) (eqn (IVar (Para (Ident ''InvSet'') p__Inv2)) (Const true))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendInv__part__1Vsinv__18: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__1 i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__18 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)))) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Empty))) (eqn (IVar (Para (Ident ''InvSet'') p__Inv2)) (Const true))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendInvAckVsinv__18: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__18 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_RecvInvAckVsinv__18: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__18 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto moreover { assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))" have "?P1 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" have "?P2 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately have "invHoldForRule s f r (invariants N)" by satx } moreover { assume b1: "(i~=p__Inv2)" have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto moreover { assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))" have "?P1 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" have "?P2 s" proof(cut_tac a1 a2 b1 c1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately have "invHoldForRule s f r (invariants N)" by satx } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendGntSVsinv__18: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__18 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendGntEVsinv__18: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__18 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_RecvGntSVsinv__18: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__18 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_RecvGntEVsinv__18: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__18 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendReqE__part__1Vsinv__18: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_StoreVsinv__18: assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvReqEVsinv__18: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendReqE__part__0Vsinv__18: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendReqSVsinv__18: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvReqSVsinv__18: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__18 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done end
{-# OPTIONS --cubical --no-import-sorts --safe #-}

open import Cubical.Core.Everything
open import Cubical.Foundations.HLevels

-- The free semigroup over a set Aˢ, given in two forms: a direct
-- higher-inductive presentation (FreeS′) and a first-order list-like
-- presentation (FreeS), together with a proof that they coincide.
module Cubical.Algebra.Semigroup.Construct.Free {ℓ} (Aˢ : hSet ℓ) where

open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.Path
open import Cubical.Algebra.Semigroup
open import Cubical.Data.Empty.Polymorphic
open import Cubical.Data.Prod
open import Cubical.Relation.Binary.Reasoning.Equality

-- The carrier set and its set-truncation witness.
A = ⟨ Aˢ ⟩
isSetA = Aˢ .snd

------------------------------------------------------------------------
-- The direct free semigroup type

-- Higher inductive type: generators, a binary operation, an
-- associativity path constructor, and set-truncation.
data FreeS′ : Type ℓ where
  inj : A → FreeS′
  _•_ : Op₂ FreeS′
  •-assoc : Associative _•_
  squash : isSet FreeS′

-- Dependent eliminator for FreeS′.  The `assc` argument says the motive
-- respects the •-assoc path; set-valuedness of B handles `squash`.
elim : ∀ {ℓ′} {B : FreeS′ → Type ℓ′} →
       (∀ x → isSet (B x)) →
       (∀ x → B (inj x)) →
       (op : ∀ x y → B x → B y → B (x • y)) →
       (∀ x y z (a : B x) (b : B y) (c : B z) →
         PathP (λ i → B (•-assoc x y z i)) (op _ _ (op _ _ a b) c) (op _ _ a (op _ _ b c))) →
       (x : FreeS′) → B x
elim isSetB f op assc (inj x) = f x
elim isSetB f op assc (x • y) =
  op x y (elim isSetB f op assc x) (elim isSetB f op assc y)
elim isSetB f op assc (•-assoc x y z i) =
  assc x y z (elim isSetB f op assc x) (elim isSetB f op assc y)
             (elim isSetB f op assc z) i
elim isSetB f op assc (squash x y p q i j) =
  -- Coherence for the set-truncation constructor, reduced to isSetB.
  isOfHLevel→isOfHLevelDep 2 isSetB
    (elim isSetB f op assc x) (elim isSetB f op assc y)
    (cong (elim isSetB f op assc) p) (cong (elim isSetB f op assc) q)
    (squash x y p q) i j

------------------------------------------------------------------------
-- The simplified free semigroup type

infixl 5 _□
infixr 4 _∶_

-- Non-empty lists over A: `x □` is a singleton, `x ∶ xs` is cons.
data FreeS : Type ℓ where
  _□ : A → FreeS
  _∶_ : A → FreeS → FreeS

-- Encode/decode characterisation of paths in FreeS, used to show FreeS
-- is a set without a truncation constructor.
module FreeSPath where
  -- Cover xs ys is the "expected" identity type of xs ≡ ys.
  Cover : FreeS → FreeS → Type ℓ
  Cover (x □) (y □) = x ≡ y
  Cover (x □) (y ∶ ys) = ⊥
  Cover (x ∶ xs) (y □) = ⊥
  Cover (x ∶ xs) (y ∶ ys) = (x ≡ y) × Cover xs ys

  isPropCover : ∀ xs ys → isProp (Cover xs ys)
  isPropCover (x □) (y □) = isSetA x y
  isPropCover (x □) (y ∶ ys) = isProp⊥
  isPropCover (x ∶ xs) (y □) = isProp⊥
  isPropCover (x ∶ xs) (y ∶ ys) = isPropProd (isSetA x y) (isPropCover xs ys)

  reflCover : ∀ xs → Cover xs xs
  reflCover (x □) = refl
  reflCover (x ∶ xs) = refl , reflCover xs

  -- Paths decode into covers (by path induction from reflCover).
  encode : ∀ xs ys → xs ≡ ys → Cover xs ys
  encode xs _ = J (λ ys _ → Cover xs ys) (reflCover xs)

  encodeRefl : ∀ xs → encode xs xs refl ≡ reflCover xs
  encodeRefl xs = JRefl (λ ys _ → Cover xs ys) (reflCover xs)

  -- Covers re-encode into paths.
  decode : ∀ xs ys → Cover xs ys → xs ≡ ys
  decode (x □) (y □) p = cong _□ p
  decode (x □) (y ∶ ys) ()
  decode (x ∶ xs) (y □) ()
  decode (x ∶ xs) (y ∶ ys) (p , q) = cong₂ _∶_ p (decode xs ys q)

  decodeRefl : ∀ xs → decode xs xs (reflCover xs) ≡ refl
  decodeRefl (x □) = refl
  decodeRefl (x ∶ xs) = cong (cong₂ _∶_ refl) (decodeRefl xs)

  decodeEncode : ∀ xs ys → (p : xs ≡ ys) → decode xs ys (encode xs ys p) ≡ p
  decodeEncode xs _ =
    J (λ ys p → decode xs ys (encode xs ys p) ≡ p)
      (cong (decode xs xs) (encodeRefl xs) ∙ decodeRefl xs)

-- FreeS is a set: its path types retract onto the propositional Cover.
isSetFreeS : isSet FreeS
isSetFreeS x y =
  isOfHLevelRetract 1 (FreeSPath.encode x y) (FreeSPath.decode x y)
    (FreeSPath.decodeEncode x y) (FreeSPath.isPropCover x y)

-- Concatenation of non-empty lists; the semigroup operation on FreeS.
_++_ : Op₂ FreeS
(x □) ++ ys = x ∶ ys
(x ∶ xs) ++ ys = x ∶ xs ++ ys

++-assoc : Associative _++_
++-assoc (x □) ys zs = refl
++-assoc (x ∶ xs) ys zs = cong (x ∶_) (++-assoc xs ys zs)

------------------------------------------------------------------------
-- Proving the two types are equivalent

FreeS→FreeS′ : FreeS → FreeS′
FreeS→FreeS′ (x □) = inj x
FreeS→FreeS′ (x ∶ xs) = inj x • FreeS→FreeS′ xs

FreeS′→FreeS : FreeS′ → FreeS
FreeS′→FreeS = elim (λ _ → isSetFreeS) _□ (λ _ _ → _++_) (λ _ _ _ → ++-assoc)

FreeS→FreeS′→FreeS : retract FreeS→FreeS′ FreeS′→FreeS
FreeS→FreeS′→FreeS (x □) = refl
FreeS→FreeS′→FreeS (x ∶ xs) = cong (x ∶_) (FreeS→FreeS′→FreeS xs)

-- FreeS→FreeS′ sends ++ to •, up to the associativity path.
++-hom : Homomorphic₂ FreeS→FreeS′ _++_ _•_
++-hom (x □) ys = refl
++-hom (x ∶ xs) ys =
  cong (inj x •_) (++-hom xs ys) ∙
  sym (•-assoc (inj x) (FreeS→FreeS′ xs) (FreeS→FreeS′ ys))

FreeS′→FreeS→FreeS′ : section FreeS→FreeS′ FreeS′→FreeS
FreeS′→FreeS→FreeS′ =
  elim (λ _ → isSet→isGroupoid squash _ _) (λ _ → refl) sectionOp
       (λ _ _ _ _ _ _ → isOfHLevelPathP' 0 (squash _ _) _ _ .fst)
  where
  sectionOp : ∀ x y → FreeS→FreeS′ (FreeS′→FreeS x) ≡ x →
              FreeS→FreeS′ (FreeS′→FreeS y) ≡ y →
              FreeS→FreeS′ (FreeS′→FreeS (x • y)) ≡ (x • y)
  sectionOp x y p q =
    FreeS→FreeS′ (FreeS′→FreeS (x • y))                          ≡⟨⟩
    FreeS→FreeS′ (FreeS′→FreeS x ++ FreeS′→FreeS y)              ≡⟨ ++-hom (FreeS′→FreeS x) (FreeS′→FreeS y) ⟩
    FreeS→FreeS′ (FreeS′→FreeS x) • FreeS→FreeS′ (FreeS′→FreeS y) ≡⟨ cong₂ _•_ p q ⟩
    x • y                                                        ∎

FreeS≃FreeS′ : FreeS ≃ FreeS′
FreeS≃FreeS′ =
  isoToEquiv (iso FreeS→FreeS′ FreeS′→FreeS FreeS′→FreeS→FreeS′ FreeS→FreeS′→FreeS)

FreeS≡FreeS′ : FreeS ≡ FreeS′
FreeS≡FreeS′ = ua FreeS≃FreeS′

------------------------------------------------------------------------
-- Semigroup Algebra

FreeS′-isSemigroup : IsSemigroup FreeS′ _•_
FreeS′-isSemigroup = record
  { isMagma = record { is-set = squash }
  ; assoc = •-assoc
  }

FreeS-isSemigroup : IsSemigroup FreeS _++_
FreeS-isSemigroup = record
  { isMagma = record { is-set = isSetFreeS }
  ; assoc = ++-assoc
  }

FreeSemigroup′ : Semigroup ℓ
FreeSemigroup′ = record { isSemigroup = FreeS′-isSemigroup }

FreeSemigroup : Semigroup ℓ
FreeSemigroup = record { isSemigroup = FreeS-isSemigroup }

-- The two semigroup structures agree along the carrier equivalence…
EquivFreeS : SemigroupEquiv FreeSemigroup FreeSemigroup′
EquivFreeS = record { eq = FreeS≃FreeS′ ; isHom = ++-hom }

-- …so the semigroups themselves are equal, by univalence for semigroups.
FreeSemigroup≡ : FreeSemigroup ≡ FreeSemigroup′
FreeSemigroup≡ = uaSemigroup EquivFreeS
module System.File.Buffer

import public System.File.Error
import System.File.Handle
import System.File.Meta
import System.File.Mode
import System.File.ReadWrite
import System.File.Support
import public System.File.Types

import Data.Buffer

%default total

%foreign support "idris2_readBufferData"
         "node:lambda:(f,b,l,m) => require('fs').readSync(f.fd,b,l,m)"
         "jvm:readFromFile(java/nio/channels/ReadableByteChannel io/github/mmhelloworld/idrisjvm/runtime/IdrisBuffer int int int),io/github/mmhelloworld/idrisjvm/runtime/IdrisBuffer"
prim__readBufferData : FilePtr -> Buffer ->
                       (offset : Int) -> (maxbytes : Int) -> PrimIO Int

%foreign support "idris2_writeBufferData"
         "node:lambda:(f,b,l,m) => require('fs').writeSync(f.fd,b,l,m)"
         "jvm:writeToFile(java/nio/channels/WritableByteChannel io/github/mmhelloworld/idrisjvm/runtime/IdrisBuffer int int int),io/github/mmhelloworld/idrisjvm/runtime/IdrisBuffer"
prim__writeBufferData : FilePtr -> Buffer ->
                        (offset : Int) -> (maxbytes : Int) -> PrimIO Int

||| Read up to `maxbytes` bytes from an open file into `buf`, starting at
||| `offset` within the buffer.
||| Returns `Left FileReadError` when the underlying primitive reports a
||| negative count.
export
readBufferData : HasIO io => File -> Buffer ->
                 (offset : Int) -> -- position in buffer to start adding
                 (maxbytes : Int) -> -- maximums size to read, which must not
                                     -- exceed buffer length
                 io (Either FileError ())
readBufferData (FHandle h) buf offset max
    = do read <- primIO (prim__readBufferData h buf offset max)
         if read >= 0
            then pure (Right ())
            else pure (Left FileReadError)

||| Write up to `maxbytes` bytes from `buf` to an open file, starting at
||| `offset` within the buffer.
||| Returns `Left FileWriteError` when the underlying primitive reports a
||| negative count.
export
writeBufferData : HasIO io => File -> Buffer ->
                  (offset : Int) -> -- position in buffer to write from
                  (maxbytes : Int) -> -- maximums size to write, which must not
                                      -- exceed buffer length
                  io (Either FileError ())
writeBufferData (FHandle h) buf offset max
    = do written <- primIO (prim__writeBufferData h buf offset max)
         if written >= 0
            then pure (Right ())
            else pure (Left FileWriteError)

||| Write the first `max` bytes of `buf` to the file at path `fn`,
||| truncating any existing contents.
||| The file handle is closed on every exit path (previously it was leaked
||| when the write failed).
export
writeBufferToFile : HasIO io => String -> Buffer -> Int -> io (Either FileError ())
writeBufferToFile fn buf max
    = do Right f <- openFile fn WriteTruncate
           | Left err => pure (Left err)
         Right ok <- writeBufferData f buf 0 max
           -- bug fix: close the handle before reporting the write error
           | Left err => do closeFile f
                            pure (Left err)
         closeFile f
         pure (Right ok)

||| Read the entire file at path `fn` into a freshly allocated buffer.
||| Allocation failure is reported as `FileReadError`.
||| The file handle is closed on every exit path (previously it was leaked
||| when sizing, allocation, or reading failed).
export
createBufferFromFile : HasIO io => String -> io (Either FileError Buffer)
createBufferFromFile fn
    = do Right f <- openFile fn Read
           | Left err => pure (Left err)
         Right size <- fileSize f
           -- bug fix: close the handle before reporting the error
           | Left err => do closeFile f
                            pure (Left err)
         Just buf <- newBuffer size
           -- bug fix: close the handle when allocation fails
           | Nothing => do closeFile f
                           pure (Left FileReadError)
         Right ok <- readBufferData f buf 0 size
           -- bug fix: close the handle before reporting the read error
           | Left err => do closeFile f
                            pure (Left err)
         closeFile f
         pure (Right buf)
/* ** ** G.Lohmann, MPI-KYB, May 2018 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "viaio/Vlib.h" #include "viaio/VImage.h" #include "viaio/mu.h" #include "viaio/option.h" #include <gsl/gsl_matrix.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_math.h> extern void VCheckImage(VImage src); extern VImage VFilterGauss3d (VImage src,VImage dest,double *sigma); void Gauss4d(VAttrList list,double *sigma) { /* get pointers to image data */ double u=0; int j,b,r,c; int nrows=0,ncols=0,nt=0; int nslices = VAttrListNumImages(list); VImage *src = VAttrListGetImages(list,nslices); VImageDimensions(src,nslices,&nt,&nrows,&ncols); fprintf(stderr," image dimensions: %d x %d x %d, nt: %d\n",nslices,nrows,ncols,nt); VImage tmp = VCreateImage(nslices,nrows,ncols,VFloatRepn); VFillImage(tmp,VAllBands,0); VImage dest = VCreateImage(nslices,nrows,ncols,VFloatRepn); VFillImage(dest,VAllBands,0); for (j=0; j<nt; j++) { if (j%5==0) fprintf(stderr," %5d of %d\r",j,nt); for (b=0; b<nslices; b++) { for (r=0; r<nrows; r++) { for (c=0; c<ncols; c++) { u = VGetPixel(src[b],j,r,c); VSetPixel(tmp,b,r,c,u); } } } dest = VFilterGauss3d (tmp,dest,sigma); for (b=0; b<nslices; b++) { for (r=0; r<nrows; r++) { for (c=0; c<ncols; c++) { u = VGetPixel(dest,b,r,c); VSetPixel(src[b],j,r,c,u); } } } } fprintf(stderr,"\n"); VDestroyImage(tmp); VDestroyImage(dest); }
[GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β ⊢ UniformInducing f ↔ UniformContinuous f ∧ comap (Prod.map f f) (𝓤 β) ≤ 𝓤 α [PROOFSTEP] rw [uniformInducing_iff, UniformContinuous, tendsto_iff_comap, le_antisymm_iff, and_comm] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β ⊢ 𝓤 α ≤ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) ∧ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) ≤ 𝓤 α ↔ 𝓤 α ≤ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) ∧ comap (Prod.map f f) (𝓤 β) ≤ 𝓤 α [PROOFSTEP] rfl [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ ι : Sort u_1 ι' : Sort u_2 p : ι → Prop p' : ι' → Prop s : ι → Set (α × α) s' : ι' → Set (β × β) h : HasBasis (𝓤 α) p s h' : HasBasis (𝓤 β) p' s' f : α → β ⊢ UniformInducing f ↔ (∀ (i : ι'), p' i → ∃ j, p j ∧ ∀ (x y : α), (x, y) ∈ s j → (f x, f y) ∈ s' i) ∧ ∀ (j : ι), p j → ∃ i, p' i ∧ ∀ (x y : α), (f x, f y) ∈ s' i → (x, y) ∈ s j [PROOFSTEP] simp [uniformInducing_iff', h.uniformContinuous_iff h', (h'.comap _).le_basis_iff h, subset_def] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β h : ∀ (s : Set (α × α)), s ∈ 𝓤 α ↔ ∃ t, t ∈ 𝓤 β ∧ ∀ (x y : α), (f x, f y) ∈ t → (x, y) ∈ s ⊢ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) = 𝓤 α [PROOFSTEP] simp [eq_comm, Filter.ext_iff, subset_def, h] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ ⊢ comap (fun x => (id x.fst, id x.snd)) (𝓤 α) = 𝓤 α [PROOFSTEP] rw [← Prod.map_def, Prod.map_id, comap_id] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ g : β → γ hg : UniformInducing g f : α → β hf : UniformInducing f ⊢ comap (fun x => ((g ∘ f) x.fst, (g ∘ f) x.snd)) (𝓤 γ) = 𝓤 α [PROOFSTEP] rw [← hf.1, ← hg.1, comap_comap] 
[GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ g : β → γ hg : UniformInducing g f : α → β hf : UniformInducing f ⊢ comap (fun x => ((g ∘ f) x.fst, (g ∘ f) x.snd)) (𝓤 γ) = comap ((fun x => (g x.fst, g x.snd)) ∘ fun x => (f x.fst, f x.snd)) (𝓤 γ) [PROOFSTEP] rfl [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β hf : UniformInducing f F : Filter α ⊢ Cauchy (map f F) ↔ Cauchy F [PROOFSTEP] simp only [Cauchy, map_neBot_iff, prod_map_map_eq, map_le_iff_le_comap, ← hf.comap_uniformity] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β g : β → γ hf : UniformContinuous f hg : UniformContinuous g hgf : UniformInducing (g ∘ f) ⊢ UniformInducing f [PROOFSTEP] refine' ⟨le_antisymm _ hf.le_comap⟩ [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β g : β → γ hf : UniformContinuous f hg : UniformContinuous g hgf : UniformInducing (g ∘ f) ⊢ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) ≤ 𝓤 α [PROOFSTEP] rw [← hgf.1, ← Prod.map_def, ← Prod.map_def, ← Prod.map_comp_map f f g g, ← comap_comap] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β g : β → γ hf : UniformContinuous f hg : UniformContinuous g hgf : UniformInducing (g ∘ f) ⊢ comap (Prod.map f f) (𝓤 β) ≤ comap (Prod.map f f) (comap (Prod.map g g) (𝓤 γ)) [PROOFSTEP] exact comap_mono hg.le_comap [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β g : β → γ hg : UniformInducing g ⊢ UniformContinuous f ↔ UniformContinuous (g ∘ f) [PROOFSTEP] dsimp only [UniformContinuous, Tendsto] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β g : β → γ hg : 
UniformInducing g ⊢ map (fun x => (f x.fst, f x.snd)) (𝓤 α) ≤ 𝓤 β ↔ map (fun x => ((g ∘ f) x.fst, (g ∘ f) x.snd)) (𝓤 α) ≤ 𝓤 γ [PROOFSTEP] rw [← hg.comap_uniformity, ← map_le_iff_le_comap, Filter.map_map] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β g : β → γ hg : UniformInducing g ⊢ map ((fun x => (g x.fst, g x.snd)) ∘ fun x => (f x.fst, f x.snd)) (𝓤 α) ≤ 𝓤 γ ↔ map (fun x => ((g ∘ f) x.fst, (g ∘ f) x.snd)) (𝓤 α) ≤ 𝓤 γ [PROOFSTEP] rfl [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β h : UniformInducing f ⊢ Inducing f [PROOFSTEP] obtain rfl := h.comap_uniformSpace [GOAL] α : Type u β : Type v γ : Type w inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β h : UniformInducing f ⊢ Inducing f [PROOFSTEP] exact inducing_induced f [GOAL] α : Type u β : Type v γ : Type w inst✝⁴ : UniformSpace α inst✝³ : UniformSpace β inst✝² : UniformSpace γ α' : Type u_1 β' : Type u_2 inst✝¹ : UniformSpace α' inst✝ : UniformSpace β' e₁ : α → α' e₂ : β → β' h₁ : UniformInducing e₁ h₂ : UniformInducing e₂ ⊢ comap (fun x => ((e₁ x.fst.fst, e₂ x.fst.snd), e₁ x.snd.fst, e₂ x.snd.snd)) (𝓤 (α' × β')) = 𝓤 (α × β) [PROOFSTEP] simp [(· ∘ ·), uniformity_prod, ← h₁.1, ← h₂.1, comap_inf, comap_comap] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β ⊢ UniformEmbedding f ↔ Injective f ∧ UniformContinuous f ∧ comap (Prod.map f f) (𝓤 β) ≤ 𝓤 α [PROOFSTEP] rw [uniformEmbedding_iff, and_comm, uniformInducing_iff'] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ ι : Sort u_1 ι' : Sort u_2 p : ι → Prop p' : ι' → Prop s : ι → Set (α × α) s' : ι' → Set (β × β) h : HasBasis (𝓤 α) p s h' : HasBasis (𝓤 β) p' s' f : α → β ⊢ UniformEmbedding f ↔ Injective f ∧ (∀ (i : ι'), p' i → ∃ j, p j ∧ ∀ (x y : α), (x, y) ∈ s j → (f x, f y) ∈ s' i) 
∧ ∀ (j : ι), p j → ∃ i, p' i ∧ ∀ (x y : α), (f x, f y) ∈ s' i → (x, y) ∈ s j [PROOFSTEP] rw [uniformEmbedding_iff, and_comm, h.uniformInducing_iff h'] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ ι : Sort u_1 ι' : Sort u_2 p : ι → Prop p' : ι' → Prop s : ι → Set (α × α) s' : ι' → Set (β × β) h : HasBasis (𝓤 α) p s h' : HasBasis (𝓤 β) p' s' f : α → β ⊢ UniformEmbedding f ↔ Injective f ∧ UniformContinuous f ∧ ∀ (j : ι), p j → ∃ i, p' i ∧ ∀ (x y : α), (f x, f y) ∈ s' i → (x, y) ∈ s j [PROOFSTEP] simp only [h.uniformEmbedding_iff' h', h.uniformContinuous_iff h'] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s t : Set α hst : s ⊆ t ⊢ comap (fun x => (inclusion hst x.fst, inclusion hst x.snd)) (𝓤 ↑t) = 𝓤 ↑s [PROOFSTEP] rw [uniformity_subtype, uniformity_subtype, comap_comap] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s t : Set α hst : s ⊆ t ⊢ comap ((fun q => (↑q.fst, ↑q.snd)) ∘ fun x => (inclusion hst x.fst, inclusion hst x.snd)) (𝓤 α) = comap (fun q => (↑q.fst, ↑q.snd)) (𝓤 α) [PROOFSTEP] rfl [GOAL] α✝ : Type u β✝ : Type v γ : Type w inst✝⁴ : UniformSpace α✝ inst✝³ : UniformSpace β✝ inst✝² : UniformSpace γ α : Type u_1 β : Type u_2 inst✝¹ : UniformSpace α inst✝ : UniformSpace β f : α ≃ β h₁ : UniformContinuous ↑f h₂ : UniformContinuous ↑f.symm ⊢ comap (Prod.map ↑f ↑f) (𝓤 β) ≤ 𝓤 α [PROOFSTEP] rwa [← Equiv.prodCongr_apply, ← map_equiv_symm] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) hs : s ∈ 𝓤 α x : α × α h : x ∈ Prod.map Sum.inl Sum.inl ⁻¹' (Prod.map Sum.inl Sum.inl '' s ∪ range (Prod.map Sum.inr Sum.inr)) ⊢ x ∈ s [PROOFSTEP] simpa using h [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (β × β) hs : s ∈ 𝓤 β x : β × β h : x 
∈ Prod.map Sum.inr Sum.inr ⁻¹' (range (Prod.map Sum.inl Sum.inl) ∪ Prod.map Sum.inr Sum.inr '' s) ⊢ x ∈ s [PROOFSTEP] simpa using h [GOAL] α✝ : Type u β : Type v γ : Type w inst✝² : UniformSpace α✝ inst✝¹ : UniformSpace β inst✝ : UniformSpace γ α : Type u_1 f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s ⊢ comap (Prod.map f f) (𝓤 β) = 𝓟 idRel [PROOFSTEP] refine' le_antisymm _ (@refl_le_uniformity α (UniformSpace.comap f _)) [GOAL] α✝ : Type u β : Type v γ : Type w inst✝² : UniformSpace α✝ inst✝¹ : UniformSpace β inst✝ : UniformSpace γ α : Type u_1 f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s ⊢ comap (Prod.map f f) (𝓤 β) ≤ 𝓟 idRel [PROOFSTEP] calc comap (Prod.map f f) (𝓤 β) ≤ comap (Prod.map f f) (𝓟 s) := comap_mono (le_principal_iff.2 hs) _ = 𝓟 (Prod.map f f ⁻¹' s) := comap_principal _ ≤ 𝓟 idRel := principal_mono.2 ?_ [GOAL] α✝ : Type u β : Type v γ : Type w inst✝² : UniformSpace α✝ inst✝¹ : UniformSpace β inst✝ : UniformSpace γ α : Type u_1 f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s ⊢ Prod.map f f ⁻¹' s ⊆ idRel [PROOFSTEP] rintro ⟨x, y⟩ [GOAL] case mk α✝ : Type u β : Type v γ : Type w inst✝² : UniformSpace α✝ inst✝¹ : UniformSpace β inst✝ : UniformSpace γ α : Type u_1 f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s x y : α ⊢ (x, y) ∈ Prod.map f f ⁻¹' s → (x, y) ∈ idRel [PROOFSTEP] simpa [not_imp_not] using @hf x y [GOAL] α✝ : Type u β : Type v γ : Type w inst✝² : UniformSpace α✝ inst✝¹ : UniformSpace β inst✝ : UniformSpace γ α : Type u_1 f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s ⊢ UniformEmbedding f [PROOFSTEP] let _ : UniformSpace α := ⊥ [GOAL] α✝ : Type u β : Type v γ : Type w inst✝² : UniformSpace α✝ inst✝¹ : UniformSpace β inst✝ : UniformSpace γ α : Type u_1 f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s x✝ : UniformSpace α := ⊥ ⊢ UniformEmbedding f 
[PROOFSTEP] have := discreteTopology_bot α [GOAL] α✝ : Type u β : Type v γ : Type w inst✝² : UniformSpace α✝ inst✝¹ : UniformSpace β inst✝ : UniformSpace γ α : Type u_1 f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s x✝ : UniformSpace α := ⊥ this : DiscreteTopology α ⊢ UniformEmbedding f [PROOFSTEP] exact UniformInducing.uniformEmbedding ⟨comap_uniformity_of_spaced_out hs hf⟩ [GOAL] α✝ : Type u β : Type v γ : Type w inst✝⁵ : UniformSpace α✝ inst✝⁴ : UniformSpace β inst✝³ : UniformSpace γ α : Type u_1 inst✝² : TopologicalSpace α inst✝¹ : DiscreteTopology α inst✝ : SeparatedSpace β f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s ⊢ ClosedEmbedding f [PROOFSTEP] [email protected]_bot α _ _ with rfl [GOAL] α✝ : Type u β : Type v γ : Type w inst✝⁴ : UniformSpace α✝ inst✝³ : UniformSpace β inst✝² : UniformSpace γ α : Type u_1 inst✝¹ : SeparatedSpace β f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s inst✝ : DiscreteTopology α ⊢ ClosedEmbedding f [PROOFSTEP] let _ : UniformSpace α := ⊥ [GOAL] α✝ : Type u β : Type v γ : Type w inst✝⁴ : UniformSpace α✝ inst✝³ : UniformSpace β inst✝² : UniformSpace γ α : Type u_1 inst✝¹ : SeparatedSpace β f : α → β s : Set (β × β) hs : s ∈ 𝓤 β hf : Pairwise fun x y => ¬(f x, f y) ∈ s inst✝ : DiscreteTopology α x✝ : UniformSpace α := ⊥ ⊢ ClosedEmbedding f [PROOFSTEP] exact { (uniformEmbedding_of_spaced_out hs hf).embedding with closed_range := isClosed_range_of_spaced_out hs hf } [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs : s ∈ 𝓤 α ⊢ ∃ a, closure (e '' {a' | (a, a') ∈ s}) ∈ 𝓝 b [PROOFSTEP] obtain ⟨U, ⟨hU, hUo, hsymm⟩, hs⟩ : ∃ U, (U ∈ 𝓤 β ∧ IsOpen U ∧ SymmetricRel U) ∧ Prod.map e e ⁻¹' U ⊆ s := by rwa [← he₁.comap_uniformity, (uniformity_hasBasis_open_symmetric.comap _).mem_iff] at hs [GOAL] α : Type u β 
: Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs : s ∈ 𝓤 α ⊢ ∃ U, (U ∈ 𝓤 β ∧ IsOpen U ∧ SymmetricRel U) ∧ Prod.map e e ⁻¹' U ⊆ s [PROOFSTEP] rwa [← he₁.comap_uniformity, (uniformity_hasBasis_open_symmetric.comap _).mem_iff] at hs [GOAL] case intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs✝ : s ∈ 𝓤 α U : Set (β × β) hs : Prod.map e e ⁻¹' U ⊆ s hU : U ∈ 𝓤 β hUo : IsOpen U hsymm : SymmetricRel U ⊢ ∃ a, closure (e '' {a' | (a, a') ∈ s}) ∈ 𝓝 b [PROOFSTEP] rcases he₂.dense.mem_nhds (UniformSpace.ball_mem_nhds b hU) with ⟨a, ha⟩ [GOAL] case intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs✝ : s ∈ 𝓤 α U : Set (β × β) hs : Prod.map e e ⁻¹' U ⊆ s hU : U ∈ 𝓤 β hUo : IsOpen U hsymm : SymmetricRel U a : α ha : e a ∈ UniformSpace.ball b U ⊢ ∃ a, closure (e '' {a' | (a, a') ∈ s}) ∈ 𝓝 b [PROOFSTEP] refine ⟨a, mem_of_superset ?_ (closure_mono <| image_subset _ <| ball_mono hs a)⟩ [GOAL] case intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs✝ : s ∈ 𝓤 α U : Set (β × β) hs : Prod.map e e ⁻¹' U ⊆ s hU : U ∈ 𝓤 β hUo : IsOpen U hsymm : SymmetricRel U a : α ha : e a ∈ UniformSpace.ball b U ⊢ closure (e '' UniformSpace.ball a (Prod.map e e ⁻¹' U)) ∈ 𝓝 b [PROOFSTEP] have ho : IsOpen (UniformSpace.ball (e a) U) := UniformSpace.isOpen_ball (e a) hUo [GOAL] case intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace 
γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs✝ : s ∈ 𝓤 α U : Set (β × β) hs : Prod.map e e ⁻¹' U ⊆ s hU : U ∈ 𝓤 β hUo : IsOpen U hsymm : SymmetricRel U a : α ha : e a ∈ UniformSpace.ball b U ho : IsOpen (UniformSpace.ball (e a) U) ⊢ closure (e '' UniformSpace.ball a (Prod.map e e ⁻¹' U)) ∈ 𝓝 b [PROOFSTEP] refine mem_of_superset (ho.mem_nhds <| (mem_ball_symmetry hsymm).2 ha) fun y hy => ?_ [GOAL] case intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs✝ : s ∈ 𝓤 α U : Set (β × β) hs : Prod.map e e ⁻¹' U ⊆ s hU : U ∈ 𝓤 β hUo : IsOpen U hsymm : SymmetricRel U a : α ha : e a ∈ UniformSpace.ball b U ho : IsOpen (UniformSpace.ball (e a) U) y : β hy : y ∈ UniformSpace.ball (e a) U ⊢ y ∈ closure (e '' UniformSpace.ball a (Prod.map e e ⁻¹' U)) [PROOFSTEP] refine mem_closure_iff_nhds.2 fun V hV => ?_ [GOAL] case intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs✝ : s ∈ 𝓤 α U : Set (β × β) hs : Prod.map e e ⁻¹' U ⊆ s hU : U ∈ 𝓤 β hUo : IsOpen U hsymm : SymmetricRel U a : α ha : e a ∈ UniformSpace.ball b U ho : IsOpen (UniformSpace.ball (e a) U) y : β hy : y ∈ UniformSpace.ball (e a) U V : Set β hV : V ∈ 𝓝 y ⊢ Set.Nonempty (V ∩ e '' UniformSpace.ball a (Prod.map e e ⁻¹' U)) [PROOFSTEP] rcases he₂.dense.mem_nhds (inter_mem hV (ho.mem_nhds hy)) with ⟨x, hxV, hxU⟩ [GOAL] case intro.intro.intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set (α × α) e : α → β b : β he₁ : UniformInducing e he₂ : DenseInducing e hs✝ : s ∈ 𝓤 α U : Set (β × β) hs : Prod.map e e ⁻¹' U ⊆ s hU : U ∈ 𝓤 β hUo : IsOpen U hsymm : SymmetricRel U a : α ha : e a ∈ 
UniformSpace.ball b U ho : IsOpen (UniformSpace.ball (e a) U) y : β hy : y ∈ UniformSpace.ball (e a) U V : Set β hV : V ∈ 𝓝 y x : α hxV : e x ∈ V hxU : e x ∈ UniformSpace.ball (e a) U ⊢ Set.Nonempty (V ∩ e '' UniformSpace.ball a (Prod.map e e ⁻¹' U)) [PROOFSTEP] exact ⟨e x, hxV, mem_image_of_mem e hxU⟩ [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ p : α → Prop e : α → β ue : UniformEmbedding e de : DenseEmbedding e ⊢ comap (fun x => (DenseEmbedding.subtypeEmb p e x.fst, DenseEmbedding.subtypeEmb p e x.snd)) (𝓤 { x // x ∈ closure (e '' {x | p x}) }) = 𝓤 { x // p x } [PROOFSTEP] simp [comap_comap, (· ∘ ·), DenseEmbedding.subtypeEmb, uniformity_subtype, ue.comap_uniformity.symm] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m hs : IsComplete (m '' s) ⊢ IsComplete s [PROOFSTEP] intro f hf hfs [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m hs : IsComplete (m '' s) f : Filter α hf : Cauchy f hfs : f ≤ 𝓟 s ⊢ ∃ x, x ∈ s ∧ f ≤ 𝓝 x [PROOFSTEP] rw [le_principal_iff] at hfs [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m hs : IsComplete (m '' s) f : Filter α hf : Cauchy f hfs : s ∈ f ⊢ ∃ x, x ∈ s ∧ f ≤ 𝓝 x [PROOFSTEP] obtain ⟨_, ⟨x, hx, rfl⟩, hyf⟩ : ∃ y ∈ m '' s, map m f ≤ 𝓝 y [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m hs : IsComplete (m '' s) f : Filter α hf : Cauchy f hfs : s ∈ f ⊢ ∃ y, y ∈ m '' s ∧ map m f ≤ 𝓝 y case intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m hs 
: IsComplete (m '' s) f : Filter α hf : Cauchy f hfs : s ∈ f x : α hx : x ∈ s hyf : map m f ≤ 𝓝 (m x) ⊢ ∃ x, x ∈ s ∧ f ≤ 𝓝 x [PROOFSTEP] exact hs (f.map m) (hf.map hm.uniformContinuous) (le_principal_iff.2 (image_mem_map hfs)) [GOAL] case intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m hs : IsComplete (m '' s) f : Filter α hf : Cauchy f hfs : s ∈ f x : α hx : x ∈ s hyf : map m f ≤ 𝓝 (m x) ⊢ ∃ x, x ∈ s ∧ f ≤ 𝓝 x [PROOFSTEP] rw [map_le_iff_le_comap, ← nhds_induced, ← hm.inducing.induced] at hyf [GOAL] case intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m hs : IsComplete (m '' s) f : Filter α hf : Cauchy f hfs : s ∈ f x : α hx : x ∈ s hyf : f ≤ 𝓝 x ⊢ ∃ x, x ∈ s ∧ f ≤ 𝓝 x [PROOFSTEP] exact ⟨x, hx, hyf⟩ [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set α hs : IsComplete s ⊢ IsComplete (Subtype.val '' univ) [PROOFSTEP] simp [hs] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m ⊢ IsComplete (m '' s) ↔ IsComplete s [PROOFSTEP] refine' ⟨isComplete_of_complete_image hm, fun c => _⟩ [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s ⊢ IsComplete (m '' s) [PROOFSTEP] haveI : CompleteSpace s := c.completeSpace_coe [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s ⊢ IsComplete (m '' s) [PROOFSTEP] set m' : s → β := m ∘ (↑) [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace 
β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val ⊢ IsComplete (m '' s) [PROOFSTEP] suffices IsComplete (range m') by rwa [range_comp, Subtype.range_coe] at this [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this✝ : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val this : IsComplete (range m') ⊢ IsComplete (m '' s) [PROOFSTEP] rwa [range_comp, Subtype.range_coe] at this [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val ⊢ IsComplete (range m') [PROOFSTEP] have hm' : UniformInducing m' := hm.comp uniformEmbedding_subtype_val.toUniformInducing [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val hm' : UniformInducing m' ⊢ IsComplete (range m') [PROOFSTEP] intro f hf hfm [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val hm' : UniformInducing m' f : Filter β hf : Cauchy f hfm : f ≤ 𝓟 (range m') ⊢ ∃ x, x ∈ range m' ∧ f ≤ 𝓝 x [PROOFSTEP] rw [Filter.le_principal_iff] at hfm [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val hm' : UniformInducing m' f : Filter β hf : Cauchy f hfm : range m' ∈ f ⊢ ∃ x, x ∈ range m' ∧ f ≤ 𝓝 x [PROOFSTEP] have cf' : Cauchy (comap m' f) := hf.comap' 
hm'.comap_uniformity.le (NeBot.comap_of_range_mem hf.1 hfm) [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val hm' : UniformInducing m' f : Filter β hf : Cauchy f hfm : range m' ∈ f cf' : Cauchy (comap m' f) ⊢ ∃ x, x ∈ range m' ∧ f ≤ 𝓝 x [PROOFSTEP] rcases CompleteSpace.complete cf' with ⟨x, hx⟩ [GOAL] case intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val hm' : UniformInducing m' f : Filter β hf : Cauchy f hfm : range m' ∈ f cf' : Cauchy (comap m' f) x : ↑s hx : comap m' f ≤ 𝓝 x ⊢ ∃ x, x ∈ range m' ∧ f ≤ 𝓝 x [PROOFSTEP] rw [hm'.inducing.nhds_eq_comap, comap_le_comap_iff hfm] at hx [GOAL] case intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : α → β s : Set α hm : UniformInducing m c : IsComplete s this : CompleteSpace ↑s m' : ↑s → β := m ∘ Subtype.val hm' : UniformInducing m' f : Filter β hf : Cauchy f hfm : range m' ∈ f cf' : Cauchy (comap m' f) x : ↑s hx : f ≤ 𝓝 (m' x) ⊢ ∃ x, x ∈ range m' ∧ f ≤ 𝓝 x [PROOFSTEP] exact ⟨m' x, mem_range_self _, hx⟩ [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β hf : UniformInducing f ⊢ CompleteSpace α ↔ IsComplete (range f) [PROOFSTEP] rw [completeSpace_iff_isComplete_univ, ← isComplete_image_iff hf, image_univ] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ e : α ≃ β he : UniformEmbedding ↑e ⊢ CompleteSpace α ↔ CompleteSpace β [PROOFSTEP] rw [completeSpace_iff_isComplete_range he.toUniformInducing, e.range_eq_univ, completeSpace_iff_isComplete_univ] [GOAL] α : Type u β : Type v γ : Type w 
inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ s : Set α ⊢ IsComplete (range Subtype.val) ↔ IsComplete s [PROOFSTEP] rw [Subtype.range_coe] [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ m : β → α hm : UniformInducing m dense : DenseRange m h : ∀ (f : Filter β), Cauchy f → ∃ x, map m f ≤ 𝓝 x f : Filter α hf : Cauchy f p : Set (α × α) → Set α → Set α := fun s t => {y | ∃ x, x ∈ t ∧ (x, y) ∈ s} g : Filter α := Filter.lift (𝓤 α) fun s => Filter.lift' f (p s) mp₀ : Monotone p mp₁ : ∀ {s : Set (α × α)}, Monotone (p s) this✝⁵ : f ≤ g this✝⁴ : NeBot g this✝³ : NeBot (comap m g) this✝² : Cauchy g this✝¹ : Cauchy (comap m g) x : α hx : map m (comap m g) ≤ 𝓝 x this✝ : ClusterPt x (map m (comap m g)) this : ClusterPt x g ⊢ f ≤ g [PROOFSTEP] assumption [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ 𝓤 α ⊢ ∃ t_1, Set.Finite t_1 ∧ f ⁻¹' s ⊆ ⋃ (y : α) (_ : y ∈ t_1), {x | (x, y) ∈ t} [PROOFSTEP] rw [← hf.comap_uniformity] at ht [GOAL] α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) ⊢ ∃ t_1, Set.Finite t_1 ∧ f ⁻¹' s ⊆ ⋃ (y : α) (_ : y ∈ t_1), {x | (x, y) ∈ t} [PROOFSTEP] rcases mem_comap.2 ht with ⟨t', ht', ts⟩ [GOAL] case intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) t' : Set (β × β) ht' : t' ∈ 𝓤 β ts : (fun x => (f x.fst, f x.snd)) ⁻¹' t' ⊆ t ⊢ ∃ t_1, Set.Finite t_1 ∧ f ⁻¹' s ⊆ ⋃ (y : α) (_ : y ∈ t_1), {x | (x, y) ∈ t} [PROOFSTEP] rcases 
totallyBounded_iff_subset.1 (totallyBounded_subset (image_preimage_subset f s) hs) _ ht' with ⟨c, cs, hfc, hct⟩ [GOAL] case intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) t' : Set (β × β) ht' : t' ∈ 𝓤 β ts : (fun x => (f x.fst, f x.snd)) ⁻¹' t' ⊆ t c : Set β cs : c ⊆ f '' (f ⁻¹' s) hfc : Set.Finite c hct : f '' (f ⁻¹' s) ⊆ ⋃ (y : β) (_ : y ∈ c), {x | (x, y) ∈ t'} ⊢ ∃ t_1, Set.Finite t_1 ∧ f ⁻¹' s ⊆ ⋃ (y : α) (_ : y ∈ t_1), {x | (x, y) ∈ t} [PROOFSTEP] refine' ⟨f ⁻¹' c, hfc.preimage (hf.inj.injOn _), fun x h => _⟩ [GOAL] case intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) t' : Set (β × β) ht' : t' ∈ 𝓤 β ts : (fun x => (f x.fst, f x.snd)) ⁻¹' t' ⊆ t c : Set β cs : c ⊆ f '' (f ⁻¹' s) hfc : Set.Finite c hct : f '' (f ⁻¹' s) ⊆ ⋃ (y : β) (_ : y ∈ c), {x | (x, y) ∈ t'} x : α h : x ∈ f ⁻¹' s ⊢ x ∈ ⋃ (y : α) (_ : y ∈ f ⁻¹' c), {x | (x, y) ∈ t} [PROOFSTEP] have := hct (mem_image_of_mem f h) [GOAL] case intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) t' : Set (β × β) ht' : t' ∈ 𝓤 β ts : (fun x => (f x.fst, f x.snd)) ⁻¹' t' ⊆ t c : Set β cs : c ⊆ f '' (f ⁻¹' s) hfc : Set.Finite c hct : f '' (f ⁻¹' s) ⊆ ⋃ (y : β) (_ : y ∈ c), {x | (x, y) ∈ t'} x : α h : x ∈ f ⁻¹' s this : f x ∈ ⋃ (y : β) (_ : y ∈ c), {x | (x, y) ∈ t'} ⊢ x ∈ ⋃ (y : α) (_ : y ∈ f ⁻¹' c), {x | (x, y) ∈ t} [PROOFSTEP] simp at this ⊢ [GOAL] case 
intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) t' : Set (β × β) ht' : t' ∈ 𝓤 β ts : (fun x => (f x.fst, f x.snd)) ⁻¹' t' ⊆ t c : Set β cs : c ⊆ f '' (f ⁻¹' s) hfc : Set.Finite c hct : f '' (f ⁻¹' s) ⊆ ⋃ (y : β) (_ : y ∈ c), {x | (x, y) ∈ t'} x : α h : x ∈ f ⁻¹' s this : ∃ i, i ∈ c ∧ (f x, i) ∈ t' ⊢ ∃ i, f i ∈ c ∧ (x, i) ∈ t [PROOFSTEP] rcases this with ⟨z, zc, zt⟩ [GOAL] case intro.intro.intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) t' : Set (β × β) ht' : t' ∈ 𝓤 β ts : (fun x => (f x.fst, f x.snd)) ⁻¹' t' ⊆ t c : Set β cs : c ⊆ f '' (f ⁻¹' s) hfc : Set.Finite c hct : f '' (f ⁻¹' s) ⊆ ⋃ (y : β) (_ : y ∈ c), {x | (x, y) ∈ t'} x : α h : x ∈ f ⁻¹' s z : β zc : z ∈ c zt : (f x, z) ∈ t' ⊢ ∃ i, f i ∈ c ∧ (x, i) ∈ t [PROOFSTEP] rcases cs zc with ⟨y, -, rfl⟩ [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u β : Type v γ : Type w inst✝² : UniformSpace α inst✝¹ : UniformSpace β inst✝ : UniformSpace γ f : α → β s : Set β hf : UniformEmbedding f hs : TotallyBounded s t : Set (α × α) ht : t ∈ comap (fun x => (f x.fst, f x.snd)) (𝓤 β) t' : Set (β × β) ht' : t' ∈ 𝓤 β ts : (fun x => (f x.fst, f x.snd)) ⁻¹' t' ⊆ t c : Set β cs : c ⊆ f '' (f ⁻¹' s) hfc : Set.Finite c hct : f '' (f ⁻¹' s) ⊆ ⋃ (y : β) (_ : y ∈ c), {x | (x, y) ∈ t'} x : α h : x ∈ f ⁻¹' s y : α zc : f y ∈ c zt : (f x, f y) ∈ t' ⊢ ∃ i, f i ∈ c ∧ (x, i) ∈ t [PROOFSTEP] exact ⟨y, zc, ts zt⟩ [GOAL] α : Type u β : Type v γ : Type w inst✝⁴ : UniformSpace α inst✝³ : UniformSpace β inst✝² : UniformSpace γ inst✝¹ : CompleteSpace α inst✝ : CompleteSpace β ⊢ CompleteSpace 
(α ⊕ β) [PROOFSTEP] rw [completeSpace_iff_isComplete_univ, ← range_inl_union_range_inr] [GOAL] α : Type u β : Type v γ : Type w inst✝⁴ : UniformSpace α inst✝³ : UniformSpace β inst✝² : UniformSpace γ inst✝¹ : CompleteSpace α inst✝ : CompleteSpace β ⊢ IsComplete (range Sum.inl ∪ range Sum.inr) [PROOFSTEP] exact uniformEmbedding_inl.toUniformInducing.isComplete_range.union uniformEmbedding_inr.toUniformInducing.isComplete_range [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x ⊢ ∃ c, Tendsto f (comap e (𝓝 b)) (𝓝 c) [PROOFSTEP] have de : DenseEmbedding e := he.denseEmbedding hd [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e ⊢ ∃ c, Tendsto f (comap e (𝓝 b)) (𝓝 c) [PROOFSTEP] have de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) := de.subtype p [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure 
(e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ⊢ ∃ c, Tendsto f (comap e (𝓝 b)) (𝓝 c) [PROOFSTEP] have ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) := uniformEmbedding_subtypeEmb _ he de [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) ⊢ ∃ c, Tendsto f (comap e (𝓝 b)) (𝓝 c) [PROOFSTEP] have : b ∈ closure (e '' {x | p x}) := (closure_mono <| monotone_image <| hp) (mem_of_mem_nhds hb) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) this : b ∈ closure (e '' {x | p x}) ⊢ ∃ c, Tendsto f (comap e (𝓝 b)) (𝓝 c) [PROOFSTEP] let ⟨c, hc⟩ := uniformly_extend_exists ue'.toUniformInducing de'.dense hf ⟨b, this⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous 
f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) this : b ∈ closure (e '' {x | p x}) c : γ hc : Tendsto (fun x => f ↑x) (comap (DenseEmbedding.subtypeEmb p e) (𝓝 { val := b, property := this })) (𝓝 c) ⊢ ∃ c, Tendsto f (comap e (𝓝 b)) (𝓝 c) [PROOFSTEP] replace hc : Tendsto (f ∘ Subtype.val) (((𝓝 b).comap e).comap Subtype.val) (𝓝 c) := by simpa only [nhds_subtype_eq_comap, comap_comap, DenseEmbedding.subtypeEmb_coe] using hc [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) this : b ∈ closure (e '' {x | p x}) c : γ hc : Tendsto (fun x => f ↑x) (comap (DenseEmbedding.subtypeEmb p e) (𝓝 { val := b, property := this })) (𝓝 c) ⊢ Tendsto (f ∘ Subtype.val) (comap Subtype.val (comap e (𝓝 b))) (𝓝 c) [PROOFSTEP] simpa only [nhds_subtype_eq_comap, comap_comap, DenseEmbedding.subtypeEmb_coe] using hc [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : 
UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) this : b ∈ closure (e '' {x | p x}) c : γ hc : Tendsto (f ∘ Subtype.val) (comap Subtype.val (comap e (𝓝 b))) (𝓝 c) ⊢ ∃ c, Tendsto f (comap e (𝓝 b)) (𝓝 c) [PROOFSTEP] refine ⟨c, (tendsto_comap'_iff ?_).1 hc⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) this : b ∈ closure (e '' {x | p x}) c : γ hc : Tendsto (f ∘ Subtype.val) (comap Subtype.val (comap e (𝓝 b))) (𝓝 c) ⊢ range Subtype.val ∈ comap e (𝓝 b) [PROOFSTEP] rw [Subtype.range_coe_subtype] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) this : b ∈ closure (e '' {x | p x}) c : γ hc : Tendsto (f ∘ Subtype.val) (comap Subtype.val (comap e (𝓝 b))) (𝓝 c) ⊢ {x | p x} ∈ comap e (𝓝 b) [PROOFSTEP] 
exact ⟨_, hb, by rwa [← de.toInducing.closure_eq_preimage_closure_image, hs.closure_eq]⟩ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e✝ : β → α h_e : UniformInducing e✝ h_dense : DenseRange e✝ f✝ : β → γ h_f : UniformContinuous f✝ inst✝ : CompleteSpace γ p : α → Prop e : α → β f : α → γ b : β s : Set α hf : UniformContinuous fun x => f ↑x he : UniformEmbedding e hd : ∀ (x : β), x ∈ closure (range e) hb : closure (e '' s) ∈ 𝓝 b hs : IsClosed s hp : ∀ (x : α), x ∈ s → p x de : DenseEmbedding e de' : DenseEmbedding (DenseEmbedding.subtypeEmb p e) ue' : UniformEmbedding (DenseEmbedding.subtypeEmb p e) this : b ∈ closure (e '' {x | p x}) c : γ hc : Tendsto (f ∘ Subtype.val) (comap Subtype.val (comap e (𝓝 b))) (𝓝 c) ⊢ e ⁻¹' closure (e '' s) ⊆ {x | p x} [PROOFSTEP] rwa [← de.toInducing.closure_eq_preimage_closure_image, hs.closure_eq] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e : β → α h_e : UniformInducing e h_dense : DenseRange e f : β → γ h_f : UniformContinuous f inst✝ : CompleteSpace γ a : α ⊢ Tendsto f (comap e (𝓝 a)) (𝓝 (DenseInducing.extend (_ : DenseInducing e) f a)) [PROOFSTEP] simpa only [DenseInducing.extend] using tendsto_nhds_limUnder (uniformly_extend_exists h_e ‹_› h_f _) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e : β → α h_e : UniformInducing e h_dense : DenseRange e f : β → γ h_f : UniformContinuous f inst✝ : CompleteSpace γ d : Set (γ × γ) hd : d ∈ 𝓤 γ s : Set (γ × γ) hs : s ∈ 𝓤 γ hs_comp : s ○ (s ○ s) ⊆ d h_pnt : ∀ {a : α} {m : Set α}, m ∈ 𝓝 a → ∃ c, c ∈ f '' (e ⁻¹' m) ∧ (c, DenseInducing.extend (_ : DenseInducing e) f a) ∈ s ∧ (DenseInducing.extend (_ : DenseInducing e) f a, c) ∈ s this : Prod.map f f ⁻¹' s ∈ 𝓤 β ⊢ Prod.map f f ⁻¹' s ∈ comap (Prod.map e e) (𝓤 α) [PROOFSTEP] rwa [← h_e.comap_uniformity] at this [GOAL] α : Type 
u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e : β → α h_e : UniformInducing e h_dense : DenseRange e f : β → γ h_f : UniformContinuous f inst✝ : CompleteSpace γ d : Set (γ × γ) hd : d ∈ 𝓤 γ s : Set (γ × γ) hs : s ∈ 𝓤 γ hs_comp : s ○ (s ○ s) ⊆ d h_pnt : ∀ {a : α} {m : Set α}, m ∈ 𝓝 a → ∃ c, c ∈ f '' (e ⁻¹' m) ∧ (c, DenseInducing.extend (_ : DenseInducing e) f a) ∈ s ∧ (DenseInducing.extend (_ : DenseInducing e) f a, c) ∈ s this✝ : Prod.map f f ⁻¹' s ∈ 𝓤 β this : Prod.map f f ⁻¹' s ∈ comap (Prod.map e e) (𝓤 α) t : Set (α × α) ht : t ∈ 𝓤 α ts : Prod.map e e ⁻¹' t ⊆ Prod.map f f ⁻¹' s x✝ : α × α x₁ x₂ : α hx_t : (x₁, x₂) ∈ interior t ⊢ (x₁, x₂) ∈ Prod.map (DenseInducing.extend (_ : DenseInducing e) f) (DenseInducing.extend (_ : DenseInducing e) f) ⁻¹' d [PROOFSTEP] have : interior t ∈ 𝓝 (x₁, x₂) := isOpen_interior.mem_nhds hx_t [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e : β → α h_e : UniformInducing e h_dense : DenseRange e f : β → γ h_f : UniformContinuous f inst✝ : CompleteSpace γ d : Set (γ × γ) hd : d ∈ 𝓤 γ s : Set (γ × γ) hs : s ∈ 𝓤 γ hs_comp : s ○ (s ○ s) ⊆ d h_pnt : ∀ {a : α} {m : Set α}, m ∈ 𝓝 a → ∃ c, c ∈ f '' (e ⁻¹' m) ∧ (c, DenseInducing.extend (_ : DenseInducing e) f a) ∈ s ∧ (DenseInducing.extend (_ : DenseInducing e) f a, c) ∈ s this✝¹ : Prod.map f f ⁻¹' s ∈ 𝓤 β this✝ : Prod.map f f ⁻¹' s ∈ comap (Prod.map e e) (𝓤 α) t : Set (α × α) ht : t ∈ 𝓤 α ts : Prod.map e e ⁻¹' t ⊆ Prod.map f f ⁻¹' s x✝ : α × α x₁ x₂ : α hx_t : (x₁, x₂) ∈ interior t this : interior t ∈ 𝓝 (x₁, x₂) ⊢ (x₁, x₂) ∈ Prod.map (DenseInducing.extend (_ : DenseInducing e) f) (DenseInducing.extend (_ : DenseInducing e) f) ⁻¹' d [PROOFSTEP] let ⟨m₁, hm₁, m₂, hm₂, (hm : m₁ ×ˢ m₂ ⊆ interior t)⟩ := mem_nhds_prod_iff.mp this [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e : β → α h_e : 
UniformInducing e h_dense : DenseRange e f : β → γ h_f : UniformContinuous f inst✝ : CompleteSpace γ d : Set (γ × γ) hd : d ∈ 𝓤 γ s : Set (γ × γ) hs : s ∈ 𝓤 γ hs_comp : s ○ (s ○ s) ⊆ d h_pnt : ∀ {a : α} {m : Set α}, m ∈ 𝓝 a → ∃ c, c ∈ f '' (e ⁻¹' m) ∧ (c, DenseInducing.extend (_ : DenseInducing e) f a) ∈ s ∧ (DenseInducing.extend (_ : DenseInducing e) f a, c) ∈ s this✝¹ : Prod.map f f ⁻¹' s ∈ 𝓤 β this✝ : Prod.map f f ⁻¹' s ∈ comap (Prod.map e e) (𝓤 α) t : Set (α × α) ht : t ∈ 𝓤 α ts : Prod.map e e ⁻¹' t ⊆ Prod.map f f ⁻¹' s x✝ : α × α x₁ x₂ : α hx_t : (x₁, x₂) ∈ interior t this : interior t ∈ 𝓝 (x₁, x₂) m₁ : Set α hm₁ : m₁ ∈ 𝓝 x₁ m₂ : Set α hm₂ : m₂ ∈ 𝓝 x₂ hm : m₁ ×ˢ m₂ ⊆ interior t ⊢ (x₁, x₂) ∈ Prod.map (DenseInducing.extend (_ : DenseInducing e) f) (DenseInducing.extend (_ : DenseInducing e) f) ⁻¹' d [PROOFSTEP] obtain ⟨_, ⟨a, ha₁, rfl⟩, _, ha₂⟩ := h_pnt hm₁ [GOAL] case intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e : β → α h_e : UniformInducing e h_dense : DenseRange e f : β → γ h_f : UniformContinuous f inst✝ : CompleteSpace γ d : Set (γ × γ) hd : d ∈ 𝓤 γ s : Set (γ × γ) hs : s ∈ 𝓤 γ hs_comp : s ○ (s ○ s) ⊆ d h_pnt : ∀ {a : α} {m : Set α}, m ∈ 𝓝 a → ∃ c, c ∈ f '' (e ⁻¹' m) ∧ (c, DenseInducing.extend (_ : DenseInducing e) f a) ∈ s ∧ (DenseInducing.extend (_ : DenseInducing e) f a, c) ∈ s this✝¹ : Prod.map f f ⁻¹' s ∈ 𝓤 β this✝ : Prod.map f f ⁻¹' s ∈ comap (Prod.map e e) (𝓤 α) t : Set (α × α) ht : t ∈ 𝓤 α ts : Prod.map e e ⁻¹' t ⊆ Prod.map f f ⁻¹' s x✝ : α × α x₁ x₂ : α hx_t : (x₁, x₂) ∈ interior t this : interior t ∈ 𝓝 (x₁, x₂) m₁ : Set α hm₁ : m₁ ∈ 𝓝 x₁ m₂ : Set α hm₂ : m₂ ∈ 𝓝 x₂ hm : m₁ ×ˢ m₂ ⊆ interior t a : β ha₁ : a ∈ e ⁻¹' m₁ left✝ : (f a, DenseInducing.extend (_ : DenseInducing e) f x₁) ∈ s ha₂ : (DenseInducing.extend (_ : DenseInducing e) f x₁, f a) ∈ s ⊢ (x₁, x₂) ∈ Prod.map (DenseInducing.extend (_ : DenseInducing e) f) (DenseInducing.extend (_ : 
DenseInducing e) f) ⁻¹' d [PROOFSTEP] obtain ⟨_, ⟨b, hb₁, rfl⟩, hb₂, _⟩ := h_pnt hm₂ [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e : β → α h_e : UniformInducing e h_dense : DenseRange e f : β → γ h_f : UniformContinuous f inst✝ : CompleteSpace γ d : Set (γ × γ) hd : d ∈ 𝓤 γ s : Set (γ × γ) hs : s ∈ 𝓤 γ hs_comp : s ○ (s ○ s) ⊆ d h_pnt : ∀ {a : α} {m : Set α}, m ∈ 𝓝 a → ∃ c, c ∈ f '' (e ⁻¹' m) ∧ (c, DenseInducing.extend (_ : DenseInducing e) f a) ∈ s ∧ (DenseInducing.extend (_ : DenseInducing e) f a, c) ∈ s this✝¹ : Prod.map f f ⁻¹' s ∈ 𝓤 β this✝ : Prod.map f f ⁻¹' s ∈ comap (Prod.map e e) (𝓤 α) t : Set (α × α) ht : t ∈ 𝓤 α ts : Prod.map e e ⁻¹' t ⊆ Prod.map f f ⁻¹' s x✝ : α × α x₁ x₂ : α hx_t : (x₁, x₂) ∈ interior t this : interior t ∈ 𝓝 (x₁, x₂) m₁ : Set α hm₁ : m₁ ∈ 𝓝 x₁ m₂ : Set α hm₂ : m₂ ∈ 𝓝 x₂ hm : m₁ ×ˢ m₂ ⊆ interior t a : β ha₁ : a ∈ e ⁻¹' m₁ left✝ : (f a, DenseInducing.extend (_ : DenseInducing e) f x₁) ∈ s ha₂ : (DenseInducing.extend (_ : DenseInducing e) f x₁, f a) ∈ s b : β hb₁ : b ∈ e ⁻¹' m₂ hb₂ : (f b, DenseInducing.extend (_ : DenseInducing e) f x₂) ∈ s right✝ : (DenseInducing.extend (_ : DenseInducing e) f x₂, f b) ∈ s ⊢ (x₁, x₂) ∈ Prod.map (DenseInducing.extend (_ : DenseInducing e) f) (DenseInducing.extend (_ : DenseInducing e) f) ⁻¹' d [PROOFSTEP] have : Prod.map f f (a, b) ∈ s := ts <| mem_preimage.2 <| interior_subset (@hm (e a, e b) ⟨ha₁, hb₁⟩) [GOAL] case intro.intro.intro.intro.intro.intro.intro.intro.intro.intro α : Type u_1 β : Type u_2 γ : Type u_3 inst✝³ : UniformSpace α inst✝² : UniformSpace β inst✝¹ : UniformSpace γ e : β → α h_e : UniformInducing e h_dense : DenseRange e f : β → γ h_f : UniformContinuous f inst✝ : CompleteSpace γ d : Set (γ × γ) hd : d ∈ 𝓤 γ s : Set (γ × γ) hs : s ∈ 𝓤 γ hs_comp : s ○ (s ○ s) ⊆ d h_pnt : ∀ {a : α} {m : Set α}, m ∈ 𝓝 a → ∃ c, c ∈ f '' (e ⁻¹' m) ∧ (c, 
DenseInducing.extend (_ : DenseInducing e) f a) ∈ s ∧ (DenseInducing.extend (_ : DenseInducing e) f a, c) ∈ s this✝² : Prod.map f f ⁻¹' s ∈ 𝓤 β this✝¹ : Prod.map f f ⁻¹' s ∈ comap (Prod.map e e) (𝓤 α) t : Set (α × α) ht : t ∈ 𝓤 α ts : Prod.map e e ⁻¹' t ⊆ Prod.map f f ⁻¹' s x✝ : α × α x₁ x₂ : α hx_t : (x₁, x₂) ∈ interior t this✝ : interior t ∈ 𝓝 (x₁, x₂) m₁ : Set α hm₁ : m₁ ∈ 𝓝 x₁ m₂ : Set α hm₂ : m₂ ∈ 𝓝 x₂ hm : m₁ ×ˢ m₂ ⊆ interior t a : β ha₁ : a ∈ e ⁻¹' m₁ left✝ : (f a, DenseInducing.extend (_ : DenseInducing e) f x₁) ∈ s ha₂ : (DenseInducing.extend (_ : DenseInducing e) f x₁, f a) ∈ s b : β hb₁ : b ∈ e ⁻¹' m₂ hb₂ : (f b, DenseInducing.extend (_ : DenseInducing e) f x₂) ∈ s right✝ : (DenseInducing.extend (_ : DenseInducing e) f x₂, f b) ∈ s this : Prod.map f f (a, b) ∈ s ⊢ (x₁, x₂) ∈ Prod.map (DenseInducing.extend (_ : DenseInducing e) f) (DenseInducing.extend (_ : DenseInducing e) f) ⁻¹' d [PROOFSTEP] exact hs_comp ⟨f a, ha₂, ⟨f b, this, hb₂⟩⟩
(* Copyright (C) 2017 M.A.L. Marques 2019 Susi Lehtola This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *)

(* type: gga_exc *)
(* prefix: gga_c_optc_params *params; assert(p->params != NULL); params = (gga_c_optc_params * )(p->params); *)

(* Pulls in f_pw91 (the PW91 correlation functional) plus its helpers;
   opz_pow_n is also expected to come from the shared includes
   (presumably opz_pow_n(z, n) = (1 + z)^n — verify against the helper file). *)
$include "gga_c_pw91.mpl"

(* Sum of the two fully spin-polarized (z = +/-1) PW91 evaluations, one per
   spin channel: each channel is evaluated at the spin-scaled Wigner-Seitz
   radius rs*(2/(1 +/- z))^(1/3) with only that channel's reduced gradient
   (xs0 or xs1) active, and weighted by opz_pow_n(+/-z, 1)/2.
   xt is accepted for signature compatibility but unused here. *)
optc_f2 := (rs, z, xt, xs0, xs1) -> + f_pw91(rs*(2/(1 + z))^(1/3), 1, xs0, xs0, 0)*opz_pow_n( z,1)/2 + f_pw91(rs*(2/(1 - z))^(1/3), -1, xs1, 0, xs1)*opz_pow_n(-z,1)/2:

(* OPTC correlation energy: linear combination of the full PW91 term and the
   per-spin-channel term above, mixed by the functional parameters c1 and c2
   (params_a_c1, params_a_c2 supplied through gga_c_optc_params). *)
f := (rs, z, xt, xs0, xs1) -> + params_a_c1*f_pw91(rs, z, xt, xs0, xs1) + (params_a_c2 - params_a_c1)*optc_f2(rs, z, xt, xs0, xs1):
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os

from StreamManagerApi import MxDataInput, InProtobufVector,\
    MxProtobufIn, StringVector, MxBufferInput, MxMetadataInput, MetadataInputVector, StreamManagerApi
import MxpiDataType_pb2 as MxpiDataType
import numpy as np


def getData(path, rows=128):
    """Load a raw little-endian float32 binary file as a 2-D array.

    Args:
        path: filesystem path of the raw ``.data``/``.bin`` file.
        rows: number of rows of the returned array; defaults to 128,
            the batch size this pipeline was exported with.

    Returns:
        np.ndarray of dtype float32 with shape (rows, -1).
    """
    data = np.fromfile(path, dtype=np.float32)
    return data.reshape(rows, -1)


def send_source_data(filename, stream_manager):
    """
    Construct the input of the stream, send inputs data to a specified
    stream based on streamName, and fetch the inference result.

    Args:
        filename: path of the float32 input tensor file (see getData).
        stream_manager: an initialized StreamManagerApi instance.

    Returns:
        np.ndarray of the first output tensor on success, False on any
        send/receive failure (callers must check for False explicitly).
    """
    stream_name = b'tall'
    tensor = getData(filename)

    # Wrap the raw tensor bytes in a MxpiTensorPackageList protobuf.
    tensor_package_list = MxpiDataType.MxpiTensorPackageList()
    tensor_package = tensor_package_list.tensorPackageVec.add()
    array_bytes = tensor.tobytes()
    tensor_vec = tensor_package.tensorVec.add()
    tensor_vec.deviceId = 0
    tensor_vec.memType = 0
    for dim in tensor.shape:
        tensor_vec.tensorShape.append(dim)
    tensor_vec.dataStr = array_bytes
    tensor_vec.tensorDataSize = len(array_bytes)

    # Feed the protobuf to the stream's first source element (appsrc0).
    protobuf_vec = InProtobufVector()
    protobuf = MxProtobufIn()
    protobuf.key = "appsrc0".encode('utf-8')
    protobuf.type = b'MxTools.MxpiTensorPackageList'
    protobuf.protobuf = tensor_package_list.SerializeToString()
    protobuf_vec.push_back(protobuf)
    ret2 = stream_manager.SendProtobuf(stream_name, 0, protobuf_vec)
    if ret2 < 0:
        print("Failed to send data to stream.")
        return False

    # Block until the inference plugin (mxpi_tensorinfer0) produces output.
    key_vec = StringVector()
    key_vec.push_back(b'mxpi_tensorinfer0')
    infer_result = stream_manager.GetProtobuf(stream_name, 0, key_vec)
    if infer_result.size() == 0:
        print("inferResult is null")
        return False
    if infer_result[0].errorCode != 0:
        print("GetProtobuf error. errorCode=%d" % (infer_result[0].errorCode))
        return False

    # '<f4' == little-endian float32, matching the model's output dtype.
    result = MxpiDataType.MxpiTensorPackageList()
    result.ParseFromString(infer_result[0].messageBuf)
    return np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype='<f4')


def infer(img_path, streamManager):
    """Send one input through the stream via SendData and return the output.

    Alternative path to send_source_data: wraps the input in a
    MxpiVisionList and retrieves the result through GetResult/appsink0.

    Args:
        img_path: path of the float32 input tensor file (see getData).
        streamManager: an initialized StreamManagerApi instance.

    Returns:
        np.ndarray of shape (128, 128, 3), dtype float32.
    """
    # Construct the input of the stream
    data_input = MxDataInput()
    data_128 = getData(img_path)
    data_input.data = data_128.tobytes()

    # Inputs data to a specified stream based on streamName.
    batch_size = 128
    stream_name = b'tall'
    elment_name = b'appsrc0'
    key = b'mxpi_tensorinfer0'

    frame_info = MxpiDataType.MxpiFrameInfo()
    frame_info.frameId = 0
    frame_info.channelId = 0

    vision_list = MxpiDataType.MxpiVisionList()
    vision_vec = vision_list.visionVec.add()
    vision_vec.visionData.deviceId = 0
    vision_vec.visionData.memType = 0
    vision_vec.visionData.dataStr = data_input.data

    buffer_input = MxBufferInput()
    buffer_input.mxpiFrameInfo = frame_info.SerializeToString()
    buffer_input.mxpiVisionInfo = vision_vec.SerializeToString()
    buffer_input.data = data_input.data

    metedata_input = MxMetadataInput()
    metedata_input.dataSource = elment_name
    metedata_input.dataType = b"MxTools.MxpiVisionList"
    metedata_input.serializedMetadata = vision_list.SerializeToString()
    metedata_vec = MetadataInputVector()
    metedata_vec.push_back(metedata_input)

    error_code = streamManager.SendData(stream_name, elment_name, metedata_vec, buffer_input)
    if error_code < 0:
        print("Failed to send data to stream.")
        exit()

    data_source_vector = StringVector()
    data_source_vector.push_back(key)
    infer_result = streamManager.GetResult(stream_name, b'appsink0', data_source_vector)
    # BUG FIX: the original rebound infer_result to the raw bytes of
    # infer_result.bufferOutput.data and then read .metadataVec[0] off those
    # bytes, which raises AttributeError. Read the metadata from the result
    # object itself instead.
    infer_result1 = infer_result.metadataVec[0]
    tensorList = MxpiDataType.MxpiTensorPackageList()
    tensorList.ParseFromString(infer_result1.serializedMetadata)
    print(tensorList.tensorPackageVec[0].tensorVec[0].tensorShape)
    nparr = np.frombuffer(tensorList.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)
    nparr = nparr.reshape((batch_size, batch_size, 3))
    return nparr


if __name__ == '__main__':
    # init stream manager
    streamManagerApi = StreamManagerApi()
    ret1 = streamManagerApi.InitManager()
    if ret1 != 0:
        print("Failed to init Stream manager, ret=%s" % str(ret1))
        exit()

    # create streams by pipeline config file
    with open("./config/tall.pipeline", 'rb') as f:
        pipeline = f.read()
    ret = streamManagerApi.CreateMultipleStreams(pipeline)
    if ret != 0:
        print("Failed to create Stream, ret=%s" % str(ret))
        exit()

    # infer.txt lists one input-file path per line; strip the trailing
    # newline from each entry before use.
    with open('infer.txt', 'r', encoding='utf-8') as f:
        datapaths = f.readlines()
    if not os.path.exists('./result'):
        os.makedirs('./result')
    for datapath in datapaths:
        output = send_source_data(datapath[:-1], streamManagerApi)
        dataname = datapath[:-1].split('/')[-1].replace('.data', '.bin')
        output.tofile(f'./result/{dataname}')

    # destroy streams
    streamManagerApi.DestroyAllStreams()
!--------------!
 module system
!--------------!
 ! Shared state for the momentum-block exact diagonalization of a spin
 ! chain of nn sites.  Basis states are integers whose bits encode spins.
 implicit none

 integer :: nn                        ! number of lattice sites
 integer :: nrep                      ! number of representatives in current (nu,k) block
 integer, allocatable :: repr(:)      ! representative bit-states of the block
 integer, allocatable :: peri(:)      ! periodicity of each representative under translation
 real(8), allocatable :: enr(:)       ! eigenvalues (energies)
 real(8), allocatable :: spn(:)       ! total spin quantum number per eigenstate
 complex(8), allocatable :: mat(:,:)  ! block matrix (Hamiltonian, then S^2)
 complex(8), allocatable :: vec(:,:)  ! eigenvectors of the block
 complex(8), allocatable :: expk(:)   ! tabulated phases exp(-i*k_momentum*l)

 end module system
!-----------------!

!=================!
 program hchain_mk
!=================!
 ! Diagonalizes each (magnetization nu, momentum k) symmetry block in turn,
 ! writing all eigenvalues to eig.dat and the lowest state to low.dat.
 use system; implicit none

 integer :: i,k,nu,rm

 ! read.in holds: nn (sites) and rm (max number of representatives to allocate)
 open(10,file='read.in',status='old')
 read(10,*)nn,rm
 close(10)

 allocate(repr(rm))
 allocate(peri(rm))

 ! truncate/create the output files
 open(10,file='eig.dat',status='replace')
 close(10)
 open(10,file='low.dat',status='replace')
 close(10)

 do nu=0,nn/2
 do k=0,nn/2
    call expfunction(k)
    call makebasis(nu,k)
    if (nrep/=0) then
       allocate(mat(nrep,nrep))
       allocate(vec(nrep,nrep))
       allocate(enr(nrep))
       allocate(spn(nrep))
       call hamiltonian()
       call diagonalize(nrep,mat,vec,enr)
       call spinsquared(nu)
       call transform(nrep,mat,vec,spn)
       ! convert eigenvalues of S^2 = S(S+1) into S itself
       spn(:)=0.5d0*abs(sqrt(1.d0+4.d0*spn(:))-1.d0)
       call writedata(nu,k)
       deallocate(mat)
       deallocate(vec)
       deallocate(enr)
       deallocate(spn)
    endif
 enddo
 enddo

 deallocate(repr)
 deallocate(peri)
 deallocate(expk)

 end program hchain_mk
!=====================!

!--------------------------!
 subroutine writedata(nu,k)
!--------------------------!
 ! Appends all (energy, spin) pairs of the current block to eig.dat and the
 ! lowest state of the block to low.dat.
 use system; implicit none

 integer :: i,k,nu

 open(10,file='eig.dat',position='append')
 write(10,*)'nu =',nu,' k = ',k,' nst =',nrep
 do i=1,nrep
    write(10,'(i5,2f18.10)')i-1,enr(i),spn(i)
 enddo
 close(10)
 open(10,file='low.dat',position='append')
 write(10,30)nu,k,enr(1),spn(1),nrep
 30 format(2i5,2f16.10,i10)
 close(10)

 end subroutine writedata
!------------------------!

!------------------------!
 subroutine hamiltonian()
!------------------------!
 ! Builds the Heisenberg-chain Hamiltonian in the momentum basis of
 ! representatives: diagonal Ising terms +/-0.25 per bond, off-diagonal
 ! spin-flip terms weighted by periodicity ratio and translation phase.
 use system; implicit none

 integer :: i,j,a,b,l,sa,sb,bb

 mat(:,:)=0.d0
 do a=1,nrep
    sa=repr(a)
    do i=0,nn-1
       j=mod(i+1,nn)                        ! nearest neighbour with periodic wrap
       if (btest(sa,i).eqv.btest(sa,j)) then
          mat(a,a)=mat(a,a)+0.25d0
       else
          mat(a,a)=mat(a,a)-0.25d0
          bb=ieor(sa,2**i+2**j)             ! flip the two antiparallel spins
          call representative(bb,sb,l)
          call findstate(sb,b)
          if (b>=0) then
             mat(a,b)=mat(a,b)+0.5d0*sqrt(dfloat(peri(a))/dfloat(peri(b)))*expk(l)
          endif
       endif
    enddo
 enddo

 end subroutine hamiltonian
!--------------------------!

!--------------------------!
 subroutine spinsquared(nu)
!--------------------------!
 ! Builds the total-spin-squared operator S^2 in the same momentum basis;
 ! its eigenvalues are S(S+1), converted to S by the main program.
 use system; implicit none

 integer :: i,j,a,b,l,nu,sa,sb,bb

 mat(:,:)=0.d0
 do a=1,nrep
    sa=repr(a)
    ! diagonal part: Sz^2 contribution for magnetization sector nu
    mat(a,a)=mat(a,a)+(dfloat(nu)-dfloat(nn)/2.d0)**2+dfloat(nn)/2.d0
    do i=0,nn-1
       do j=i+1,nn-1
          if (btest(sa,i).neqv.btest(sa,j)) then
             bb=ieor(sa,2**i+2**j)
             call representative(bb,sb,l)
             call findstate(sb,b)
             if (b>=0) mat(a,b)=mat(a,b)+sqrt(dfloat(peri(a))/dfloat(peri(b)))*expk(l)
          endif
       enddo
    enddo
 enddo

 end subroutine spinsquared
!--------------------------!

!--------------------------!
 subroutine makebasis(nu,k)
!--------------------------!
 ! Collects all representative states compatible with magnetization nu and
 ! momentum k, storing them in repr(:) with periodicities in peri(:).
 use system; implicit none

 integer :: s,k,nu,ra
 logical :: pass

 nrep=0
 do s=0,2**nn-1
    call checkstate(s,nu,k,ra,pass)
    if (pass) then
       nrep=nrep+1
       repr(nrep)=s
       peri(nrep)=ra
    endif
 enddo

 end subroutine makebasis
!------------------------!

!--------------------------------------!
 subroutine checkstate(sa,nu,k,ra,pass)
!--------------------------------------!
 ! Accepts sa iff it has nu up-spins, is the smallest state on its
 ! translation orbit, and its periodicity ra is compatible with momentum k
 ! (i.e. k is a multiple of nn/ra).
 use system; implicit none

 integer :: i,t,k,nu,n1,ra,sa,at
 logical :: pass

 pass=.false.
 n1=0
 do i=0,nn-1
    if (btest(sa,i)) n1=n1+1
 enddo
 if (n1/=nu) return
 ra=nn
 at=sa
 do t=1,nn-1
    at=ishftc(at,-1,nn)         ! cyclic translation by one site
    if (at<sa) then
       return                   ! not the orbit representative
    elseif (at==sa) then
       if (mod(k,nn/t)/=0) return
       ra=t
       exit
    endif
 enddo
 pass=.true.

 end subroutine checkstate
!-------------------------!

!----------------------------------!
 subroutine representative(aa,sa,l)
!----------------------------------!
 ! Returns the smallest translation sa of aa and the number of shifts l
 ! needed to reach it.
 use system; implicit none

 integer :: i,t,l,aa,sa,at

 sa=aa; at=aa; l=0
 do t=1,nn-1
    at=ishftc(at,-1,nn)
    if (at<sa) then
       sa=at; l=t
    endif
 enddo

 end subroutine representative
!-----------------------------!

!--------------------------!
 subroutine findstate(sa,a)
!--------------------------!
 ! Binary search for sa in the sorted repr(1:nrep); a=-1 if absent.
 use system; implicit none

 integer :: a,sa,amin,amax

 amin=1; amax=nrep
 do
    a=amin+(amax-amin)/2
    if (sa<repr(a)) then
       amax=a-1
    elseif (sa>repr(a)) then
       amin=a+1
    else
       return
    endif
    if (amin>amax) then
       a=-1
       return
    endif
 enddo

 end subroutine findstate
!-------------------------!

!-------------------------!
 subroutine expfunction(k)
!-------------------------!
 ! Tabulates the phase factors expk(l)=exp(-i*(2*pi*k/nn)*l) for l in
 ! [-nn,nn]; allocated once on first call.
 use system; implicit none

 real(8), parameter :: pi=3.14159265358979d0

 integer :: i,k
 real(8) :: kk

 kk=dfloat(2*k)*pi/dfloat(nn)
 if (.not.allocated(expk)) allocate(expk(-nn:nn))
 do i=-nn,nn
    expk(i)=exp(-(0,1)*kk*i)
 enddo

 end subroutine expfunction
!--------------------------!

!-------------------------------------!
 subroutine diagonalize(n,mat,vec,eig)
!-------------------------------------!
 ! Full diagonalization of the complex Hermitian matrix mat; eigenvalues in
 ! eig, eigenvectors in vec.  Delegates to the external routine dch
 ! (presumably an EISPACK-style complex-Hermitian solver — confirm its
 ! interface against the linked library).
 implicit none

 integer :: n,ierr
 real(8) :: rz(n,n),iz(n,n),eig(n),fv1(n),fv2(n),fm1(2,n)
 complex(8) :: mat(n,n),vec(n,n)

 vec=mat
 call dch(n,n,real(vec),aimag(vec),eig,1,rz,iz,fv1,fv2,fm1,ierr)
 vec=rz+(0,1)*iz

 end subroutine diagonalize
!--------------------------!

!-----------------------------------!
 subroutine transform(n,mat,vec,dia)
!-----------------------------------!
 ! Computes the diagonal of vec^dagger * mat * vec, i.e. the expectation
 ! values of mat in the eigenbasis vec.  Note: mat is overwritten.
 implicit none

 integer :: i,n
 real(8) :: dia(n)
 complex(8) :: mat(n,n),vec(n,n)

 mat=matmul(mat,vec)
 mat=matmul(conjg(transpose(vec)),mat)
 do i=1,n
    dia(i)=real(mat(i,i))
 enddo

 end subroutine transform
!------------------------!
[STATEMENT] lemma edom_empty_iff_bot: "edom m = {} \<longleftrightarrow> m = \<bottom>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (edom m = {}) = (m = \<bottom>) [PROOF STEP] by (metis below_bottom_iff bot_edom edomIff empty_iff fun_belowI)
module Category where

open import Prelude

--------------------------------------------------------------------------------

-- A category over objects 𝒪 with hom-sets _▹_.  Note the morphism
-- direction: `y ▹ x` is a morphism from y to x, and `_⋄_` composes
-- "outside-in" accordingly.
record Category {ℓ ℓ′} (𝒪 : Set ℓ) (_▹_ : 𝒪 → 𝒪 → Set ℓ′) : Set (ℓ ⊔ ℓ′) where
  field
    idₓ    : ∀ {x} → x ▹ x
    _⋄_    : ∀ {x y z} → y ▹ x → z ▹ y → z ▹ x
    lid⋄   : ∀ {x y} → (f : y ▹ x) → idₓ ⋄ f ≡ f
    rid⋄   : ∀ {x y} → (f : y ▹ x) → f ⋄ idₓ ≡ f
    assoc⋄ : ∀ {x y z a} → (h : a ▹ z) (g : z ▹ y) (f : y ▹ x)
           → (f ⋄ g) ⋄ h ≡ f ⋄ (g ⋄ h)

-- The category of sets at level ℓ; morphisms are plain functions
-- (Π is presumably the function-space from Prelude — confirm there).
-- The laws hold definitionally, hence `refl`.
𝗦𝗲𝘁 : (ℓ : Level) → Category (Set ℓ) Π
𝗦𝗲𝘁 ℓ = record
  { idₓ    = id
  ; _⋄_    = _∘_
  ; lid⋄   = λ f → refl
  ; rid⋄   = λ f → refl
  ; assoc⋄ = λ h g f → refl
  }

-- Sets at the lowest universe level.
𝗦𝗲𝘁₀ : Category (Set ℓ₀) Π
𝗦𝗲𝘁₀ = 𝗦𝗲𝘁 ℓ₀

-- A functor from 𝗖 to 𝗗: an object map Fₓ and a morphism map F that
-- preserve identities (idF) and composition (F⋄).
record Functor {ℓ₁ ℓ₁′ ℓ₂ ℓ₂′}
    {𝒪₁ : Set ℓ₁} {_▹₁_ : 𝒪₁ → 𝒪₁ → Set ℓ₁′}
    {𝒪₂ : Set ℓ₂} {_▹₂_ : 𝒪₂ → 𝒪₂ → Set ℓ₂′}
    (𝗖 : Category 𝒪₁ _▹₁_) (𝗗 : Category 𝒪₂ _▹₂_)
    : Set (ℓ₁ ⊔ ℓ₁′ ⊔ ℓ₂ ⊔ ℓ₂′) where
  private
    module C = Category 𝗖
    module D = Category 𝗗
  field
    Fₓ  : 𝒪₁ → 𝒪₂
    F   : ∀ {x y} → y ▹₁ x → Fₓ y ▹₂ Fₓ x
    idF : ∀ {x} → F (C.idₓ {x = x}) ≡ D.idₓ
    F⋄  : ∀ {x y z} → (g : z ▹₁ y) (f : y ▹₁ x)
        → F (f C.⋄ g) ≡ F f D.⋄ F g

-- A natural transformation N : 𝗙 ⇒ 𝗚 with naturality square natN.
record NaturalTransformation {ℓ₁ ℓ₁′ ℓ₂ ℓ₂′}
    {𝒪₁ : Set ℓ₁} {_▹₁_ : 𝒪₁ → 𝒪₁ → Set ℓ₁′}
    {𝒪₂ : Set ℓ₂} {_▹₂_ : 𝒪₂ → 𝒪₂ → Set ℓ₂′}
    {𝗖 : Category 𝒪₁ _▹₁_} {𝗗 : Category 𝒪₂ _▹₂_}
    (𝗙 𝗚 : Functor 𝗖 𝗗) : Set (ℓ₁ ⊔ ℓ₁′ ⊔ ℓ₂ ⊔ ℓ₂′) where
  private
    open module D = Category 𝗗 using (_⋄_)
    open module F = Functor 𝗙 using (Fₓ ; F)
    open module G = Functor 𝗚 using () renaming (Fₓ to Gₓ ; F to G)
  field
    N    : ∀ {x} → Fₓ x ▹₂ Gₓ x
    natN : ∀ {x y} → (f : y ▹₁ x) → (N ⋄ F f) ≡ (G f ⋄ N)

-- The opposite category: same objects, morphisms reversed; the identity
-- laws swap roles and associativity is used backwards.
Opposite : ∀ {ℓ ℓ′} → {𝒪 : Set ℓ} {_▹_ : 𝒪 → 𝒪 → Set ℓ′}
         → Category 𝒪 _▹_
         → Category 𝒪 (flip _▹_)
Opposite 𝗖 = record
  { idₓ    = C.idₓ
  ; _⋄_    = flip C._⋄_
  ; lid⋄   = C.rid⋄
  ; rid⋄   = C.lid⋄
  ; assoc⋄ = λ f g h → C.assoc⋄ h g f ⁻¹
  }
  where module C = Category 𝗖

-- A presheaf on 𝗖 is a functor from the opposite of 𝗖 into 𝗦𝗲𝘁 ℓ.
Presheaf : ∀ ℓ {ℓ′ ℓ″} → {𝒪 : Set ℓ′} {_▹_ : 𝒪 → 𝒪 → Set ℓ″}
         → (𝗖 : Category 𝒪 _▹_)
         → Set _
Presheaf ℓ 𝗖 = Functor (Opposite 𝗖) (𝗦𝗲𝘁 ℓ)

-- Presheaves valued in level-0 sets.
Presheaf₀ : ∀ {ℓ ℓ′} → {𝒪 : Set ℓ} {_▹_ : 𝒪 → 𝒪 → Set ℓ′}
          → (𝗖 : Category 𝒪 _▹_)
          → Set _
Presheaf₀ 𝗖 = Presheaf ℓ₀ 𝗖

--------------------------------------------------------------------------------
function [eigVec, eigVal] = dtiSplitTensor(tensor)
% Derive eigenvectors and eigenvalues from the volume of tensor data
%
% [eigVec, eigVal] = dtiSplitTensor(tensor)
%
% The input tensor array is in XxYxZx6xN format
% The order of the value in the 4th dimension is [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz])
%
% The routine returns a XxYxZx3x3xN volume of eigVec
% and a the XxYxZx3xN volume of eigVal arrays.
% Eigenvalues are sorted from high to low.
%
% X,Y,Z are positions in the volume
% N is the number of subjects.
%
% SEE ALSO: dtiRebuildTensor
%
% HISTORY:
%   2003.12.08 ASH ([email protected]) Wrote it.
%   2004.02.03 DTM ([email protected]) Sort eigVal from high to low
%   2004.02.17 ASH: added extra dimension for subjects
%   2005.01.06 ASH: truly added extra dimension for subjects
%
% The code is implemented in the mex file dtiSplitTensor.c. If you don't
% have that file compiled for your system, then the (slow) code below
% will be executed.
%
% (c) Stanford VISTA Team 2003

disp('This function is mexified for speed- compile dtiSplitTensor.c.');

sz = size(tensor);
if (length(sz) < 5), sz = [sz, 1]; end

% Preallocate the outputs.  The original code grew eigVec/eigVal inside the
% quadruple loop (the preallocation lines were commented out), which forces
% a reallocation on every assignment and is very slow for large volumes.
eigVec = zeros([sz(1:3), 3, 3, sz(5)]);
eigVal = zeros([sz(1:3), 3, sz(5)]);

h = mrvWaitbar(0, 'Computing tensors...');
for x = 1:sz(1)
    for y = 1:sz(2)
        for z = 1:sz(3)
            for j = 1:sz(5)
                % Rebuild the symmetric 3x3 tensor from the packed
                % [Dxx Dyy Dzz Dxy Dxz Dyz] representation.
                D = [tensor(x, y, z, 1, j), tensor(x, y, z, 4, j), tensor(x, y, z, 5, j); ...
                     tensor(x, y, z, 4, j), tensor(x, y, z, 2, j), tensor(x, y, z, 6, j); ...
                     tensor(x, y, z, 5, j), tensor(x, y, z, 6, j), tensor(x, y, z, 3, j)];
                [vec, val] = eig(D);
                % eig does not guarantee an ordering; sort descending by
                % negating, sorting ascending, then negating back.
                [negVal, order] = sort(-diag(val));
                eigVec(x, y, z, :, :, j) = vec(:, order);
                eigVal(x, y, z, :, j) = -negVal;
            end
        end
    end
    mrvWaitbar(x/sz(1), h);
end
close(h);
return;
-- Minimal test case (appears to be extracted from an Agda issue): `to∘from`
-- is expected to FAIL to type-check; the point is that the checker should
-- report a proper error rather than an internal __IMPOSSIBLE__.

open import Agda.Builtin.Equality using (_≡_; refl)

-- A small Sigma type with projections.
record ∃ {A : Set} (B : A → Set) : Set where
  constructor _,_
  field
    proj₁ : A
    proj₂ : B proj₁

open ∃ public

uncurry :
  {A : Set} {B : A → Set} {C : ∃ B → Set₁} →
  ((x : A) (y : B x) → C (x , y)) →
  ((p : ∃ B) → C p)
uncurry f (x , y) = f x y

-- Index-respecting function space between indexed families.
_⟶_ : {I : Set} → (I → Set) → (I → Set) → Set
A ⟶ B = ∀ {i} → A i → B i

postulate
  I   : Set
  i j : I
  R   : I → I → Set

record P : Set where
  field
    f : ∀ {k} → R i k → ∃ λ l → R k l

-- Q is a first-order reformulation of P: a choice function plus a proof
-- that the chosen index is related.
Q : Set
Q =
  ∃ λ (f : ∀ {j} → R i j → I) →
  (λ { (j , k) → ∃ λ (r : R i j) → f r ≡ k }) ⟶ uncurry R

to : P → Q
to f = (λ r → proj₁ (P.f f r)) , λ { (r , refl) → proj₂ (P.f f r) }

from : Q → P
P.f (from (f , g)) r = f r , g (r , refl)

-- Should produce a nice error and not throw an __IMPOSSIBLE__.

to∘from : ∀ q → to (from q) ≡ q
to∘from _ = refl
# Unit tests for the TrigPolys package (non-stdlib dependency).
using Test
using TrigPolys

@testset "Arithmetic" begin
    # Two random trigonometric polynomials of different degrees, compared
    # pointwise on a dense grid over one period.
    p1 = random_trig_poly(102)
    p2 = random_trig_poly(201)
    x = range(0; stop=2*pi, length=2000)
    # Operator results must agree with pointwise evaluation.
    @test isapprox((-p1).(x), -(p1.(x)))
    @test isapprox((p1 + p2).(x), p1.(x) .+ p2.(x))
    @test isapprox((p1 - p2).(x), p1.(x) .- p2.(x))
    @test isapprox((p1 * p2).(x), p1.(x) .* p2.(x))
    # Adding a scalar only shifts the constant coefficient a0.
    @test TrigPolys.a0(p1 + 3) == TrigPolys.a0(p1) + 3
    @test TrigPoly(pi) == TrigPoly(pi)
    # Commutativity, with both polynomial and scalar operands.
    @test p1 + p2 == p2 + p1
    @test p1 * p2 == p2 * p1
    @test p1 + pi == pi + p1
    @test p1 * pi == pi * p1
    @test p1 / pi == p1 * (1/pi)
end

@testset "Type Conversion and Promotion" begin
    F1 = Float32
    F2 = Float64
    VF1 = Vector{F1}
    VF2 = Vector{F2}
    # Mixed-precision operations promote to the wider element type.
    @test typeof(TrigPoly(F2(4))+F1(3)) == TrigPoly{F2, VF2}
    @test typeof(convert(TrigPoly{F2, VF2}, F1(.3))) == TrigPoly{F2, VF2}
    @test typeof(convert(TrigPoly{F1, VF1}, F2(.3))) == TrigPoly{F1, VF1}
    @test typeof(TrigPoly(F1(2))+TrigPoly(F2(3))) == TrigPoly{F2, VF2}
    @test typeof(TrigPoly(F1(2))+F1(3)) == TrigPoly{F1, VF1}
end

@testset "Evaluate and interpolate" begin
    n = 1000
    p = random_trig_poly(n)
    # s = 2n+1 equispaced samples determine a degree-n trig polynomial.
    s = p.n*2+1
    samples = [2*pi*(i-1)/s for i in 1:s]
    # B has the basis functions evaluated at the sample points as columns.
    B = hcat([TrigPolys.basis(p.n, xi) for xi in samples]...)
    u = randn(s)
    @test isapprox(evaluate(u), B'*u)
    @test isapprox(evaluate(p), p.(samples))
    @test isapprox(evaluateT(vec(u)), B*u)
    # Interpolation inverts evaluation.
    @test isapprox(interpolatev(evaluate(u)), u)
end
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import algebra.module.pi

/-!
# Bundled hom instances for module and multiplicative actions

This file defines instances for module, mul_action and related structures on bundled `_hom` types.

These are analogous to the instances in `algebra.module.pi`, but for bundled instead of unbundled
functions.
-/

variables {R S A B : Type*}

namespace add_monoid_hom

section
variables [monoid R] [monoid S] [add_monoid A] [add_comm_monoid B]
variables [distrib_mul_action R B] [distrib_mul_action S B]

-- Scalars act on a bundled hom pointwise on its codomain; additivity of the
-- result follows from `smul_add` in `B`.
instance : distrib_mul_action R (A →+ B) :=
{ smul := λ r f,
  { to_fun := r • f,
    map_zero' := by simp,
    map_add' := λ x y, by simp [smul_add] },
  one_smul := λ f, by simp,
  mul_smul := λ r s f, by simp [mul_smul],
  smul_add := λ r f g, ext $ λ x, by simp [smul_add],
  smul_zero := λ r, ext $ λ x, by simp [smul_zero] }

-- The coercion to functions commutes with the scalar action (definitionally).
@[simp] lemma coe_smul (r : R) (f : A →+ B) : ⇑(r • f) = r • f := rfl
lemma smul_apply (r : R) (f : A →+ B) (x : A) : (r • f) x = r • f x := rfl

-- Commuting, tower, and central-scalar structure all transfer pointwise.
instance [smul_comm_class R S B] : smul_comm_class R S (A →+ B) :=
⟨λ a b f, ext $ λ x, smul_comm _ _ _⟩

instance [has_scalar R S] [is_scalar_tower R S B] : is_scalar_tower R S (A →+ B) :=
⟨λ a b f, ext $ λ x, smul_assoc _ _ _⟩

instance [distrib_mul_action Rᵐᵒᵖ B] [is_central_scalar R B] : is_central_scalar R (A →+ B) :=
⟨λ a b, ext $ λ x, op_smul_eq_smul _ _⟩

end

-- A module structure on the codomain gives one on bundled homs, reusing the
-- `distrib_mul_action` fields defined above.
instance [semiring R] [add_monoid A] [add_comm_monoid B] [module R B] : module R (A →+ B) :=
{ add_smul := λ r s x, ext $ λ y, by simp [add_smul],
  zero_smul := λ x, ext $ λ y, by simp [zero_smul],
  ..add_monoid_hom.distrib_mul_action }

end add_monoid_hom
function id = mapWindowIndices(map,mapWindow)
%
% id = mapWindowIndices(map,mapWindow)
%
% Returns the indices of entries of the vector `map` that fall inside
% `mapWindow` = [lo hi].
%
% When lo < hi the window is the ordinary closed interval: lo <= map <= hi.
% When lo >= hi the window is treated as wrapping around (e.g. a circular
% quantity such as phase): map >= lo OR map <= hi.
%
% djh, 7/98

lo = mapWindow(1);
hi = mapWindow(2);
if hi - lo > 0
    % Ordinary (non-wrapping) window.
    inWindow = (map >= lo) & (map <= hi);
else
    % Wrap-around window.
    inWindow = (map >= lo) | (map <= hi);
end
id = find(inWindow);
module BinaryFormats.Data.List

%default total

||| Split a list at `offset`, failing when the list has fewer than
||| `offset` elements (unlike `splitAt`, which silently truncates).
|||
||| BUG FIX: the original condition was `length bits < offset`, which is
||| inverted — `toMaybe` returns `Just` when its Bool is True, so the old
||| code succeeded exactly when the list was too short and failed otherwise.
export
trySplitAt : {a : Type} -> Nat -> List a -> Maybe (List a, List a)
trySplitAt offset bits =
    toMaybe (offset <= length bits) (splitAt offset bits)

||| Drop `offset` elements from the front of a list, failing when the list
||| has fewer than `offset` elements (unlike `drop`).
export
tryDrop : {a : Type} -> Nat -> List a -> Maybe (List a)
tryDrop offset bits =
    toMaybe (offset <= length bits) (drop offset bits)
State Before: X : Type u_1 Y : Type ?u.711872 E : Type u_2 R : Type ?u.711878 inst✝⁷ : MeasurableSpace X inst✝⁶ : TopologicalSpace X inst✝⁵ : MeasurableSpace Y inst✝⁴ : TopologicalSpace Y inst✝³ : NormedAddCommGroup E f : X → E μ : MeasureTheory.Measure X s : Set X inst✝² : OpensMeasurableSpace X inst✝¹ : IsLocallyFiniteMeasure μ K : Set X a b : X inst✝ : MetrizableSpace X hK : IsCompact K hf : ContinuousOn f K ⊢ IntegrableOn f K State After: X : Type u_1 Y : Type ?u.711872 E : Type u_2 R : Type ?u.711878 inst✝⁷ : MeasurableSpace X inst✝⁶ : TopologicalSpace X inst✝⁵ : MeasurableSpace Y inst✝⁴ : TopologicalSpace Y inst✝³ : NormedAddCommGroup E f : X → E μ : MeasureTheory.Measure X s : Set X inst✝² : OpensMeasurableSpace X inst✝¹ : IsLocallyFiniteMeasure μ K : Set X a b : X inst✝ : MetrizableSpace X hK : IsCompact K hf : ContinuousOn f K this : MetricSpace X := metrizableSpaceMetric X ⊢ IntegrableOn f K Tactic: letI := metrizableSpaceMetric X State Before: X : Type u_1 Y : Type ?u.711872 E : Type u_2 R : Type ?u.711878 inst✝⁷ : MeasurableSpace X inst✝⁶ : TopologicalSpace X inst✝⁵ : MeasurableSpace Y inst✝⁴ : TopologicalSpace Y inst✝³ : NormedAddCommGroup E f : X → E μ : MeasureTheory.Measure X s : Set X inst✝² : OpensMeasurableSpace X inst✝¹ : IsLocallyFiniteMeasure μ K : Set X a b : X inst✝ : MetrizableSpace X hK : IsCompact K hf : ContinuousOn f K this : MetricSpace X := metrizableSpaceMetric X ⊢ IntegrableOn f K State After: X : Type u_1 Y : Type ?u.711872 E : Type u_2 R : Type ?u.711878 inst✝⁷ : MeasurableSpace X inst✝⁶ : TopologicalSpace X inst✝⁵ : MeasurableSpace Y inst✝⁴ : TopologicalSpace Y inst✝³ : NormedAddCommGroup E f : X → E μ : MeasureTheory.Measure X s : Set X inst✝² : OpensMeasurableSpace X inst✝¹ : IsLocallyFiniteMeasure μ K : Set X a b : X inst✝ : MetrizableSpace X hK : IsCompact K hf : ContinuousOn f K this : MetricSpace X := metrizableSpaceMetric X x : X hx : x ∈ K ⊢ IntegrableAtFilter f (𝓝[K] x) Tactic: refine' 
LocallyIntegrableOn.integrableOn_isCompact (fun x hx => _) hK State Before: X : Type u_1 Y : Type ?u.711872 E : Type u_2 R : Type ?u.711878 inst✝⁷ : MeasurableSpace X inst✝⁶ : TopologicalSpace X inst✝⁵ : MeasurableSpace Y inst✝⁴ : TopologicalSpace Y inst✝³ : NormedAddCommGroup E f : X → E μ : MeasureTheory.Measure X s : Set X inst✝² : OpensMeasurableSpace X inst✝¹ : IsLocallyFiniteMeasure μ K : Set X a b : X inst✝ : MetrizableSpace X hK : IsCompact K hf : ContinuousOn f K this : MetricSpace X := metrizableSpaceMetric X x : X hx : x ∈ K ⊢ IntegrableAtFilter f (𝓝[K] x) State After: no goals Tactic: exact hf.integrableAt_nhdsWithin_of_isSeparable hK.measurableSet hK.isSeparable hx
lemma tendsto_add_filterlim_at_infinity': fixes c :: "'b::real_normed_vector" and F :: "'a filter" assumes "filterlim f at_infinity F" and "(g \<longlongrightarrow> c) F" shows "filterlim (\<lambda>x. f x + g x) at_infinity F"
[STATEMENT] lemma (in cpx_sq_mat) cpx_sq_mat_mult: assumes "A \<in> fc_mats" and "B \<in> fc_mats" shows "A * B \<in> fc_mats" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A * B \<in> fc_mats [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. A * B \<in> fc_mats [PROOF STEP] have "dim_row (A * B) = dimR" [PROOF STATE] proof (prove) goal (1 subgoal): 1. dim_row (A * B) = dimR [PROOF STEP] using assms fc_mats_carrier [PROOF STATE] proof (prove) using this: A \<in> fc_mats B \<in> fc_mats fc_mats = carrier_mat dimR dimC goal (1 subgoal): 1. dim_row (A * B) = dimR [PROOF STEP] by simp [PROOF STATE] proof (state) this: dim_row (A * B) = dimR goal (1 subgoal): 1. A * B \<in> fc_mats [PROOF STEP] moreover [PROOF STATE] proof (state) this: dim_row (A * B) = dimR goal (1 subgoal): 1. A * B \<in> fc_mats [PROOF STEP] have "dim_col (A * B) = dimR" [PROOF STATE] proof (prove) goal (1 subgoal): 1. dim_col (A * B) = dimR [PROOF STEP] using assms fc_mats_carrier dim_eq [PROOF STATE] proof (prove) using this: A \<in> fc_mats B \<in> fc_mats fc_mats = carrier_mat dimR dimC dimR = dimC goal (1 subgoal): 1. dim_col (A * B) = dimR [PROOF STEP] by simp [PROOF STATE] proof (state) this: dim_col (A * B) = dimR goal (1 subgoal): 1. A * B \<in> fc_mats [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: dim_row (A * B) = dimR dim_col (A * B) = dimR [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: dim_row (A * B) = dimR dim_col (A * B) = dimR goal (1 subgoal): 1. A * B \<in> fc_mats [PROOF STEP] using fc_mats_carrier carrier_mat_def dim_eq [PROOF STATE] proof (prove) using this: dim_row (A * B) = dimR dim_col (A * B) = dimR fc_mats = carrier_mat dimR dimC carrier_mat ?nr ?nc = {m. dim_row m = ?nr \<and> dim_col m = ?nc} dimR = dimC goal (1 subgoal): 1. A * B \<in> fc_mats [PROOF STEP] by auto [PROOF STATE] proof (state) this: A * B \<in> fc_mats goal: No subgoals! [PROOF STEP] qed
(* Auto-generated QuickChick harness (lfind/adtind tooling): property-tests a
   synthesized conjecture over the `natural` datatype before any proof
   attempt. *)
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.

(* Derive the instances QuickChick needs to generate and display counterexamples. *)
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.

(* Candidate conjecture; left Admitted — QuickChick below searches for a
   counterexample rather than proving it. *)
Lemma conj5synthconj4 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus lv0 Zero) (plus (mult lv1 (Succ lv2)) (Succ lv2))).
Admitted.

QuickChick conj5synthconj4.
Kingston’s DataTraveler® Vault – Privacy 3.0 offers affordable business-grade security with 256-bit AES hardware-based encryption in XTS mode. It safeguards all stored data with complex password protection. DTVP 3.0 is available with optional anti-virus protection from ESET that is easy to deploy and protects the drive’s contents from viruses and other threats. It is also available with a SafeConsole management option that uses BlockMaster software to let IT professionals remotely set and configure passwords, activate auditing for compliance, and more.
------------------------------------------------------------------------
-- Virtual machine instructions, state etc.
------------------------------------------------------------------------

open import Prelude

module Lambda.Virtual-machine.Instructions (Name : Type) where

open import Equality.Propositional

open import Lambda.Syntax Name

------------------------------------------------------------------------
-- Instruction set

mutual

  -- Instructions, indexed by the number n of variables in scope.

  data Instr (n : ℕ) : Type where
    var     : Fin n → Instr n              -- push the value of a variable
    clo     : Code (suc n) → Instr n       -- build a closure (body sees one extra variable)
    app ret : Instr n                      -- function application / return
    cal tcl : Name → Instr n               -- Calls and tail calls.
    con     : Bool → Instr n               -- push a boolean constant
    bra     : Code n → Code n → Instr n    -- branch on a boolean: two alternatives

  -- Code.

  Code : ℕ → Type
  Code n = List (Instr n)

-- Environments and values.

open Closure Code

------------------------------------------------------------------------
-- Stacks and states

-- Stack elements: either a value or a saved return point (code plus
-- the environment to restore).  Note `ret` here is distinct from the
-- instruction of the same name above.

data Stack-element : Type where
  val : Value → Stack-element
  ret : ∀ {n} → Code n → Env n → Stack-element

-- Stacks.

Stack : Type
Stack = List Stack-element

-- States: the code being executed, the stack, and the environment.

data State : Type where
  ⟨_,_,_⟩ : ∀ {n} → Code n → Stack → Env n → State

------------------------------------------------------------------------
-- Results

-- The result of running the VM one step.

data Result : Type where
  continue : State → Result
  done     : Value → Result
  crash    : Result
module Starfield

-- Background starfield effect

import Effect.SDL
import Effect.StdIO
import Effect.State
import Rnd

data Starfield : Type where -- for labelling state

||| Effect signature for starfield computations: labelled state holding the
||| star positions, plus a random-number effect.
StarEff : (Type -> Type) -> Type -> Type
StarEff m t = { [Starfield ::: STATE (List (Int, Int)), RND] } Eff m t

||| Generate `n` random star positions (within the 640x480 screen) on top of
||| `acc` and store the result in the Starfield state.
|||
||| IMPROVEMENT: the original second clause matched a bare `n` and recursed
||| on `n - 1`; matching `(S k)` and recursing on `k` is behaviourally
||| identical but structurally decreasing, so totality is evident.
initStarfield : List (Int, Int) -> Nat -> StarEff m ()
initStarfield acc Z = Starfield :- put acc
initStarfield acc (S k) = do x <- rndInt 0 639
                             y <- rndInt 0 479
                             initStarfield ((x, y) :: acc) k

||| Move every star down one pixel; stars that leave the bottom of the
||| screen (y > 479) re-enter at the top at a fresh random x position.
||| (Note: the accumulator reverses the list order each frame, which does
||| not affect rendering.)
updateStarfield : StarEff m ()
updateStarfield = do xs <- Starfield :- get
                     xs' <- upd [] xs
                     Starfield :- put xs'
  where upd : List (Int, Int) -> List (Int, Int) ->
              { [RND] } Eff m (List (Int, Int))
        upd acc [] = return acc
        upd acc ((x, y) :: xs)
           = if (y > 479)
                then do x <- rndInt 0 639
                        upd ((x, 0) :: acc) xs
                else upd ((x, y+1) :: acc) xs

||| Draw each star as a single white pixel (a degenerate line).
drawStarfield : List (Int, Int) -> { [SDL_ON] } Eff IO ()
drawStarfield [] = return ()
drawStarfield ((x, y) :: xs) = do line white x y x y
                                  drawStarfield xs
/-
Copyright (c) 2022 Yury G. Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury G. Kudryashov
-/
import topology.algebra.order.basic
import algebra.order.archimedean

/-!
# Rational numbers are dense in a linear ordered archimedean field

In this file we prove that coercion from `ℚ` to a linear ordered archimedean field has dense
range. This lemma is in a separate file because `topology.algebra.order.basic` does not import
`algebra.order.archimedean`.
-/

variables {𝕜 : Type*} [linear_ordered_field 𝕜] [topological_space 𝕜] [order_topology 𝕜]
  [archimedean 𝕜]

/-- Rational numbers are dense in a linear ordered archimedean field. -/
-- `dense_of_exists_between` reduces density to producing an element of the
-- range strictly between any `a < b`; `exists_rat_btwn` supplies such a
-- rational in any archimedean linear ordered field.
lemma rat.dense_range_cast : dense_range (coe : ℚ → 𝕜) :=
dense_of_exists_between $ λ a b h, set.exists_range_iff.2 $ exists_rat_btwn h
If $f$ is analytic at $z$, then the derivative of $c f$ at $z$ is $c$ times the derivative of $f$ at $z$.
      ! Time-stepping driver for a 1-D fluid/Poisson model with Cairns-type
      ! electrons: a leapfrog-like scheme that alternates updates of the
      ! slot-1 and slot-2 copies of density NI, velocity VX and potential PH.
      ! Periodic boundaries are implemented with two ghost cells on each side
      ! (indices 1,2 and NXP3,NXP4 copied from the opposite end).
      ! NOTE(review): FD4n presumably returns a 4th-order finite-difference
      ! spatial derivative, and J/NTIME/DT/NXP* live in module global —
      ! confirm against the module source.
      SUBROUTINE main_vector_cairn
      USE global
      INTEGER ::IC

! === Main Loop =======================================================
      DO 99 J = 1, NTIME

!---- Computation of parameter at integral time steps -----------------
      ! spatial derivatives of the slot-1 fields
      CALL FD4n(VX(1,:), FD0(1,:))
      CALL FD4n(NI(1,:), FD1(1,:))
      CALL FD4n(PH(1,:), FD2(1,:))

      ! continuity and momentum updates on the interior cells
      DO 17 IC = 3, NXP2
        NI(2,IC) = NI(2,IC) - DT*(NI(1,IC)*FD0(1,IC)+ VX(1,IC)    &
     &             *FD1(1,IC))
        VX(2,IC) = VX(2,IC) - DT*(VX(1,IC)*FD0(1,IC) + FD2(1,IC))
 17   CONTINUE

!---- Boundary condition -------
      ! periodic wrap via ghost cells
      NI(2,1) = NI(2,NXP1)
      NI(2,2) = NI(2,NXP2)
      NI(2,NXP3) = NI(2,3)
      NI(2,NXP4) = NI(2,4)
!
      VX(2,1) = VX(2,NXP1)
      VX(2,2) = VX(2,NXP2)
      VX(2,NXP3) = VX(2,3)
      VX(2,NXP4) = VX(2,4)

!---- FILTERING -------------------------------------------------------
      CALL FILTERn(NI(2,:))
      CALL FILTERn(VX(2,:))

!---- Giving initial value --------------------------------------------
      ! seed the Poisson iteration with the previous potential
      PH(2,:) = PH(1,:)

! --- Poisson solver --------------------------------------------------
      CALL POISSON_SOL_CAIRN(PH(2,:), NI(2,:))

!---- FILTERING -------------------------------------------------------
      CALL FILTERn(PH(2,:))
!
!---- Computation of parameter at HALF integral time steps ------------
      ! same sequence again, advancing slot 1 from the updated slot 2
      CALL FD4n(VX(2,:), FD0(1,:))
      CALL FD4n(NI(2,:), FD1(1,:))
      CALL FD4n(PH(2,:), FD2(1,:))
!
      DO 20 IC = 3, NXP2
        NI(1,IC) = NI(1,IC) - DT*(NI(2,IC)*FD0(1,IC) + VX(2,IC)   &
     &             *FD1(1,IC))
        VX(1,IC) = VX(1,IC) - DT*(VX(2,IC)*FD0(1,IC) + FD2(1,IC))
 20   CONTINUE

!---- Boundary condition -------
      NI(1,1) = NI(1,NXP1)
      NI(1,2) = NI(1,NXP2)
      NI(1,NXP3) = NI(1,3)
      NI(1,NXP4) = NI(1,4)
!
      VX(1,1) = VX(1,NXP1)
      VX(1,2) = VX(1,NXP2)
      VX(1,NXP3) = VX(1,3)
      VX(1,NXP4) = VX(1,4)
!
!---- FILTERING -------------------------------------------------------
      CALL FILTERn(NI(1,:))
      CALL FILTERn(VX(1,:))

!---- Giving initial value --------------------------------------------
      PH(1,:) = PH(2,:)
!
      CALL POISSON_SOL_CAIRN(PH(1,:), NI(1,:))

!---- FILTERING -------------------------------------------------------
      CALL FILTERn(PH(1,:))

      ! periodic diagnostics dump every TIDATA steps
      IF (MOD(J,TIDATA)==0) THEN
        CALL datawriting
      END IF

 99   CONTINUE

      END SUBROUTINE main_vector_cairn
Formal statement is: lemma degree_monom_le: "degree (monom a n) \<le> n" Informal statement is: The degree of a monomial is less than or equal to the exponent of the monomial.
""" Google Compute Engine API """ module _compute export compute using ..api using ...root """ Google Compute Engine API root. """ compute = APIRoot( "$API_ROOT/compute/v1/projects/{project}", Dict( "cloud-platform" => "Full access to all resources and services in the specified Cloud Platform project.", "compute" => "Read-write access to Google Compute Engine methods.", "compute.readonly" => "Read-only access to Google Compute Engine methods.", ); Address=APIResource("regions/{region}/addresses"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/addresses", "Retrieves an aggregated list of addresses."), delete=APIMethod(:DELETE, "{address}", "Deletes the specified address resource."), get=APIMethod(:GET, "{address}", "Returns the specified address resource."), insert=APIMethod(:POST, "", "Creates an address resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of addresses contained within the specified region."; transform=(x, t) -> map(t, get(x, :items, []))), ), Autoscaler=APIResource("zones/{zone}/autoscalers"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/autoscalers", "Retrieves an aggregated list of autoscalers."), delete=APIMethod(:DELETE, "{autoscaler}", "Deletes the specified autoscaler."), get=APIMethod(:GET, "{autoscaler}", "Returns the specified autoscaler resource. Get a list of available autoscalers by making a list() request."), insert=APIMethod(:POST, "", "Creates an autoscaler in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of autoscalers contained within the specified zone."; transform=(x, t) -> map(t, get(x, :items, []))), patch=APIMethod(:PATCH, "", "Updates an autoscaler in the specified project using the data included in the request. This method supports patch semantics. 
Required query parameters: autoscaler"), update=APIMethod(:PUT, "", "Updates an autoscaler in the specified project using the data included in the request."), ), BackendService=APIResource("global/backendServices"; delete=APIMethod(:DELETE, "{backendService}", "Deletes the specified BackendService resource."), get=APIMethod(:GET, "{backendService}", "Returns the specified BackendService resource. Get a list of available backend services by making a list() request."), getHealth=APIMethod(:POST, "{backendService}/getHealth", "Gets the most recent health check results for this BackendService."), insert=APIMethod(:POST, "", "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information."), list=APIMethod(:GET, "", "Retrieves the list of BackendService resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), patch=APIMethod(:PATCH, "{backendService}", "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics."), update=APIMethod(:PUT, "{backendService}", "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information."), ), DiskType=APIResource("zones/{zone}/diskTypes"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/diskTypes", "Retrieves an aggregated list of disk types."), get=APIMethod(:GET, "{diskType}", "Returns the specified disk type. 
Get a list of available disk types by making a list() request."), list=APIMethod(:GET, "", "Retrieves a list of disk types available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), Disk=APIResource("zones/{zone}/disks"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/disks", "Retrieves an aggregated list of persistent disks."), createSnapshot=APIMethod(:POST, "{disk}/createSnapshot", "Creates a snapshot of a specified persistent disk."), delete=APIMethod(:DELETE, "{disk}", "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots."), get=APIMethod(:GET, "{disk}", "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request."), insert=APIMethod(:POST, "", "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property."), list=APIMethod(:GET, "", "Retrieves a list of persistent disks contained within the specified zone."; transform=(x, t) -> map(t, get(x, :items, []))), resize=APIMethod(:POST, "{disk}/resize", "Resizes the specified persistent disk."), ), Firewall=APIResource("global/firewalls"; delete=APIMethod(:DELETE, "{firewall}", "Deletes the specified firewall."), get=APIMethod(:GET, "{firewall}", "Returns the specified firewall."), insert=APIMethod(:POST, "", "Creates a firewall rule in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of firewall rules available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), patch=APIMethod(:PATCH, "{firewall}", "Updates the specified firewall rule with the data included in the request. This method supports patch semantics."), update=APIMethod(:PUT, "{firewall}", "Updates the specified firewall rule with the data included in the request."), ), ForwardingRule=APIResource("regions/{region}/forwardingRules"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/forwardingRules", "Retrieves an aggregated list of forwarding rules."), delete=APIMethod(:DELETE, "{forwardingRule}", "Deletes the specified ForwardingRule resource."), get=APIMethod(:GET, "{forwardingRule}", "Returns the specified ForwardingRule resource."), insert=APIMethod(:POST, "", "Creates a ForwardingRule resource in the specified project and region using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of ForwardingRule resources available to the specified project and region."; transform=(x, t) -> map(t, get(x, :items, []))), setTarget=APIMethod(:POST, "{forwardingRule}/setTarget", "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target."), ), GlobalAddress=APIResource("global/addresses"; delete=APIMethod(:DELETE, "{address}", "Deletes the specified address resource."), get=APIMethod(:GET, "{address}", "Returns the specified address resource. Get a list of available addresses by making a list() request."), insert=APIMethod(:POST, "", "Creates an address resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of global addresses."; transform=(x, t) -> map(t, get(x, :items, []))), ), GlobalForwardingRule=APIResource("global/forwardingRules"; delete=APIMethod(:DELETE, "{forwardingRule}", "Deletes the specified ForwardingRule resource."), get=APIMethod(:GET, "{forwardingRule}", "Returns the specified ForwardingRule resource. Get a list of available forwarding rules by making a list() request."), insert=APIMethod(:POST, "", "Creates a ForwardingRule resource in the specified project and region using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of ForwardingRule resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), setTarget=APIMethod(:POST, "{forwardingRule}/setTarget", "Changes target URL for forwarding rule. The new target should be of the same type as the old target."), ), GlobalOperation=APIResource("global/operations"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/operations", "Retrieves an aggregated list of all operations."), delete=APIMethod(:DELETE, "{operation}", "Deletes the specified Operations resource."), get=APIMethod(:GET, "{operation}", "Retrieves the specified Operations resource. 
Get a list of operations by making a list() request."), list=APIMethod(:GET, "", "Retrieves a list of Operation resources contained within the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), HealthCheck=APIResource("global/healthChecks"; delete=APIMethod(:DELETE, "{healthCheck}", "Deletes the specified HealthCheck resource."), get=APIMethod(:GET, "{healthCheck}", "Returns the specified HealthCheck resource. Get a list of available health checks by making a list() request."), insert=APIMethod(:POST, "", "Creates a HealthCheck resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of HealthCheck resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), patch=APIMethod(:PATCH, "{healthCheck}", "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports patch semantics."), update=APIMethod(:PUT, "{healthCheck}", "Updates a HealthCheck resource in the specified project using the data included in the request."), ), HttpHealthCheck=APIResource("global/httpHealthChecks"; delete=APIMethod(:DELETE, "{httpHealthCheck}", "Deletes the specified HttpHealthCheck resource."), get=APIMethod(:GET, "{httpHealthCheck}", "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request."), insert=APIMethod(:POST, "", "Creates a HttpHealthCheck resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of HttpHealthCheck resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), patch=APIMethod(:PATCH, "{httpHealthCheck}", "Updates a HttpHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics."), update=APIMethod(:PUT, "{httpHealthCheck}", "Updates a HttpHealthCheck resource in the specified project using the data included in the request."), ), HttpsHealthCheck=APIResource("global/httpsHealthChecks"; delete=APIMethod(:DELETE, "{httpsHealthCheck}", "Deletes the specified HttpsHealthCheck resource."), get=APIMethod(:GET, "{httpsHealthCheck}", "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request."), insert=APIMethod(:POST, "", "Creates a HttpsHealthCheck resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of HttpsHealthCheck resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), patch=APIMethod(:PATCH, "{httpsHealthCheck}", "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics."), update=APIMethod(:PUT, "{httpsHealthCheck}", "Updates a HttpsHealthCheck resource in the specified project using the data included in the request."), ), Image=APIResource("global/images"; delete=APIMethod(:DELETE, "{image}", "Deletes the specified image."), deprecate=APIMethod(:POST, "{image}/deprecate", "Sets the deprecation status of an image. If an empty request body is given, clears the deprecation status instead."), get=APIMethod(:GET, "{image}", "Returns the specified image. Get a list of available images by making a list() request."), getFromFamily=APIMethod(:GET, "family/{family}", "Returns the latest image that is part of an image family and is not deprecated."), insert=APIMethod(:POST, "", "Creates an image in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. 
This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud."; transform=(x, t) -> map(t, get(x, :items, []))), ), InstanceGroupManager=APIResource("zones/{zone}/instanceGroupManagers"; abandonInstances=APIMethod(:POST, "{instanceGroupManager}/abandonInstances", "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method."), aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/instanceGroupManagers", "Retrieves the list of managed instance groups and groups them by zone."), delete=APIMethod(:DELETE, "instanceGroupManager", "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information."), deleteInstances=APIMethod(:POST, "{instanceGroupManager}/deleteInstances", "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method."), get=APIMethod(:GET, "{instanceGroupManager}", "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request."), insert=APIMethod(:POST, "", "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method."), list=APIMethod(:GET, "", "Retrieves a list of managed instance groups that are contained within the specified project and zone."; transform=(x, t) -> map(t, get(x, :items, []))), listManagedInstances=APIMethod(:POST, "{instanceGroupManager}/listManagedInstances", "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action."), recreateInstances=APIMethod(:POST, "{instanceGroupManager}/recreateInstances", "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method."), resize=APIMethod(:POST, "{instanceGroupManager}/resize", "Resizes the managed instance group. 
If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. Required query parameters: size"), setInstanceTemplate=APIMethod(:POST, "{instanceGroupManager}/setInstanceTemplate", "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them."), setTargetPools=APIMethod(:POST, "{instanceGroupManager}/setTargetPools", "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group."), ), InstanceGroup=APIResource("zones/{zone}/instanceGroups"; addInstances=APIMethod(:POST, "{instanceGroup}/addInstances", "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information."), aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/instanceGroups", "Retrieves the list of instance groups and sorts them by zone."), delete=APIMethod(:DELETE, "{instanceGroup}", "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information."), get=APIMethod(:GET, "{instanceGroup}", "Returns the specified instance group. 
Get a list of available instance groups by making a list() request."), insert=APIMethod(:POST, "", "Creates an instance group in the specified project using the parameters that are included in the request."), list=APIMethod(:GET, "", "Retrieves the list of instance groups that are located in the specified project and zone."; transform=(x, t) -> map(t, get(x, :items, []))), listInstances=APIMethod(:POST, "{instanceGroup}/listInstances", "Lists the instances in the specified instance group."), removeInstances=APIMethod(:POST, "{instanceGroup}/removeInstances", "Removes one or more instances from the specified instance group, but does not delete those instances."), setNamedPorts=APIMethod(:POST, "{instanceGroup}/setNamedPorts", "Sets the named ports for the specified instance group."), ), InstanceTemplate=APIResource("global/instanceTemplates"; delete=APIMethod(:DELETE, "{instanceTemplate}", "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone."), get=APIMethod(:GET, "{instanceTemplate}", "Returns the specified instance template. Get a list of available instance templates by making a list() request."), insert=APIMethod(:POST, "", "Creates an instance template in the specified project using the data that is included in the request. 
If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template."), list=APIMethod(:GET, "", "Retrieves a list of instance templates that are contained within the specified project and zone."; transform=(x, t) -> map(t, get(x, :items, []))), ), Instance=APIResource("zones/{zone}/instances"; addAccessConfig=APIMethod(:POST, "{instance}/addAccessConfig", "Adds an access config to an instance's network interface. Required query parameters: networkInterface"), aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/instances", "Retrieves aggregated list of instances."), attachDisk=APIMethod(:POST, "{instance}/attachDisk", "Attaches a Disk resource to an instance."), delete=APIMethod(:DELETE, "{instance}", "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance."), deleteAccessConfig=APIMethod(:POST, "{instance}/deleteAccessConfig", "Deletes an access config from an instance's network interface. Required query parameters: accessConfig, networkInterface"), detachDisk=APIMethod(:POST, "{instance}/detachDisk", "Detaches a disk from an instance. Required query parameters: deviceName"), get=APIMethod(:GET, "{instance}", "Returns the specified Instance resource. 
Get a list of available instances by making a list() request."), getSerialPortOutput=APIMethod(:GET, "{instance}/serialPort", "Returns the specified instance's serial port output."), insert=APIMethod(:POST, "", "Creates an instance resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of instances contained within the specified zone."; transform=(x, t) -> map(t, get(x, :items, []))), reset=APIMethod(:POST, "{instance}/reset", "Performs a hard reset on the instance."), setDiskAutoDelete=APIMethod(:POST, "{instance}/setDiskAutoDelete", "Sets the auto-delete flag for a disk attached to an instance. Required query parameters: autoDelete, deviceName"), setMachineType=APIMethod(:POST, "{instance}/setMachineType", "Changes the machine type for a stopped instance to the machine type specified in the request."), setMetadata=APIMethod(:POST, "{instance}/setMetadata", "Sets metadata for the specified instance to the data included in the request."), setScheduling=APIMethod(:POST, "{instance}/setScheduling", "Sets an instance's scheduling options."), setServiceAccount=APIMethod(:POST, "{instance}/setServiceAccount", "Sets the service account on the instance."), setTags=APIMethod(:POST, "{instance}/setTags", "Sets tags for the specified instance to the data included in the request."), start=APIMethod(:POST, "{instance}/start", "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance."), startWithEncryptionKey=APIMethod(:POST, "{instance}/startWithEncryptionKey", "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance."), stop=APIMethod(:POST, "{instance}/stop", "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. 
Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance."), ), License=APIResource("global/licenses"; get=APIMethod(:GET, "{license}", "Returns the specified License resource. Get a list of available licenses by making a list() request."), ), MachineType=APIResource("zones/{zone}/machineTypes"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/machineTypes", "Retrieves an aggregated list of machine types."), get=APIMethod(:GET, "{machineType}", "Returns the specified machine type. Get a list of available machine types by making a list() request."), list=APIMethod(:GET, "{machineTypes}", "Retrieves a list of machine types available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), Network=APIResource("global/networks"; delete=APIMethod(:DELETE, "{network}", "Deletes the specified network."), get=APIMethod(:GET, "{network}", "Returns the specified network. 
Get a list of available networks by making a list() request."), insert=APIMethod(:POST, "", "Creates a network in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of networks available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), Project=APIResource(""; get=APIMethod(:GET, "", "Returns the specified Project resource."), moveDisk=APIMethod(:POST, "moveDisk", "Moves a persistent disk from one zone to another."), moveInstance=APIMethod(:POST, "moveInstance", "Moves an instance and its attached persistent disks from one zone to another."), setCommonInstanceMetadata=APIMethod(:POST, "setCommonInstanceMetadata", "Sets metadata common to all instances within the specified project using the data included in the request."), setUsageExportBucket=APIMethod(:POST, "setUsageExportBucket", "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled."), ), RegionOperation=APIResource("regions/{region}/operations"; delete=APIMethod(:DELETE, "{operation}", "Deletes the specified region-specific Operations resource."), get=APIMethod(:GET, "{operation}", "Retrieves the specified region-specific Operations resource."), list=APIMethod(:GET, "", "Retrieves a list of Operation resources contained within the specified region."; transform=(x, t) -> map(t, get(x, :items, []))), ), Region=APIResource("regions"; get=APIMethod(:GET, "{region}", "Returns the specified Region resource. 
Get a list of available regions by making a list() request."), list=APIMethod(:GET, "", "Retrieves the list of region resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), Router=APIResource("regions/{region}/routers"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/routers", "Retrieves an aggregated list of routers."), delete=APIMethod(:DELETE, "{router}", "Deletes the specified Router resource."), get=APIMethod(:GET, "{router}", "Returns the specified Router resource. Get a list of available routers by making a list() request."), getRouterStatus=APIMethod(:GET, "{router}/getRouterStatus", "Retrieves runtime information of the specified router."), insert=APIMethod(:POST, "", "Creates a Router resource in the specified project and region using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of Router resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), patch=APIMethod(:PATCH, "{router}", "Updates the specified Router resource with the data included in the request. This method supports patch semantics."), preview=APIMethod(:POST, "{router}/preview", "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router."), update=APIMethod(:PUT, "{router}", "Updates the specified Router resource with the data included in the request."), ), Route=APIResource("global/routes"; delete=APIMethod(:DELETE, "{route}", "Deletes the specified Route resource."), get=APIMethod(:GET, "{route}", "Returns the specified Route resource. 
Get a list of available routes by making a list() request."), insert=APIMethod(:POST, "", "Creates a Route resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of Route resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), Snapshot=APIResource("global/snapshots"; delete=APIMethod(:DELETE, "{snapshot}", "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot."), get=APIMethod(:GET, "{snapshot}", "Returns the specified Snapshot resource. Get a list of available snapshots by making a list() request."), list=APIMethod(:GET, "", "Retrieves the list of Snapshot resources contained within the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), SslCertificate=APIResource("global/sslCertificates"; delete=APIMethod(:DELETE, "{sslCertificate}", "Deletes the specified SslCertificate resource."), get=APIMethod(:GET, "{sslCertificate}", "Returns the specified SslCertificate resource. Get a list of available SSL certificates by making a list() request."), insert=APIMethod(:POST, "", "Creates a SslCertificate resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of SslCertificate resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), Subnetwork=APIResource("regions/{region}/subnetworks"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/subnetworks", "Retrieves an aggregated list of subnetworks."), delete=APIMethod(:DELETE, "{subnetwork}", "Deletes the specified subnetwork."), get=APIMethod(:GET, "{subnetwork}", "Returns the specified subnetwork. 
Get a list of available subnetworks list() request."), insert=APIMethod(:POST, "", "Creates a subnetwork in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of subnetworks available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), TargetHttpProxy=APIResource("global/targetHttpProxies"; delete=APIMethod(:DELETE, "{targetHttpProxy}", "Deletes the specified TargetHttpProxy resource."), get=APIMethod(:GET, "{targetHttpProxy}", "Returns the specified TargetHttpProxy resource. Get a list of available target HTTP proxies by making a list() request."), insert=APIMethod(:POST, "", "Creates a TargetHttpProxy resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of TargetHttpProxy resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), setUrlMap=APIMethod(:POST, "$API_ROOT/compute/v1/projects/{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", "Changes the URL map for TargetHttpProxy."), ), TargetHttpsProxy=APIResource("global/targetHttpsProxies"; delete=APIMethod(:DELETE, "{targetHttpsProxy}", "Deletes the specified TargetHttpsProxy resource."), get=APIMethod(:GET, "{targetHttpsProxy}", "Returns the specified TargetHttpsProxy resource. 
Get a list of available target HTTPS proxies by making a list() request."), insert=APIMethod(:POST, "", "Creates a TargetHttpsProxy resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of TargetHttpsProxy resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), setSslCertificates=APIMethod(:POST, "$API_ROOT/compute/v1/projects/{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", "Replaces SslCertificates for TargetHttpsProxy."), setUrlMap=APIMethod(:POST, "$API_ROOT/compute/v1/projects/{project}/targetHttpProxies/{targetHttpsProxy}/setUrlMap", "Changes the URL map for TargetHttpsProxy."), ), TargetInstance=APIResource("zones/{zone}/targetInstances"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/targetInstances", "Retrieves an aggregated list of target instances."), delete=APIMethod(:DELETE, "{targetInstance}", "Deletes the specified TargetInstance resource."), get=APIMethod(:GET, "{targetInstance}", "Returns the specified TargetInstance resource. 
Get a list of available target instances by making a list() request."), insert=APIMethod(:POST, "", "Creates a TargetInstance resource in the specified project and zone using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of TargetInstance resources available to the specified project and zone."; transform=(x, t) -> map(t, get(x, :items, []))), ), TargetPool=APIResource("regions/{region}/targetPools"; addHealthCheck=APIMethod(:POST, "{targetPool}/addHealthCheck", "Adds health check URLs to a target pool."), addInstance=APIMethod(:POST, "{targetPool}/addInstance", "Adds an instance to a target pool."), aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/targetPools", "Retrieves an aggregated list of target pools."), delete=APIMethod(:DELETE, "{targetPool}", "Deletes the specified target pool."), get=APIMethod(:GET, "{targetPool}", "Returns the specified target pool. Get a list of available target pools by making a list() request."), getHealth=APIMethod(:POST, "{targetPool}/getHealth", "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool."), insert=APIMethod(:POST, "", "Creates a target pool in the specified project and region using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of target pools available to the specified project and region."; transform=(x, t) -> map(t, get(x, :items, []))), removeHealthCheck=APIMethod(:POST, "{targetPool}/removeHealthCheck", "Removes health check URL from a target pool."), removeInstance=APIMethod(:POST, "{targetPool}/removeInstance", "Removes instance URL from a target pool."), setBackup=APIMethod(:POST, "{targetPool}/setBackup", "Changes a backup target pool's configurations."), ), TargetSslProxy=APIResource("global/targetSslProxies"; delete=APIMethod(:DELETE, "{targetSslProxy}", "Deletes the specified TargetSslProxy resource."), get=APIMethod(:GET, "{targetSslProxy}", 
"Returns the specified TargetSslProxy resource. Get a list of available target SSL proxies by making a list() request."), insert=APIMethod(:POST, "", "Creates a TargetSslProxy resource in the specified project using the data included in the request."), list=APIMethod(:GET, "", "Retrieves the list of TargetSslProxy resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), setBackendService=APIMethod(:POST, "{targetSslProxy}/setBackendService", "Changes the BackendService for TargetSslProxy."), setProxyHeader=APIMethod(:POST, "{targetSslProxy}/setProxyHeader", "Changes the ProxyHeaderType for TargetSslProxy."), setSslCertificates=APIMethod(:POST, "{targetSslProxy}/setSslCertificates", "Changes SslCertificates for TargetSslProxy."), ), TargetVpnGateway=APIResource("regions/{region}/targetVpnGateways"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/targetVpnGateways", "Retrieves an aggregated list of target VPN gateways."), delete=APIMethod(:DELETE, "{targetVpnGateway}", "Deletes the specified target VPN gateway."), get=APIMethod(:GET, "{targetVpnGateway}", "Returns the specified target VPN gateway. Get a list of available target VPN gateways by making a list() request."), insert=APIMethod(:POST, "", "Creates a target VPN gateway in the specified project and region using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of target VPN gateways available to the specified project and region."; transform=(x, t) -> map(t, get(x, :items, []))), ), UrlMap=APIResource("global/urlMaps"; delete=APIMethod(:DELETE, "{urlMap}", "Deletes the specified UrlMap resource."), get=APIMethod(:GET, "{urlMap}", "Returns the specified UrlMap resource. 
Get a list of available URL maps by making a list() request."), insert=APIMethod(:POST, "", "Creates a UrlMap resource in the specified project using the data included in the request."), invalidateCache=APIMethod(:POST, "{urlMap}/invalidateCache", "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap."), list=APIMethod(:GET, "", "Retrieves the list of UrlMap resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), patch=APIMethod(:PATCH, "{urlMap}", "Updates the specified UrlMap resource with the data included in the request. This method supports patch semantics."), update=APIMethod(:PUT, "{urlMap}", "Updates the specified UrlMap resource with the data included in the request."), validate=APIMethod(:POST, "{urlMap}/validate", "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap."), ), VpnTunnel=APIResource("regions/{region}/vpnTunnels"; aggregatedList=APIMethod(:GET, "$API_ROOT/compute/v1/projects/{project}/aggregated/vpnTunnels", "Retrieves an aggregated list of VPN tunnels."), delete=APIMethod(:DELETE, "{vpnTunnel}", "Deletes the specified VpnTunnel resource."), get=APIMethod(:GET, "{vpnTunnel}", "Returns the specified VpnTunnel resource. 
Get a list of available VPN tunnels by making a list() request."), insert=APIMethod(:POST, "", "Creates a VpnTunnel resource in the specified project and region using the data included in the request."), list=APIMethod(:GET, "", "Retrieves a list of VpnTunnel resources contained in the specified project and region."; transform=(x, t) -> map(t, get(x, :items, []))), ), ZoneOperation=APIResource("zones/{zone}/operations"; delete=APIMethod(:DELETE, "{operation}", "Deletes the specified zone-specific Operations resource."), get=APIMethod(:GET, "{operation}", "Retrieves the specified zone-specific Operations resource."), list=APIMethod(:GET, "", "Retrieves a list of Operation resources contained within the specified zone."; transform=(x, t) -> map(t, get(x, :items, []))), ), Zone=APIResource("zones"; get=APIMethod(:GET, "{zone}", "Returns the specified Zone resource. Get a list of available zones by making a list() request."), list=APIMethod(:GET, "", "Retrieves the list of Zone resources available to the specified project."; transform=(x, t) -> map(t, get(x, :items, []))), ), ) end
[STATEMENT] lemma gt_right_comm: assumes "A B Gt C D" shows "A B Gt D C" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A B Gt D C [PROOF STEP] using Gt_def assms lt_left_comm [PROOF STATE] proof (prove) using this: ?A ?B Gt ?C ?D \<equiv> ?C ?D Lt ?A ?B A B Gt C D ?A ?B Lt ?C ?D \<Longrightarrow> ?B ?A Lt ?C ?D goal (1 subgoal): 1. A B Gt D C [PROOF STEP] by presburger
If $c \neq 0$, then the sequence $c \cdot f_n$ converges if and only if the sequence $f_n$ converges.
This chapter summarizes the thesis by outlining its accomplishments and remaining work left to do. Section 7.1 contains the conclusion, and in Section 7.2 the reader can find a description of work to be done in the future. \section{Conclusion} This thesis examines the possibility of using an Eclipse Arrowhead framework local cloud on an embedded device, namely the STM32 B-L4S5I-IOT01A board. It was possible to implement functionality for the Eclipse Arrowhead framework on the STM32 B-L4S5I-IOT01A board, having the board send temperature readings to a computer within a local network. Another goal of this thesis was to provide an easy-to-run example of the Eclipse Arrowhead framework that could be compiled by anyone wanting to give the framework a try, a missing feature right now for embedded devices. The implementation was done with usability in mind, making it easy for users to try the code. The desired usability was achieved with the Mbed online compiler's code importation and minimum setup development environment, allowing users to try out the Eclipse Arrowhead framework on the STM32 B-L4S5I-IOT01A board. The thesis also examines the benefits of using the Eclipse Arrowhead framework compared to its competitors Amazon Web Services and Microsoft Azure. The thesis showed some benefits in terms of response time when running a local cloud instead of using a remote service such as Amazon Web Services. A 17.5-fold decrease in average response time was recorded. Maximum and minimum response times decreased by 1.9 and 134 times, respectively. \section{Future work} %Intro. The research presented in this thesis can take many different directions moving forward. Two main issues were raised during this thesis, security and having the STM32 B-L4S5I-IOT01A board act as a server. The following sections will address these two issues. \subsection{Security} %Security.
We have to address the security issues raised in the previous chapters if the implementation done in this thesis is ever to be used by the industry in production. This thesis made several attempts at implementing HTTPS; the Mbed-HTTP library has support for HTTPS, and STM Cube IDE has support for wolfSSL. The problem to be solved before implementing HTTPS is how the Eclipse Arrowhead framework handles certificates in programming languages other than Java. Both Mbed and STM Cube have examples using HTTPS that work, and getting started with Amazon Web Services uses HTTPS with a user-generated certificate. One of the main issues is that the certificates are self-signed, meaning no trusted certificate authority has signed them. The self-signed certificates proved to be the main obstacle for implementing HTTPS using C. None of the libraries, Mbed-HTTP or wolfSSL, could trust the certificate from the Eclipse Arrowhead framework. There is a need to conduct further research using the certificates generated by the Eclipse Arrowhead framework on embedded devices. One area of research could be to move away from the .pk12 format, generally used by Java applications, and include more support for the .pem format used by C and many other languages. Another area of research needed is lightweight cryptography and possible ways to move away from the idea that an IoT device has its own certificate -- a concept that quickly becomes unbearable when dealing with thousands of devices in one network. \subsection{Server implementation} %Server implementation. Before being applicable to the industry, one would have to solve the board's inability to react to requests. The issue of reacting to requests is of utmost importance. Both the Mbed online compiler and the STM Cube IDE have a working example of an HTTP server that can request the temperature data from a generated webpage. Those examples use pure HTTP requests and responses, leading to very lengthy and challenging messages to parse.
Future research that promotes the same usability as Mbed-HTTP while adding the ability to respond to requests would greatly benefit the Eclipse Arrowhead framework. A server implementation on the STM32 B-L4S5I-IOT01A board could also have great educational potential if used in courses for young adults or aspiring engineers. With the number of IoT devices connecting to the internet only increasing, understanding connected embedded devices is crucial for future engineers. Introducing concepts like IoT and embedded system programming early in an engineering degree, together with real-life examples, could enhance knowledge and spark interest in those subjects, making aspiring engineers ready for the future.
# Import Problem Instance We start by importing a simple problem instance to demonstrate the tsplib reader. ```python from tsplib95 import tsplib95 import itertools import networkx as nx instance = tsplib95.load_problem('./tsplib/ulysses16.tsp') instance.comment ``` 'Odyssey of Ulysses (Groetschel/Padberg)' Remember, this repository contains a small selection of TSP instances that you can use to test your algorithms. | name | nodes | description | |------|-------|-------------| | ulysses16.tsp | 16 | Odyssey of Ulysses | | ulysses7.tsp | 7 | subset of ulysses16 for testing purposes | | bayg29.tsp | 29 | 29 Cities in Bavaria | | bier127.tsp | 127 | 127 Biergaerten in Augsburg | | bier20.tsp | 20 | subset of bier127 | | brazil58.tsp | 58 | 58 cities in Brazil | | ali535.tsp | 535 | 535 Airports around the globe | | d18512.tsp | 18512 | 18512 places in Germany | The following calls show the dimension = number of nodes of the problem, its node set and the edge weights. The functions `instance.get_nodes()` and `instance.get_edges()` are implemented as iterators, so you can loop over the nodes or edges. To get a list of nodes or edges, you have to explicitly construct one using `list(instance.get_nodes())`. Note that node counting may start at 1 for some instances while others use 0 as starting point. For convenience, we store the index of the first node as `first_node`. ```python instance.dimension instance.get_nodes() print("List of nodes: ", list(instance.get_nodes())) first_node = min(instance.get_nodes()) first_node for i,j in instance.get_edges(): if i >= j: continue print(f"edge {{ {i:2},{j:2} }} has weight {instance.wfunc(i,j):3}.") ``` List of nodes: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] edge { 1, 2 } has weight 6. edge { 1, 3 } has weight 5. edge { 1, 4 } has weight 3. edge { 1, 5 } has weight 11. edge { 1, 6 } has weight 8. edge { 1, 7 } has weight 7. edge { 1, 8 } has weight 1. edge { 1, 9 } has weight 12. edge { 1,10 } has weight 8. 
edge { 1,11 } has weight 26. edge { 1,12 } has weight 5. edge { 1,13 } has weight 5. edge { 1,14 } has weight 5. edge { 1,15 } has weight 7. edge { 1,16 } has weight 1. edge { 2, 3 } has weight 1. edge { 2, 4 } has weight 4. edge { 2, 5 } has weight 17. edge { 2, 6 } has weight 14. edge { 2, 7 } has weight 13. edge { 2, 8 } has weight 6. edge { 2, 9 } has weight 17. edge { 2,10 } has weight 13. edge { 2,11 } has weight 32. edge { 2,12 } has weight 11. edge { 2,13 } has weight 11. edge { 2,14 } has weight 11. edge { 2,15 } has weight 13. edge { 2,16 } has weight 7. edge { 3, 4 } has weight 5. edge { 3, 5 } has weight 16. edge { 3, 6 } has weight 13. edge { 3, 7 } has weight 12. edge { 3, 8 } has weight 6. edge { 3, 9 } has weight 16. edge { 3,10 } has weight 12. edge { 3,11 } has weight 31. edge { 3,12 } has weight 10. edge { 3,13 } has weight 10. edge { 3,14 } has weight 11. edge { 3,15 } has weight 12. edge { 3,16 } has weight 6. edge { 4, 5 } has weight 13. edge { 4, 6 } has weight 11. edge { 4, 7 } has weight 10. edge { 4, 8 } has weight 3. edge { 4, 9 } has weight 15. edge { 4,10 } has weight 11. edge { 4,11 } has weight 28. edge { 4,12 } has weight 8. edge { 4,13 } has weight 8. edge { 4,14 } has weight 8. edge { 4,15 } has weight 9. edge { 4,16 } has weight 5. edge { 5, 6 } has weight 4. edge { 5, 7 } has weight 6. edge { 5, 8 } has weight 11. edge { 5, 9 } has weight 8. edge { 5,10 } has weight 8. edge { 5,11 } has weight 16. edge { 5,12 } has weight 7. edge { 5,13 } has weight 7. edge { 5,14 } has weight 6. edge { 5,15 } has weight 4. edge { 5,16 } has weight 11. edge { 6, 7 } has weight 1. edge { 6, 8 } has weight 8. edge { 6, 9 } has weight 5. edge { 6,10 } has weight 4. edge { 6,11 } has weight 17. edge { 6,12 } has weight 3. edge { 6,13 } has weight 3. edge { 6,14 } has weight 3. edge { 6,15 } has weight 3. edge { 6,16 } has weight 8. edge { 7, 8 } has weight 7. edge { 7, 9 } has weight 5. edge { 7,10 } has weight 3. edge { 7,11 } has weight 18. 
edge { 7,12 } has weight 2. edge { 7,13 } has weight 2. edge { 7,14 } has weight 2. edge { 7,15 } has weight 3. edge { 7,16 } has weight 7. edge { 8, 9 } has weight 12. edge { 8,10 } has weight 8. edge { 8,11 } has weight 26. edge { 8,12 } has weight 5. edge { 8,13 } has weight 5. edge { 8,14 } has weight 5. edge { 8,15 } has weight 6. edge { 8,16 } has weight 2. edge { 9,10 } has weight 4. edge { 9,11 } has weight 15. edge { 9,12 } has weight 7. edge { 9,13 } has weight 7. edge { 9,14 } has weight 7. edge { 9,15 } has weight 8. edge { 9,16 } has weight 11. edge { 10,11 } has weight 19. edge { 10,12 } has weight 3. edge { 10,13 } has weight 4. edge { 10,14 } has weight 4. edge { 10,15 } has weight 6. edge { 10,16 } has weight 7. edge { 11,12 } has weight 20. edge { 11,13 } has weight 21. edge { 11,14 } has weight 20. edge { 11,15 } has weight 20. edge { 11,16 } has weight 25. edge { 12,13 } has weight 0. edge { 12,14 } has weight 1. edge { 12,15 } has weight 3. edge { 12,16 } has weight 5. edge { 13,14 } has weight 1. edge { 13,15 } has weight 3. edge { 13,16 } has weight 4. edge { 14,15 } has weight 2. edge { 14,16 } has weight 5. edge { 15,16 } has weight 7. You have already seen how to draw a graph, here is the relevant code again. ```python G = instance.get_graph() if instance.is_depictable(): pos = {i: instance.get_display(i) for i in instance.get_nodes()} else: pos = nx.drawing.layout.spring_layout(G) nx.draw_networkx_nodes(G, pos, node_color='#66a3ff', node_size=200) nx.draw_networkx_labels(G, pos, font_weight='bold' ) nx.draw_networkx_edges(G, pos, edge_color='#e6e6e6') ``` # Implementing the standard model with subtour elimination in Gurobi We will implement the standard model with subtour elimination callback using binary variables $x_{ij} \in \{0,1\}$ to indicate whether edge $\{i,j\}$ is being used in the tour. To avoid double counting edges, we employ the convention that $i < j$ for an edge $\{i,j\}$ and we will denote the resulting edge set by $E$. 
The formulation looks like this: \begin{align} \min\;&\sum_{\{i,j\} \in E} c_{i,j} \cdot x_{i,j}\\ &\sum_{j: \{i,j\} \in E} x_{i,j} = 2 \quad \text{for all nodes $i$}\\ &\sum_{\{i,j\} \in \delta(S)} x_{ij} \ge 2 \quad \text{for all $S \subsetneq V$, $S \ne \emptyset$}\\ &x_{i,j} \in \{0,1\} \end{align} ## Creating the variables We start by creating the model and the variables. Notice that we already define the objective function by using the `obj` parameter upon variable creation. ```python import gurobipy as grb model = grb.Model(name="Subtour TSP formulation") x = grb.tupledict() for i,j in instance.get_edges(): if i < j: x[i,j] = model.addVar(obj=instance.wfunc(i,j), vtype=grb.GRB.BINARY, name=f"x[{i},{j}]") ``` Academic license - for non-commercial use only ## Adding the degree constraints Next, we add the constraints for our model with the exception of the subtour elimination constraints. We use the sum method of our variables to express the summation in an elegant way. ```python for i in instance.get_nodes(): model.addConstr(x.sum(i,'*') + x.sum('*',i) == 2, name=f"degree_ctr[{i}]") ``` ## Starting the Optimization Process Finally, we set the objective to minimization and call the optimizer. 
```python model.ModelSense = grb.GRB.MINIMIZE model.reset() model.optimize() ``` Optimize a model with 16 rows, 120 columns and 240 nonzeros Variable types: 0 continuous, 120 integer (120 binary) Coefficient statistics: Matrix range [1e+00, 1e+00] Objective range [1e+00, 3e+01] Bounds range [1e+00, 1e+00] RHS range [2e+00, 2e+00] Found heuristic solution: objective 148.0000000 Presolve time: 0.00s Presolved: 16 rows, 120 columns, 240 nonzeros Variable types: 0 continuous, 120 integer (120 binary) Root relaxation: objective 6.200000e+01, 20 iterations, 0.00 seconds Nodes | Current Node | Objective Bounds | Work Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time * 0 0 0 62.0000000 62.00000 0.00% - 0s Explored 0 nodes (20 simplex iterations) in 0.01 seconds Thread count was 8 (of 8 available processors) Solution count 2: 62 148 Optimal solution found (tolerance 1.00e-04) Best objective 6.200000000000e+01, best bound 6.200000000000e+01, gap 0.0000% ## Querying and Visualizing the Solution Before we visualize our result, let us look at a few key figures of our solution. ```python model.ObjVal ``` ```python solution_edges = [(i,j) for i,j in x.keys() if x[i,j].x > 0.9] solution_edges ``` [(1, 8), (1, 16), (2, 3), (2, 4), (3, 4), (5, 11), (5, 15), (6, 7), (6, 15), (7, 10), (8, 16), (9, 10), (9, 11), (12, 13), (12, 14), (13, 14)] For debugging purposes, it might be helpful to export the model held by Gurobi into a human-readable format: ```python model.write('test.lp') ``` Finally, let us visualize the solution using NetworkX. In this case, we need to prescribe positions and draw the nodes and two layers of edges separately. 
```python if instance.is_depictable(): pos = {i: instance.get_display(i) for i in instance.get_nodes()} else: pos = nx.drawing.layout.spring_layout(G) nx.draw_networkx_nodes(G, pos, node_color='#66a3ff', node_size=500) nx.draw_networkx_labels(G, pos, font_weight='bold' ) nx.draw_networkx_edges(G, pos, edge_color='#e6e6e6') nx.draw_networkx_edges(G, pos, edgelist=solution_edges, edge_color='#ffa31a', width=4) ``` ## Subtour elimination As you can hopefully see (depending on the instance you selected), the solution may contain a subtour. Let us add a callback to detect and eliminate subtours in an integer solution. We start by defining a function that, given a list of edges, finds a subtour if there is one. For a more concise implementation, we use a function of networkx to find such a subtour: We construct an auxiliary graph $G_{\text{aux}}$, find a cycle in this graph and return just the nodes contained in this cycle. ```python def find_subtour_set(nodes, edges): G_aux = nx.Graph() G_aux.add_nodes_from(nodes) G_aux.add_edges_from(edges) return set(itertools.chain(*nx.find_cycle(G_aux))) find_subtour_set(instance.get_nodes(), [(i,j) for i,j in x.keys() if x[i,j].x > 0.9]) ``` {1, 8, 16} ### Define a callback Next, we need to define a callback function that adds a violated subtour inequality if one exists. In Gurobi, there is just one "global" callback function that takes two parameters: The `model` and a constant called `where` (a "Callback Code") that indicates at which position in the optimization process the callback has been invoked. The documentation contains a list of the available codes at http://www.gurobi.com/documentation/8.0/refman/callback_codes.html#sec:CallbackCodes. We want our callback to spring into action whenever a new integer solution has been found, so the relevant callback code is `GRB.Callback.MIPSOL`. Notice that we can only access parameters and current values of our model through the model object, not through the variables we have defined above. 
The model supplies a number of `cbGet...` methods for this purpose. To access our variables, we need to define a _user variable_ in the model that stores this information and makes it accessible in the callback. User variables can be any member of the model object that starts with an underscore. We will add a parameter `_vars` to our model that simply stores the x variables and then use this parameter in the callback to access the current solution. To add a new constraint, we use the method `cbLazy` of our model that adds a new lazy constraint. The node will then be re-evaluated automatically. ### Subtour Elimination Callback **Task 1:** In the following function, complete the definition of the `cut_edges` list to make the subtour elimination work properly. ```python def subtour_callback(model, where): if where == grb.GRB.Callback.MIPSOL: sol = model.cbGetSolution(model._vars) S = find_subtour_set(model._instance.get_nodes(), [(i,j) for i,j in model._vars.keys() if sol[i,j] > 0.9]) if len(S) < model._instance.dimension: # TODO: cut_edges cut_edges = [(i,j) for i,j in model._vars.keys() if ((i in S and j not in S) or (i not in S and j in S))] model.cbLazy(sum(model._vars[i,j] for i,j in cut_edges) >= 2) ``` ### Add Callback to the Model Let us now add the variables to the model and resolve, this time using the callback. Also, we need to switch on lazy constraints by setting the appropriate parameter. The callback function is simply passed as a parameter to the optimizer. 
```python model._vars = x # for use in the callback model._instance = instance # for use in the callback model.reset() model.Params.lazyConstraints = 1 # use lazy constraints model.optimize(subtour_callback) # use callback to add lazy constraints ``` Parameter lazyConstraints unchanged Value: 1 Min: 0 Max: 1 Default: 0 Optimize a model with 16 rows, 120 columns and 240 nonzeros Variable types: 0 continuous, 120 integer (120 binary) Coefficient statistics: Matrix range [1e+00, 1e+00] Objective range [1e+00, 3e+01] Bounds range [1e+00, 1e+00] RHS range [2e+00, 2e+00] Presolve time: 0.00s Presolved: 16 rows, 120 columns, 240 nonzeros Variable types: 0 continuous, 120 integer (120 binary) Root relaxation: objective 6.800000e+01, 25 iterations, 0.00 seconds Nodes | Current Node | Objective Bounds | Work Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time 0 0 68.00000 0 6 - 68.00000 - - 0s 0 0 70.50000 0 10 - 70.50000 - - 0s H 0 0 72.0000000 70.50000 2.08% - 0s H 0 0 71.0000000 70.50000 0.70% - 0s Cutting planes: Gomory: 1 Zero half: 1 Lazy constraints: 10 Explored 1 nodes (42 simplex iterations) in 0.03 seconds Thread count was 8 (of 8 available processors) Solution count 2: 71 72 Optimal solution found (tolerance 1.00e-04) Best objective 7.100000000000e+01, best bound 7.100000000000e+01, gap 0.0000% ### Results Let's have a look at the results. 
```python model.ObjVal ``` 71.0 ```python solution_edges = [(i,j) for i,j in x.keys() if x[i,j].x > 0.9] solution_edges ``` [(1, 3), (1, 16), (2, 3), (2, 4), (4, 8), (5, 6), (5, 11), (6, 7), (7, 14), (8, 15), (9, 10), (9, 11), (10, 12), (12, 13), (13, 16), (14, 15)] ```python if instance.is_depictable(): pos = {i: instance.get_display(i) for i in instance.get_nodes()} else: pos = nx.drawing.layout.spring_layout(G) nx.draw_networkx_nodes(G, pos, node_color='#66a3ff', node_size=500) nx.draw_networkx_labels(G, pos, font_weight='bold' ) nx.draw_networkx_edges(G, pos, edge_color='#e6e6e6') nx.draw_networkx_edges(G, pos, edgelist=solution_edges, edge_color='#ffa31a', width=4) ``` ## Fractional Subtour Elimination Finally, let us try to implement a separation procedure for subtour elimination constraints that works on the relaxation and does not need an integer solution. This can be done by solving a minimum cut problem on an auxiliary graph where we fix an arbitrary source node (we will use node $1$) and iterate through all possible target nodes until we find a minimum cut that has a value of less than $2$. For this cut, the corresponding subtour elimination constraint is inserted as a lazy cut. We modify our callback to use the callback code `MIPNODE` instead of `MIPSOL` and query `GRB.Callback.MIPNODE_STATUS` to see whether the node has already been solved to (fractional) optimality. For computing the minimum cut we again use an algorithm provided by the `networkx` package. Note that we have substituted `cbGetSolution` by `cbGetNodeRel`, as a solution is not generally available at any node. Also, we have to include the `MIPSOL` callback branch, because `MIPNODE` might not be called for nodes that yield an integer optimum right away. 
### Define Separation Method ```python import itertools def find_minimum_cut_partition(instance, sol): G_flow = instance.get_graph() for i,j in G_flow.edges(): if (i,j) in sol: G_flow[i][j]['capacity'] = sol[i,j] else: G_flow[i][j]['capacity'] = 0 for t in G_flow.nodes() - {1}: cut_value, S = nx.minimum_cut(G_flow, 1, t, capacity='capacity') if cut_value < 2: return S[0] return set() #no cut with value < 2 has been found ``` ### Subtour Elimination Callback **Task 2:** In the following callback, fill in the code for adding the correct subtour elimination inequalities. ```python def subtour_elimination_callback(model, where): if where == grb.GRB.Callback.MIPSOL: sol = model.cbGetSolution(model._vars) S = find_subtour_set(model._instance.get_nodes(), [(i,j) for i,j in model._vars.keys() if sol[i,j] > 0.9]) if len(S) < model._instance.dimension: cut_edges = [(i,j) for i,j in model._vars.keys() if ((i in S and j not in S) or (i not in S and j in S))] model.cbLazy(sum(model._vars[i,j] for i,j in cut_edges) >= 2) elif where == grb.GRB.Callback.MIPNODE and model.cbGet(grb.GRB.Callback.MIPNODE_STATUS) == grb.GRB.Status.OPTIMAL: sol = model.cbGetNodeRel(model._vars) S = find_minimum_cut_partition(model._instance, sol) if 0 < len(S) < model._instance.dimension: cut_edges = [(i,j) for i,j in model._vars.keys() if ((i in S and j not in S) or (i not in S and j in S))] model.cbLazy(sum(model._vars[i,j] for i,j in cut_edges) >= 2) ``` ### Optimize ```python model._vars = x model._instance = instance model.reset() model.Params.lazyConstraints = 1 model.optimize(subtour_elimination_callback) ``` Parameter lazyConstraints unchanged Value: 1 Min: 0 Max: 1 Default: 0 Optimize a model with 16 rows, 120 columns and 240 nonzeros Variable types: 0 continuous, 120 integer (120 binary) Coefficient statistics: Matrix range [1e+00, 1e+00] Objective range [1e+00, 3e+01] Bounds range [1e+00, 1e+00] RHS range [2e+00, 2e+00] Presolve time: 0.00s Presolved: 16 rows, 120 columns, 240 nonzeros 
Variable types: 0 continuous, 120 integer (120 binary) Root relaxation: objective 6.800000e+01, 25 iterations, 0.00 seconds Nodes | Current Node | Objective Bounds | Work Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time 0 0 68.00000 0 6 - 68.00000 - - 0s 0 0 70.50000 0 10 - 70.50000 - - 0s H 0 0 72.0000000 70.50000 2.08% - 0s H 0 0 71.0000000 70.50000 0.70% - 0s Cutting planes: Gomory: 1 Zero half: 1 Lazy constraints: 10 Explored 1 nodes (42 simplex iterations) in 0.03 seconds Thread count was 8 (of 8 available processors) Solution count 2: 71 72 Optimal solution found (tolerance 1.00e-04) Best objective 7.100000000000e+01, best bound 7.100000000000e+01, gap 0.0000% ### Results ```python model.ObjVal ``` 71.0 ```python solution_edges = [(i,j) for i,j in x.keys() if x[i,j].x > 0.9] solution_edges ``` [(1, 3), (1, 16), (2, 3), (2, 4), (4, 8), (5, 6), (5, 11), (6, 7), (7, 14), (8, 15), (9, 10), (9, 11), (10, 12), (12, 13), (13, 16), (14, 15)] ```python if instance.is_depictable(): pos = {i: instance.get_display(i) for i in instance.get_nodes()} else: pos = nx.drawing.layout.spring_layout(G) nx.draw_networkx_nodes(G, pos, node_color='#66a3ff', node_size=500) nx.draw_networkx_labels(G, pos, font_weight='bold' ) nx.draw_networkx_edges(G, pos, edge_color='#e6e6e6') nx.draw_networkx_edges(G, pos, edgelist=solution_edges, edge_color='#ffa31a', width=4) ``` ### Comparison **Task 3:** Compare the two different callbacks with respect to running time and number of cuts added. Try different TSPLIB instances. Compare this formulation to MTZ. ```python ```
[STATEMENT] lemma finite_numbers[simp,intro]: "finite [n]" [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite [n] [PROOF STEP] unfolding numbers_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite {..<n} [PROOF STEP] by auto
!******************************************************************************
!
!  Licensing:
!
!    This code is distributed under the MIT license.
!
!  Modified:
!
!    22 April 2019
!
!  Author:
!
!    Nescirem
!
!==============================================================================
!
!>M test_helloworld
!> >S mpiomphelloworld
!
module test_helloworld
    ! Hybrid MPI+OpenMP "hello world" smoke test: every OpenMP thread in
    ! every MPI process prints a line identifying itself and its host.

    use,intrinsic :: iso_fortran_env, only: error_unit,output_unit

    implicit none

    private
    public :: helloworld

contains

    subroutine helloworld
        ! Spawn 4 OpenMP threads; each reports its thread id, the MPI
        ! process it lives in, and the processor (host) name.
        use mpi_mod   ! NOTE(review): presumably provides pid, num_p, p_name,
                      ! p_namelen, err and mpi_get_processor_name -- defined
                      ! outside this file; confirm against mpi_mod
        use omp_lib

        implicit none

        !OpenMP relevant
        integer :: tid,num_threads

        ! Request a fixed thread count so the output is predictable.
        call omp_set_num_threads( 4 )

        ! default(private): p_name/p_namelen/err/tid/num_threads are
        ! per-thread; pid and num_p (presumably the MPI rank and process
        ! count) are explicitly shared and only read inside the region.
        !$omp parallel default(private) shared(pid,num_p)
        call mpi_get_processor_name( p_name,p_namelen,err )
        num_threads = omp_get_num_threads()
        tid = omp_get_thread_num()
        write( output_unit,'(A,I2,A,I2,A,I2,A,I2,A,A)' ) 'Thread',tid,' of',num_threads,&
            ' threads is alive in process',pid,' of',num_p,' processes on ',p_name(1:p_namelen)
        !$omp end parallel

    end subroutine helloworld

end module test_helloworld
!******************************************************************************

!******************************************************************************
#ifndef INTEGRATED_TESTS
! Stand-alone driver; compiled out when the test runs inside an
! integrated test suite.
program test_mpiomp_helloworld
    use test_helloworld , only: helloworld
    implicit none

    !parse the command line
    call cmd_parser

    !start parallel execution
    call mpi_start
    call helloworld
    !end parallel execution
    call mpi_end

end program test_mpiomp_helloworld
#endif
!******************************************************************************
program prog

  !---------------------------------------------------------------------
  ! Convert a VASP EIGENVAL file into plottable band-structure data.
  !
  ! Reads the Fermi level from stdin, parses the 6-line EIGENVAL header,
  ! then for every k-point reads the band eigenvalues and writes:
  !   unit 13 'band.dat'       raw eigenvalues per k-point (spin-up)
  !   unit 14 'band_dn.dat'    raw eigenvalues per k-point (spin-down,
  !                            only when ISPIN = 2)
  !   unit 11 'band_fermi.dat' cumulative k-path distance vs. E - Ef,
  !                            one blank-line-separated block per band
  !---------------------------------------------------------------------
  implicit none

  real, allocatable :: e(:,:), eup(:,:), edn(:,:)
  real, allocatable :: k(:,:)
  real, dimension(3) :: k0, a
  real, dimension(6) :: xxxx
  character(len=32)  :: xx, yy
  real    :: ef, wtk, dk
  integer :: iii, ispin, nn, nk, nbands
  integer :: i, j, jj

  write(6,*) 'fermi level (eV)'
  read(5,*) ef

  open(10, file='EIGENVAL', status='old')
  open(11, file='band_fermi.dat')
  open(13, file='band.dat')

  ! --- EIGENVAL header (6 lines) ---
  read(10,*) iii, iii, iii, ispin      ! line 1: last field is ISPIN (2 = spin polarized)
  read(10,*) (xxxx(i), i = 1,5)        ! line 2: five reals (cell data)
  read(10,*) xxxx(6)                   ! line 3: one real
  read(10,*) xx                        ! line 4
  read(10,*) yy                        ! line 5: system name
  read(10,*) nn, nk, nbands            ! line 6: electrons, #k-points, #bands

  allocate(k(nk,3))

  if (ispin == 2) then
     ! Spin-polarized: one (up, down) eigenvalue pair per band.
     ! BUG FIX: eup/edn were previously used without ever being
     ! allocated, and unit 14 was written without being opened.
     allocate(eup(nk,nbands), edn(nk,nbands))
     open(14, file='band_dn.dat')      ! spin-down counterpart of band.dat
     do i = 1, nk
        read(10,*)                               ! blank separator line
        read(10,*) (k(i,j), j = 1,3), wtk        ! k-point coordinates and weight
        do j = 1, nbands
           read(10,*) jj, eup(i,j), edn(i,j)     ! band index, E_up, E_dn
        end do
        write(13,9030) (eup(i,j), j = 1,nbands)
        write(14,9030) (edn(i,j), j = 1,nbands)
     end do
  else
     ! Non-spin-polarized: a single eigenvalue per band.
     allocate(e(nk,nbands))
     do i = 1, nk
        read(10,*)
        read(10,*) (k(i,j), j = 1,3), wtk
        do j = 1, nbands
           read(10,*) jj, e(i,j)                 ! band index and eigenvalue
        end do
        write(13,9030) (e(i,j), j = 1,nbands)
     end do
  end if
  ! NOTE: dead leftover statements that re-read eigenvalues with a stale
  ! loop index after the ISPIN=2 loop were removed.

9030 format (8f9.5)

  ! --- cumulative k-path distance vs. (E - Ef), one block per band ---
  do j = 1, nbands
     dk = 0
     do i = 1, nk
        if (i == 1) k0 = k(i,:)        ! path length starts at the first k-point
        a  = k(i,:) - k0
        dk = dk + sqrt(dot_product(a,a))
        if (ispin == 2) then
           write(11,*) dk, eup(i,j)-ef, edn(i,j)-ef
        else
           write(11,*) dk, e(i,j)-ef
        end if
        k0 = k(i,:)
     end do
     write(11,*)                       ! blank line separates bands for plotting
  end do

  stop
end program prog
!
! Coefficient functions for a 3D convection-diffusion test problem with
! exponentially varying diffusion:  a_i = epsilon * exp(-(x+y+z)),
! convection b_i = 1/sqrt(3) - exp(-(x+y+z)), zero reaction term, and
! Dirichlet data g equal to one on the faces x = 0 and x = 1.
!
module pde3d_exp_mod
  use psb_base_mod, only : psb_dpk_
  ! Diffusion scale; adjustable at run time through pde3d_set_parm.
  real(psb_dpk_), save, private :: epsilon=1.d0/80
  private :: decay
contains

  ! Set the diffusion parameter used by a1/a2/a3.
  subroutine pde3d_set_parm(dat)
    real(psb_dpk_), intent(in) :: dat
    epsilon = dat
  end subroutine pde3d_set_parm

  ! Shared exponential factor exp(-(x+y+z)) used by every coefficient.
  function decay(x,y,z)
    real(psb_dpk_) :: decay
    real(psb_dpk_), intent(in) :: x,y,z
    decay = exp(-(x+y+z))
  end function decay

  !
  ! Convection field components.
  !
  function b1(x,y,z)
    use psb_base_mod, only : psb_dpk_
    real(psb_dpk_) :: b1
    real(psb_dpk_), intent(in) :: x,y,z
    b1 = 1.d0/sqrt(3.d0) - decay(x,y,z)
  end function b1

  function b2(x,y,z)
    use psb_base_mod, only : psb_dpk_
    real(psb_dpk_) :: b2
    real(psb_dpk_), intent(in) :: x,y,z
    b2 = 1.d0/sqrt(3.d0) - decay(x,y,z)
  end function b2

  function b3(x,y,z)
    use psb_base_mod, only : psb_dpk_
    real(psb_dpk_) :: b3
    real(psb_dpk_), intent(in) :: x,y,z
    b3 = 1.d0/sqrt(3.d0) - decay(x,y,z)
  end function b3

  ! Reaction term: identically zero.
  function c(x,y,z)
    use psb_base_mod, only : psb_dpk_
    real(psb_dpk_) :: c
    real(psb_dpk_), intent(in) :: x,y,z
    c = 0.d0
  end function c

  !
  ! Diffusion coefficients, all equal to epsilon * exp(-(x+y+z)).
  !
  function a1(x,y,z)
    use psb_base_mod, only : psb_dpk_
    real(psb_dpk_) :: a1
    real(psb_dpk_), intent(in) :: x,y,z
    a1 = epsilon*decay(x,y,z)
  end function a1

  function a2(x,y,z)
    use psb_base_mod, only : psb_dpk_
    real(psb_dpk_) :: a2
    real(psb_dpk_), intent(in) :: x,y,z
    a2 = epsilon*decay(x,y,z)
  end function a2

  function a3(x,y,z)
    use psb_base_mod, only : psb_dpk_
    real(psb_dpk_) :: a3
    real(psb_dpk_), intent(in) :: x,y,z
    a3 = epsilon*decay(x,y,z)
  end function a3

  ! Dirichlet boundary data: one on the faces x = 0 and x = 1,
  ! zero everywhere else.
  function g(x,y,z)
    use psb_base_mod, only : psb_dpk_, done, dzero
    real(psb_dpk_) :: g
    real(psb_dpk_), intent(in) :: x,y,z
    if ((x == done) .or. (x == dzero)) then
      g = done
    else
      g = dzero
    end if
  end function g

end module pde3d_exp_mod
(*
   Copyright 2017 Yoichi Hirai

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*)

theory DynamicValidatorSet

imports Main

begin

section "Definitions Necessary to Understand Accountable Safety (not skippable)"

subsection "Hashes, Views and Validators"

text "In this development we do not know much about hashes.  There are many hashes.
Two hashes might be equal or not.  The intention is that the hashes identify blocks but we
don't have to talk about that."

datatype hash = Hash int

text "Views are numbers.  We actually need the fact that views are lined up in a total order.
Otherwise accountable safety can be broken.  We sometimes subtract views and obtain a number.
So, for convenience, views are just defined as integers.  Of course, when we are multiplying a view
by a view, that would be very strange."

type_synonym view = int

text "We have two kinds of messages.  A Commit message contains a hash and a view,
indicating that a hash is to be finalized at a view.  Many signatures on this message would
actually finalize the hash at the view.  A prepare message contains a hash and two views.
At this point a message is not signed by anybody."

datatype message =
  Commit "hash * view"
| Prepare "hash * view * view"

text "We need a set of validators.  Here, we just define a datatype containing infinitely many
validators.  Afterwards, when we look at a particular situation, each hash would specify two
validator sets."

datatype validator = Validator int

text "A message signed by a validator can be represented as a pair of a validator and a message."

type_synonym signed_message = "validator * message"

text "Almost everything in this document depends on situations.  A situation contains a set
of signed messages, two validator sets for each hash, and a function specifying parents of hashes."

text "A situation might be seen from a global point of view where every sent message can be seen,
or more likely seen from a local point of view."

(* The two validator-set fields realize the "dynamic validator set": each
   hash names the rear set (handing over) and the forward set (taking over). *)
record situation =
  RearValidators :: "hash \<Rightarrow> validator set"
  FwdValidators :: "hash \<Rightarrow> validator set"
  Messages :: "signed_message set"
  PrevHash :: "hash \<Rightarrow> hash option"

text "The accountable safety will make sure that at least one-third of the validators are slashed.
In Isabelle/HOL, the cardinality of an infinite set is defined to be zero, so we will avoid that
because it does not make sense to divide the cardinality of an infinite set by three.  We should
be talking about situations where the set of validators is finite.  At the same time, we assume
that the validator sets are not empty (I haven't tried to remove the non-emptiness assumption)."

definition validator_sets_finite :: "situation \<Rightarrow> bool"
where "validator_sets_finite s =
  (\<forall> h. finite (FwdValidators s h)
      \<and> finite (RearValidators s h)
      \<and> (\<not> (FwdValidators s h = {}))
      \<and> (\<not> (RearValidators s h = {})))"

text "A hash's previous hash's previous hash is a second-ancestor.  Later, we will see that an
honest validator will send a prepare message only after seeing enough prepare messages for the
ancestor of a particular degree.  So we need to define what is the ancestor of a particular degree."

(* Follows PrevHash n times; None when the chain runs out before n steps. *)
fun nth_ancestor :: "situation \<Rightarrow> nat \<Rightarrow> hash \<Rightarrow> hash option"
where
  "nth_ancestor _ 0 h = Some h"
| "nth_ancestor s (Suc n) h =
     (case PrevHash s h of
        None \<Rightarrow> None
      | Some h' \<Rightarrow> nth_ancestor s n h')"

text "When hashes are connected by @{term nth_ancestor} relation, they are in
ancestor-descendant relation."

definition ancestor_descendant :: "situation \<Rightarrow> hash \<Rightarrow> hash \<Rightarrow> bool"
where "ancestor_descendant s x y = (\<exists> n. nth_ancestor s n y = Some x)"

text "When two hashes are in ancestor-descendant relation in any ordering, they are on the
same chain."

definition on_same_chain :: "situation \<Rightarrow> hash \<Rightarrow> hash \<Rightarrow> bool"
where "on_same_chain s x y = (ancestor_descendant s x y \<or> ancestor_descendant s y x)"

subsection "When Hashes are Prepared and Committed"

text "Blocks can be finalized only when two-thirds of the validators commit on the block.
Also, in the slashing conditions, we will be talking about two-thirds of the validators doing
something."

text "We can lift any predicate about a validator into a predicate about a set of validators:
that two thirds of the validators satisfy the predicate."

(* Stated multiplicatively (2|vs| <= 3|{satisfying}|) so everything stays in
   integer arithmetic; this means "at least two thirds". *)
definition two_thirds :: "validator set \<Rightarrow> (validator \<Rightarrow> bool) \<Rightarrow> bool"
where "two_thirds vs f = (2 * card vs \<le> 3 * card ({n. n \<in> vs \<and> f n}))"

text "Similarly for one-third, more-than-two-thirds, and more-than-one-third."

(* "At least one third": |vs| <= 3|{satisfying}|. *)
definition one_third :: "validator set \<Rightarrow> (validator \<Rightarrow> bool) \<Rightarrow> bool"
where "one_third vs f = (card vs \<le> 3 * card ({n. n \<in> vs \<and> f n}))"

text "It matters when two-thirds of validators say something."

definition two_thirds_sent_message :: "situation \<Rightarrow> validator set \<Rightarrow> message \<Rightarrow> bool"
where "two_thirds_sent_message s vs m = two_thirds vs (\<lambda> n. (n, m) \<in> Messages s)"

text "A hash is prepared when two-thirds of the validators have sent a Prepare message
with the same content."

definition prepared :: "situation \<Rightarrow> validator set \<Rightarrow> hash \<Rightarrow> view \<Rightarrow> view \<Rightarrow> bool"
where "prepared s vs h v vsrc = (two_thirds_sent_message s vs (Prepare (h, v, vsrc)))"

text "A hash is committed when two-thirds of the validators have sent a Commit message
with the same content."

definition committed :: "situation \<Rightarrow> validator set \<Rightarrow> hash \<Rightarrow> view \<Rightarrow> bool"
where "committed s vs h v = two_thirds_sent_message s vs (Commit (h, v))"

text "As we will see, honest validators should send a prepare message only when it has enough
prepare messages at a particular view.  Those prepare messages need to be signed by two-thirds
of both the rear and the forward validators."

text "A hash at a view and a view source is prepared by the rear validators when two-thirds of
the rear validators have signed the prepare message."

definition prepared_by_rear :: "situation \<Rightarrow> hash \<Rightarrow> view \<Rightarrow> view \<Rightarrow> bool"
where "prepared_by_rear s h v vsrc = (prepared s (RearValidators s h) h v vsrc)"

text "Similarly for the forward validators."

definition prepared_by_fwd :: "situation \<Rightarrow> hash \<Rightarrow> view \<Rightarrow> view \<Rightarrow> bool"
where "prepared_by_fwd s h v vsrc = (prepared s (FwdValidators s h) h v vsrc)"

text "When both of these happens, a prepare is signed by both the rear and the forward
validator sets."

definition prepared_by_both :: "situation \<Rightarrow> hash \<Rightarrow> view \<Rightarrow> view \<Rightarrow> bool"
where "prepared_by_both s h v vsrc = (prepared_by_rear s h v vsrc \<and> prepared_by_fwd s h v vsrc)"

text "Similar definitions for commit messages."
definition committed_by_rear :: "situation \<Rightarrow> hash \<Rightarrow> view \<Rightarrow> bool"
where "committed_by_rear s h v = (committed s (RearValidators s h) h v)"

definition committed_by_fwd :: "situation \<Rightarrow> hash \<Rightarrow> view \<Rightarrow> bool"
where "committed_by_fwd s h v = (committed s (FwdValidators s h) h v)"

definition committed_by_both :: "situation \<Rightarrow> hash \<Rightarrow> view \<Rightarrow> bool"
where "committed_by_both s h v = (committed_by_rear s h v \<and> committed_by_fwd s h v)"

subsection "Following Changing Validators to Define Forks"

text "In the accountable safety statement, we need to slash 2/3 of a set of validators.
This set of validators cannot be any set, but some legitimately chosen descendant of the first
sets of validators.  We need to look at the history and see what validator set inherits the
seats.  For this, we need to see the sourcing relation of prepare messages."

text "The sourcing relation is also used in a slashing condition."

text "One type of prepare source is the normal one.  The normal source needs to have the same
rear validator set and the same forward validator set."

definition validators_match :: "situation \<Rightarrow> hash \<Rightarrow> hash \<Rightarrow> bool"
where "validators_match s h0 h1 =
  (RearValidators s h0 = RearValidators s h1 \<and>
   FwdValidators s h0 = FwdValidators s h1)"

text "Another type of sourcing allows changing the validator sets.  The forward validator set
of the source needs to coincide with the rear validator set of the newly prepared hash.  This
can only happen when the older hash has been committed by both validator sets."

definition validators_change :: "situation \<Rightarrow> hash \<Rightarrow> hash \<Rightarrow> bool"
where "validators_change s ancient next = (FwdValidators s ancient = RearValidators s next)"

(* One parent-child step along the chain that also respects the validator
   handover rule: either both validator sets match, or they change and the
   parent is committed by both of its sets. *)
fun prev_next_with_chosen_validators :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool"
where
"prev_next_with_chosen_validators s (h0, v0) (h1, v1) =
  (PrevHash s h1 = Some h0 \<and>
   v1 = v0 + 1 \<and>
   (validators_match s h0 h1 \<or> validators_change s h0 h1 \<and> committed_by_both s h0 v0))
"

(* Reflexive-transitive closure of prev_next_with_chosen_validators. *)
inductive ancestor_descendant_with_chosen_validators ::
  "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool"
where
  inheritance_self : "ancestor_descendant_with_chosen_validators s (h, v) (h, v)"
| inheritances_step :
    "ancestor_descendant_with_chosen_validators s (h0, v0) (h1, v1) \<Longrightarrow>
     prev_next_with_chosen_validators s (h1, v1) (h2, v2) \<Longrightarrow>
     ancestor_descendant_with_chosen_validators s (h0, v0) (h2, v2)"

text "Accountable safety will prevent forks (unless some number of validators are slashed).
The fork is defined using two branches whose tips do not belong to the same chain.  The branches
are made of hashes with valid validator transitions (otherwise, sometimes, we cannot blame any
validators for the fork)."

fun fork :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool"
where
"fork s (root, v) (h1, v1) (h2, v2) =
  (\<not> on_same_chain s h1 h2 \<and>
   ancestor_descendant_with_chosen_validators s (root, v) (h1, v1) \<and>
   ancestor_descendant_with_chosen_validators s (root, v) (h2, v2))"

text "A fork is particularly harmful when their tips and the root are all committed."

fun fork_with_commits :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool"
where
"fork_with_commits s (h, v) (h1, v1) (h2, v2) =
  (fork s (h, v) (h1, v1) (h2, v2) \<and>
   committed_by_both s h v \<and>
   committed_by_both s h1 v1 \<and>
   committed_by_both s h2 v2)"

subsection "Prepare Messages' Sources"

text "In the next section, we are going to determine which of the validators are slashed in
a situation."

text "One slashing condition requires sources for prepare messages.  Here we define what
constitutes a source."

(* Normal sourcing: the source h is the (v' - v_src)-th ancestor of h', is
   itself prepared by both of its validator sets, and keeps both validator
   sets unchanged. *)
fun sourcing_normal :: "situation \<Rightarrow> hash \<Rightarrow> (hash \<times> view \<times> view) \<Rightarrow> bool"
where
"sourcing_normal s h (h', v', v_src) =
  (\<exists> v_ss.
     prepared_by_both s h v_src v_ss \<and>
     -1 \<le> v_ss \<and>
     v_ss < v_src \<and>
     nth_ancestor s (nat (v' - v_src)) h' = Some h \<and>
     validators_match s h h')"

(* Validator-switching sourcing: like the normal case, but the source must
   additionally be committed by both sets and the forward set of the source
   becomes the rear set of h'. *)
fun sourcing_switching_validators :: "situation \<Rightarrow> hash \<Rightarrow> (hash \<times> view \<times> view) \<Rightarrow> bool"
where
"sourcing_switching_validators s h (h', v', v_src) =
  (\<exists> v_ss.
     prepared_by_both s h v_src v_ss \<and>
     committed_by_both s h v_src \<and>
     -1 \<le> v_ss \<and>
     v_ss < v_src \<and>
     nth_ancestor s (nat (v' - v_src)) h' = Some h \<and>
     validators_change s h h')"

text "A prepare message's source needs to be one of these two types."

definition sourcing :: "situation \<Rightarrow> hash \<Rightarrow> (hash \<times> view \<times> view) \<Rightarrow> bool"
where "sourcing s h_old tri = (sourcing_normal s h_old tri \<or> sourcing_switching_validators s h_old tri)"

subsection "Slashing Conditions"

text "In a situation, a validator might be slashed or not.  A validator is slashed individually
although later we will be often talking ``unless one-third of the validators are slashed.'' "

text "[i] A validator is slashed when it has sent a commit message of a hash
      that is not prepared yet."

definition slashed_one :: "situation \<Rightarrow> validator \<Rightarrow> bool"
where
"slashed_one s n =
  (\<exists> h v.
     ((n, Commit (h, v)) \<in> Messages s \<and>
      (\<not> (\<exists> vs. -1 \<le> vs \<and> vs < v \<and> prepared_by_both s h v vs) )))"

text "[ii] A validator is slashed when it has sent a prepare message whose
      view src is not -1 but has no supporting preparation in the view src."

definition slashed_two :: "situation \<Rightarrow> validator \<Rightarrow> bool"
where
"slashed_two s n =
  (\<exists> h v v_src.
     ((n, Prepare (h, v, v_src)) \<in> Messages s \<and>
      v_src \<noteq> -1 \<and>
      (\<not> (\<exists> h_anc.
             sourcing s h_anc (h, v, v_src)))))"

text "[iii] A validator is slashed when it has sent a commit message and a prepare message
     containing view numbers in a specific constellation."

(* The forbidden constellation: a prepare at view w sourced from u that
   straddles a commit at view v, i.e. u < v < w. *)
definition slashed_three :: "situation \<Rightarrow> validator \<Rightarrow> bool"
where
"slashed_three s n =
  (\<exists> x y v w u.
     (n, Commit (x, v)) \<in> Messages s \<and>
     (n, Prepare (y, w, u)) \<in> Messages s \<and>
     u < v \<and> v < w)"

text "[iv] A validator is slashed when it has signed two different Prepare messages at the
     same view."

definition slashed_four :: "situation \<Rightarrow> validator \<Rightarrow> bool"
where
"slashed_four s n =
  (\<exists> x1 x2 v vs1 vs2.
     (n, Prepare (x1, v, vs1)) \<in> Messages s \<and>
     (n, Prepare (x2, v, vs2)) \<in> Messages s \<and>
     (x1 \<noteq> x2 \<or> vs1 \<noteq> vs2))"

text "A validator is slashed when at least one of the above conditions [i]--[iv] hold."

definition slashed :: "situation \<Rightarrow> validator \<Rightarrow> bool"
where "slashed s n = (slashed_one s n \<or> slashed_two s n \<or> slashed_three s n \<or> slashed_four s n)"

text "We will be frequently talking about one-third of some validators being slashed."

definition one_third_slashed :: "situation \<Rightarrow> validator set \<Rightarrow> bool"
where "one_third_slashed s vs = one_third vs (slashed s)"

definition one_third_of_rear_slashed :: "situation \<Rightarrow> hash \<Rightarrow> bool"
where "one_third_of_rear_slashed s h = one_third (RearValidators s h) (slashed s)"

definition one_third_of_fwd_slashed :: "situation \<Rightarrow> hash \<Rightarrow> bool"
where "one_third_of_fwd_slashed s h = one_third (FwdValidators s h) (slashed s)"

text "In the end, accountable safety will slash at least one-third of fwd-or-rear validator sets."

definition one_third_of_fwd_or_rear_slashed :: "situation \<Rightarrow> hash \<Rightarrow> bool"
where "one_third_of_fwd_or_rear_slashed s h =
  (one_third_of_fwd_slashed s h \<or> one_third_of_rear_slashed s h)"

section "Auxiliary Things (skippable)"

subsection "Validator History Tracking"

text "In the statement of accountable safety, we need to be a bit specific about which validator
set the slashed validators belong to.  A singleton is also a validator set and the 2/3 of a random
singleton being slashed should not be significant.  So, when we have a fork, we start from the
root of the fork and identify the heirs of the initial validator sets.  Our statement says 2/3 of
a heir validator set are slashed."

text "There are two ways of inheriting the title of relevant validator set.  These correspond
to the two ways of sourcing a prepare message."

fun inherit_normal :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool"
where
"inherit_normal s (h_old, v_src) (h_new, v) =
  (prepared_by_both s h_new v v_src \<and>
   -1 \<le> v_src \<and> v_src < v \<and>
   sourcing_normal s h_old (h_new, v, v_src))"

fun inherit_switching_validators :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool"
where
"inherit_switching_validators s (h_old, v_old) (h_new, v_new) =
  (prepared_by_both s h_new v_new v_old \<and>
   -1 \<le> v_old \<and> v_old < v_new \<and>
   sourcing_switching_validators s h_old (h_new, v_new, v_old))"

text "The heir relation is just zero-or-more repetition of the inheritance."

inductive heir :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool"
where
  heir_self : "prepared_by_both s h v v_src \<Longrightarrow> heir s (h, v) (h, v)"
| heir_normal_step :
    "heir s (h, v) (h', v') \<Longrightarrow>
     inherit_normal s (h', v') (h'', v'') \<Longrightarrow>
     ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow>
     heir s (h, v) (h'', v'')"
| heir_switching_step :
    "heir s (h, v) (h', v') \<Longrightarrow>
     inherit_switching_validators s (h', v') (h'', v'') \<Longrightarrow>
     ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow>
     heir s (h, v) (h'', v'')"

text "When two hashes are not in the inheritance relation in either direction, the two hashes
are not on the same heir chain.  In the statement of accountable safety, we use this concept to
detect conflicts (which should not happen until 2/3 of a legitimate heir are slashed)."

definition on_same_heir_chain :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool"
where "on_same_heir_chain s x y = (heir s x y \<or> heir s y x)"

text "When heirs are not on the same chain of legitimacy, they have forked."
fun legitimacy_fork :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where "legitimacy_fork s (root, v) (h1, v1) (h2, v2) = (\<not> on_same_heir_chain s (h1, v1) (h2, v2) \<and> heir s (root, v) (h1, v1) \<and> heir s (root, v) (h2, v2))" text "A fork is particularly bad when the end points are committed, not only prepared." fun legitimacy_fork_with_commits :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where "legitimacy_fork_with_commits s (h, v) (h1, v1) (h2, v2) = (legitimacy_fork s (h, v) (h1, v1) (h2, v2) \<and> committed_by_both s h v \<and> committed_by_both s h1 v1 \<and> committed_by_both s h2 v2)" subsection "Sets and Arithmetics" lemma sum_suc_disj : "n_one + n_two \<le> Suc k \<Longrightarrow> n_one + n_two \<le> k \<or> n_one + n_two = Suc k" using le_SucE by blast lemma sum_eq_disj : "((n_one :: nat) \<le> 1 \<and> (n_two :: nat) \<le> 1) \<or> n_one > 1 \<or> n_two > 1 " by auto lemma sum_eq_case1 : "n_one + n_two = Suc k \<Longrightarrow> n_one > 1 \<Longrightarrow> \<exists> n_one_pre. n_one_pre \<ge> 1 \<and> n_one = Suc n_one_pre \<and> n_one_pre + n_two = k" using less_imp_Suc_add by fastforce lemma sum_eq_case2 : "n_one + n_two = Suc k \<Longrightarrow> n_two > 1 \<Longrightarrow> \<exists> n_two_pre. n_two_pre \<ge> 1 \<and> n_two = Suc n_two_pre \<and> n_one + n_two_pre = k" by presburger lemma sum_suc : "n_one + n_two \<le> Suc k \<Longrightarrow> n_one + n_two \<le> k \<or> n_one \<le> 1 \<and> n_two \<le> 1 \<or> (\<exists> n_one_pre. n_one_pre \<ge> 1 \<and> n_one = Suc n_one_pre \<and> n_one_pre + n_two = k) \<or> (\<exists> n_two_pre. n_two_pre \<ge> 1 \<and> n_two = Suc n_two_pre \<and> n_one + n_two_pre = k) " using sum_eq_case1 sum_eq_case2 by auto lemma card_not [simp] : "finite s \<Longrightarrow> card {n \<in> s. \<not> f n} = card s - card {n \<in> s. 
f n}" proof - assume "finite s" then have "card (s - {n \<in> s. f n}) = card s - card {n \<in> s. f n}" by (rule_tac Finite_Set.card_Diff_subset; auto) moreover have "{n \<in> s. \<not> f n} = s - {n \<in> s. f n}" by blast ultimately show ?thesis by auto qed lemma set_conj [simp] : "{n \<in> s. f n \<and> g n} = {n \<in> s. f n} \<inter> {n \<in> s. g n}" by blast lemma two_more_two_set : "finite s \<Longrightarrow> 2 * card s \<le> 3 * card {n \<in> s. f n} \<Longrightarrow> 2 * card s < 3 * card {n \<in> s. g n} \<Longrightarrow> card s < 3 * card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) " proof - assume "finite s" then have " card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) = card {n \<in> s. f n} + card { n \<in> s. g n} - card ({n \<in> s. f n} \<union> {n \<in> s. g n})" proof - assume "finite s" then have "card {n \<in> s. f n} + card { n \<in> s. g n} = card ({n \<in> s. f n} \<union> {n \<in> s. g n}) + card ({n \<in> s. f n} \<inter> {n \<in> s. g n})" by (rule_tac Finite_Set.card_Un_Int; auto) then show ?thesis by auto qed moreover assume "finite s" then moreover have "card ({n \<in> s. f n} \<union> {n \<in> s. g n}) \<le> card s" by (rule Finite_Set.card_mono; auto) ultimately have "card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) \<ge> card {n \<in> s. f n} + card { n \<in> s. g n} - card s" by auto then have "3 * card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) \<ge> 3 * card {n \<in> s. f n} + 3 * card { n \<in> s. g n} - 3 * card s" by auto moreover assume "2 * card s \<le> 3 * card {n \<in> s. f n}" ultimately have "3 * card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) \<ge> 3 * card { n \<in> s. g n} - card s" by auto moreover assume "2 * card s < 3 * card {n \<in> s. g n}" ultimately have "3 * card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) > card s" by auto then show ?thesis by auto qed lemma card_nonzero_exists : "card {n \<in> s. f n} > 0 \<Longrightarrow> \<exists> n \<in> s. 
f n" (* sledgehammer *) by (metis (no_types, lifting) Collect_empty_eq card_0_eq card_ge_0_finite not_gr_zero) lemma card_conj_le : "finite s \<Longrightarrow> card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) = card {n \<in> s. f n} + card { n \<in> s. g n} - card ({n \<in> s. f n} \<union> {n \<in> s. g n})" proof - assume "finite s" then have "card {n \<in> s. f n} + card { n \<in> s. g n} = card ({n \<in> s. f n} \<union> {n \<in> s. g n}) + card ({n \<in> s. f n} \<inter> {n \<in> s. g n})" by (rule_tac Finite_Set.card_Un_Int; auto) then show ?thesis by auto qed lemma two_two_set : "2 * card s \<le> 3 * card {n \<in> s. f n} \<Longrightarrow> 2 * card s \<le> 3 * card {n \<in> s. g n} \<Longrightarrow> finite s \<Longrightarrow> card s \<le> 3 * card ({n \<in> s. f n} \<inter> {n \<in> s. g n})" proof - assume "finite s" then have " card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) = card {n \<in> s. f n} + card { n \<in> s. g n} - card ({n \<in> s. f n} \<union> {n \<in> s. g n})" proof - assume "finite s" then have "card {n \<in> s. f n} + card { n \<in> s. g n} = card ({n \<in> s. f n} \<union> {n \<in> s. g n}) + card ({n \<in> s. f n} \<inter> {n \<in> s. g n})" by (rule_tac Finite_Set.card_Un_Int; auto) then show ?thesis by auto qed moreover assume "finite s" then moreover have "card ({n \<in> s. f n} \<union> {n \<in> s. g n}) \<le> card s" by (rule Finite_Set.card_mono; auto) ultimately have "card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) \<ge> card {n \<in> s. f n} + card { n \<in> s. g n} - card s" by auto then have "3 * card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) \<ge> 3 * card {n \<in> s. f n} + 3 * card { n \<in> s. g n} - 3 * card s" by auto moreover assume "2 * card s \<le> 3 * card {n \<in> s. f n}" ultimately have "3 * card ({n \<in> s. f n} \<inter> {n \<in> s. g n}) \<ge> 3 * card { n \<in> s. g n} - card s" by auto moreover assume "2 * card s \<le> 3 * card {n \<in> s. g n}" ultimately have "3 * card ({n \<in> s. 
f n} \<inter> {n \<in> s. g n}) \<ge> card s" by auto then show ?thesis by auto qed lemma inclusion_card_le : "\<forall>n. n \<in> Validators s \<longrightarrow> f n \<longrightarrow> g n \<Longrightarrow> finite (Validators s) \<Longrightarrow> card {n \<in> Validators s. f n} \<le> card {n \<in> Validators s. g n}" proof - assume "\<forall> n. n \<in> Validators s \<longrightarrow> f n \<longrightarrow> g n" moreover assume "finite (Validators s)" ultimately show "card {n \<in> Validators s. f n} \<le> card {n \<in> Validators s. g n}" proof - assume "\<forall> n. n \<in> Validators s \<longrightarrow> f n \<longrightarrow> g n" then have "{n \<in> Validators s. f n} \<subseteq> {n \<in> Validators s. g n}" by blast moreover assume "finite (Validators s)" ultimately show ?thesis by (simp add: card_mono) qed qed lemma nat_min_min : " vs1 < v \<Longrightarrow> \<not> vs1 < c_view \<Longrightarrow> (nat (v - vs1) + nat (vs1 - c_view)) = nat (v - c_view)" by (simp add: Nat_Transfer.transfer_nat_int_functions(1)) lemma view_total [simp]: "(v2 :: view) \<le> v1 \<or> v1 \<le> v2" apply auto done lemma sum_is_suc_dest : "Suc n = n1 + n2 \<Longrightarrow> ((\<exists> n1'. n1 = Suc n1' \<and> n = n1' + n2) \<or> (\<exists> n2'. n2 = Suc n2' \<and> n = n1 + n2')) " apply (case_tac n1; auto) done (* Finding a max element in a set of integers *) lemma find_max_ind_step : "\<forall>u. n = nat (u - s) \<longrightarrow> s \<in> (S :: int set) \<longrightarrow> (\<forall>x. x \<in> S \<longrightarrow> x \<le> u) \<longrightarrow> (\<exists>m. m \<in> S \<and> (\<forall>y>m. y \<notin> S)) \<Longrightarrow> Suc n = nat (u - s) \<Longrightarrow> s \<in> S \<Longrightarrow> \<forall>x. x \<in> S \<longrightarrow> x \<le> u \<Longrightarrow> \<exists>m. m \<in> S \<and> (\<forall>y>m. y \<notin> S)" apply(case_tac "\<forall> x. x \<in> S \<longrightarrow> x \<le> u - 1") apply(drule_tac x = "u - 1" in spec) apply simp by force lemma find_max_ind : "\<forall> u. 
n = nat (u - s) \<longrightarrow> s \<in> (S :: int set) \<longrightarrow> (\<forall> x. x \<in> S \<longrightarrow> x \<le> u) \<longrightarrow> (\<exists> m. m \<in> S \<and> (\<forall> y. y > m \<longrightarrow> y \<notin> S))" apply(induction n; auto) apply force apply(rule_tac n = n and u = u and S = S and s = s in find_max_ind_step; auto) done lemma find_max : "s \<in> (S :: int set) \<Longrightarrow> \<forall> x. x \<in> S \<longrightarrow> x \<le> u \<Longrightarrow> \<exists> m. m \<in> S \<and> (\<forall> y. y > m \<longrightarrow> y \<notin> S)" using find_max_ind by auto lemma one_third_mp : "finite X \<Longrightarrow> \<forall> v. p v \<longrightarrow> q v \<Longrightarrow> one_third X p \<Longrightarrow> one_third X q" apply(simp add: one_third_def) apply(subgoal_tac "card {n \<in> X. p n} \<le> card {n \<in> X. q n}") apply linarith apply(subgoal_tac "finite {n \<in> X. q n}") apply(subgoal_tac "{n \<in> X. p n} \<subseteq> {n \<in> X. q n}") using card_mono apply blast apply blast by simp lemma two_thirds_two_thirds_one_third : "finite X \<Longrightarrow> two_thirds X p \<Longrightarrow> two_thirds X q \<Longrightarrow> one_third X (\<lambda> x. 
p x \<and> q x) " apply(simp add: two_thirds_def one_third_def) apply(rule_tac two_two_set) apply simp apply simp apply simp done subsection "Validator History Tracking" lemma ancestor_with_same_view : "ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<Longrightarrow> snd (h, v) \<le> snd (h1, v1) \<and> (snd (h, v) = snd (h1, v1) \<longrightarrow> fst (h, v) = fst (h1, v1))" apply(induction rule: ancestor_descendant_with_chosen_validators.induct) apply simp apply auto done lemma heir_increases_view : "heir s t t' \<Longrightarrow> snd t \<le> snd t'" apply(induction rule: heir.induct; auto) done inductive heir_after_n_switching :: "nat \<Rightarrow> situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where heir_n_self : "prepared_by_both s h v v_src \<Longrightarrow> heir_after_n_switching 0 s (h, v) (h, v)" | heir_n_normal_step : "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow> inherit_normal s (h', v') (h'', v'') \<Longrightarrow> heir_after_n_switching n s (h, v) (h'', v'')" | heir_n_switching_step : "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow> inherit_switching_validators s (h', v') (h'', v'') \<Longrightarrow> heir_after_n_switching (Suc n) s (h, v) (h'', v'')" lemma every_heir_is_after_n_switching : "heir s p0 p1 \<Longrightarrow> \<exists> n. 
heir_after_n_switching n s p0 p1" apply(induction rule: heir.induct) apply(rule_tac x = 0 in exI) apply (simp add: heir_n_self) apply clarify apply(rule_tac x = n in exI) apply(rule heir_n_normal_step; blast) apply clarify using heir_n_switching_step by blast fun legitimacy_fork_with_n_switching :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> nat \<Rightarrow> (hash \<times> view) \<Rightarrow> nat \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where "legitimacy_fork_with_n_switching s (root, v) n1 (h1, v1) n2 (h2, v2) = (\<not> on_same_heir_chain s (h1, v1) (h2, v2) \<and> heir_after_n_switching n1 s (root, v) (h1, v1) \<and> heir_after_n_switching n2 s (root, v) (h2, v2))" lemma legitimacy_fork_has_n_switching : "legitimacy_fork s (r, v) (h1, v1) (h2, v2) \<Longrightarrow> \<exists> n1 n2. legitimacy_fork_with_n_switching s (r, v) n1 (h1, v1) n2 (h2, v2)" apply(simp) using every_heir_is_after_n_switching by blast lemma ancestor_descendant_with_chosen_validators_trans: "ancestor_descendant_with_chosen_validators s (h1, v1) (h2, v2) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h0, v0) (h1, v1) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h0, v0) (h2, v2)" apply(induction rule: ancestor_descendant_with_chosen_validators.induct) apply blast using inheritances_step by blast lemma heir_decomposition : "heir s (h, v) (h'', v'') \<Longrightarrow> ((\<exists> v_src. h = h'' \<and> v = v'' \<and> prepared_by_both s h v v_src) \<or> (\<exists> h' v'. heir s (h, v) (h', v') \<and> inherit_normal s (h', v') (h'', v'')) \<or> (\<exists> h' v'. 
heir s (h, v) (h', v') \<and> inherit_switching_validators s (h', v') (h'', v'')) )" apply(erule_tac DynamicValidatorSet.heir.cases) apply(rule disjI1) apply blast apply(rule disjI2) apply(rule disjI1) apply blast apply(rule disjI2) apply(rule disjI2) by blast lemma heir_same_height : "heir s (h', v) (h, v) \<Longrightarrow> h' = h" apply(drule heir_decomposition) using heir_increases_view by fastforce fun legitimacy_fork_with_center :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where "legitimacy_fork_with_center s (h_orig, v_orig) (h, v) (h1, v1) (h2, v2) = (legitimacy_fork s (h, v) (h1, v1) (h2, v2) \<and> heir s (h_orig, v_orig) (h, v) \<and> (* This is used to connect the whole setup with the original statement *) committed_by_both s h v \<and> committed_by_both s h1 v1 \<and> committed_by_both s h2 v2)" fun legitimacy_fork_with_center_with_n_switching :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> nat \<Rightarrow> (hash \<times> view) \<Rightarrow> nat \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where "legitimacy_fork_with_center_with_n_switching s (h_orig, v_orig) (h, v) n1 (h1, v1) n2 (h2, v2) = (legitimacy_fork_with_n_switching s (h, v) n1 (h1, v1) n2 (h2, v2) \<and> heir s (h_orig, v_orig) (h, v) \<and> (* This is used to connect the whole setup with the original statement *) committed_by_both s h v \<and> committed_by_both s h1 v1 \<and> committed_by_both s h2 v2)" lemma legitimacy_fork_with_center_has_n_switching : "legitimacy_fork_with_center s (h_orig, v_orig) (h, v) (h1, v1) (h2, v2) \<Longrightarrow> \<exists> n1 n2. 
legitimacy_fork_with_center_with_n_switching s (h_orig, v_orig) (h, v) n1 (h1, v1) n2 (h2, v2)" apply simp using every_heir_is_after_n_switching by blast fun legitimacy_fork_root_views :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> view set" where "legitimacy_fork_root_views s (h_orig, v_orig) (h1, v1) (h2, v2) = { v. (\<exists> h. legitimacy_fork_with_center s (h_orig, v_orig) (h, v) (h1, v1) (h2, v2)) }" (* It's convenient to have a fork's root as the latest commit immediately before the fork. * Otherwise the induction has hairier case analysis. *) fun legitimacy_fork_with_center_with_high_root :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where "legitimacy_fork_with_center_with_high_root s (h_orig, v_orig) (h, v) (h1, v1) (h2, v2) = (legitimacy_fork_with_center s (h_orig, v_orig) (h, v) (h1, v1) (h2, v2) \<and> (\<forall> h' v'. v' > v \<longrightarrow> \<not> legitimacy_fork_with_center s (h_orig, v_orig) (h', v') (h1, v1) (h2, v2)))" fun legitimacy_fork_with_center_with_high_root_with_n_switching :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> nat \<Rightarrow> (hash \<times> view) \<Rightarrow> nat \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where "legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n1 (h1, v1) n2 (h2, v2) = (legitimacy_fork_with_center_with_n_switching s (h_orig, v_orig) (h, v) n1 (h1, v1) n2 (h2, v2) \<and> (\<forall> h' v'. v' > v \<longrightarrow> \<not> legitimacy_fork_with_center s (h_orig, v_orig) (h', v') (h1, v1) (h2, v2)))" lemma legitimacy_fork_with_center_with_high_root_has_n_switching : "legitimacy_fork_with_center_with_high_root s (h_orig, v_orig) (h, v) (h1, v1) (h2, v2) \<Longrightarrow> \<exists> n1 n2. 
legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n1 (h1, v1) n2 (h2, v2)" apply simp using every_heir_is_after_n_switching by blast lemma legitimacy_fork_with_center_choose_high_root : "legitimacy_fork_with_center s (h_orig, v_orig) (h, v) (h1, v1) (h2, v2) \<Longrightarrow> \<exists> h' v'. legitimacy_fork_with_center_with_high_root s (h_orig, v_orig) (h', v') (h1, v1) (h2, v2)" proof - assume "legitimacy_fork_with_center s (h_orig, v_orig) (h, v) (h1, v1) (h2, v2)" then have "v \<in> legitimacy_fork_root_views s (h_orig, v_orig) (h1, v1) (h2, v2)" by auto moreover have "\<forall> x. x \<in> legitimacy_fork_root_views s (h_orig, v_orig) (h1, v1) (h2, v2) \<longrightarrow> x \<le> v1" using heir_increases_view by auto ultimately have "\<exists> m. m \<in> legitimacy_fork_root_views s (h_orig, v_orig) (h1, v1) (h2, v2) \<and> (\<forall> y. y > m \<longrightarrow> y \<notin> legitimacy_fork_root_views s (h_orig, v_orig) (h1, v1) (h2, v2))" by(rule_tac find_max; auto) then show ?thesis by (clarsimp; blast) qed lemma forget_number_of_switching: "heir_after_n_switching n s (h_twoa, v_twoa) (h_one, v_one) \<Longrightarrow> heir s (h_twoa, v_twoa) (h_one, v_one)" apply(induction rule: heir_after_n_switching.induct) apply (simp add: heir_self) using heir_normal_step apply blast using heir_switching_step by blast lemma inherit_normal_means_heir : "inherit_normal s (h', v') (h'', v'') \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow> heir s (h', v') (h'', v'')" by (meson heir_normal_step heir_self inherit_normal.simps sourcing_normal.simps) lemma chain_and_inherit : "inherit_normal s (h', v') (h'', v'') \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow> v_two \<le> snd (h'', v'') \<Longrightarrow> \<not> on_same_heir_chain s (h'', v'') (h_two, v_two) \<Longrightarrow> v_two \<le> snd (h', v') \<Longrightarrow> on_same_heir_chain s (h', v') 
(h_two, v_two) \<Longrightarrow> False" apply(subgoal_tac "heir s (h', v') (h'', v'')") apply(simp only: on_same_heir_chain_def) apply(erule disjE) using heir_increases_view heir_same_height apply fastforce using heir_normal_step apply blast using inherit_normal_means_heir by blast lemma one_validator_change_leaves_one_set : "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> n \<le> Suc 0 \<Longrightarrow> n = 0 \<and> FwdValidators s (fst (h, v)) = FwdValidators s (fst (h', v')) \<or> n = 1 \<and> FwdValidators s (fst (h, v)) = RearValidators s (fst (h', v'))" apply(induction rule: heir_after_n_switching.induct) apply blast apply (simp add: validators_match_def) apply blast apply(subgoal_tac "n = 0") defer apply linarith by (metis (no_types, lifting) One_nat_def fstI inherit_switching_validators.elims(2) sourcing_switching_validators.simps validators_change_def zero_le_one zero_neq_one) lemma prepared_by_fwd_of_origin : " n \<le> Suc 0 \<Longrightarrow> heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> inherit_normal s (h', v') (h'', v'') \<Longrightarrow> prepared s (FwdValidators s h) h'' v'' v' " apply(simp only: inherit_normal.simps prepared_by_both_def prepared_by_fwd_def prepared_by_rear_def) apply(subgoal_tac " (FwdValidators s h) = (FwdValidators s h'') \<or> (FwdValidators s h) = (RearValidators s h'')") apply auto[1] by (metis fst_conv one_validator_change_leaves_one_set sourcing_normal.simps validators_match_def) lemma heir_found_switching : "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> inherit_switching_validators s (h', v') (h'', v'') \<Longrightarrow> 0 < Suc n \<Longrightarrow> \<exists>h_one v_one h_two v_two. 
heir_after_n_switching (Suc n - 1) s (h, v) (h_one, v_one) \<and> inherit_switching_validators s (h_one, v_one) (h_two, v_two) \<and> heir_after_n_switching 0 s (h_two, v_two) (h'', v'')" apply(rule_tac x = h' in exI) apply(rule_tac x = v' in exI) apply(rule_tac x = h'' in exI) apply(rule_tac x = v'' in exI) apply simp using heir_n_self by blast lemma heir_trans : "heir s (h_r, v_r) (h', v') \<Longrightarrow> heir s (h, v) (h_r, v_r) \<Longrightarrow> heir s (h, v) (h', v')" apply(induction rule: heir.induct; auto) apply(rule_tac h' = h' and v' = v' in heir_normal_step; auto) apply(rule_tac h' = h' and v' = v' in heir_switching_step; auto) done lemma heir_normal_extend : "(\<exists>h_one v_one h_two v_two. heir_after_n_switching n s (h, v) (h_one, v_one) \<and> inherit_switching_validators s (h_one, v_one) (h_two, v_two) \<and> heir_after_n_switching 0 s (h_two, v_two) (h', v')) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow> inherit_normal s (h', v') (h'', v'') \<Longrightarrow> (\<exists>h_one v_one h_two v_two. heir_after_n_switching n s (h, v) (h_one, v_one) \<and> inherit_switching_validators s (h_one, v_one) (h_two, v_two) \<and> heir_after_n_switching 0 s (h_two, v_two) (h'', v''))" apply clarify apply(rule_tac x = h_one in exI) apply(rule_tac x = v_one in exI) apply(rule_tac x = h_two in exI) apply(rule_tac x = v_two in exI) apply simp using heir_n_normal_step inherit_normal.simps sourcing_normal.simps by blast lemma heir_after_one_or_more_switching_dest : "heir_after_n_switching na s (h, v) (h_three, v_three) \<Longrightarrow> na > 0 \<Longrightarrow> (\<exists> h_one v_one h_two v_two. 
heir_after_n_switching (na - 1) s (h, v) (h_one, v_one) \<and> ancestor_descendant_with_chosen_validators s (h_one, v_one) (h_two, v_two) \<and> inherit_switching_validators s (h_one, v_one) (h_two, v_two) \<and> heir_after_n_switching 0 s (h_two, v_two) (h_three, v_three))" apply(induction rule: heir_after_n_switching.induct) apply simp using heir_n_normal_step apply blast by (metis diff_Suc_1 heir_n_self inherit_switching_validators.simps) lemma high_point_still_high : (* remove unnecessary assumptions *) "1 \<le> n_one_pre \<Longrightarrow> \<forall>h' v'. v < v' \<longrightarrow> \<not> legitimacy_fork_with_center s (h_orig, v_orig) (h', v') (h_one, v_one) (h_two, v_two) \<Longrightarrow> \<not> on_same_heir_chain s (h_one, v_one) (h_two, v_two) \<Longrightarrow> heir s (h_orig, v_orig) (h, v) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h_one v_one \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> heir_after_n_switching (Suc n_one_pre - 1) s (h, v) (h_onea, v_onea) \<Longrightarrow> inherit_switching_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> heir_after_n_switching 0 s (h_twoa, v_twoa) (h_one, v_one) \<Longrightarrow> \<forall>h' v'. 
v < v' \<longrightarrow> \<not> legitimacy_fork_with_center s (h_orig, v_orig) (h', v') (h_onea, v_onea) (h_two, v_two)" apply(rule allI) apply(rule allI) apply(drule_tac x = h' in spec) apply(drule_tac x = v' in spec) apply(rule impI) by (metis forget_number_of_switching legitimacy_fork.simps legitimacy_fork_with_center.simps heir_switching_step heir_trans) lemma at_least_one_switching_means_higher : "heir_after_n_switching n_one_pre s (h, v) (h_onea, v_onea) \<Longrightarrow> Suc 0 \<le> n_one_pre \<Longrightarrow> snd (h, v) < snd (h_onea, v_onea)" apply(induction rule: heir_after_n_switching.induct; auto) using forget_number_of_switching heir_increases_view by fastforce lemma shallower_legitimacy_fork : "heir s (h_orig, v_orig) (h, v) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h_one v_one \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> heir_after_n_switching (Suc n_one_pre - 1) s (h, v) (h_onea, v_onea) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> inherit_switching_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> heir_after_n_switching 0 s (h_twoa, v_twoa) (h_one, v_one) \<Longrightarrow> \<not> heir s (h_two, v_two) (h_one, v_one) \<Longrightarrow> \<not> heir s (h_one, v_one) (h_two, v_two) \<Longrightarrow> heir s (h_onea, v_onea) (h_two, v_two) \<Longrightarrow> v < v_onea \<Longrightarrow> legitimacy_fork_with_center s (h_orig, v_orig) (h_onea, v_onea) (h_one, v_one) (h_two, v_two)" apply(simp only: legitimacy_fork_with_center.simps) apply(rule conjI) apply(simp only:legitimacy_fork.simps) apply (meson forget_number_of_switching heir_self heir_switching_step heir_trans inherit_switching_validators.simps on_same_heir_chain_def sourcing_switching_validators.simps) by (meson forget_number_of_switching heir_trans inherit_switching_validators.simps 
sourcing_switching_validators.simps) lemma on_same_heir_chain_sym : "on_same_heir_chain s (h_one, v_one) (h_two, v_two) = on_same_heir_chain s (h_two, v_two) (h_one, v_one)" using on_same_heir_chain_def by auto lemma legitimacy_fork_with_center_with_high_root_with_n_switching_sym : "legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_two (h_two, v_two) \<Longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_two (h_two, v_two) n_one (h_one, v_one)" apply auto using on_same_heir_chain_sym by blast subsection "Slashing Related" lemma slashed_four_means_slashed_on_a_group: "finite X \<Longrightarrow> one_third X (slashed_four s) \<Longrightarrow> one_third X (slashed s)" using one_third_mp slashed_def by blast lemma slashed_four_on_a_group: " finite (FwdValidators s h) \<Longrightarrow> prepared s (FwdValidators s h) h'' v'' v' \<Longrightarrow> \<exists>v_two_src. prepared s (FwdValidators s h) h_two v'' v_two_src \<Longrightarrow> h'' \<noteq> h_two \<Longrightarrow> one_third (FwdValidators s h) (slashed_four s)" apply(simp only: prepared_def two_thirds_sent_message_def) apply(erule exE) apply(subgoal_tac "one_third (FwdValidators s h) (\<lambda>n. (n, Prepare (h'', v'', v')) \<in> Messages s \<and> (n, Prepare (h_two, v'', v_two_src)) \<in> Messages s) ") apply(subgoal_tac "\<forall> n. 
((n, Prepare (h'', v'', v')) \<in> Messages s \<and> (n, Prepare (h_two, v'', v_two_src)) \<in> Messages s) \<longrightarrow> slashed_four s n") apply (simp add: one_third_mp) using slashed_four_def apply blast by (simp add: two_thirds_two_thirds_one_third) lemma committed_so_prepared : " finite (FwdValidators s h) \<Longrightarrow> n_two \<le> Suc 0 \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v'') \<Longrightarrow> committed_by_both s h_two v'' \<Longrightarrow> \<not> one_third (FwdValidators s h) (slashed s) \<Longrightarrow> prepared s (FwdValidators s h) h'' v'' v' \<Longrightarrow> \<exists>v_two_src. prepared s (FwdValidators s h) h_two v'' v_two_src" apply(subgoal_tac "committed s (FwdValidators s h) h_two v''") apply (metis eq_fst_iff forget_number_of_switching heir_decomposition inherit_normal.simps inherit_switching_validators.simps one_validator_change_leaves_one_set prepared_by_both_def prepared_by_fwd_def prepared_by_rear_def) using committed_by_both_def committed_by_fwd_def committed_by_rear_def one_validator_change_leaves_one_set by fastforce lemma slashed_three_on_a_group : "finite X \<Longrightarrow> one_third X (\<lambda>n. (n, Prepare (h'', v'', v')) \<in> Messages s \<and> (n, Commit (h_two, v_two)) \<in> Messages s) \<Longrightarrow> v' < v_two \<Longrightarrow> v_two < v'' \<Longrightarrow> one_third X (slashed_three s)" apply(rule one_third_mp; auto simp add: slashed_three_def) apply blast done lemma slashed_three_on_group: " finite (FwdValidators s (fst (h, v))) \<Longrightarrow> one_third (FwdValidators s h) (\<lambda>n. (n, Prepare (h'', v'', v')) \<in> Messages s \<and> (n, Commit (h_two, v_two)) \<in> Messages s) \<Longrightarrow> v' < v_two \<Longrightarrow> v_two < v'' \<Longrightarrow> one_third (FwdValidators s h) (slashed_three s)" proof - assume a1: "v' < v_two" assume a2: "v_two < v''" assume a3: "one_third (FwdValidators s h) (\<lambda>n. 
(n, Prepare (h'', v'', v')) \<in> Messages s \<and> (n, Commit (h_two, v_two)) \<in> Messages s)" assume a4: "finite (FwdValidators s (fst (h, v)))" have f5: "\<not> 0 \<le> v' + - 1 * v_two" using a1 by force have f6: "\<not> v'' + - 1 * v_two \<le> 0" using a2 by auto have f7: "\<forall>V p pa. (infinite V \<or> (\<exists>v. p v \<and> \<not> pa v) \<or> \<not> one_third V p) \<or> one_third V pa" by (meson one_third_mp) obtain vv :: "(validator \<Rightarrow> bool) \<Rightarrow> (validator \<Rightarrow> bool) \<Rightarrow> validator" where "\<forall>x0 x1. (\<exists>v3. x1 v3 \<and> \<not> x0 v3) = (x1 (vv x0 x1) \<and> \<not> x0 (vv x0 x1))" by moura then have f8: "\<forall>V p pa. (infinite V \<or> p (vv pa p) \<and> \<not> pa (vv pa p) \<or> \<not> one_third V p) \<or> one_third V pa" using f7 by presburger have f9: "\<forall>x1 x2. ((x2::int) < x1) = (\<not> x1 + - 1 * x2 \<le> 0)" by auto have "\<forall>x0 x2. ((x0::int) < x2) = (\<not> 0 \<le> x0 + - 1 * x2)" by linarith then have "\<not> ((vv (slashed_three s) (\<lambda>v. (v, Prepare (h'', v'', v')) \<in> Messages s \<and> (v, Commit (h_two, v_two)) \<in> Messages s), Prepare (h'', v'', v')) \<in> Messages s \<and> (vv (slashed_three s) (\<lambda>v. (v, Prepare (h'', v'', v')) \<in> Messages s \<and> (v, Commit (h_two, v_two)) \<in> Messages s), Commit (h_two, v_two)) \<in> Messages s) \<or> slashed_three s (vv (slashed_three s) (\<lambda>v. (v, Prepare (h'', v'', v')) \<in> Messages s \<and> (v, Commit (h_two, v_two)) \<in> Messages s))" using f9 f6 f5 slashed_three_def by blast then show ?thesis using f8 a4 a3 by fastforce qed lemma smaller_induction_same_height_violation : "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> finite (FwdValidators s h) \<Longrightarrow> prepared_by_both s h'' v'' v' \<and> (\<exists>v_ss. 
prepared_by_both s h' v' v_ss) \<and> - 1 \<le> v' \<and> v' < v'' \<and> nth_ancestor s (nat (v'' - v')) h'' = Some h' \<and> validators_match s h' h'' \<Longrightarrow> n \<le> Suc 0 \<Longrightarrow> n_two \<le> Suc 0 \<Longrightarrow> \<not> on_same_heir_chain s (h'', v'') (h_two, v'') \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v'') \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h_two v'' \<Longrightarrow> \<not> one_third (FwdValidators s h) (slashed s) \<Longrightarrow> prepared s (FwdValidators s h) h'' v'' v' \<Longrightarrow> False" apply(subgoal_tac "\<exists> v_two_src. prepared s (FwdValidators s h) h_two v'' v_two_src") apply(subgoal_tac "h'' \<noteq> h_two") apply(subgoal_tac "one_third (FwdValidators s h) (slashed_four s)") using slashed_four_means_slashed_on_a_group apply blast using slashed_four_on_a_group apply blast using heir_self on_same_heir_chain_def apply blast using committed_so_prepared by blast lemma smaller_induction_skipping_violation : "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> finite (FwdValidators s h) \<Longrightarrow> prepared_by_both s h'' v'' v' \<and> (\<exists>v_ss. 
prepared_by_both s h' v' v_ss) \<and> - 1 \<le> v' \<and> nth_ancestor s (nat (v'' - v')) h'' = Some h' \<and> validators_match s h' h'' \<Longrightarrow> v_two \<le> v'' \<Longrightarrow> n \<le> Suc 0 \<Longrightarrow> n_two \<le> Suc 0 \<Longrightarrow> \<not> on_same_heir_chain s (h'', v'') (h_two, v_two) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> \<not> one_third (FwdValidators s h) (slashed s) \<Longrightarrow> \<not> v_two \<le> v' \<Longrightarrow> prepared s (FwdValidators s h) h'' v'' v' \<Longrightarrow> v_two \<noteq> v'' \<Longrightarrow> False" apply(subgoal_tac "one_third (FwdValidators s h) (slashed_three s)") using one_third_mp slashed_def apply blast apply(subgoal_tac "committed s (FwdValidators s h) h_two v_two") apply(simp add: prepared_def committed_def two_thirds_sent_message_def) apply(subgoal_tac "one_third (FwdValidators s h) (\<lambda>n. 
(n, Prepare (h'', v'', v')) \<in> Messages s \<and> (n, Commit (h_two, v_two)) \<in> Messages s)") apply(subgoal_tac "v_two > v'") apply(subgoal_tac "v_two < v''") using slashed_three_on_a_group apply blast apply linarith apply linarith apply(rule two_thirds_two_thirds_one_third; simp) by (metis committed_by_both_def committed_by_fwd_def committed_by_rear_def fst_conv one_validator_change_leaves_one_set) lemma smaller_induction_case_normal: "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> (finite (FwdValidators s (fst (h, v))) \<Longrightarrow> v_two \<le> snd (h', v') \<Longrightarrow> n \<le> Suc 0 \<Longrightarrow> n_two \<le> Suc 0 \<Longrightarrow> \<not> on_same_heir_chain s (h', v') (h_two, v_two) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s (fst (h, v)) (snd (h, v)) \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> \<not> one_third (FwdValidators s (fst (h, v))) (slashed s) \<Longrightarrow> False) \<Longrightarrow> inherit_normal s (h', v') (h'', v'') \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow> finite (FwdValidators s (fst (h, v))) \<Longrightarrow> v_two \<le> snd (h'', v'') \<Longrightarrow> n \<le> Suc 0 \<Longrightarrow> n_two \<le> Suc 0 \<Longrightarrow> \<not> on_same_heir_chain s (h'', v'') (h_two, v_two) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s (fst (h, v)) (snd (h, v)) \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> \<not> one_third (FwdValidators s (fst (h, v))) (slashed s) \<Longrightarrow> False" apply(case_tac "v_two \<le> snd (h', v')") apply(case_tac "on_same_heir_chain s (h', v') (h_two, v_two)") using chain_and_inherit apply blast apply blast (* The group in question has prepared at v'' *) apply(subgoal_tac "prepared s (FwdValidators s h) h'' v'' v'") defer using prepared_by_fwd_of_origin apply blast 
apply(case_tac "v_two = v''") apply simp using smaller_induction_same_height_violation apply blast apply simp using smaller_induction_skipping_violation by blast lemma some_h : "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow> inherit_switching_validators s (h', v') (h'', v'') \<Longrightarrow> heir s (h', v') (h'', v'')" apply(subgoal_tac "\<exists> x. prepared_by_both s h' v' x") using heir_self heir_switching_step apply blast by auto lemma smaller_induction_switching_case: "heir_after_n_switching n s (h, v) (h', v') \<Longrightarrow> (finite (FwdValidators s (fst (h, v))) \<Longrightarrow> v_two \<le> snd (h', v') \<Longrightarrow> n \<le> Suc 0 \<Longrightarrow> n_two \<le> Suc 0 \<Longrightarrow> \<not> on_same_heir_chain s (h', v') (h_two, v_two) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s (fst (h, v)) (snd (h, v)) \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> \<not> one_third (FwdValidators s (fst (h, v))) (slashed s) \<Longrightarrow> False) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h', v') (h'', v'') \<Longrightarrow> inherit_switching_validators s (h', v') (h'', v'') \<Longrightarrow> finite (FwdValidators s (fst (h, v))) \<Longrightarrow> v_two \<le> snd (h'', v'') \<Longrightarrow> Suc n \<le> Suc 0 \<Longrightarrow> n_two \<le> Suc 0 \<Longrightarrow> \<not> on_same_heir_chain s (h'', v'') (h_two, v_two) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s (fst (h, v)) (snd (h, v)) \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> \<not> one_third (FwdValidators s (fst (h, v))) (slashed s) \<Longrightarrow> False" apply(case_tac "v_two < v'") apply(case_tac "\<not> on_same_heir_chain s (h', v') (h_two, v_two)") apply simp apply(subgoal_tac "heir s (h', v') (h_two, v_two)") using 
heir_increases_view apply force using heir_switching_step on_same_heir_chain_def apply blast apply(case_tac "v' = v_two") apply(subgoal_tac "heir s (h', v') (h'', v'')") apply simp apply(subgoal_tac "\<exists> v'_src. prepared s (FwdValidators s h) h' v_two v'_src") apply(subgoal_tac "\<exists> v_two_src. prepared s (FwdValidators s h) h_two v_two v_two_src") apply(subgoal_tac "h' \<noteq> h_two") apply (meson slashed_four_means_slashed_on_a_group slashed_four_on_a_group) using on_same_heir_chain_def apply blast using committed_so_prepared apply blast using heir_same_height on_same_heir_chain_def apply blast using some_h apply blast apply(subgoal_tac "v' < v_two") apply(subgoal_tac "prepared s (FwdValidators s h) h'' v'' v'") apply(subgoal_tac "committed s (FwdValidators s h) h_two v_two") apply(case_tac "v_two < v''") apply(subgoal_tac "one_third (FwdValidators s h) (\<lambda>n. (n, Prepare (h'', v'', v')) \<in> Messages s \<and> (n, Commit (h_two, v_two)) \<in> Messages s)") apply(subgoal_tac "one_third (FwdValidators s h) (slashed_three s)") apply (metis fst_conv one_third_mp slashed_def) apply(rule slashed_three_on_group) apply simp apply simp apply simp apply simp apply(simp only: prepared_def committed_def two_thirds_sent_message_def) apply(rule two_thirds_two_thirds_one_third; simp) apply simp apply(subgoal_tac "\<exists> v_two_src. 
prepared s (FwdValidators s h) h_two v'' v_two_src") apply(subgoal_tac "h'' \<noteq> h_two") apply (simp add: slashed_four_means_slashed_on_a_group slashed_four_on_a_group) using heir_self on_same_heir_chain_def apply blast using committed_so_prepared apply blast apply (metis committed_by_both_def committed_by_fwd_def committed_by_rear_def fst_conv one_validator_change_leaves_one_set) apply simp apply (metis One_nat_def Suc_neq_Zero fst_conv le_numeral_extra(1) one_validator_change_leaves_one_set prepared_by_both_def prepared_by_rear_def validators_change_def) apply linarith done lemma accountable_safety_smaller_induction: "heir_after_n_switching n_one s (h, v) (h_one, v_one) \<Longrightarrow> finite (FwdValidators s (fst (h, v))) \<Longrightarrow> v_two \<le> snd (h_one, v_one) \<Longrightarrow> n_one \<le> Suc 0 \<Longrightarrow> n_two \<le> Suc 0 \<Longrightarrow> \<not> on_same_heir_chain s (h_one, v_one) (h_two, v_two) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s (fst (h, v)) (snd (h, v)) (* maybe not necessary *) \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> \<not> one_third (FwdValidators s (fst (h, v))) (slashed s) \<Longrightarrow> False" apply(induction rule: heir_after_n_switching.induct) apply (simp add: forget_number_of_switching on_same_heir_chain_def) using smaller_induction_case_normal apply blast using smaller_induction_switching_case by blast lemma accountable_safety_from_legitimacy_fork_with_high_root_base_one_longer : "n_one \<le> 1 \<and> n_two \<le> 1 \<and> v_one \<ge> v_two \<Longrightarrow> finite (FwdValidators s h) \<Longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_two (h_two, v_two) \<Longrightarrow> \<exists> h' v'. 
heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h'" apply(simp only: legitimacy_fork_with_center_with_high_root_with_n_switching.simps) apply(simp only: legitimacy_fork_with_center_with_n_switching.simps) apply(simp only: legitimacy_fork_with_n_switching.simps) apply clarify apply(rule_tac x = h in exI) apply(rule_tac x = v in exI) apply(rule conjI) apply simp apply(case_tac "one_third_of_fwd_slashed s h") apply simp apply(simp add: one_third_of_fwd_slashed_def) using accountable_safety_smaller_induction by fastforce lemma accountable_safety_from_legitimacy_fork_with_high_root_base_two_longer : "n_one \<le> 1 \<and> n_two \<le> 1 \<and> v_one \<le> v_two \<Longrightarrow> finite (FwdValidators s h) \<Longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_two (h_two, v_two) \<Longrightarrow> \<exists> h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h'" apply(rule_tac n_one = n_two and n_two = n_one and v_one = v_two and v_two = v_one and h_two = h_one and h_one = h_two and h = h and v = v in accountable_safety_from_legitimacy_fork_with_high_root_base_one_longer) apply blast apply simp using on_same_heir_chain_def by auto lemma accountable_safety_from_legitimacy_fork_with_high_root_base : "n_one \<le> 1 \<and> n_two \<le> 1 \<and> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_two (h_two, v_two) \<Longrightarrow> finite (FwdValidators s h) \<Longrightarrow> \<exists> h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h'" (* the forward set of h should have one-third slashed here. *) (* Take the longer chain and do an induction and so, (prepared_view, prepared) and (committed, committed_view) is not in the relation make prepared shorter and shorter...! 
*) apply(subgoal_tac "v_one \<le> v_two \<or> v_two \<le> v_one") apply (meson accountable_safety_from_legitimacy_fork_with_high_root_base_one_longer accountable_safety_from_legitimacy_fork_with_high_root_base_two_longer) by linarith subsection "Mainline Arguments for Accountable Safety" lemma use_highness : "1 \<le> n_one_pre \<Longrightarrow> \<forall>h' v'. v < v' \<longrightarrow> \<not> legitimacy_fork_with_center s (h_orig, v_orig) (h', v') (h_one, v_one) (h_two, v_two) \<Longrightarrow> heir s (h_orig, v_orig) (h, v) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h_one v_one \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> heir_after_n_switching (Suc n_one_pre - 1) s (h, v) (h_onea, v_onea) \<Longrightarrow> inherit_switching_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> heir_after_n_switching 0 s (h_twoa, v_twoa) (h_one, v_one) \<Longrightarrow> \<not> heir s (h_two, v_two) (h_one, v_one) \<Longrightarrow> \<not> heir s (h_one, v_one) (h_two, v_two) \<Longrightarrow> heir s (h_onea, v_onea) (h_two, v_two) \<Longrightarrow> False" apply(drule_tac x = h_onea in spec) apply(drule_tac x = v_onea in spec) apply(subgoal_tac "v < v_onea") defer apply (metis One_nat_def at_least_one_switching_means_higher diff_Suc_1 snd_conv) apply(subgoal_tac "legitimacy_fork_with_center s (h_orig, v_orig) (h_onea, v_onea) (h_one, v_one) (h_two, v_two)") apply blast using shallower_legitimacy_fork by blast lemma confluence_should_not: "1 \<le> n_one_pre \<Longrightarrow> \<forall>h' v'. 
v < v' \<longrightarrow> \<not> legitimacy_fork_with_center s (h_orig, v_orig) (h', v') (h_one, v_one) (h_two, v_two) \<Longrightarrow> heir s (h_orig, v_orig) (h, v) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h_one v_one \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> heir_after_n_switching (Suc n_one_pre - 1) s (h, v) (h_onea, v_onea) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> inherit_switching_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> heir_after_n_switching 0 s (h_twoa, v_twoa) (h_one, v_one) \<Longrightarrow> \<not> heir s (h_two, v_two) (h_one, v_one) \<Longrightarrow> \<not> heir s (h_one, v_one) (h_two, v_two) \<Longrightarrow> heir s (h_two, v_two) (h_onea, v_onea) \<Longrightarrow> False" proof - assume "inherit_switching_validators s (h_onea, v_onea) (h_twoa, v_twoa)" moreover assume "ancestor_descendant_with_chosen_validators s (h_onea, v_onea) (h_twoa, v_twoa)" ultimately have "heir s (h_onea, v_onea) (h_twoa, v_twoa)" by (meson heir_self heir_switching_step inherit_switching_validators.simps sourcing_switching_validators.simps) moreover assume "heir s (h_two, v_two) (h_onea, v_onea)" ultimately have "heir s (h_two, v_two) (h_twoa, v_twoa)" using heir_trans by blast moreover assume "heir_after_n_switching 0 s (h_twoa, v_twoa) (h_one, v_one)" then have "heir s (h_twoa, v_twoa) (h_one, v_one)" using forget_number_of_switching by blast ultimately have "heir s (h_two, v_two) (h_one, v_one)" using heir_trans by blast moreover assume " \<not> heir s (h_two, v_two) (h_one, v_one)" ultimately show "False" by blast qed lemma prev_switch_not_on_same_heir_chain : "1 \<le> n_one_pre \<Longrightarrow> \<forall>h' v'. 
v < v' \<longrightarrow> \<not> legitimacy_fork_with_center s (h_orig, v_orig) (h', v') (h_one, v_one) (h_two, v_two) \<Longrightarrow> \<not> on_same_heir_chain s (h_one, v_one) (h_two, v_two) \<Longrightarrow> heir s (h_orig, v_orig) (h, v) \<Longrightarrow> heir_after_n_switching n_two s (h, v) (h_two, v_two) \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h_one v_one \<Longrightarrow> committed_by_both s h_two v_two \<Longrightarrow> heir_after_n_switching (Suc n_one_pre - 1) s (h, v) (h_onea, v_onea) \<Longrightarrow> inherit_switching_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h_onea, v_onea) (h_twoa, v_twoa) \<Longrightarrow> heir_after_n_switching 0 s (h_twoa, v_twoa) (h_one, v_one) \<Longrightarrow> \<not> on_same_heir_chain s (h_onea, v_onea) (h_two, v_two)" apply(auto simp only: on_same_heir_chain_def) using use_highness apply blast using confluence_should_not by blast lemma reduce_legitimacy_fork : "legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) (Suc n_one_pre) (h_one, v_one) n_two (h_two, v_two) \<Longrightarrow> 1 \<le> n_one_pre \<Longrightarrow> \<exists>h_one' v_one'. 
legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one_pre (h_one', v_one') n_two (h_two, v_two)" apply (simp only: legitimacy_fork_with_center_with_high_root_with_n_switching.simps) apply (simp only: legitimacy_fork_with_center_with_n_switching.simps) apply (simp only: legitimacy_fork_with_n_switching.simps) apply clarify apply(drule heir_after_one_or_more_switching_dest) apply simp apply clarify apply(rule_tac x = "h_onea" in exI) apply(rule_tac x = "v_onea" in exI) apply(rule conjI) apply(rule conjI) apply(rule conjI) using prev_switch_not_on_same_heir_chain apply blast apply auto[1] apply auto[1] using high_point_still_high by blast lemma switching_induction_case_one : "\<forall>n_one n_twoa h_one v_one h_two v_two. n_one + n_twoa \<le> n_one_pre + n_two \<longrightarrow> finite (FwdValidators s h) \<longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_twoa (h_two, v_two) \<longrightarrow> (\<exists>h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h') \<Longrightarrow> finite (FwdValidators s h) \<Longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) (Suc n_one_pre) (h_one, v_one) n_two (h_two, v_two) \<Longrightarrow> 1 \<le> n_one_pre \<Longrightarrow> k = n_one_pre + n_two \<Longrightarrow> \<exists>h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h'" apply (subgoal_tac "\<exists> h_one' v_one'. legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one_pre (h_one', v_one') n_two (h_two, v_two)") apply blast using reduce_legitimacy_fork by blast lemma some_symmetry : "\<forall>n_onea n_two h_one v_one h_two v_two. 
n_onea + n_two \<le> n_one + n_two_pre \<longrightarrow> finite (FwdValidators s h) \<longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_onea (h_one, v_one) n_two (h_two, v_two) \<longrightarrow> (\<exists>h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h') \<Longrightarrow> \<forall>n_onea n_twoa h_one v_one h_two v_two. n_onea + n_twoa \<le> n_two_pre + n_one \<longrightarrow> finite (FwdValidators s h) \<longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_onea (h_one, v_one) n_twoa (h_two, v_two) \<longrightarrow> (\<exists>h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h')" apply clarify apply (drule_tac x = n_onea in spec) apply (drule_tac x = n_twoa in spec) apply(drule_tac x = h_one in spec) apply(drule_tac x = v_one in spec) apply(drule_tac x = h_two in spec) apply(drule_tac x = v_two in spec) apply(erule impE) apply auto[1] apply(erule impE) apply simp apply(erule impE) apply blast apply blast done lemma switching_induction_case_two : " \<forall>n_onea n_two h_one v_one h_two v_two. n_onea + n_two \<le> n_one + n_two_pre \<longrightarrow> finite (FwdValidators s h) \<longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_onea (h_one, v_one) n_two (h_two, v_two) \<longrightarrow> (\<exists>h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h') \<Longrightarrow> finite (FwdValidators s h) \<Longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) (Suc n_two_pre) (h_two, v_two) \<Longrightarrow> 1 \<le> n_two_pre \<Longrightarrow> k = n_one + n_two_pre \<Longrightarrow> \<exists>h' v'. 
heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h'" apply(rule_tac k = k and n_two = n_one and n_one_pre = n_two_pre and h = h and v = v and h_one = h_two and v_one = v_two and h_two = h_one and v_two = v_one in switching_induction_case_one) defer apply simp using legitimacy_fork_with_center_with_high_root_with_n_switching_sym apply blast using legitimacy_fork_with_center_with_high_root_with_n_switching_sym apply blast using add.commute apply blast apply simp by (simp add: add.commute) lemma switching_induction : "\<forall>n_one n_two h_one v_one h_two v_two. n_one + n_two \<le> k \<longrightarrow> finite (FwdValidators s h) \<longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_two (h_two, v_two) \<longrightarrow> (\<exists>h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h') \<Longrightarrow> \<forall>n_one n_two h_one v_one h_two v_two. n_one + n_two \<le> Suc k \<longrightarrow> finite (FwdValidators s h) \<longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_two (h_two, v_two) \<longrightarrow> (\<exists>h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h')" apply clarify apply (drule sum_suc) apply (erule disjE) apply blast apply (erule disjE) using accountable_safety_from_legitimacy_fork_with_high_root_base apply blast apply (erule disjE) apply clarify using switching_induction_case_one apply blast apply clarify using switching_induction_case_two apply blast done lemma accountable_safety_from_legitimacy_fork_with_high_root_with_n_ind : "\<forall> n_one n_two h_one v_one h_two v_two. n_one + n_two \<le> k \<longrightarrow> finite (FwdValidators s h) \<longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_two (h_two, v_two) \<longrightarrow> (\<exists> h' v'. 
heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h')" apply(induction k) using accountable_safety_from_legitimacy_fork_with_high_root_base apply blast using switching_induction by blast lemma accountable_safety_from_legitimacy_fork_with_high_root_with_n : "finite (FwdValidators s h) \<Longrightarrow> legitimacy_fork_with_center_with_high_root_with_n_switching s (h_orig, v_orig) (h, v) n_one (h_one, v_one) n_two (h_two, v_two) \<Longrightarrow> \<exists> h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h'" using accountable_safety_from_legitimacy_fork_with_high_root_with_n_ind by blast lemma accountable_safety_from_legitimacy_fork_with_high_root : "finite (FwdValidators s h) \<Longrightarrow> legitimacy_fork_with_center_with_high_root s (h_orig, v_orig) (h, v) (h_one, v_one) (h_two, v_two) \<Longrightarrow> \<exists> h' v'. heir s (h_orig, v_orig) (h', v') \<and> one_third_of_fwd_slashed s h'" by (meson accountable_safety_from_legitimacy_fork_with_high_root_with_n legitimacy_fork_with_center_with_high_root_has_n_switching) lemma accountable_safety_center : "validator_sets_finite s \<Longrightarrow> legitimacy_fork_with_center s (h, v) (h, v) (h1, v1) (h2, v2) \<Longrightarrow> \<exists> h' v'. 
heir s (h, v) (h', v') \<and> one_third_of_fwd_slashed s h'" apply(drule legitimacy_fork_with_center_choose_high_root) apply(clarify) using accountable_safety_from_legitimacy_fork_with_high_root validator_sets_finite_def by blast lemma heir_initial : "heir s (h, v) (h1, v1) \<Longrightarrow> heir s (h, v) (h, v)" apply(induction rule: heir.induct) using heir_self apply auto[1] apply simp apply simp done lemma legitimacy_fork_with_center_and_root : " legitimacy_fork_with_commits s (h, v) (h1, v1) (h2, v2) \<Longrightarrow> legitimacy_fork_with_center s (h, v) (h, v) (h1, v1) (h2, v2) " apply simp using heir_initial by blast text "If a situation has a finite number of validators on each hash, a legitimacy fork means some validator set suffers 1/3 slashing. A legitimacy fork is defined using the @{term heir} relation. The slashed validator set is also a heir of the original validator set. " text "This variant of accountable safety only requires slashing conditions 3 and 4." lemma accountable_safety_for_legitimacy_fork : "validator_sets_finite s \<Longrightarrow> legitimacy_fork_with_commits s (h, v) (h1, v1) (h2, v2) \<Longrightarrow> \<exists> h' v'. heir s (h, v) (h', v') \<and> one_third_of_fwd_slashed s h'" using accountable_safety_center legitimacy_fork_with_center_and_root by blast text "The above theorem only works for forks whose branches are made of the chain of sourcing. We are now going to turn any forks into such legitimacy forks. A fork is simply three hashes that form a forking shape. " definition heir_or_self :: "situation \<Rightarrow> (hash \<times> view) \<Rightarrow> (hash \<times> view) \<Rightarrow> bool" where "heir_or_self s p0 p1 = (p0 = p1 \<or> heir s p0 p1)" subsection "Turning Any Fork into Legitimacy-Fork" lemma inherit_normal_means_ancestor_descendant : "inherit_normal s (h', v') (h'', v'') \<Longrightarrow> ancestor_descendant s h' h''" using ancestor_descendant_def by auto lemma nth_ancestor_trans: "\<forall> n h' h h''. 
nth_ancestor s n h' = Some h \<longrightarrow> nth_ancestor s na h'' = Some h' \<longrightarrow> nth_ancestor s (na + n) h'' = Some h" apply(induction na) apply simp apply auto apply(case_tac "PrevHash s h''"; simp) done lemma ancestor_descendant_trans: "ancestor_descendant s h h' \<Longrightarrow> ancestor_descendant s h' h'' \<Longrightarrow> ancestor_descendant s h h''" apply(auto simp add: ancestor_descendant_def) apply(rule_tac x = "na + n" in exI) by (simp add: nth_ancestor_trans) lemma heir_is_descendant : "heir s (h1, v1) (h2, v2) \<Longrightarrow> ancestor_descendant s (fst (h1, v1)) (fst (h2, v2)) " apply(induction rule: heir.induct) apply simp using ancestor_descendant_def nth_ancestor.simps(1) apply blast apply (metis ancestor_descendant_trans fst_conv inherit_normal_means_ancestor_descendant) using ancestor_descendant_def nth_ancestor_trans by fastforce lemma heir_chain_means_same_chain : "on_same_heir_chain s (h1, v1) (h2, v2) \<Longrightarrow> on_same_chain s h1 h2" apply(simp add: on_same_heir_chain_def on_same_chain_def) using heir_is_descendant by auto lemma prepared_self_is_heir : "prepared_by_both s h1 v v1_src \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v) \<Longrightarrow> heir s (h, v) (h1, v)" proof - assume "ancestor_descendant_with_chosen_validators s (h,v) (h1, v)" then have "h = h1" using ancestor_with_same_view by auto assume "prepared_by_both s h1 v v1_src" then have "heir s (h1, v) (h1, v)" using heir_self by auto show "heir s (h, v) (h1, v)" using \<open>h = h1\<close> \<open>heir s (h1, v) (h1, v)\<close> by blast qed lemma younger_ancestor : "nat (v1 - v) \<le> 0 \<longrightarrow> validator_sets_finite s \<longrightarrow> prepared_by_both s h1 v1 v1_src \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> heir s (h, v) (h1, v1)" using ancestor_with_same_view prepared_self_is_heir by fastforce lemma one_third_of_non_empty : "X \<noteq> {} \<Longrightarrow> 
finite X \<Longrightarrow> one_third X f \<Longrightarrow> \<exists> x. x \<in> X \<and> f x" apply(simp add: one_third_def) apply(case_tac " card {n \<in> X. f n} > 0") apply (metis (no_types, lifting) Collect_empty_eq card.infinite card_0_eq gr_implies_not_zero) by force definition more_than_two_thirds :: "validator set \<Rightarrow> (validator \<Rightarrow> bool) \<Rightarrow> bool" where "more_than_two_thirds X f = (2 * card X < 3 * card ({n. n \<in> X \<and> f n}))" lemma not_one_third : "finite s \<Longrightarrow> s \<noteq> {} \<Longrightarrow> (\<not> one_third s f) = (more_than_two_thirds s (\<lambda> n. \<not> f n))" apply(auto simp add: one_third_def more_than_two_thirds_def) done lemma two_thirds_not_one_third: "validator_sets_finite s \<Longrightarrow> \<not> one_third (RearValidators s h1) (slashed s) \<Longrightarrow> two_thirds (RearValidators s h1) (\<lambda>n. (n, Prepare (h1, v1, v1_src)) \<in> Messages s) \<Longrightarrow> \<exists>n. n \<in> RearValidators s h1 \<and> \<not> slashed s n \<and> (n, Prepare (h1, v1, v1_src)) \<in> Messages s" apply(subgoal_tac "two_thirds (RearValidators s h1) (\<lambda> n. \<not> slashed s n)") apply(subgoal_tac "one_third (RearValidators s h1) (\<lambda> n. \<not> slashed s n \<and> (n, Prepare (h1, v1, v1_src)) \<in> Messages s)") apply(subgoal_tac "RearValidators s h1 \<noteq> {} \<and> finite (RearValidators s h1)") using one_third_of_non_empty apply blast using validator_sets_finite_def apply blast apply (simp add: two_thirds_two_thirds_one_third validator_sets_finite_def) apply(subgoal_tac "more_than_two_thirds (RearValidators s h1) (\<lambda>n. 
\<not> slashed s n)") apply (simp add: more_than_two_thirds_def two_thirds_def) using not_one_third validator_sets_finite_def by auto lemma cutting_prev : "ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<Longrightarrow> v < v1 \<Longrightarrow> PrevHash s h1 = Some a \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (a, v1 - 1)" apply(erule ancestor_descendant_with_chosen_validators.cases) apply simp apply simp done lemma ancestor_descendant_with_chosen_validators_go_back: "\<forall> v1 v1_src h v h_anc h1. k = nat (v1 - v1_src) \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> nth_ancestor s (nat (v1 - v1_src)) h1 = Some h_anc \<longrightarrow> v \<le> v1_src \<longrightarrow> v1_src < v1 \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h_anc, v1_src)" apply(induction k) apply clarify apply simp apply clarsimp apply(subgoal_tac "(case PrevHash s h1 of None \<Rightarrow> None | Some h' \<Rightarrow> nth_ancestor s k h') = Some h_anc") apply(case_tac "PrevHash s h1") apply simp apply simp apply(drule_tac x = "v1 - 1" in spec) apply(drule_tac x = "v1_src" in spec) apply(subgoal_tac "k = nat (v1 - 1 - v1_src)") apply simp apply(drule_tac x = h in spec) apply(drule_tac x = v in spec) apply(drule_tac x = h_anc in spec) apply(drule_tac x = a in spec) apply simp apply(case_tac "v1_src < v1 - 1") apply(subgoal_tac "ancestor_descendant_with_chosen_validators s (h, v) (a, v1 - 1)") apply blast apply (simp add: cutting_prev) apply(subgoal_tac "v1_src = v1 - 1") apply simp apply(erule ancestor_descendant_with_chosen_validators.cases) apply simp apply simp apply simp apply linarith by (metis nth_ancestor.simps(2)) lemma tmp: "committed_by_both s h1a v1a \<Longrightarrow> committed_by_both s h1a (v1a + 1 - 1)" apply simp done lemma ancestor_of_parent : "nth_ancestor s (nat (v1 - v1_src)) h1 = Some h_anc \<Longrightarrow> PrevHash s h1 = Some h_prev \<Longrightarrow> v1 > 
v1_src \<Longrightarrow> nth_ancestor s (nat (v1 - 1 - v1_src)) h_prev = Some h_anc" apply(case_tac "nat (v1 - v1_src)") apply simp apply simp proof - fix nata :: nat assume "v1_src < v1" assume a1: "nat (v1 - v1_src) = Suc nata" assume a2: "nth_ancestor s nata h_prev = Some h_anc" have "nat (- 1 + v1 + - 1 * v1_src) = nata" using a1 by linarith then show ?thesis using a2 by simp qed lemma ancestor_descendant_shorter : "\<forall> v1 h1 v1_src h_anc. nat (v1 - v) = k \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> nth_ancestor s (nat (v1 - v1_src)) h1 = Some h_anc \<longrightarrow> v1_src < v1 \<longrightarrow> v \<le> v1_src \<longrightarrow> ancestor_descendant_with_chosen_validators s (h_anc, v1_src) (h1, v1)" apply(induction k) apply simp apply clarify apply(case_tac "v1_src = v1 - 1") apply simp apply(case_tac "PrevHash s h1") apply simp apply simp apply(erule ancestor_descendant_with_chosen_validators.cases) apply simp apply(subgoal_tac "prev_next_with_chosen_validators s (h_anc, v1 - 1) (h1, v1)") using inheritance_self inheritances_step apply blast apply clarify apply(simp only: prev_next_with_chosen_validators.simps) apply(rule conjI) apply blast apply(rule conjI) apply linarith apply(erule conjE) apply(erule conjE) apply(erule disjE) apply(rule disjI1) apply blast apply(rule disjI2) apply(subgoal_tac "h_anc = h1a") apply(rule conjI) apply blast apply(subgoal_tac "committed_by_both s h1a (v1a + 1 - 1)") apply blast apply(rule tmp) apply blast apply blast apply(drule_tac x = "v1 - 1" in spec) apply(subgoal_tac "\<exists> h_prev. 
PrevHash s h1 = Some h_prev") apply clarify apply(drule_tac x = h_prev in spec) apply(drule_tac x = v1_src in spec) apply(drule_tac x = h_anc in spec) apply(subgoal_tac "nat (v1 - 1 - v) = k") apply(subgoal_tac "ancestor_descendant_with_chosen_validators s (h, v) (h_prev, v1 - 1)") apply(subgoal_tac " nth_ancestor s (nat (v1 - 1 - v1_src)) h_prev = Some h_anc") apply(subgoal_tac "v1_src < v1 - 1") apply(subgoal_tac " ancestor_descendant_with_chosen_validators s (h_anc, v1_src) (h_prev, v1 - 1)") apply(rule_tac inheritances_step) apply blast apply(simp) apply(erule ancestor_descendant_with_chosen_validators.cases) apply simp apply simp apply blast apply linarith using ancestor_of_parent apply blast apply (simp add: cutting_prev) apply linarith apply(case_tac "nat (v1 - v1_src)") apply simp apply simp apply(case_tac "PrevHash s h1"; auto) done lemma no_commits_in_between_induction : "\<forall> v1 h1. nat (v1 - v) \<le> Suc k \<longrightarrow> validator_sets_finite s \<longrightarrow> committed_by_both s h v \<longrightarrow> v \<noteq> v1 \<longrightarrow> v1_src < v \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> (\<forall>v>v1_src. nat (v1 - v) \<le> k \<longrightarrow> (\<forall>h. 
committed_by_both s h v \<longrightarrow> v = v1 \<or> \<not> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1))) \<longrightarrow> v < v1 \<and> (validators_match s h h1 \<or> validators_change s h h1)" apply(induction k) apply clarify apply (rule conjI) apply auto[1] apply(erule ancestor_descendant_with_chosen_validators.cases) apply simp using ancestor_with_same_view apply fastforce apply clarsimp apply(rule conjI) apply auto[1] apply(erule ancestor_descendant_with_chosen_validators.cases) apply simp apply clarsimp apply(case_tac " committed_by_both s h1a v1a ") apply simp apply(rotate_tac 5) apply(drule_tac x = v1a in spec) apply(subgoal_tac "v1_src < v1a") apply(subgoal_tac " nat (v1a + 1 - v1a) \<le> Suc k") apply simp apply(drule_tac x = h1a in spec) apply simp apply(subgoal_tac "ancestor_descendant_with_chosen_validators s (h1a, v1a) (h2, v1a + 1)") apply blast using inheritance_self inheritances_step prev_next_with_chosen_validators.simps apply blast apply linarith apply(subgoal_tac "v \<le> v1a") apply linarith using ancestor_with_same_view apply auto[1] apply(subgoal_tac "validators_match s h h1a \<or> validators_change s h h1a") using validators_change_def validators_match_def apply auto[1] apply(drule_tac x = v1a in spec) apply(subgoal_tac "nat (v1a - v) \<le> Suc k") apply(subgoal_tac "v \<noteq> v1a") apply simp apply(drule_tac x = h1a in spec) apply(subgoal_tac "\<forall>v>v1_src. nat (v1a - v) \<le> k \<longrightarrow> (\<forall>h. 
committed_by_both s h v \<longrightarrow> v = v1a \<or> \<not> ancestor_descendant_with_chosen_validators s (h, v) (h1a, v1a))") apply simp apply(rule allI) apply(case_tac " v1_src < va") apply simp apply(case_tac "nat (v1a - va) \<le> k") apply simp apply(rule allI) apply(rule impI) apply(drule_tac x = va in spec) apply simp apply(case_tac "nat (v1a + 1 - va) \<le> Suc k") apply simp apply(drule_tac x = ha in spec) apply simp apply(erule disjE) apply simp apply(subgoal_tac "\<not> v1a + 1 \<le> v1a") apply (metis ancestor_with_same_view prod.sel(2)) apply simp using inheritances_step prev_next_with_chosen_validators.simps apply blast apply linarith apply blast apply blast using ancestor_with_same_view apply fastforce by linarith lemma no_commits_in_between : "nat (v1 - v) \<le> Suc k \<longrightarrow> validator_sets_finite s \<longrightarrow> committed_by_both s h v \<longrightarrow> prepared_by_both s h1 v1 v1_src \<longrightarrow> - 1 \<le> v1_src \<longrightarrow> v \<noteq> v1 \<longrightarrow> v1_src < v1 \<longrightarrow> v1_src < v \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> (\<nexists>v h. nat (v1 - v) \<le> k \<and> validator_sets_finite s \<and> committed_by_both s h v \<and> prepared_by_both s h1 v1 v1_src \<and> - 1 \<le> v1_src \<and> v \<noteq> v1 \<and> v1_src < v1 \<and> v1_src < v \<and> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1)) \<longrightarrow> committed_by_both s h v \<and> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<and> v1_src < v \<and> v < v1 \<and> (validators_match s h h1 \<or> validators_change s h h1)" apply clarsimp using no_commits_in_between_induction by blast lemma pick_max_induction' : "\<forall> v h. 
nat (v1 - v) \<le> k \<longrightarrow> validator_sets_finite s \<longrightarrow> committed_by_both s h v \<longrightarrow> prepared_by_both s h1 v1 v1_src \<longrightarrow> - 1 \<le> v1_src \<longrightarrow> v \<noteq> v1 \<longrightarrow> v1_src < v1 \<longrightarrow> v1_src < v \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> (\<exists>h_max v_max. committed_by_both s h_max v_max \<and> ancestor_descendant_with_chosen_validators s (h_max, v_max) (h1, v1) \<and> v1_src < v_max \<and> v_max < v1 \<and> (validators_match s h_max h1 \<or> validators_change s h_max h1))" apply(induction k) using ancestor_with_same_view apply fastforce apply clarify apply(case_tac "\<exists> v h. nat (v1 - v) \<le> k \<and> validator_sets_finite s \<and> committed_by_both s h v \<and> prepared_by_both s h1 v1 v1_src \<and> - 1 \<le> v1_src \<and> v \<noteq> v1 \<and> v1_src < v1 \<and> v1_src < v \<and> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1)") apply blast apply(rule_tac x = h in exI) apply(rule_tac x = v in exI) using no_commits_in_between by blast lemma pick_max_induction : "nat (v1 - v) = k \<longrightarrow> validator_sets_finite s \<longrightarrow> committed_by_both s h v \<longrightarrow> prepared_by_both s h1 v1 v1_src \<longrightarrow> - 1 \<le> v1_src \<longrightarrow> v \<noteq> v1 \<longrightarrow> v1_src < v1 \<longrightarrow> v1_src < v \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> (\<exists>h_max v_max. 
committed_by_both s h_max v_max \<and> ancestor_descendant_with_chosen_validators s (h_max, v_max) (h1, v1) \<and> v1_src < v_max \<and> v_max < v1 \<and> (validators_match s h_max h1 \<or> validators_change s h_max h1))" using pick_max_induction' apply blast done lemma pick_max : "validator_sets_finite s \<longrightarrow> committed_by_both s h v \<longrightarrow> prepared_by_both s h1 v1 v1_src \<longrightarrow> - 1 \<le> v1_src \<longrightarrow> v \<noteq> v1 \<longrightarrow> v1_src < v1 \<longrightarrow> v1_src < v \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> (\<exists>h_max v_max. committed_by_both s h_max v_max \<and> ancestor_descendant_with_chosen_validators s (h_max, v_max) (h1, v1) \<and> v1_src < v_max \<and> v_max < v1 \<and> (validators_match s h_max h1 \<or> validators_change s h_max h1))" by (simp add: pick_max_induction) lemma slashing_three_aux' : "finite (RearValidators s h1) \<Longrightarrow> one_third (RearValidators s h1) (\<lambda>n. (n, Commit (h_max, v_max)) \<in> Messages s \<and> (n, Prepare (h1, v1, v1_src)) \<in> Messages s \<and> v1_src < v_max \<and> v_max < v1) \<Longrightarrow> one_third (RearValidators s h1) (\<lambda>n. \<exists>x y v w u. 
(n, Commit (x, v)) \<in> Messages s \<and> (n, Prepare (y, w, u)) \<in> Messages s \<and> u < v \<and> v < w)" apply(rule one_third_mp) apply simp defer apply blast apply blast done lemma using_max : "validator_sets_finite s \<Longrightarrow> committed_by_both s h v \<Longrightarrow> prepared_by_both s h1 v1 v1_src \<Longrightarrow> - 1 \<le> v1_src \<Longrightarrow> v1_src < v1 \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h_max, v_max) (h1, v1) \<Longrightarrow> committed_by_both s h_max v_max \<Longrightarrow> v1_src < v_max \<Longrightarrow> v_max < v1 \<Longrightarrow> validators_match s h_max h1 \<or> validators_change s h_max h1 \<Longrightarrow> one_third_of_fwd_or_rear_slashed s h1" apply(subgoal_tac "one_third (RearValidators s h1) (slashed_three s)") apply (meson one_third_mp one_third_of_fwd_or_rear_slashed_def one_third_of_rear_slashed_def slashed_def validator_sets_finite_def) apply(subgoal_tac "one_third (RearValidators s h1) (\<lambda> n. (\<exists> x y v w u. (n, Commit (x, v)) \<in> Messages s \<and> (n, Prepare (y, w, u)) \<in> Messages s \<and> u < v \<and> v < w))") using slashed_three_def apply presburger apply(subgoal_tac " one_third (RearValidators s h1) (\<lambda>n. (n, Commit (h_max, v_max)) \<in> Messages s \<and> (n, Prepare (h1, v1, v1_src)) \<in> Messages s \<and> v1_src < v_max \<and> v_max < v1)") apply(rule slashing_three_aux') using validator_sets_finite_def apply blast apply blast apply(rule two_thirds_two_thirds_one_third) using validator_sets_finite_def apply blast apply(erule disjE) apply(subgoal_tac "two_thirds (RearValidators s h_max) (\<lambda>x. (x, Commit (h_max, v_max)) \<in> Messages s)") apply (simp add: validators_match_def) apply(simp add: committed_by_both_def committed_by_rear_def committed_def two_thirds_sent_message_def) apply(subgoal_tac "two_thirds (FwdValidators s h_max) (\<lambda>x. 
(x, Commit (h_max, v_max)) \<in> Messages s)") apply (simp add: validators_change_def) apply(simp add: committed_by_both_def committed_by_fwd_def committed_def two_thirds_sent_message_def) apply(simp add: prepared_by_both_def prepared_by_rear_def prepared_def two_thirds_sent_message_def) done lemma commit_skipping : "validator_sets_finite s \<Longrightarrow> committed_by_both s h v \<Longrightarrow> prepared_by_both s h1 v1 v1_src \<Longrightarrow> - 1 \<le> v1_src \<Longrightarrow> v \<noteq> v1 \<Longrightarrow> v1_src < v1 \<Longrightarrow> v1_src < v \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<Longrightarrow> one_third_of_fwd_or_rear_slashed s h1 " apply(subgoal_tac "\<exists> h_max v_max. committed_by_both s h_max v_max \<and> ancestor_descendant_with_chosen_validators s (h_max, v_max) (h1, v1) \<and> v1_src < v_max \<and> v_max < v1 \<and> (validators_match s h_max h1 \<or> validators_change s h_max h1) ") apply clarify using using_max apply blast by (simp add: pick_max) lemma slashed_two_essense : "validator_sets_finite s \<Longrightarrow> prepared_by_both s h1 v1 v1_src \<Longrightarrow> v1_src < v1 \<Longrightarrow> 0 \<le> v \<Longrightarrow> - 1 < v1_src \<Longrightarrow> \<not> one_third (RearValidators s h1) (slashed s) \<Longrightarrow> prepared_by_rear s h1 v1 v1_src \<Longrightarrow> \<exists>h_anc. sourcing s h_anc (h1, v1, v1_src)" apply(subgoal_tac "\<not> one_third (RearValidators s h1) (slashed_two s)") apply(subgoal_tac "\<exists> n. n \<in> RearValidators s h1 \<and> \<not> slashed_two s n \<and> (n, Prepare (h1, v1, v1_src)) \<in> Messages s") using slashed_two_def apply blast apply(subgoal_tac "0 < card {n. n \<in> RearValidators s h1 \<and> \<not> slashed_two s n \<and> (n, Prepare (h1, v1, v1_src)) \<in> Messages s}") apply (metis (no_types, lifting) Collect_empty_eq card_0_eq card_infinite less_not_refl2) apply(subgoal_tac "one_third (RearValidators s h1) (\<lambda> n. 
(\<not> slashed_two s n) \<and> (n, Prepare (h1, v1, v1_src)) \<in> Messages s)") apply(subgoal_tac "0 < card (RearValidators s h1)") apply(simp add: one_third_def) using card_gt_0_iff validator_sets_finite_def apply auto[1] apply(subgoal_tac "two_thirds (RearValidators s h1) (\<lambda>n. (n, Prepare (h1, v1, v1_src)) \<in> Messages s)") apply(subgoal_tac "two_thirds (RearValidators s h1) (\<lambda>n. \<not> slashed_two s n)") apply(rule two_thirds_two_thirds_one_third) using validator_sets_finite_def apply blast apply blast apply blast using more_than_two_thirds_def not_one_third two_thirds_def validator_sets_finite_def apply auto[1] using prepared_by_rear_def prepared_def two_thirds_sent_message_def apply auto[1] by (meson one_third_mp slashed_def validator_sets_finite_def) lemma use_slashed_two : " validator_sets_finite s \<Longrightarrow> prepared_by_both s h1 v1 v1_src \<Longrightarrow> committed_by_both s h v \<Longrightarrow> v \<noteq> v1 \<Longrightarrow> v1_src < v1 \<Longrightarrow> 0 \<le> v \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<Longrightarrow> \<forall>h'. (\<forall>v'. \<not> ancestor_descendant_with_chosen_validators s (h, v) (h', v')) \<or> \<not> one_third_of_fwd_or_rear_slashed s h' \<Longrightarrow> - 1 < v1_src \<Longrightarrow> \<not> one_third (RearValidators s h1) (slashed s) \<Longrightarrow> prepared_by_rear s h1 v1 v1_src \<Longrightarrow> \<exists>h_src srcsrc. prepared_by_both s h_src v1_src srcsrc \<and> - 1 \<le> srcsrc \<and> srcsrc < v1_src \<and> ancestor_descendant_with_chosen_validators s (h, v) (h_src, v1_src) \<and> heir s (h_src, v1_src) (h1, v1)" apply(subgoal_tac "\<exists> h_anc. 
sourcing s h_anc (h1, v1, v1_src)") apply clarify apply(simp only: sourcing_def sourcing_normal.simps sourcing_switching_validators.simps) apply(erule disjE) apply clarify apply(rule_tac x = h_anc in exI) apply(rule_tac x = v_ss in exI) apply(rule conjI) apply simp apply(rule conjI) apply simp apply (rule conjI) apply simp apply(case_tac "v \<le> v1_src") apply(rule conjI) using ancestor_descendant_with_chosen_validators_go_back apply blast apply(rule_tac h' = h_anc and v' = v1_src in heir_normal_step) using heir_self apply blast apply auto[1] using ancestor_descendant_shorter apply blast apply(subgoal_tac "one_third_of_fwd_or_rear_slashed s h1") apply blast apply (simp add: commit_skipping) apply clarify apply(rule_tac x = h_anc in exI) apply(rule_tac x = v_ss in exI) apply(rule conjI) apply simp apply(rule conjI) apply simp apply (rule conjI) apply simp apply(case_tac "v \<le> v1_src") apply(rule conjI) using ancestor_descendant_with_chosen_validators_go_back apply blast apply(rule_tac h' = h_anc and v' = v1_src in heir_switching_step) using heir_self apply blast apply auto[1] using ancestor_descendant_shorter apply blast apply(subgoal_tac "one_third_of_fwd_or_rear_slashed s h1") apply blast apply (simp add: commit_skipping) using slashed_two_essense by blast lemma induction_step_following_back_history: "\<forall>v1 h1 v1_src. nat (v1 - v) \<le> k \<longrightarrow> validator_sets_finite s \<longrightarrow> committed_by_both s h v \<longrightarrow> prepared_by_both s h1 v1 v1_src \<longrightarrow> - 1 \<le> v1_src \<longrightarrow> v1_src < v1 \<longrightarrow> 0 \<le> v \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> heir s (h, v) (h1, v1) \<or> (\<exists>h' v'. 
ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_or_rear_slashed s h') \<Longrightarrow> nat (v1 - v) \<le> Suc k \<Longrightarrow> validator_sets_finite s \<Longrightarrow> committed_by_both s h v \<Longrightarrow> prepared_by_both s h1 v1 v1_src \<Longrightarrow> - 1 \<le> v1_src \<Longrightarrow> v1_src < v1 \<Longrightarrow> 0 \<le> v \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<Longrightarrow> \<nexists>h' v'. ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_or_rear_slashed s h' \<Longrightarrow> heir s (h, v) (h1, v1)" apply(case_tac "v = v1") defer apply(case_tac "-1 < v1_src") apply(subgoal_tac "\<exists> h_src srcsrc. prepared_by_both s h_src v1_src srcsrc \<and> -1 \<le> srcsrc \<and> srcsrc < v1_src \<and> 0 \<le> v \<and> ancestor_descendant_with_chosen_validators s (h, v) (h_src, v1_src) \<and> heir s (h_src, v1_src) (h1, v1) ") apply clarify apply(drule_tac x = v1_src in spec) apply(drule_tac x = h_src in spec) apply(drule_tac x = srcsrc in spec) apply clarsimp apply(subgoal_tac " nat (v1_src - v) \<le> k ") using heir_trans apply blast apply linarith apply(case_tac "one_third_of_rear_slashed s h1") using one_third_of_fwd_or_rear_slashed_def apply blast apply(simp add: one_third_of_rear_slashed_def) apply(subgoal_tac "prepared_by_rear s h1 v1 v1_src") using use_slashed_two apply blast using prepared_by_both_def apply blast apply(subgoal_tac "v1_src < v") apply(subgoal_tac "one_third_of_fwd_or_rear_slashed s h1") apply blast using commit_skipping apply blast apply linarith using prepared_self_is_heir apply blast done lemma follow_back_history_with_prepares_ind : "\<forall> v1 h1 v1_src. 
nat (v1 - v) \<le> k \<longrightarrow> validator_sets_finite s \<longrightarrow> committed_by_both s h v \<longrightarrow> prepared_by_both s h1 v1 v1_src \<longrightarrow> -1 \<le> v1_src \<longrightarrow> v1_src < v1 \<longrightarrow> v \<ge> 0 \<longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<longrightarrow> heir s (h, v) (h1, v1) \<or> (\<exists> h' v'. ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_or_rear_slashed s h')" apply(induction k) apply (simp add: younger_ancestor) apply clarify using induction_step_following_back_history by blast lemma follow_back_history_with_prepares : "validator_sets_finite s \<Longrightarrow> committed_by_both s h v \<Longrightarrow> prepared_by_both s h1 v1 v1_src \<Longrightarrow> -1 \<le> v1_src \<Longrightarrow> v1_src < v1 \<Longrightarrow> 0 \<le> v \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<Longrightarrow> heir s (h, v) (h1, v1) \<or> (\<exists> h' v'. ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_or_rear_slashed s h')" using follow_back_history_with_prepares_ind apply blast done lemma slashed_one_on_rear': " validator_sets_finite s \<Longrightarrow> committed_by_rear s h1 v1 \<Longrightarrow> (\<exists>v1_src. -1 \<le> v1_src \<and> v1_src < v1 \<and> prepared_by_both s h1 v1 v1_src) \<or> one_third (RearValidators s h1) (slashed_one s)" apply(simp add: committed_by_rear_def committed_def two_thirds_sent_message_def) by (metis (no_types, lifting) one_third_mp slashed_one_def two_thirds_two_thirds_one_third validator_sets_finite_def) lemma slashed_one_on_rear : "validator_sets_finite s \<Longrightarrow> committed_by_rear s h1 v1 \<Longrightarrow> (\<exists>v1_src. 
-1 \<le> v1_src \<and> v1_src < v1 \<and> prepared_by_both s h1 v1 v1_src) \<or> one_third_of_rear_slashed s h1" apply(simp add: one_third_of_rear_slashed_def) by (metis one_third_mp slashed_def slashed_one_on_rear' validator_sets_finite_def) lemma slashed_one_on_descendant_with_chosen_validators' : "validator_sets_finite s \<Longrightarrow> committed_by_both s h1 v1 \<Longrightarrow> (\<exists> v1_src. -1 \<le> v1_src \<and> v1_src < v1 \<and> prepared_by_both s h1 v1 v1_src) \<or> one_third_of_fwd_or_rear_slashed s h1" apply(simp add: committed_by_both_def one_third_of_fwd_or_rear_slashed_def) using slashed_one_on_rear by auto lemma slashed_one_on_descendant_with_chosen_validators : "validator_sets_finite s \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h1 v1 \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<Longrightarrow> (\<exists> v1_src. -1 \<le> v1_src \<and> v1_src < v1 \<and> prepared_by_both s h1 v1 v1_src) \<or> (\<exists> h' v'. ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_or_rear_slashed s h')" using slashed_one_on_descendant_with_chosen_validators' by blast lemma follow_back_history : "validator_sets_finite s \<Longrightarrow> committed_by_both s h v \<Longrightarrow> committed_by_both s h1 v1 \<Longrightarrow> 0 \<le> v \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h1, v1) \<Longrightarrow> heir s (h, v) (h1, v1) \<or> (\<exists> h' v'. ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_or_rear_slashed s h')" using follow_back_history_with_prepares slashed_one_on_descendant_with_chosen_validators' by blast lemma fork_contains_legitimacy_fork : "validator_sets_finite s \<Longrightarrow> 0 \<le> v \<Longrightarrow> fork_with_commits s (h, v) (h1, v1) (h2, v2) \<Longrightarrow> legitimacy_fork_with_commits s (h, v) (h1, v1) (h2, v2) \<or> (\<exists> h' v'. 
ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_or_rear_slashed s h')" apply(simp only: fork_with_commits.simps legitimacy_fork_with_commits.simps legitimacy_fork.simps fork.simps) using follow_back_history heir_chain_means_same_chain by blast lemma heir_means_ad_inheritance : "heir s (h, v) (h', v') \<Longrightarrow> ancestor_descendant_with_chosen_validators s (h, v) (h', v') " apply(induction rule: heir.induct) apply (simp add: inheritance_self) using ancestor_descendant_with_chosen_validators_trans apply blast using ancestor_descendant_with_chosen_validators_trans by blast lemma accountable_safety_for_legitimacy_fork_weak : "validator_sets_finite s \<Longrightarrow> v \<ge> 0 \<Longrightarrow> legitimacy_fork_with_commits s (h, v) (h1, v1) (h2, v2) \<Longrightarrow> \<exists> h' v'. ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_slashed s h'" using accountable_safety_for_legitimacy_fork heir_means_ad_inheritance by blast section "Accountable Safety for Any Fork with Commits (not skippable)" text "Accountable safety states that, if there is a fork with commits, there is some legitimate heir of the validator sets of the root, of which 2/3 are slashed. " lemma accountable_safety : "validator_sets_finite s \<Longrightarrow> v \<ge> 0 \<Longrightarrow> fork_with_commits s (h, v) (h1, v1) (h2, v2) \<Longrightarrow> \<exists> h' v'. ancestor_descendant_with_chosen_validators s (h, v) (h', v') \<and> one_third_of_fwd_or_rear_slashed s h'" using accountable_safety_for_legitimacy_fork_weak fork_contains_legitimacy_fork one_third_of_fwd_or_rear_slashed_def by blast end
/- Prove the following in Lean:

variables A B C D : Prop

example : A ∧ (A → B) → B := sorry
example : A → ¬ (¬ A ∧ B) := sorry
example : ¬ (A ∧ B) → (A → ¬ B) := sorry
example (h₁ : A ∨ B) (h₂ : A → C) (h₃ : B → D) : C ∨ D := sorry
example (h : ¬ A ∧ ¬ B) : ¬ (A ∨ B) := sorry
example : ¬ (A ↔ ¬ A) := sorry
-/

variables A B C D : Prop

-- Modus ponens packaged in a conjunction: extract A and A → B, then apply.
example : A ∧ (A → B) → B :=
λ h: A ∧ (A → B),
have hA: A, from and.elim_left h,
have A → B, from and.elim_right h,
show B, from this hA

-- Given A, the left component ¬A of the conjunction contradicts it.
example : A → ¬ (¬ A ∧ B) :=
assume hA: A,
assume hnAB: ¬ A ∧ B,
show false, from (
  have hnA: ¬ A, from and.elim_left hnAB,
  hnA(hA)
)

-- Currying a negated conjunction: from A and B rebuild A ∧ B and hit ¬(A ∧ B).
example : ¬ (A ∧ B) → (A → ¬ B) :=
assume h: ¬ (A ∧ B),
assume hA: A,
assume hB: B,
show false, from (
  have hAB: A ∧ B, from ⟨ hA, hB ⟩,
  h(hAB)
)

-- Case split on A ∨ B; each branch maps into the corresponding disjunct of C ∨ D.
example (h₁ : A ∨ B) (h₂ : A → C) (h₃ : B → D) : C ∨ D :=
or.elim(h₁)(
  assume hA,
  or.inl(h₂(hA))
)(
  assume hB,
  or.inr(h₃(hB))
)

-- De Morgan direction ¬A ∧ ¬B → ¬(A ∨ B): case split and refute each disjunct.
example (h : ¬ A ∧ ¬ B) : ¬ (A ∨ B) :=
have hnA: ¬ A, from and.elim_left(h),
have hnB: ¬ B, from and.elim_right(h),
assume hAoB: A ∨ B,
show false, from (
  or.elim(hAoB)(
    assume hA,
    hnA(hA)
  )(
    assume hB,
    hnB(hB)
  )
)

-- A ↔ ¬A is absurd: first derive ¬A from the forward direction (any proof of A
-- would refute itself), then the backward direction yields A, contradiction.
-- NOTE(review): there appears to be a missing comma after the parenthesised
-- `have hnA` term and before `have hA` below — verify this file actually
-- compiles under Lean 3; reproduced verbatim here.
example : ¬ (A ↔ ¬ A) :=
assume h: A ↔ ¬ A,
show false, from (
  have hAtnA: A → ¬ A, from iff.mp(h),
  have hnAtA: ¬ A → A, from iff.mpr(h),
  have hnA: ¬ A, from (
    assume hA: A,
    show false, from hAtnA(hA)(hA)
  )
  have hA: A, from hnAtA(hnA),
  hnA(hA)
)
If two paths are homotopic and a third path is homotopic to the second, then — by symmetry and transitivity of the homotopy relation — the third path is also homotopic to the first.
#include <cstdio> #include <glog/logging.h> #include <ros/ros.h> #include <nav_msgs/Odometry.h> #include <sensor_msgs/NavSatFix.h> #include <rosbag/bag.h> #include <rosbag/view.h> #include <boost/foreach.hpp> #include <pcl/point_types.h> #include <pcl/point_cloud.h> #include <pcl/common/transforms.h> #include <pcl/filters/voxel_grid.h> //#define foreach_boost BOOST_FOREACH #include "typedefs.h" #include <opencv2/core/persistence.hpp> // 通过初始GPS坐标与航姿参考,对整幅地图作坐标系变换,使0,0,0对应初始GPS点,旋转地图使其采用NWU坐标系,保存旋转后的地图文件,并创建配置文件供map_publisher读取. // 目前仅支持px4飞控的消息. 这个模块与地图读取定位等隔离,因此可以做自己的高精地图实现而不影响其他组件. bool loadPCDmap(MapCloudT::Ptr& pmap_cloud) { string map_path; bool path_exist = ros::param::get("map_path",map_path); if(!path_exist) { LOG(ERROR)<<"Fatal error in map_config_generator: get map_path failed!"<<endl; LOG(ERROR)<<"map_path:"<<map_path<<endl; exit(-1); } pmap_cloud = MapCloudT::Ptr(new MapCloudT); pcl::io::loadPCDFile(map_path, *pmap_cloud); if(pmap_cloud->size()>0) { LOG(INFO)<<"map pointcloud size:"<<pmap_cloud->size()<<endl; return true; } return false; } bool loadFirstOdometryFromROSBagFile(nav_msgs::Odometry& ahrs_msg,sensor_msgs::NavSatFix& gps_msg)//同时读取gps坐标和航姿信息. { string bag_path; bool path_exist = ros::param::get("rosbag_path",bag_path); if(!path_exist) { LOG(ERROR)<<"Fatal error in map_config_generator: get bag_path failed!"<<endl; LOG(ERROR)<<"bag_path:"<<bag_path<<endl; exit(-1); } rosbag::Bag bag; bag.open(bag_path); vector<string> topics; string ahrs_local_topic = "/mavros/local_position/odom";//"/mavros/global_position/local" global_position/local is for simulation only. 
string gps_global_topic = "/mavros/global_position/raw/fix"; topics.push_back(ahrs_local_topic); topics.push_back(gps_global_topic); rosbag::View views(bag,rosbag::TopicQuery(topics)); bool flag_ahrs = false; bool flag_gps = false; ros::Time ahrs_msg_timestamp; ros::Time gps_msg_timestamp; for(rosbag::MessageInstance const m:views) { if(m.getTopic()==ahrs_local_topic) { nav_msgs::Odometry::Ptr p_odom_msg = m.instantiate<nav_msgs::Odometry>(); ahrs_msg = *p_odom_msg; //这两个时间是sim world里的时间,不是bag开始时的时间. // if(m.getTime().toSec()>2) // { // LOG(ERROR)<<"[map_config_generator] ERROR: Odometry msg is not found in initial 2s!"<<endl; // throw "Error"; // } flag_ahrs = true; ahrs_msg_timestamp = m.getTime(); } else if(m.getTopic()==gps_global_topic) { sensor_msgs::NavSatFix::Ptr p_gps_msg = m.instantiate<sensor_msgs::NavSatFix>(); gps_msg = *p_gps_msg; // if(m.getTime().toSec()>2) // { // LOG(ERROR)<<"[map_config_generator] ERROR: GPS msg is not found in initial 2s!"<<endl; // throw "Error"; // } flag_gps = true; gps_msg_timestamp = m.getTime(); } if(flag_ahrs&&flag_gps) { double time_diff = (gps_msg_timestamp-ahrs_msg_timestamp).toSec(); if(time_diff > 0.5||time_diff<-0.5) { LOG(ERROR)<<"[map_config_generator] ERROR: GPS AHRS time diff >0.5s!"<<endl; throw "Error"; } break; } } if(!(flag_ahrs&&flag_gps)) { LOG(ERROR)<< "[map_config_generator] get gps and ahrs msg failed"<<endl; throw "Error"; } bag.close(); return true; } void transformMapByInitialOdometryAndGPSCoordinate(const nav_msgs::Odometry& odom_msg,const sensor_msgs::NavSatFix& gps_msg,MapCloudT::Ptr pMap) { LOG(INFO)<<"transforming map..."<<endl; string output_map_path; ros::param::get("output_map_path",output_map_path); string output_map_config_path = output_map_path+".yaml"; //绕0,0,0旋转地图坐标系,获取新地图并保存. 
//pcl::transformPointCloud(*pcloud_current,*transformed,output_pose); MapCloudT::Ptr new_map(new MapCloudT); Eigen::Quaterniond quat; auto ps = odom_msg.pose.pose.orientation; LOG(INFO)<<"Quaternion:"<<ps.x<<";"<<ps.y<<";"<<ps.z<<";"<<ps.w<<endl; quat.x() = ps.x; quat.y() = ps.y; quat.z() = ps.z; quat.w() = ps.w; Eigen::Matrix3d R_flu_luf; bool using_liosam_coordinate = false; if(!ros::param::get("using_liosam_coordinate",using_liosam_coordinate)) { LOG(ERROR)<<"Error: Using_liosam_coordinate param not found in launch file!"<<endl; throw "Error!"; } if(using_liosam_coordinate) { R_flu_luf<<1,0,0,0,1,0,0,0,1;//LIO_SAM Only } else { R_flu_luf<<1,0,0,0,0,-1,0,1,0;//For LOAM LeGO_LOAM and SC_LeGOLOAM. } Eigen::Matrix3d rot_original = quat.toRotationMatrix(); Eigen::Matrix3d rot_transformed_ = R_flu_luf.inverse()*rot_original.inverse()*(R_flu_luf); Eigen::Matrix3d rot_LUF = rot_transformed_.inverse(); double z_comps = 0; //z_angle_compensation if(using_liosam_coordinate) { z_comps-=90; LOG(INFO)<<"z_angle_compensation -=90 deg for using liosam coordinate!"<<endl; } if(!ros::param::get("z_angle_compensation",z_comps)) { LOG(ERROR)<<"ERROR: z_angle_compensation not set in launch file!!!"<<endl; throw "Error!"; } else { LOG(INFO)<<"Using z_angle_compensation of "<<z_comps<<"deg!"<<endl; } z_comps*=(3.1415926535/180.0); Eigen::Matrix3d rot_z_compensation; rot_z_compensation<<cos(z_comps),-sin(z_comps),0,sin(z_comps),cos(z_comps),0,0,0,1; //Eigen::Quaterniond quat_inv = quat; //Eigen::Matrix3d rot = quat.toRotationMatrix(); //const Eigen::Matrix<Scalar, 3, 1> trans(0,0,0); double height_compensation = 0; if(!ros::param::get("lidar_to_ground_height",height_compensation)) { LOG(WARNING)<<"WARNING: lidar_to_ground_height not set in launch file!!!"<<endl; } LOG(INFO)<<"Using height_compensation:"<<height_compensation<<endl; Eigen::Quaterniond rot_flu (rot_z_compensation*R_flu_luf*rot_LUF); const Eigen::Vector3d trans(0,0,height_compensation); 
pcl::transformPointCloud(*pMap,*new_map,trans,rot_flu); float DOWNSAMPLE_SIZE = 0.1; if(!ros::param::get("map_downsampling_size",DOWNSAMPLE_SIZE)) { LOG(ERROR)<<"ERROR: map_downsampling_size not set in launch file!!!"<<endl; throw "Error!"; } LidarCloudT::Ptr downsampled_new_map(new LidarCloudT); pcl::VoxelGrid<MapPointT> sor; sor.setInputCloud(new_map); //sor.setLeafSize(0.2f, 0.2f, 0.2f); sor.setLeafSize(DOWNSAMPLE_SIZE, DOWNSAMPLE_SIZE, DOWNSAMPLE_SIZE); sor.filter(*downsampled_new_map); pcl::io::savePCDFile(output_map_path,*downsampled_new_map); //保存新地图的0,0,0所对应的经纬度到配置文件. LOG(INFO)<<"saving config file..."<<endl; std::FILE* fp_config_output; fp_config_output = fopen(output_map_config_path.c_str(),"w"); fprintf(fp_config_output,"\%YAML:1.0\n"); fprintf(fp_config_output,"---\n"); fprintf(fp_config_output,"initial_longitude: %.10f\n",gps_msg.longitude); fprintf(fp_config_output,"initial_latitude: %.10f\n",gps_msg.latitude); fprintf(fp_config_output,"initial_altitude: %.10f\n",gps_msg.altitude); fprintf(fp_config_output,"coordinate_mode: \"NWU\" "); fclose(fp_config_output); return; // cv::FileStorage fs; // fs.open(output_map_config_path,cv::FileStorage::WRITE); // fs.writeObj(); } bool run_all_pipeline() { LOG(INFO)<<"run all pipeline..."<<endl; MapCloudT::Ptr pMap; nav_msgs::Odometry odom_msg; sensor_msgs::NavSatFix gps_msg; bool flag_map = loadPCDmap(pMap); LOG(INFO)<<"map loaded..."<<endl; bool flag_bag = loadFirstOdometryFromROSBagFile(odom_msg,gps_msg); LOG(INFO)<<"bag loaded..."<<endl; if(flag_map&&flag_bag) { transformMapByInitialOdometryAndGPSCoordinate(odom_msg,gps_msg,pMap); } return true; } int main(int argc,char** argv) { FLAGS_alsologtostderr = 1; google::InitGoogleLogging("map_config_generator_node"); ros::init(argc,argv,"map_config_generator_node"); ros::NodeHandle nh; LOG(INFO)<<"Start node map_config_generator"<<endl; run_all_pipeline(); return 0; }
abstract type SpecEnv end #global settings for spectral calculations

#=
Spectral environment

λmin: lower wavelength boundary for all spectral calculations
      default: 390.0 nm, suitable for the 2012 CIE CMFs
Δλ  : wavelength stepwidth
      default: 1.0 nm, suitable for the 2012 CIE CMFs
λmax: upper wavelength boundary for all spectral calculations
      default: 830.0 nm, suitable for the 2012 CIE CMFs
cmf:  color matching function used for all spectral calculations
      default: CMF2012_10 (CIE2012 color matching function for a 10° observer)
ex:   extrapolation mode for spectra that have a smaller range than the
      environment settings
      default: :linear (linear extrapolation)
=#
mutable struct SpecEnvironment <: SpecEnv
    λmin::Real      # lower wavelength boundary in nm
    Δλ::Real        # wavelength stepwidth in nm
    λmax::Real      # upper wavelength boundary in nm
    cmf             # color matching function (untyped: any CMF object)
    ex::Symbol      # extrapolation mode, see `set_extrap`
    function SpecEnvironment(λmin,Δλ,λmax,cmf,ex)
        new(λmin,Δλ,λmax,cmf,ex)
    end
end

"""
`set_specenv(λmin = 390.0, Δλ = 1.0, λmax = 830.0, cmf = cmfunc(:cie12_10), ex = :linear)`

Initializes the spectral environment to standard values:

λmin = 390.0 nm

λmax = 830.0 nm

Δλ = 1.0 nm

cmf = cmfunc(:cie12_10)

ex = :linear
"""
function set_specenv(λmin = 390.0, Δλ = 1.0, λmax = 830.0, cmf = cmfunc(:cie12_10), ex = :linear)
    SpecEnvironment(λmin,Δλ,λmax,cmf,ex)
end

# Set spectral environment to default settings
SPECENV=set_specenv()

"""
`set_extrap(env::SpecEnvironment,extrapolation::Symbol)`

Sets the extrapolation mode of the environment `env` to one of the available
extrapolation settings:

`:none` or `:zero`: out of range values are zero.

`:boundary`: out of range values are set to the values at λmin and λmax, respectively.

`:linear`: out of range values are linearly extrapolated through the first/last two data points.

`:parabolic`: out of range values are extrapolated with a parabola through the first/last three datapoints.
*Recommended extrapolation method according to D.L. MacAdams* in "Color Measurement", chapter 5.4 *"Truncations"*.
"""
function set_extrap(env::SpecEnvironment,extrapolation::Symbol)
    extrapolation in (:none, :boundary, :linear, :parabolic) ? env.ex = extrapolation :
    extrapolation == :zero ? set_extrap(env, :none) :
    throw(DomainError(extrapolation,"Extrapolation mode does not exist!"))
end

"""
`set_extrap(extrapolation::Symbol)`

Sets the extrapolation mode of the default spectral environment `SPECENV` to
one of the available extrapolation settings:

`:none` or `:zero`: out of range values are zero.

`:boundary`: out of range values are set to the values at λmin and λmax, respectively.

`:linear`: out of range values are linearly extrapolated through the first/last two data points.

`:parabolic`: out of range values are extrapolated with a parabola through the first/last three datapoints.
*Recommended extrapolation method according to D.L. MacAdams* in "Color Measurement", chapter 5.4 *"Truncations"*.
"""
function set_extrap(extrapolation::Symbol)
    set_extrap(SPECENV, extrapolation)
end

"""
`set_limits(env::SpecEnvironment, λmin::Real,Δλ::Real,λmax::Real)`

sets the wavelength limits and resolution of the spectral environment `env`
"""
function set_limits(env::SpecEnvironment, λmin::Real,Δλ::Real,λmax::Real)
    env.λmin = λmin
    env.Δλ = Δλ
    env.λmax = λmax
end

"""
`set_limits(λmin::Real,Δλ::Real,λmax::Real)`

sets the wavelength limits and resolution of the default spectral environment `SPECENV`
"""
function set_limits(λmin::Real,Δλ::Real,λmax::Real)
    set_limits(SPECENV, λmin, Δλ, λmax)
end

"""
`set_colormatch(env::SpecEnvironment, colormatch::Symbol)`

Sets the color matching function (CMF) used in a defined spectral environment.

Available CMFs: `:cie31`, `:cie31_j`, `:cie31_jv`, `:cie64`, `:cie12_2`, `:cie12_10`, `:lms06_2`, `:lms06_10`
"""
function set_colormatch(env::SpecEnvironment, colormatch::Symbol)
    env.cmf = create_cmf(colormatch)
end

"""
`set_colormatch(colormatch::Symbol)`

Sets the color matching function (CMF) used in the default spectral environment `SPECENV`.

Available CMFs: `:cie31`, `:cie31_j`, `:cie31_jv`, `:cie64`, `:cie12_2`, `:cie12_10`, `:lms06_2`, `:lms06_10`
"""
function set_colormatch(colormatch::Symbol)
    set_colormatch(SPECENV, colormatch)
end
(* A Sudoku solver in Coq, v0.1, https://github.com/danielgerigk/Sudoku *)

(* NOTE(review): every definition below is an admitted placeholder — the file
   is a specification skeleton, not a working solver. *)

(* Three-element index type; B = A*A then has 9 inhabitants, matching the
   rows, columns and digits of a standard 9x9 Sudoku. *)
Inductive A :=
| a1 : A
| a2 : A
| a3 : A.

Definition B := (A*A)%type.

(* A grid as a characteristic map: m(x,y,z) = Some tt means "cell (x,y) holds
   digit z"; None means the triple is not asserted. *)
Definition M := (B*B*B -> option unit).

(* Framework to formalize Sudoku problems and their solutions *)

Definition translate : (B*B -> option B) -> M.
(* The type B*B -> option B is used by the user, the type M is used by the algorithm *)
admit.
Defined.

Definition isSudoku : forall m:M, Prop.
(* Checks whether the conditions to be a valid Sudoku solution are satisfied. *)
(* Intended definition:
(forall x y : B, exists! z : B, m(x,y,z) = Some tt) /\
(forall x z : B, exists! y : B, m(x,y,z) = Some tt) /\
(forall y z : B, exists! x : B, m(x,y,z) = Some tt) /\
(forall z : B, forall x1 y1 : A, exists! x2 y2 : A, m((x1,x2),(y1,y2),z) = Some tt). *)
admit.
Defined.

Definition isContinuation : forall m1 m2 : M, Prop.
(* Checks whether m2 is a continuation of m1, i.e. m2 fills in cells m1 left open. *)
(* Intended definition:
forall x y z : B, m1(x,y,z) = None \/ m1(x,y,z) = m2(x,y,z). *)
admit.
Defined.

Definition solve : forall m : M, exists n : M, isContinuation m n /\ isSudoku(n).
(* Solve the Sudoku problem m *)
(* NOTE(review): as stated, this claims every partial grid is solvable, which
   is false for contradictory inputs — the final statement will need a
   solvability hypothesis or an option-valued result. *)
admit.
Defined.
State Before: α : Type u_1 E : Type u_2 m : MeasurableSpace α μ : Measure α p : ℝ≥0∞ inst✝ : NormedLatticeAddCommGroup E f g : { x // x ∈ Lp E p } ⊢ ↑↑f ≤ᵐ[μ] ↑↑g ↔ f ≤ g State After: no goals Tactic: rw [← Subtype.coe_le_coe, ← AEEqFun.coeFn_le]
function de00 = deltaE2000(Labstd,Labsample)
% deltaE2000  CIEDE2000 color difference between pairs of CIELAB values.
%
% Inputs:
%   Labstd    - N-by-3 matrix of reference [L* a* b*] values.
%   Labsample - N-by-3 matrix of sample    [L* a* b*] values.
% Output:
%   de00      - 1-by-N row vector of CIEDE2000 differences, row i of
%               Labsample against row i of Labstd.
%
% Based on the article:
% "The CIEDE2000 Color-Difference Formula: Implementation Notes,
% Supplementary Test Data, and Mathematical Observations,", G. Sharma,
% W. Wu, E. N. Dalal, Color Research and Application, vol. 30. No. 1, pp.
% 21-30, February 2005.
% available at http://www.ece.rochester.edu/~/gsharma/ciede2000/
% Parametric weighting factors (kL = kC = kH = 1 per the standard reference
% conditions; hard-coded here).
kl = 1; kc=1; kh =1;
% Unpack Lab columns into row vectors and compute C*ab chroma for both sets.
Lstd = Labstd(:,1)';
astd = Labstd(:,2)';
bstd = Labstd(:,3)';
Cabstd = sqrt(astd.^2+bstd.^2);
Lsample = Labsample(:,1)';
asample = Labsample(:,2)';
bsample = Labsample(:,3)';
Cabsample = sqrt(asample.^2+bsample.^2);
Cabarithmean = (Cabstd + Cabsample)/2;
% G scales the a* axis to correct chroma non-uniformity near neutral colors.
G = 0.5* (1 - sqrt( (Cabarithmean.^7)./(Cabarithmean.^7 + 25^7)));
% Primed quantities (a', C', h') of the CIEDE2000 model.
apstd = (1+G).*astd;
apsample = (1+G).*asample;
Cpsample = sqrt(apsample.^2+bsample.^2);
Cpstd = sqrt(apstd.^2+bstd.^2);
Cpprod = (Cpsample.*Cpstd);
% Indices where either chroma is zero: hue is undefined there and the
% hue-related terms are forced to neutral values below.
zcidx = find(Cpprod == 0);
% Hue angles in [0, 2*pi); atan2(0,0) is mapped to hue 0 by convention.
hpstd = atan2(bstd,apstd);
hpstd = hpstd+2*pi*(hpstd < 0);
hpstd((abs(apstd)+abs(bstd))== 0) = 0;
hpsample = atan2(bsample,apsample);
hpsample = hpsample+2*pi*(hpsample < 0);
hpsample((abs(apsample)+abs(bsample))==0) = 0;
% Differences dL', dC', dh' (hue difference wrapped into (-pi, pi]) and dH'.
dL = (Lsample-Lstd);
dC = (Cpsample-Cpstd);
dhp = (hpsample-hpstd);
dhp = dhp - 2*pi* (dhp > pi );
dhp = dhp + 2*pi* (dhp < (-pi) );
dhp(zcidx ) = 0;
dH = 2*sqrt(Cpprod).*sin(dhp/2);
% Arithmetic means; the mean hue is corrected when the two hues straddle the
% 0/2*pi wrap, and degenerates to the plain sum when a chroma is zero.
Lp = (Lsample+Lstd)/2;
Cp = (Cpstd+Cpsample)/2;
hp = (hpstd+hpsample)/2;
hp = hp - ( abs(hpstd-hpsample) > pi ) *pi;
hp = hp+ (hp < 0) *2*pi;
hp(zcidx) = hpsample(zcidx)+hpstd(zcidx);
% Weighting functions SL, SC, SH and the hue-dependent factor T.
Lpm502 = (Lp-50).^2;
Sl = 1 + 0.015*Lpm502./sqrt(20+Lpm502);
Sc = 1+0.045*Cp;
T = 1 - 0.17*cos(hp - pi/6 ) + 0.24*cos(2*hp) + 0.32*cos(3*hp+pi/30) ...
    -0.20*cos(4*hp-63*pi/180);
Sh = 1 + 0.015*Cp.*T;
% Rotation term RT handling the blue-region interaction between chroma and hue.
delthetarad = (30*pi/180)*exp(- ( (180/pi*hp-275)/25).^2);
Rc = 2*sqrt((Cp.^7)./(Cp.^7 + 25^7));
RT = - sin(2*delthetarad).*Rc;
% Final CIEDE2000 distance.
klSl = kl*Sl;
kcSc = kc*Sc;
khSh = kh*Sh;
de00 = sqrt( (dL./klSl).^2 + (dC./kcSc).^2 + (dH./khSh).^2 + ...
    RT.*(dC./kcSc).*(dH./khSh) );
return
(* File: Real.thy Author: Bohua Zhan Construction of the real numbers. This construction (starting from Sequence.thy and SeqRing.thy) roughly follows that in the Isabelle/HOL library. *) theory Real imports CompleteOrder Auto2_FOL.Rat OrderTopology begin section \<open>Real numbers as a quotient set\<close> abbreviation seq_ring_rat :: i where "seq_ring_rat \<equiv> seq_ring(\<rat>)" notation seq_ring_rat ("S") definition cauchy_seqs :: i where [rewrite]: "cauchy_seqs = {X \<in>. S. cauchy(X)}" lemma cauchy_seqsI [forward]: "cauchy(X) \<Longrightarrow> X \<in> seqs(\<rat>) \<Longrightarrow> X \<in> cauchy_seqs" by auto2 lemma cauchy_seqs_iff [rewrite]: "X \<in> cauchy_seqs \<longleftrightarrow> (cauchy(X) \<and> X \<in> seqs(\<rat>))" by auto2 setup {* del_prfstep_thm @{thm cauchy_seqs_def} *} definition real_rel :: i where [rewrite]: "real_rel = Equiv(cauchy_seqs, \<lambda>X Y. vanishes(X -\<^sub>S Y))" notation real_rel ("\<R>") lemma real_rel_sym [resolve]: "X \<in>. S \<Longrightarrow> Y \<in>. S \<Longrightarrow> vanishes(X -\<^sub>S Y) \<Longrightarrow> vanishes(Y -\<^sub>S X)" @proof @have "Y -\<^sub>S X = -\<^sub>S (X -\<^sub>S Y)" @qed lemma real_rel_trans [backward2]: "X \<in>. S \<Longrightarrow> Y \<in>. S \<Longrightarrow> Z \<in>. S \<Longrightarrow> vanishes(X -\<^sub>S Y) \<Longrightarrow> vanishes(Y -\<^sub>S Z) \<Longrightarrow> vanishes(X -\<^sub>S Z)" @proof @have "X -\<^sub>S Z = (X -\<^sub>S Y) +\<^sub>S (Y -\<^sub>S Z)" @qed lemma real_rel_is_rel [typing]: "\<R> \<in> equiv_space(cauchy_seqs)" by auto2 setup {* fold del_prfstep_thm [@{thm real_rel_sym}, @{thm real_rel_trans}] *} lemma real_rel_eval: "X \<in>. \<R> \<Longrightarrow> Y \<in>. 
\<R> \<Longrightarrow> X \<sim>\<^sub>\<R> Y \<longleftrightarrow> vanishes(X -\<^sub>S Y)" by auto2 setup {* add_rewrite_rule_cond @{thm real_rel_eval} [with_cond "?X \<noteq> ?Y"] *} setup {* del_prfstep_thm @{thm real_rel_def} *} definition real :: i where real_def [rewrite_bidir]: "real = cauchy_seqs // \<R>" abbreviation Real :: "i \<Rightarrow> i" where "Real(X) \<equiv> equiv_class(\<R>,X)" section \<open>Real numbers as a ring\<close> definition nonneg_real :: "i \<Rightarrow> o" where [rewrite]: "nonneg_real(x) \<longleftrightarrow> nonneg_seq(rep(\<R>,x))" definition nonneg_reals :: i where [rewrite]: "nonneg_reals = {x\<in>real. nonneg_real(x)}" lemma nonneg_reals_in_real: "nonneg_reals \<subseteq> real" by auto2 setup {* add_forward_prfstep_cond @{thm nonneg_reals_in_real} [with_term "nonneg_reals"] *} definition real_ring :: i where [rewrite]: "real_ring = Ring(real, Real(\<zero>\<^sub>S), \<lambda>x y. Real(rep(\<R>,x) +\<^sub>S rep(\<R>,y)), Real(\<one>\<^sub>S), \<lambda>x y. Real(rep(\<R>,x) *\<^sub>S rep(\<R>,y)))" lemma real_ring_is_ring_raw [forward]: "ring_form(real_ring)" by auto2 definition real_ord_ring :: i where [rewrite]: "real_ord_ring = ord_ring_from_nonneg(real_ring, nonneg_reals)" definition real_ord_ring_top :: i ("\<real>") where [rewrite]: "\<real> = ord_ring_top_from_ord_ring(real_ord_ring)" lemma real_is_ring_raw [forward]: "is_ring_raw(\<real>)" by auto2 lemma real_carrier [rewrite_bidir]: "carrier(\<real>) = real" by auto2 lemma real_evals [rewrite]: "\<zero>\<^sub>\<real> = Real(\<zero>\<^sub>S)" "\<one>\<^sub>\<real> = Real(\<one>\<^sub>S)" "x \<in>. \<real> \<Longrightarrow> y \<in>. \<real> \<Longrightarrow> x +\<^sub>\<real> y = Real(rep(\<R>,x) +\<^sub>S rep(\<R>,y))" "x \<in>. \<real> \<Longrightarrow> y \<in>. \<real> \<Longrightarrow> x *\<^sub>\<real> y = Real(rep(\<R>,x) *\<^sub>S rep(\<R>,y))" "x \<in>. \<real> \<Longrightarrow> y \<in>. 
\<real> \<Longrightarrow> is_abgroup(\<real>) \<Longrightarrow> x \<le>\<^sub>\<real> y \<longleftrightarrow> y -\<^sub>\<real> x \<in> nonneg_reals" by auto2+ setup {* register_wellform_data ("nonneg_compat(R,A)", ["A \<subseteq> carrier(R)"]) *} lemma nonneg_compat_prop [forward]: "is_ring_raw(R) \<Longrightarrow> is_ring(T) \<Longrightarrow> A \<subseteq> carrier(R) \<Longrightarrow> eq_str_ring(R,T) \<Longrightarrow> nonneg_compat(R,A) \<Longrightarrow> nonneg_compat(T,A)" by auto2 lemma real_is_ord_field_prep [forward]: "is_field(\<real>) \<Longrightarrow> nonneg_compat(\<real>,nonneg_reals) \<Longrightarrow> is_ord_field(\<real>)" by auto2 lemma ord_ring_card_ge2 [forward]: "is_ord_ring(R) \<Longrightarrow> card_ge2(carrier(R))" @proof @have "{\<zero>\<^sub>R,\<one>\<^sub>R} \<subseteq> carrier(R)" @qed lemma real_is_order_top_prep [backward]: "is_ord_ring(\<real>) \<Longrightarrow> order_topology(\<real>)" by auto2 setup {* fold del_prfstep_thm [@{thm real_ring_def}, @{thm real_ord_ring_def}, @{thm real_ord_ring_top_def}] *} lemma seq_const_in_cauchy_seqs [typing]: "r \<in>. \<rat> \<Longrightarrow> {r}\<^sub>\<rat> \<in>. \<R>" by auto2 lemma real_choose_rep: "x \<in>. \<real> \<Longrightarrow> x = Real(rep(\<R>,x))" by auto2 setup {* add_rewrite_rule_cond @{thm real_choose_rep} [with_filt (size1_filter "x")] *} section \<open>Addition on real numbers\<close> lemma real_add_eval [rewrite]: "x \<in>. \<R> \<Longrightarrow> y \<in>. \<R> \<Longrightarrow> Real(x) +\<^sub>\<real> Real(y) = Real(x +\<^sub>S y)" @proof @have "compat_meta_bin(\<R>, \<lambda>X Y. X +\<^sub>S Y)" @qed setup {* del_prfstep_thm @{thm real_evals(3)} *} lemma real_add_comm [forward]: "is_plus_comm(\<real>)" by auto2 lemma real_add_assoc [forward]: "is_plus_assoc(\<real>)" by auto2 section \<open>Multiplication on real numbers\<close> lemma real_mult_eval [rewrite]: "x \<in>. \<R> \<Longrightarrow> y \<in>. 
\<R> \<Longrightarrow> Real(x) *\<^sub>\<real> Real(y) = Real(x *\<^sub>S y)" @proof @have "\<forall>X Y Z. X \<in>. \<R> \<longrightarrow> Y \<sim>\<^sub>\<R> Z \<longrightarrow> X *\<^sub>S Y \<sim>\<^sub>\<R> X *\<^sub>S Z" @with @have "X *\<^sub>S Y -\<^sub>S X *\<^sub>S Z = X *\<^sub>S (Y -\<^sub>S Z)" @end @have "\<forall>X Y Z. X \<in>. \<R> \<longrightarrow> Y \<sim>\<^sub>\<R> Z \<longrightarrow> Y *\<^sub>S X \<sim>\<^sub>\<R> Z *\<^sub>S X" @with @have "Y *\<^sub>S X -\<^sub>S Z *\<^sub>S X = (Y -\<^sub>S Z) *\<^sub>S X" @end @have "compat_meta_bin(\<R>, \<lambda>X Y. X *\<^sub>S Y)" @qed setup {* del_prfstep_thm @{thm real_evals(4)} *} lemma real_mult_comm [forward]: "is_times_comm(\<real>)" by auto2 lemma real_mult_assoc [forward]: "is_times_assoc(\<real>)" by auto2 lemma real_distrib_l [forward]: "is_left_distrib(\<real>)" by auto2 section \<open>0 and 1\<close> lemma real_is_add_id [forward]: "is_add_id(\<real>)" by auto2 lemma real_is_mult_id [forward]: "is_mult_id(\<real>)" by auto2 lemma real_zero_neq_one [resolve]: "\<zero>\<^sub>\<real> \<noteq> \<one>\<^sub>\<real>" by auto2 section \<open>Negation on real numbers\<close> lemma real_is_comm_ring [forward]: "is_comm_ring(\<real>)" @proof @have "\<forall>x\<in>.\<real>. x +\<^sub>\<real> Real(-\<^sub>S rep(\<R>,x)) = \<zero>\<^sub>\<real>" @qed section \<open>Inverse in real numbers\<close> lemma real_is_field [forward]: "is_field(\<real>)" @proof @have "\<forall>x\<in>.\<real>. x \<noteq> \<zero>\<^sub>\<real> \<longrightarrow> x *\<^sub>\<real> Real(seq_inverse(rep(\<R>,x))) = \<one>\<^sub>\<real>" @qed section \<open>Nonnegative real numbers\<close> lemma real_neg_eval [rewrite]: "x \<in>. \<R> \<Longrightarrow> -\<^sub>\<real> Real(x) = Real(-\<^sub>S x)" @proof @have "Real(x) +\<^sub>\<real> Real(-\<^sub>S x) = \<zero>\<^sub>\<real>" @qed lemma nonneg_real_eval [rewrite]: "x \<in>. 
\<R> \<Longrightarrow> nonneg_real(Real(x)) \<longleftrightarrow> nonneg_seq(x)" by auto2 setup {* del_prfstep_thm @{thm nonneg_real_def} *} lemma real_is_ord_field [forward]: "is_ord_field(\<real>)" @proof @have "nonneg_compat(\<real>, nonneg_reals)" @with @have "subset_add_closed(\<real>, nonneg_reals)" @have "subset_mult_closed(\<real>, nonneg_reals)" @end @qed setup {* del_prfstep_thm @{thm real_is_ord_field_prep} *} section \<open>of\_nat, of\_int, of\_rat in terms of sequences\<close> lemma real_of_nat [rewrite]: "n \<in> nat \<Longrightarrow> of_nat(\<real>,n) = Real({of_nat(\<rat>,n)}\<^sub>\<rat>)" @proof @var_induct "n \<in> nat" @qed lemma real_diff_eval [rewrite]: "x \<in>. \<R> \<Longrightarrow> y \<in>. \<R> \<Longrightarrow> Real(x) -\<^sub>\<real> Real(y) = Real(x -\<^sub>S y)" @proof @have "Real(x) -\<^sub>\<real> Real(y) = Real(x) +\<^sub>\<real> (-\<^sub>\<real> Real(y))" @qed lemma real_diff_eval_seq [rewrite]: "r \<in>. \<rat> \<Longrightarrow> s \<in>. \<rat> \<Longrightarrow> Real({r}\<^sub>\<rat>) -\<^sub>\<real> Real({s}\<^sub>\<rat>) = Real({r -\<^sub>\<rat> s}\<^sub>\<rat>)" by auto2 lemma real_inv_eval [rewrite]: "r \<in> units(\<rat>) \<Longrightarrow> inv(\<real>,Real({r}\<^sub>\<rat>)) = Real({inv(\<rat>,r)}\<^sub>\<rat>)" @proof @have "Real({r}\<^sub>\<rat>) *\<^sub>\<real> Real({inv(\<rat>,r)}\<^sub>\<rat>) = \<one>\<^sub>\<real>" @qed lemma real_mult_eval_seq [rewrite]: "r \<in>. \<rat> \<Longrightarrow> s \<in>. \<rat> \<Longrightarrow> Real({r}\<^sub>\<rat>) *\<^sub>\<real> Real({s}\<^sub>\<rat>) = Real({r *\<^sub>\<rat> s}\<^sub>\<rat>)" by auto2 lemma real_divide_eval [rewrite]: "r \<in>. 
\<rat> \<Longrightarrow> s \<in> units(\<rat>) \<Longrightarrow> Real({r}\<^sub>\<rat>) /\<^sub>\<real> Real({s}\<^sub>\<rat>) = Real({r /\<^sub>\<rat> s}\<^sub>\<rat>)" @proof @have "Real({r}\<^sub>\<rat>) /\<^sub>\<real> Real({s}\<^sub>\<rat>) = Real({r}\<^sub>\<rat>) *\<^sub>\<real> inv(\<real>,Real({s}\<^sub>\<rat>))" @have "r /\<^sub>\<rat> s = r *\<^sub>\<rat> inv(\<rat>,s)" @qed lemma real_of_int [rewrite]: "z \<in>. \<int> \<Longrightarrow> of_int(\<real>,z) = Real({of_int(\<rat>,z)}\<^sub>\<rat>)" @proof @obtain "a\<in>.\<nat>" "b\<in>.\<nat>" where "z = of_nat(\<int>,a) -\<^sub>\<int> of_nat(\<int>,b)" @qed lemma of_rat_divide [rewrite_bidir]: "is_ord_field(R) \<Longrightarrow> x \<in>. \<rat> \<Longrightarrow> y \<in> units(\<rat>) \<Longrightarrow> of_rat(R,x /\<^sub>\<rat> y) = of_rat(R,x) /\<^sub>R of_rat(R,y)" @proof @have "of_rat(R,x) /\<^sub>R of_rat(R,y) = of_rat(R,x) *\<^sub>R inv(R,of_rat(R,y))" @qed lemma real_of_rat [backward]: "r \<in>. \<rat> \<Longrightarrow> of_rat(\<real>,r) = Real({r}\<^sub>\<rat>)" @proof @obtain "a\<in>.\<int>" b where "b>\<^sub>\<int>0\<^sub>\<int>" "r = of_int(\<rat>,a) /\<^sub>\<rat> of_int(\<rat>,b)" @qed setup {* fold del_prfstep_thm [@{thm real_of_nat}, @{thm real_of_int}] *} setup {* del_prfstep_thm @{thm real_choose_rep} *} section \<open>Ordering on real numbers\<close> lemma le_Real1 [rewrite]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> Real(X) \<le>\<^sub>\<real> Real(Y) \<longleftrightarrow> nonneg_seq(Y -\<^sub>S X)" by auto2 setup {* fold del_prfstep_thm [@{thm real_evals(5)}, @{thm nonneg_real_eval}, @{thm nonneg_reals_def}] *} lemma le_Real [rewrite]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> Real(X) \<le>\<^sub>\<real> Real(Y) \<longleftrightarrow> (\<forall>r>\<^sub>\<rat>\<zero>\<^sub>\<rat>. \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. 
(X -\<^sub>S Y)`n \<le>\<^sub>\<rat> r)" @proof @have "Y -\<^sub>S X = -\<^sub>S (X -\<^sub>S Y)" @qed setup {* del_prfstep_thm @{thm le_Real1} *} lemma le_RealI [resolve]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> \<forall>r>\<^sub>\<rat>\<zero>\<^sub>\<rat>. \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S Y)`n \<le>\<^sub>\<rat> r \<Longrightarrow> Real(X) \<le>\<^sub>\<real> Real(Y)" by auto2 lemma le_RealD [backward2]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> Real(X) \<le>\<^sub>\<real> Real(Y) \<Longrightarrow> r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<Longrightarrow> \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S Y)`n \<le>\<^sub>\<rat> r" by auto2 setup {* del_prfstep_thm @{thm le_Real} *} lemma le_Real_all_n [resolve]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> \<forall>n\<in>.\<nat>. X`n \<le>\<^sub>\<rat> Y`n \<Longrightarrow> Real(X) \<le>\<^sub>\<real> Real(Y)" @proof @have "\<forall>r. r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<longrightarrow> (\<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S Y)`n \<le>\<^sub>\<rat> r)" @with @have "\<forall>n\<ge>\<^sub>\<nat>0. (X -\<^sub>S Y)`n \<le>\<^sub>\<rat> r" @end @qed lemma archimedeal_Real [forward]: "is_archimedean(\<real>)" @proof @have "\<forall>x>\<^sub>\<real>0\<^sub>\<real>. \<exists>r\<in>.\<rat>. of_rat(\<real>,r) \<ge>\<^sub>\<real> x" @with @let "X = rep(\<R>,x)" @obtain b where "b >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "\<forall>n\<in>.\<nat>. \<bar>X`n\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> b" @have "of_rat(\<real>,b) = Real({b}\<^sub>\<rat>)" @have "\<forall>n\<in>.\<nat>. X`n \<le>\<^sub>\<rat> {b}\<^sub>\<rat>`n" @end @qed lemma le_rat_real [backward1]: "X \<in>. \<R> \<Longrightarrow> c \<in>. \<rat> \<Longrightarrow> r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<Longrightarrow> Real(X) \<le>\<^sub>\<real> of_rat(\<real>,c) \<Longrightarrow> \<exists>k\<in>.\<nat>. 
\<forall>n\<ge>\<^sub>\<nat>k. X`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @proof @have "of_rat(\<real>,c) = Real({c}\<^sub>\<rat>)" @obtain "k\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S {c}\<^sub>\<rat>)`n \<le>\<^sub>\<rat> r" @have "\<forall>n\<ge>\<^sub>\<nat>k. X`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @with @have "X`n -\<^sub>\<rat> c \<le>\<^sub>\<rat> r" @end @qed lemma diff_le_rat_real2 [backward1]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> c \<in>. \<rat> \<Longrightarrow> r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<Longrightarrow> Real(X) -\<^sub>\<real> Real(Y) \<le>\<^sub>\<real> of_rat(\<real>,c) \<Longrightarrow> \<exists>k\<in>.\<nat>. \<forall>m\<ge>\<^sub>\<nat>k. \<forall>n\<ge>\<^sub>\<nat>k. X`m -\<^sub>\<rat> Y`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @proof @obtain s t where "s >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "t >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "r = s +\<^sub>\<rat> t" @obtain "i\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>i. (X -\<^sub>S Y)`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> s" @obtain j where "j\<ge>\<^sub>\<nat>i" "\<forall>m\<ge>\<^sub>\<nat>j. \<forall>n\<ge>\<^sub>\<nat>j. \<bar>X`m -\<^sub>\<rat> X`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> t" @have "\<forall>m\<ge>\<^sub>\<nat>j. \<forall>n\<ge>\<^sub>\<nat>j. X`m -\<^sub>\<rat> Y`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @with @have "\<bar>X`m -\<^sub>\<rat> X`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> t" @have "X`m -\<^sub>\<rat> Y`n = (X`m -\<^sub>\<rat> X`n) +\<^sub>\<rat> (X`n -\<^sub>\<rat> Y`n)" @have "t +\<^sub>\<rat> (c +\<^sub>\<rat> s) = c +\<^sub>\<rat> (s +\<^sub>\<rat> t)" @end @qed lemma abs_diff_le_rat_real2D [backward1]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> c \<in>. \<rat> \<Longrightarrow> r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<Longrightarrow> \<bar>Real(X) -\<^sub>\<real> Real(Y)\<bar>\<^sub>\<real> \<le>\<^sub>\<real> of_rat(\<real>,c) \<Longrightarrow> \<exists>k\<in>.\<nat>. 
\<forall>m\<ge>\<^sub>\<nat>k. \<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`m -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @proof @obtain "i\<in>.\<nat>" where "\<forall>m\<ge>\<^sub>\<nat>i. \<forall>n\<ge>\<^sub>\<nat>i. X`m -\<^sub>\<rat> Y`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @with @have "Real(X) -\<^sub>\<real> Real(Y) \<le>\<^sub>\<real> of_rat(\<real>,c)" @end @obtain "j\<in>.\<nat>" where "\<forall>m\<ge>\<^sub>\<nat>j. \<forall>n\<ge>\<^sub>\<nat>j. Y`m -\<^sub>\<rat> X`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @with @have "Real(Y) -\<^sub>\<real> Real(X) \<le>\<^sub>\<real> of_rat(\<real>,c)" @end @let "k = max(\<nat>,i,j)" @have "\<forall>m\<ge>\<^sub>\<nat>k. \<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`m -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @with @have "X`m -\<^sub>\<rat> Y`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @have "Y`n -\<^sub>\<rat> X`m \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @end @qed setup {* del_prfstep_thm @{thm diff_le_rat_real2} *} lemma le_rat_realI [resolve]: "X \<in>. \<R> \<Longrightarrow> c \<in>. \<rat> \<Longrightarrow> \<forall>r>\<^sub>\<rat>\<zero>\<^sub>\<rat>. \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. X`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r \<Longrightarrow> Real(X) \<le>\<^sub>\<real> of_rat(\<real>,c)" @proof @have "of_rat(\<real>,c) = Real({c}\<^sub>\<rat>)" @have "\<forall>r>\<^sub>\<rat>\<zero>\<^sub>\<rat>. \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S {c}\<^sub>\<rat>)`n \<le>\<^sub>\<rat> r" @with @obtain "k\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>k. X`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @have "\<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S {c}\<^sub>\<rat>)`n \<le>\<^sub>\<rat> r" @with @have "X`n -\<^sub>\<rat> c \<le>\<^sub>\<rat> r" @end @end @qed lemma diff_le_rat_realI [resolve]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> c \<in>. 
\<rat> \<Longrightarrow> \<forall>r>\<^sub>\<rat>\<zero>\<^sub>\<rat>. \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S Y)`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r \<Longrightarrow> Real(X) -\<^sub>\<real> Real(Y) \<le>\<^sub>\<real> of_rat(\<real>,c)" by auto2 setup {* del_prfstep_thm @{thm le_rat_realI} *} lemma abs_diff_le_rat_realI [resolve]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. \<R> \<Longrightarrow> c \<in>. \<rat> \<Longrightarrow> \<forall>r>\<^sub>\<rat>\<zero>\<^sub>\<rat>. \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> c +\<^sub>\<rat> r \<Longrightarrow> \<bar>Real(X) -\<^sub>\<real> Real(Y)\<bar>\<^sub>\<real> \<le>\<^sub>\<real> of_rat(\<real>,c)" @proof @have "Real(X) -\<^sub>\<real> Real(Y) \<le>\<^sub>\<real> of_rat(\<real>,c)" @with @have "\<forall>r. r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<longrightarrow> (\<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S Y)`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r)" @with @obtain "k\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @have "\<forall>n\<ge>\<^sub>\<nat>k. (X -\<^sub>S Y)`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @end @end @have "Real(Y) -\<^sub>\<real> Real(X) \<le>\<^sub>\<real> of_rat(\<real>,c)" @with @have "\<forall>r. r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<longrightarrow> (\<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. (Y -\<^sub>S X)`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r)" @with @obtain "k\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @have "\<forall>n\<ge>\<^sub>\<nat>k. (Y -\<^sub>S X)`n \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @end @end @qed setup {* del_prfstep_thm @{thm diff_le_rat_realI} *} lemma abs_diff_le_rat_realI' [backward1]: "X \<in>. \<R> \<Longrightarrow> Y \<in>. 
\<R> \<Longrightarrow> c \<in>. \<rat> \<Longrightarrow> i \<in>. \<nat> \<Longrightarrow> \<forall>n\<ge>\<^sub>\<nat>i. \<bar>X`n -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> c \<Longrightarrow> \<bar>Real(X) -\<^sub>\<real> Real(Y)\<bar>\<^sub>\<real> \<le>\<^sub>\<real> of_rat(\<real>,c)" @proof @have "\<forall>r. r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<longrightarrow> (\<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> c +\<^sub>\<rat> r)" @with @obtain "k\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> c" @have "\<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>\<rat> Y`n\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> c +\<^sub>\<rat> r" @end @qed lemma converges_to_in_rat [resolve]: "ord_field_seq(X) \<Longrightarrow> R = target_str(X) \<Longrightarrow> s \<in>. R \<Longrightarrow> is_archimedean(R) \<Longrightarrow> \<forall>r>\<^sub>\<rat>\<zero>\<^sub>\<rat>. \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>R s\<bar>\<^sub>R \<le>\<^sub>R of_rat(R,r) \<Longrightarrow> converges_to(X,s)" @proof @have "\<forall>r'. r' >\<^sub>R \<zero>\<^sub>R \<longrightarrow> (\<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>R s\<bar>\<^sub>R <\<^sub>R r')" @with @obtain r where "r >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "of_rat(R,r) <\<^sub>R r'" @obtain "k\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>R s\<bar>\<^sub>R \<le>\<^sub>R of_rat(R,r)" @end @qed section \<open>A sequence that converges to zero\<close> definition err :: i where [rewrite]: "err = Seq(\<rat>, \<lambda>n. inv(\<rat>,of_nat(\<rat>,n +\<^sub>\<nat> 1)))" lemma err_type [typing]: "err \<in> seqs(\<rat>)" by auto2 lemma err_gt_zero: "n \<in>. 
\<nat> \<Longrightarrow> err`n >\<^sub>\<rat> \<zero>\<^sub>\<rat>" by auto2 setup {* add_forward_prfstep_cond @{thm err_gt_zero} [with_term "err`?n"] *} lemma err_decreasing [backward]: "m >\<^sub>\<nat> n \<Longrightarrow> err`m <\<^sub>\<rat> err`n" @proof @have "of_nat(\<rat>,n +\<^sub>\<nat> 1) >\<^sub>\<rat> \<zero>\<^sub>\<rat>" @qed lemma err_less_than_r [backward]: "r >\<^sub>\<rat> \<zero>\<^sub>\<rat>\<Longrightarrow> \<exists>n\<in>.\<nat>. err`n <\<^sub>\<rat> r" @proof @obtain "n\<in>nat" where "inv(\<rat>,of_nat(\<rat>,n +\<^sub>\<nat> 1)) <\<^sub>\<rat> r" @qed lemma err_converge_to_zero [backward]: "r >\<^sub>\<rat> \<zero>\<^sub>\<rat> \<Longrightarrow> \<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. err`n <\<^sub>\<rat> r" @proof @obtain "k\<in>.\<nat>" where "err`k <\<^sub>\<rat> r" @have "\<forall>n\<ge>\<^sub>\<nat>k. err`n <\<^sub>\<rat> r" @qed setup {* del_prfstep_thm @{thm err_def} *} section \<open>Cauchy completeness of real numbers\<close> (* We start by defining, for each Cauchy sequence X of real numbers, a "diagonal" sequence for the double sequence of representatives of X. *) definition Diag :: "i \<Rightarrow> i" where [rewrite]: "Diag(X) = Seq(\<rat>,\<lambda>n. rep(\<R>,X`n)`(SOME k\<in>.\<nat>. \<forall>i\<ge>\<^sub>\<nat>k. \<bar>rep(\<R>,X`n)`i -\<^sub>\<rat> rep(\<R>,X`n)`k\<bar>\<^sub>\<rat> <\<^sub>\<rat> err`n))" setup {* register_wellform_data ("Diag(X)", ["X \<in> seqs(\<real>)"]) *} lemma Diag_type [typing]: "X \<in> seqs(\<real>) \<Longrightarrow> Diag(X) \<in> seqs(\<rat>)" by auto2 lemma Diag_prop [backward]: "X \<in> seqs(\<real>) \<Longrightarrow> n \<in>. \<nat> \<Longrightarrow> \<exists>k\<in>.\<nat>. \<forall>i\<ge>\<^sub>\<nat>k. \<bar>rep(\<R>,X`n)`i -\<^sub>\<rat> Diag(X)`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> err`n" by auto2 lemma Diag_prop_ge_nat [backward]: "X \<in> seqs(\<real>) \<Longrightarrow> n \<in>. \<nat> \<Longrightarrow> i \<in>. \<nat> \<Longrightarrow> \<exists>k\<ge>\<^sub>\<nat>i. 
\<forall>i\<ge>\<^sub>\<nat>k. \<bar>rep(\<R>,X`n)`i -\<^sub>\<rat> Diag(X)`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> err`n" @proof @obtain "k\<in>.\<nat>" where "\<forall>i\<ge>\<^sub>\<nat>k. \<bar>rep(\<R>,X`n)`i -\<^sub>\<rat> Diag(X)`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> err`n" @have "max(\<nat>,k,i) \<ge>\<^sub>\<nat> i" @qed setup {* del_prfstep_thm @{thm Diag_def} *} lemma Diag_is_cauchy [forward]: "cauchy(X) \<Longrightarrow> X \<in> seqs(\<real>) \<Longrightarrow> cauchy(Diag(X))" @proof @contradiction @let "W = Diag(X)" @obtain r where "r >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "\<not>(\<exists>k\<in>.\<nat>. \<forall>m\<ge>\<^sub>\<nat>k. \<forall>n\<ge>\<^sub>\<nat>k. \<bar>W`m -\<^sub>\<rat> W`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> r)" @obtain r1 r2 r3 where "r1 >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "r2 >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "r3 >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "r = r1 +\<^sub>\<rat> (r2 +\<^sub>\<rat> r3) +\<^sub>\<rat> r1" @with @have "r = r /\<^sub>\<rat> 4\<^sub>\<rat> +\<^sub>\<rat> (r /\<^sub>\<rat> 4\<^sub>\<rat> +\<^sub>\<rat> r /\<^sub>\<rat> 4\<^sub>\<rat>) +\<^sub>\<rat> r /\<^sub>\<rat> 4\<^sub>\<rat>" @end @obtain "i\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>i. err`n <\<^sub>\<rat> r1" @obtain j where "j\<ge>\<^sub>\<nat>i" "\<forall>m\<ge>\<^sub>\<nat>j. \<forall>n\<ge>\<^sub>\<nat>j. \<bar>X`m -\<^sub>\<real> X`n\<bar>\<^sub>\<real> \<le>\<^sub>\<real> of_rat(\<real>,r2)" @have "\<forall>m\<ge>\<^sub>\<nat>j. \<forall>n\<ge>\<^sub>\<nat>j. \<bar>W`m -\<^sub>\<rat> W`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> r" @with @let "Sm = rep(\<R>,X`m)" "Sn = rep(\<R>,X`n)" @obtain "k1\<in>.\<nat>" where "\<forall>k'\<ge>\<^sub>\<nat>k1. \<bar>Sm`k' -\<^sub>\<rat> W`m\<bar>\<^sub>\<rat> <\<^sub>\<rat> err`m" @obtain "k2\<in>.\<nat>" where "\<forall>k'\<ge>\<^sub>\<nat>k2. \<bar>Sn`k' -\<^sub>\<rat> W`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> err`n" @obtain "k3\<in>.\<nat>" where "\<forall>k'\<ge>\<^sub>\<nat>k3. \<forall>k''\<ge>\<^sub>\<nat>k3. 
\<bar>Sm`k' -\<^sub>\<rat> Sn`k''\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> r2 +\<^sub>\<rat> r3" @with @have "\<bar>X`m -\<^sub>\<real> X`n\<bar>\<^sub>\<real> \<le>\<^sub>\<real> of_rat(\<real>,r2)" @end @obtain k where "k \<ge>\<^sub>\<nat> k1" "k \<ge>\<^sub>\<nat> k2" "k \<ge>\<^sub>\<nat> k3" @have "\<bar>W`m -\<^sub>\<rat> Sn`k\<bar>\<^sub>\<rat> <\<^sub>\<rat> err`m +\<^sub>\<rat> (r2 +\<^sub>\<rat> r3)" @with @have "\<bar>Sm`k -\<^sub>\<rat> Sn`k\<bar>\<^sub>\<rat> \<le>\<^sub>\<rat> r2 +\<^sub>\<rat> r3" @end @end @qed lemma Diag_converges [forward]: "cauchy(X) \<Longrightarrow> X \<in> seqs(\<real>) \<Longrightarrow> converges_to(X,Real(Diag(X)))" @proof @contradiction @let "W = Diag(X)" @obtain r where "r >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "\<not>(\<exists>k\<in>.\<nat>. \<forall>n\<ge>\<^sub>\<nat>k. \<bar>X`n -\<^sub>\<real> Real(W)\<bar>\<^sub>\<real> \<le>\<^sub>\<real> of_rat(\<real>,r))" @obtain s t where "s >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "t >\<^sub>\<rat> \<zero>\<^sub>\<rat>" "r = s +\<^sub>\<rat> t" @obtain "i\<in>.\<nat>" where "\<forall>n\<ge>\<^sub>\<nat>i. err`n <\<^sub>\<rat> s" @obtain j where "j\<ge>\<^sub>\<nat>i" "\<forall>m\<ge>\<^sub>\<nat>j. \<forall>n\<ge>\<^sub>\<nat>j. \<bar>W`m -\<^sub>\<rat> W`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> t" @have "\<forall>n\<ge>\<^sub>\<nat>j. \<bar>X`n -\<^sub>\<real> Real(W)\<bar>\<^sub>\<real> \<le>\<^sub>\<real> of_rat(\<real>,r)" @with @let "Sn = rep(\<R>,X`n)" @obtain k where "k\<ge>\<^sub>\<nat>j" "\<forall>k'\<ge>\<^sub>\<nat>k. \<bar>Sn`k' -\<^sub>\<rat> W`n\<bar>\<^sub>\<rat> <\<^sub>\<rat> err`n" @have "\<forall>p\<ge>\<^sub>\<nat>k. \<bar>Sn`p -\<^sub>\<rat> W`p\<bar>\<^sub>\<rat> <\<^sub>\<rat> r" @end @qed lemma real_cauchy_complete [forward]: "cauchy_complete_field(\<real>)" @proof @have "\<forall>X\<in>seqs(\<real>). 
cauchy(X) \<longrightarrow> converges(X)" @qed setup {* fold del_prfstep_thm [@{thm Diag_is_cauchy}, @{thm Diag_converges}] *} setup {* fold del_prfstep_thm [@{thm abs_diff_le_rat_real2D}, @{thm abs_diff_le_rat_realI}, @{thm abs_diff_le_rat_realI'}, @{thm converges_to_in_rat}] *} lemma real_continuum [forward]: "linear_continuum(\<real>)" by auto2 lemma real_order_topology [forward]: "order_topology(\<real>)" by auto2 setup {* del_prfstep_thm @{thm real_is_order_top_prep} *} no_notation seq_ring_rat ("S") no_notation real_rel ("\<R>") setup {* del_prfstep_thm @{thm real_def} *} setup {* fold del_prfstep_thm @{thms real_evals(1-2)} *} setup {* fold del_prfstep_thm [@{thm real_add_eval}, @{thm real_mult_eval}] *} end
# Import Problem Instance We start by importing a simple problem instance to demonstrate the tsplib reader. ```python from tsplib95 import tsplib95 import itertools import networkx as nx instance = tsplib95.load_problem('./tsplib/ulysses16.tsp') instance.comment ``` 'Odyssey of Ulysses (Groetschel/Padberg)' Remember, this repository contains a small selection of TSP instances that you can use to test your algorithms. | name | nodes | description | |------|-------|-------------| | ulysses16.tsp | 16 | Odyssey of Ulysses | | ulysses7.tsp | 7 | subset of ulysses16 for testing purposes | | bayg29.tsp | 29 | 29 Cities in Bavaria | | bier127.tsp | 127 | 127 Biergaerten in Augsburg | | bier20.tsp | 20 | subset of bier127 | | brazil58.tsp | 58 | 58 cities in Brazil | | ali535.tsp | 535 | 535 Airports around the globe | | d18512.tsp | 18512 | 18512 places in Germany | The following calls show the dimension = number of nodes of the problem, its node set and the edge weights. The functions `instance.get_nodes()` and `instance.get_edges()` are implemented as iterators, so you can loop over the nodes or edges. To get a list of nodes or edges, you have to explicitly construct one using `list(instance.get_nodes())`. Note that node counting may start at 1 for some instances while others use 0 as starting point. For convenience, we store the index of the first node as `first_node`. ```python instance.dimension instance.get_nodes() print("List of nodes: ", list(instance.get_nodes())) first_node = min(instance.get_nodes()) first_node for i,j in instance.get_edges(): if i >= j: continue print(f"edge {{ {i:2},{j:2} }} has weight {instance.wfunc(i,j):3}.") ``` List of nodes: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] edge { 1, 2 } has weight 6. edge { 1, 3 } has weight 5. edge { 1, 4 } has weight 3. edge { 1, 5 } has weight 11. edge { 1, 6 } has weight 8. edge { 1, 7 } has weight 7. edge { 1, 8 } has weight 1. edge { 1, 9 } has weight 12. edge { 1,10 } has weight 8. 
edge { 1,11 } has weight 26. edge { 1,12 } has weight 5. edge { 1,13 } has weight 5. edge { 1,14 } has weight 5. edge { 1,15 } has weight 7. edge { 1,16 } has weight 1. edge { 2, 3 } has weight 1. edge { 2, 4 } has weight 4. edge { 2, 5 } has weight 17. edge { 2, 6 } has weight 14. edge { 2, 7 } has weight 13. edge { 2, 8 } has weight 6. edge { 2, 9 } has weight 17. edge { 2,10 } has weight 13. edge { 2,11 } has weight 32. edge { 2,12 } has weight 11. edge { 2,13 } has weight 11. edge { 2,14 } has weight 11. edge { 2,15 } has weight 13. edge { 2,16 } has weight 7. edge { 3, 4 } has weight 5. edge { 3, 5 } has weight 16. edge { 3, 6 } has weight 13. edge { 3, 7 } has weight 12. edge { 3, 8 } has weight 6. edge { 3, 9 } has weight 16. edge { 3,10 } has weight 12. edge { 3,11 } has weight 31. edge { 3,12 } has weight 10. edge { 3,13 } has weight 10. edge { 3,14 } has weight 11. edge { 3,15 } has weight 12. edge { 3,16 } has weight 6. edge { 4, 5 } has weight 13. edge { 4, 6 } has weight 11. edge { 4, 7 } has weight 10. edge { 4, 8 } has weight 3. edge { 4, 9 } has weight 15. edge { 4,10 } has weight 11. edge { 4,11 } has weight 28. edge { 4,12 } has weight 8. edge { 4,13 } has weight 8. edge { 4,14 } has weight 8. edge { 4,15 } has weight 9. edge { 4,16 } has weight 5. edge { 5, 6 } has weight 4. edge { 5, 7 } has weight 6. edge { 5, 8 } has weight 11. edge { 5, 9 } has weight 8. edge { 5,10 } has weight 8. edge { 5,11 } has weight 16. edge { 5,12 } has weight 7. edge { 5,13 } has weight 7. edge { 5,14 } has weight 6. edge { 5,15 } has weight 4. edge { 5,16 } has weight 11. edge { 6, 7 } has weight 1. edge { 6, 8 } has weight 8. edge { 6, 9 } has weight 5. edge { 6,10 } has weight 4. edge { 6,11 } has weight 17. edge { 6,12 } has weight 3. edge { 6,13 } has weight 3. edge { 6,14 } has weight 3. edge { 6,15 } has weight 3. edge { 6,16 } has weight 8. edge { 7, 8 } has weight 7. edge { 7, 9 } has weight 5. edge { 7,10 } has weight 3. edge { 7,11 } has weight 18. 
edge { 7,12 } has weight 2. edge { 7,13 } has weight 2. edge { 7,14 } has weight 2. edge { 7,15 } has weight 3. edge { 7,16 } has weight 7. edge { 8, 9 } has weight 12. edge { 8,10 } has weight 8. edge { 8,11 } has weight 26. edge { 8,12 } has weight 5. edge { 8,13 } has weight 5. edge { 8,14 } has weight 5. edge { 8,15 } has weight 6. edge { 8,16 } has weight 2. edge { 9,10 } has weight 4. edge { 9,11 } has weight 15. edge { 9,12 } has weight 7. edge { 9,13 } has weight 7. edge { 9,14 } has weight 7. edge { 9,15 } has weight 8. edge { 9,16 } has weight 11. edge { 10,11 } has weight 19. edge { 10,12 } has weight 3. edge { 10,13 } has weight 4. edge { 10,14 } has weight 4. edge { 10,15 } has weight 6. edge { 10,16 } has weight 7. edge { 11,12 } has weight 20. edge { 11,13 } has weight 21. edge { 11,14 } has weight 20. edge { 11,15 } has weight 20. edge { 11,16 } has weight 25. edge { 12,13 } has weight 0. edge { 12,14 } has weight 1. edge { 12,15 } has weight 3. edge { 12,16 } has weight 5. edge { 13,14 } has weight 1. edge { 13,15 } has weight 3. edge { 13,16 } has weight 4. edge { 14,15 } has weight 2. edge { 14,16 } has weight 5. edge { 15,16 } has weight 7. You have already seen how to draw a graph, here is the relevant code again. ```python G = instance.get_graph() if instance.is_depictable(): pos = {i: instance.get_display(i) for i in instance.get_nodes()} else: pos = nx.drawing.layout.spring_layout(G) nx.draw_networkx_nodes(G, pos, node_color='#66a3ff', node_size=200) #nx.draw_networkx_labels(G, pos, font_weight='bold' ) nx.draw_networkx_edges(G, pos, edge_color='#e6e6e6') ``` # Implementing the MTZ model in Gurobi We will implement the Miller-Tucker-Zemlin formulation using binary variables $x_{ij} \in \{0,1\}$ to indicate whether arc $(i,j)$ is being used in the tour and variables $y_i \ge 0$ as "position variables" for each node $i \in V$. 
The formulation looks like this: \begin{align} \min\;&\sum_{i,j \in V; i \ne j} c_{i,j} \cdot x_{i,j}\\ &\sum_{j \ne i} x_{i,j} = 1 \quad \text{for all nodes $i$}\\ &\sum_{j \ne i} x_{j,i} = 1 \quad \text{for all nodes $i$}\\ &y_1 = 1\\ &y_i \le n \quad \text{for all nodes $i \ne v_0$}\\ &y_i \ge 2 \quad \text{for all nodes $i \ne v_0$}\\ &y_i - y_j + (n-1) \cdot x_{i,j} \le n-2 \quad \text{for all nodes $i,j$ with $i,j \ne 1$ and $i \ne j$}\\ &x_{i,j} \in \{0,1\}\\ &y_i \ge 0 \end{align} ## Creating the variables We start by creating the model and the variables. Notice that we already define the objective function by using the `obj` parameter upon variable creation. For demonstration purposes, we use a loop for defining the $x$ variables and a shortcut function for the $y$ variables. Note that the assumption $y_i \ge 0$ is implicit. ```python import gurobipy as grb # create a new model model = grb.Model(name="MTZ TSP formulation") # create dictionary with tupel as keys # elements of x can then be accesses via x[i,j] x = grb.tupledict() # filter discards all elements that do not fit the filter criteria for i,j in filter(lambda e: e[0] != e[1], instance.get_edges()): # add variable with objective coefficient obj, type {0,1} (binary) x[i,j] = model.addVar(obj=instance.wfunc(i,j), vtype=grb.GRB.BINARY, name=f"x[{i},{j}]") # add variable for every element of instance.get_nodes() y = model.addVars(instance.get_nodes(), name="y") ``` ## Adding the constraints Next, we add the constraints for our model. Notice that $y_i \ge 0$ is implicit and $x_{i,j} \in \{0,1\}$ has been included in the variable definition already. We start with the simple constraints on the $y_i$ variables. Notice that we could also have used methods to set lower and upper bounds for the $y_i$ for most of the simple constraints. 
```python model.addConstr(y[first_node] == 1) for i in itertools.islice(instance.get_nodes(),first_node+1,None): model.addConstr(y[i] <= instance.dimension) ``` **Task 1:** Add another loop to formulate the constraints $y_i \ge 2$ for all nodes $i \ne v_0$. ```python ``` The remaining constraints require iteration over all edges. We use the keys() set of our variables for this purpose to ensure we only iterate over edges that are actually defined. ```python for i,j in filter(lambda e: e[0] != e[1] and e[0] != first_node and e[1] != first_node, x.keys()): model.addConstr(y[i] - y[j] + (instance.dimension-1)*x[i,j] <= (instance.dimension-2)) ``` **Task 2:** Add the in- and out-degree constraints \begin{align} &\sum_{j \ne i} x_{i,j} = 1 \quad \text{for all nodes $i$}\\ &\sum_{j \ne i} x_{j,i} = 1 \quad \text{for all nodes $i$}\\ \end{align} ```python ``` ## Starting the Optimization Process Finally, we set the objective to minimization and call the optimizer. ```python model.ModelSense = grb.GRB.MINIMIZE model.optimize() ``` Optimize a model with 240 rows, 256 columns and 660 nonzeros Variable types: 16 continuous, 240 integer (240 binary) Coefficient statistics: Matrix range [1e+00, 2e+01] Objective range [1e+00, 3e+01] Bounds range [1e+00, 1e+00] RHS range [1e+00, 2e+01] Found heuristic solution: objective 0.0000000 Explored 0 nodes (0 simplex iterations) in 0.01 seconds Thread count was 1 (of 8 available processors) Solution count 1: 0 Optimal solution found (tolerance 1.00e-04) Best objective 0.000000000000e+00, best bound 0.000000000000e+00, gap 0.0000% ## Querying and Visualizing the Solution Before we visualize our result, let us look at a few key figures of our solution. 
```python print(f"Objective value: {model.ObjVal}") print(f"value of x variables: {model.x}") solution_edges = [(i,j) for i,j in x.keys() if x[i,j].x > 0.9] solution_edges ``` Objective value: 0.0 value of x variables: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] [] For debugging purposes, it might be helpful to export the model held by Gurobi into a human-readable format: ```python model.write('test.lp') ``` Finally, let us visualize the solution using NetworkX. In this case, we need to prescribe positions and draw the nodes and two layers of edges separately. 
```python if instance.is_depictable(): pos = {i: instance.get_display(i) for i in instance.get_nodes()} else: pos = nx.drawing.layout.spring_layout(G) nx.draw_networkx_nodes(G, pos, node_color='#66a3ff', node_size=500) nx.draw_networkx_labels(G, pos, font_weight='bold' ) nx.draw_networkx_edges(G, pos, edge_color='#e6e6e6') nx.draw_networkx_edges(G, pos, edgelist=solution_edges, edge_color='#ffa31a', width=4) ``` ```python ```
/- Copyright (c) 2018 Simon Hudon. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Simon Hudon Traversable instance for lazy_lists. -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.control.traversable.equiv import Mathlib.control.traversable.instances import Mathlib.Lean3Lib.data.lazy_list import Mathlib.PostPort universes u_1 u u_2 u_3 namespace Mathlib /-! ## Definitions on lazy lists This file contains various definitions and proofs on lazy lists. TODO: move the `lazy_list.lean` file from core to mathlib. -/ namespace thunk /-- Creates a thunk with a (non-lazy) constant value. -/ def mk {α : Type u_1} (x : α) : thunk α := fun (_x : Unit) => x protected instance decidable_eq {α : Type u} [DecidableEq α] : DecidableEq (thunk α) := sorry end thunk namespace lazy_list /-- Isomorphism between strict and lazy lists. -/ def list_equiv_lazy_list (α : Type u_1) : List α ≃ lazy_list α := equiv.mk of_list to_list sorry sorry protected instance inhabited {α : Type u} : Inhabited (lazy_list α) := { default := nil } protected instance decidable_eq {α : Type u} [DecidableEq α] : DecidableEq (lazy_list α) := sorry /-- Traversal of lazy lists using an applicative effect. -/ protected def traverse {m : Type u → Type u} [Applicative m] {α : Type u} {β : Type u} (f : α → m β) : lazy_list α → m (lazy_list β) := sorry protected instance traversable : traversable lazy_list := traversable.mk lazy_list.traverse protected instance is_lawful_traversable : is_lawful_traversable lazy_list := equiv.is_lawful_traversable' list_equiv_lazy_list sorry sorry sorry /-- `init xs`, if `xs` non-empty, drops the last element of the list. Otherwise, return the empty list. 
-/ def init {α : Type u_1} : lazy_list α → lazy_list α := sorry /-- Return the first object contained in the list that satisfies predicate `p` -/ def find {α : Type u_1} (p : α → Prop) [decidable_pred p] : lazy_list α → Option α := sorry /-- `interleave xs ys` creates a list where elements of `xs` and `ys` alternate. -/ def interleave {α : Type u_1} : lazy_list α → lazy_list α → lazy_list α := sorry /-- `interleave_all (xs::ys::zs::xss)` creates a list where elements of `xs`, `ys` and `zs` and the rest alternate. Every other element of the resulting list is taken from `xs`, every fourth is taken from `ys`, every eighth is taken from `zs` and so on. -/ def interleave_all {α : Type u_1} : List (lazy_list α) → lazy_list α := sorry /-- Monadic bind operation for `lazy_list`. -/ protected def bind {α : Type u_1} {β : Type u_2} : lazy_list α → (α → lazy_list β) → lazy_list β := sorry /-- Reverse the order of a `lazy_list`. It is done by converting to a `list` first because reversal involves evaluating all the list and if the list is all evaluated, `list` is a better representation for it than a series of thunks. 
-/ def reverse {α : Type u_1} (xs : lazy_list α) : lazy_list α := of_list (list.reverse (to_list xs)) protected instance monad : Monad lazy_list := sorry theorem append_nil {α : Type u_1} (xs : lazy_list α) : (append xs fun (_ : Unit) => nil) = xs := sorry theorem append_assoc {α : Type u_1} (xs : lazy_list α) (ys : lazy_list α) (zs : lazy_list α) : (append (append xs fun (_ : Unit) => ys) fun (_ : Unit) => zs) = append xs fun (_ : Unit) => append ys fun (_ : Unit) => zs := sorry theorem append_bind {α : Type u_1} {β : Type u_2} (xs : lazy_list α) (ys : thunk (lazy_list α)) (f : α → lazy_list β) : lazy_list.bind (append xs ys) f = append (lazy_list.bind xs f) fun (_ : Unit) => lazy_list.bind (ys Unit.unit) f := sorry protected instance is_lawful_monad : is_lawful_monad lazy_list := sorry /-- Try applying function `f` to every element of a `lazy_list` and return the result of the first attempt that succeeds. -/ def mfirst {m : Type u_1 → Type u_2} [alternative m] {α : Type u_3} {β : Type u_1} (f : α → m β) : lazy_list α → m β := sorry /-- Membership in lazy lists -/ protected def mem {α : Type u_1} (x : α) : lazy_list α → Prop := sorry protected instance has_mem {α : outParam (Type u_1)} : has_mem α (lazy_list α) := has_mem.mk lazy_list.mem protected instance mem.decidable {α : Type u_1} [DecidableEq α] (x : α) (xs : lazy_list α) : Decidable (x ∈ xs) := sorry @[simp] theorem mem_nil {α : Type u_1} (x : α) : x ∈ nil ↔ False := iff.rfl @[simp] theorem mem_cons {α : Type u_1} (x : α) (y : α) (ys : thunk (lazy_list α)) : x ∈ cons y ys ↔ x = y ∨ x ∈ ys Unit.unit := iff.rfl theorem forall_mem_cons {α : Type u_1} {p : α → Prop} {a : α} {l : thunk (lazy_list α)} : (∀ (x : α), x ∈ cons a l → p x) ↔ p a ∧ ∀ (x : α), x ∈ l Unit.unit → p x := sorry /-! ### map for partial functions -/ /-- Partial map. 
If `f : Π a, p a → β` is a partial function defined on `a : α` satisfying `p`, then `pmap f l h` is essentially the same as `map f l` but is defined only when all members of `l` satisfy `p`, using the proof to apply `f`. -/ @[simp] def pmap {α : Type u_1} {β : Type u_2} {p : α → Prop} (f : (a : α) → p a → β) (l : lazy_list α) : (∀ (a : α), a ∈ l → p a) → lazy_list β := sorry /-- "Attach" the proof that the elements of `l` are in `l` to produce a new `lazy_list` with the same elements but in the type `{x // x ∈ l}`. -/ def attach {α : Type u_1} (l : lazy_list α) : lazy_list (Subtype fun (x : α) => x ∈ l) := pmap Subtype.mk l sorry protected instance has_repr {α : Type u_1} [has_repr α] : has_repr (lazy_list α) := has_repr.mk fun (xs : lazy_list α) => repr (to_list xs) end Mathlib
Starting June 6th, 2008, Java California will no longer be opened for business. They are currently selling their inventory (including Torani and chocolate syrup, tea, and coffee) for a discounted price. They will be opened for limited hours everyday until the final closing day. This was a great place. It was no longer open 24 hours, but from 5am to 1am. However, they said they would stay open later if students are still there studying. For a night owl, it was an oasis in the desert of closed businesses. During the summer they closed earlier, which isnt surprising if youve seen the absence of people there during the summer. They were open on all holidays too. They had a Buy 10, Get the 11th drink free card. History Java California was in business for 8 or 9 years, and was under the management of John for about three and a half years. John sold Java California some time after 20070306. The shop continued to be open 24 hours and the last owners were Larry and Users/LoriLuporini. Coffee Shop This was an awesome place to study and hang out! One might have wanted to bring a jacket, it was colder than outside. Like a lot of coffee shops, it had a few bookshelves full of books that you could look at (it turned out that these were for sale, but in store perusal was OK), and it had a shelf full of board games that you could play on the spot. There was a large bookshelf full of games running from Risk to Jenga, including a copy of Davisopoly, which was created by John, the former owner of Java California. Additionally, it had some couches and comfortable arm chairs, etc. Like a lot of places in Davis, it had free WiFi. Unlike a lot of places in Davis, there were plenty of outlets to plug in your computer. Java was broken down into three areas with unwritten rules of noise level. In the main area where you enter and order your drinks, being louder and chatting with your studybuddies was ok. 
The second area had the couches, bookshelves and bathroom; people tended to be quieter in this area but briefly discussing with friends was ok. The third area was the meeting room; here everyone was quiet just like it is the library. Please remember to shut the door between the meeting room and the second area. Cool Extras Beyond these normal coffee shop amenities, Java California also had a business center and meeting room. The business center consisted mainly of a copy machine and some tools like a stapler, a papercutter and a light table. The copy machine made black and white copies for $0.08 and color copies for $0.80. (it worked 60% of the time.) The meeting room was a large room with about 10 folding tables and 40 wheeled business chairs. When it wasnt reserved for meetings, it was open for studying and made a great study room. Most nights, people were very quiet in this room and it felt like you are at the library. The meeting room also had a small whiteboard and a TV/VCR combo. Finally, they provided a PA for free if you needed it. The meeting room was free to use as long as you were using it for nonprofit purposes. They even had an occasional LAN party. As a note to studiers, you should have checked the schedule before you got too comfortable in the room, because you could be kicked out by people that scheduled it. During finals, the room could be reserved for studying. The schedule for the meeting room was posted from time to time (maybe weekly?) on the Craigslist events calender (try a http://sacramento.craigslist.org/cgibin/search?areaID12&queryjava+california&cateee search for Java California). To find the right posting, just look for events in Dixon. You were allowed to bring in outside food and drink, were encouraged to hang out, and about the only thing you could do that is against the rules was bringing in Starbucks they had the rules from other coffee shops hanging on the wall with each rule marked Okay with us!. The meeting room. 
The Product The coffee was pretty good, and they had an extensive menu. This included several specialty mochas and 9 flavors of whipped cream. The whipped cream was made with real cream (and WhipIt). In addition, they had a variety of snacks from donuts and pastries to Cup oNoodles and ChefBoyardee ravioli. Recommended: Whipped Cream Raspberry tea Lemon Ginger tea Caramel anything (they use real caramel) Mint Chip Chiller (or any blended drink) No Carb Blended Mocha (make sure its blended... good even if you dont care about carbs, simply because its less sweet and more rich and ice creamy) Krispy Kreme doghnuts Hot Cup O Noodles $1 Gone now.. but not forgotten They used to (until 2007 with the new management) have two internet connected computers that could be used free of charge, as well as a play area. There used to be a small play area stocked with toys, a gumball machine, and a bouncy ball machine. It has been replaced by three oneperson desks, with a pencil sharpener. Unfortunately, some people feel that this place scores low in ambiance. They say that its flooded by flourescent lights, you can hear the Interstate 80 I80 traffic, the furniture is cheap, and theres all kinds of crap lying around, as if it were a childs nursery. I feel compelled to note that Ive been going to JC for quite a while now, and Im a semiregular night customer there. I see and meet a lot of other people who go there either regularly or just occasionally, and the only complaint I ever really hear of is overcrowding. Most people like the atmosphere Johns set up, and like the fact that it has a smalltown independent sort of feel. Its hard to complain about the overcrowding even, because it just means that the place is doing well and will probably be around for quite a while (and honestly, its mainly only overcrowded during finals and midterms at UCD). Users/EricKlein VERY cool place. Users/JabberWokky This place is actually quite nice. 
Ive been there several times in the middle of the night to either hang out or work. As said above, the coffee is good, but the atmosphere is what makes it. The staff is friendly, and the free power/wifi coupled with the coffee and food selection makes it possible to stay there working for very long periods of time. Unlike a lot of places, this place is so cool, I WANT to spend money there, to support them for being such a great place to go. My only complaint is that theyre in Dixon, rather than Davis, so biking out there in the middle of the night isnt as convenient as if it were in Davis. Users/EricKlein This place is great, as long as youre not a punk to the owner. He used to host live music Friday or Saturday nights. My brothers band used to play there all the time until the crowd got to be too much. (You just cant control metalheads under 18.) But last I heard they still have great music on some weekend night. Further and more detailed information would be available on the white boards in the meeting room. Users/MichelleAccurso 20050724 18:42:31 nbsp Friendly people, good drinks. Dont forget to tip. Users/IrenePark 20050819 14:20:46 nbsp If you go in there soon, make sure you congratulate the owner on the birth of his first grandson! Users/JessicaLuedtke 20051003 13:02:20 nbsp This is an awesome place. Its very comforting when finals are rolling around and you need to learn a quarters worth of material out of one bloodshot eye. The owner is always pleasant and nice. Ive only been here once during the day to use their desktop computer for a long time when my internet was out of action in the summer and it was even more enjoyable in the day time than at night. Its nice to see a small shop succeed and deserve it too. Users/LiRic 20051130 16:56:28 nbsp All businesses should be modeled after Java California Users/GiladGurantz 20051215 01:11:03 nbsp I came here once over the summer for the very first time. 
Since Fall quarter started, I have been here studying w/ my roomate. The manager is super nice and funny. I really wish there are more coffee place like this one. The staff here are awesome as well. I enjoy the study area here especially the coffee. Users/AlexandraLiu 20060116 00:53:11 nbsp Hey, isnt that guy in the big kids room the Math Departments own Spyridon Michalakis? Users/RoyWright 20060509 10:48:47 nbsp By maintaining the community room, Java California is providing an essential service to the city. If for no other reason the shop should be patronized. But happily, Java Californias house coffee is the best tasting in town. Solano Bakings home brew is watery and tasteless. Starbucks sells the usual overpriced and harsh concoction. Users/PeteBasofin 20060610 23:35:26 nbsp I LOVE JAVA CALIFORNIA. I am entirely too busy during the day to make it to another cafe to study...esp if it closes 2 hours after I get there. I was there so much during winter quarter 2006 that I got onto the Deans list for the first time during my four years here. NO JOKE! ) John is such a nice guy...gives out free food to starving and studying students once in awhile and he even reserves his large meeting rooms for finals! And I really really love the mocha chiller...its freakin perfect! Users/TereseSantos 20060611 08:35:32 nbsp I love this place. Working on the IAJ over Spring this quarter I spent two allnighters there with my home PC. They are great hosts, with great food (and better service), and best of all, a lot of outlets. BEST PLACE TO STUDY Users/AlvinTsao 20060807 17:12:44 nbsp This is the place to find the absolutely BEST caramel macchiato!! Users/CissiePerkins 20061024 23:28:51 nbsp Java California is awesome! I can see myself spending many a late night here. Big, comfy study areas, wifi, and an unlimited cup of coffee? Count me in. The only problems are that its amazingly cold here and the noise from the traffic can be distracting. 
Still, mad props for this wellplanned business. Now if youll excuse me I have to get back to studying :) Users/SteveKent 20061031 21:26:15 nbsp In short, the cafe is great and the owner is awesome. On at least three occasions that Ive gone there at night, hes tossed out jokes and given out freebies to cut down stress. Users/DanAlcantara 20061108 14:30:21 nbsp I love studying here. I just curl up on one of the reclining couches sipping my hot tea and study. Johns a really nice guy and if youre there at the right time, hell give away some pastries. Users/JoAnnaRich 20061114 15:12:49 nbsp Just for your amusement: a list of things John did when bored during our NaNoWriMo writein: http://fullmoon.typepad.com/nanowrimo/2006/11/i_love_java_cal.html Users/JenniferRutherford 20070223 00:00:02 nbsp I was there a couple nights ago. John stopped by and told some painfully bad jokes that we all still managed to laugh at. After he left, about 2030 minutes later, an older woman walked in asking to take pictures of us studying. She announced that she is BUYING Java California and needs to pictures for some business somethingorother... I guess its been sold. Users/JesseSingh 20070223 05:26:04 nbsp WHAT!!??!?!?!! Users/JabberWokky 20070223 12:32:12 nbsp Yep, thats exactly what I thought. Someone in the room pleaded Dont change anything! to which the forthcoming new manager said Im not going to change anything, why would I? But I got the feeling she was just patronizing us. I should also note that, while the study room was full that night, maybe 40% of the people there walked into Java Cal from the other entrance and just plopped down on the table, without buying a drink. No wonder John has to sell it. Users/JesseSingh 20070224 23:55:17 nbsp LOVE this place. I kind of wish others didnt love it quite so much, so it could be a little less crowded. Users/MattHh 20070225 17:30:23 nbsp ( Alas, next week, John will no longer be the owner. 
I was there studying earlier today and it is confirmed ( Probably one of the last times Ill see John and get to hear the funny jokes. Users/JoAnnaRich 20070225 19:26:48 nbsp It would really be cool if somebody could get one of those big cards and have all the regulars sign it as a goodbye gift. Johns a rather unique character, and hes one hell of a good guy. Other than making people suffer through his jokes, he honestly cares about his customers. Users/JabberWokky 20070228 13:33:56 nbsp Best place to study ever! Totally worth the drive. Only thing that sucks is when theres tons of people and when theres really annoying girls who talk too loud. People are trying to study, geniuses... and that sucks that John isnt the owner anymore! Users/CourageCowardlyDog 20070320 14:38:01 nbsp Thanks to all you FAITHFUL and LOYAL customers who have been coming to my brother Johns Store these years that he has owned it, and thanks for indulging him his jokes! : ) I know it touches John that you have been there with him, through thick and thin, and no matter what, you have been loyal and his friend. Since I now live in So. CA I dont get to the store that often, but it pleases me and warms my heart to see what John has created for himself, the community, and YOU, each of you. I know it will different without John there at the helm, and it was not an easy choice for him to make. Join me in sincerest wishes that John will now pass on to the next place in life he belongs. And when you do see him, give him a BIG BEAR HUG from me, his Sis, Cindi. God bless you all. Users/CindiWatermanBissellKidd 20070328 14:41:51 nbsp Thank you to all who support Java California. We are the new owners, Larry and Lori Luporini Users/LoriLuporini 20070505 13:08:48 nbsp Its ok, the person behind the counter is super slow. Its nice that it isnt as stressful as say mishkas, but not really worth the drive. Sac has some better coffeeshops. Users/AmyGoogenspa 20070513 23:23:35 nbsp Oh man. 
I went back to Davis this weekend for a visit, an naturally I thought Id hit up good ol JC... What a disappointing experience. Whats with all the fake mahogany furniture? And the rooster lamps and other kitchy junk for sale? No outside food or drink, WTF? And no more computers! I couldnt bring myself to buy anything so I cant testify to the food/drink. I dont believe Ill go there again... I wish I had spent more money there when John was the owner. Users/MaryLieth 20070515 02:53:02 nbsp Maybe people bringing in outside food and drink is one of the reasons John had to sell. Users/JesseSingh 20070522 03:25:00 nbsp As much as I enjoy walking it out and partying like a rockstar...needs less loud fm 103.5 late at night. Otherwise this place is cool, the annoying giggling conversations die down at about 1:30am, then you can really study uninterupted. The coffee is good too, and the bathroom door sounds like a fart. Users/IanBenton 20070625 21:42:02 nbsp Bathroom smells like death and its always either too hot or too cold, but the coffee and studying atmosphere is great especially late at night. Lori is really nice and gave me a kahlua blast chiller for free (she had created it that day and wanted someone to try it). Its soo good! Everyone should remember to at least buy one drink when studying or hanging out, so they can stay open 24 hours and keep wireless free. Users/MandiPeterson 20071104 10:30:20 nbsp I dont believe this place is open 24 hours anymore. According to their hours sign on their window, they close at 1am almost every day. Ive been here several times, making a trip out here from Davis just for a good quiet place to study. Their meeting room is probably the best place and hopefully, there arent groups back there chit chatting if you needed it to be quiet. The owner is affable and also offers some sort of art class there as well. Id say, if you cant find a place to study in Davis, then give this place a try. 
(oh, and i usually get the red eye) Users/at86 I just called them and their official hours are 5am to 1am now. They said that if students are still there studying, they will stay open late. Users/apalley 20071117 21:51:18 nbsp Owners came in and said we cant bring in outside food and drink anymore. Good place to study in the meeting room thats a lot more welcoming, than say the 24 hour room, although it feels suspiciously like a campus lounge. Okay food/drinks. Users/BessieChu 20071213 21:10:00 nbsp I can attest to what apalley said I was there earlier this week and they will stay open as long as there are customers. Interesting literature to read (and for sale as well). Users/AndrewLeonard 20080525 23:10:11 nbsp Im at Java CA right now. I still love this place. Hot chocate with caramel and white chocolate whipped cream is delicious. And the wifi is locked unless you buy something so thats cool. And finally, the people behind the counter are always interesting and personable. I always find myself chatting with the person behind the counter. Users/MichelleAccurso 20080526 09:36:09 nbsp i have been kicked out at around 11 for closing, there were only about 3 people there, but it would be nice if they had set hours that they definitely would not close until unless there is no one there. Just my opinion, but i understand that they are a business and if no one is ordering anything anymore, they probably close to be cost effective. Users/MattHh 20080528 21:23:45 nbsp Im sorry to hear that this isnt open 24 hours anymore. It was a great spot to study late at night/early in the morning when I was a student. Users/PS2 20080531 18:48:12 nbsp This is definitely my favorite place to study. The big meeting room is great for studying and really sprawling out with your books / laptop / materials. Im not a coffee drinker, but I always order an iced chai tea latte with almond roca whipped cream (highly recommended their flavored whipped cream is amazing!) 
and now they also have a great chai frappe. The employees are great. Very friendly and personable, and if you ask you can get a Java Card which theyll stamp with each purchase after 10 drinks youll get a free one. They always have fun stuff going on, like little community bands coming to play. Not the most convenient for some Davis students, but I prefer it this way as it usually is not too crowded with students. Users/ChrissyNoble 20080603 21:03:49 nbsp I want to be the first to say that I am crushed that Java CA will be closing their doors. When I spoke with the owners son today he told me the main reason why their closing is that the costs for supplies are becoming far too expensive. I had some great study memories at this place...so sad to see it go. Users/EmilyTung 20080603 22:11:18 nbsp Oh no!! This is so sad...I will have such great memories of Java California as well. Im really going to miss it...I shed a tear when I found out its closing! Best wishes to all the Java employees. Users/ChrissyNoble 20080608 20:46:02 nbsp man this sucks, i had a lot of good studying times here too (if those exist). i even drove out there today to find a closed sign on the door. i was halfway to the starbucks when i came to my senses and went back to davis. heres wishing someone buys it and moves it to davis, they would get a lot more people in there, as the article says, in a desert of closed businesses. Users/IanBenton 20080609 11:13:52 nbsp Well there will always be the coffee at the fast and easy ... yuck! I really wish davis coffee shops would experiment with longer hours. Davis kept one alive in Dixon for what? As long as Ive been around at least (thats 8+). Seems to be enough market for some of the smaller shops in town. Users/ChristopherMckenzie 20080912 20:28:24 nbsp I have lots and lots of good memories of this coffee shop. My friends and I used to go there on a regular basis about 4 years ago when the original owner was there. He was the coolest guy. 
Then some mean lady took it over and changed the hours and changed everything and it wasn't the same. And now it's closed. I'm very sad to see it go. :( Oh, and I do agree that a business like this needs to exist in Davis and STAT. If only for those procrastinating students. A 24-hour coffee shop was invaluable to me in college. Users/SarahOsler 20091123 04:00:54 nbsp :( I feel like I missed an incredible place Users/GreatRyan
function d = month_to_nones_roman ( m )

%*****************************************************************************80
%
%% MONTH_TO_NONES_ROMAN returns the day of the nones of a Roman month.
%
%  Discussion:
%
%    In the Roman calendar the nones fall on the 7th day of March, May,
%    July and October, and on the 5th day of every other month.
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    16 March 2013
%
%  Author:
%
%    John Burkardt
%
%  Parameters:
%
%    Input, integer M, the month index, 1 through 12.
%
%    Output, integer D, the day of the nones of month M, or -1 if M is
%    not a legal month index.
%
  nones_day = [ 5, 5, 7, 5, 7, 5, 7, 5, 5, 7, 5, 5 ];

  if ( 1 <= m && m <= 12 )
    d = nones_day(m);
  else
    d = -1;
  end

  return
end
      SUBROUTINE SCHMIT(U,N,NDIM)
C
C  SCHMIT ORTHONORMALIZES THE FIRST N COLUMNS OF U IN PLACE USING
C  THE GRAM-SCHMIDT PROCESS, WITH REPEATED RE-PROJECTION PASSES TO
C  CONTROL ROUND-OFF ERROR.
C
C  ON INPUT
C     U    = MATRIX WHOSE FIRST N COLUMNS ARE THE VECTORS (EACH OF
C            LENGTH N) TO BE ORTHONORMALIZED.
C     N    = NUMBER OF VECTORS AND THEIR LENGTH.
C     NDIM = DECLARED LEADING DIMENSION OF U.
C
C  ON OUTPUT
C     U    = THE FIRST N COLUMNS FORM AN ORTHONORMAL SET.  A COLUMN
C            FOUND TO BE NUMERICALLY DEPENDENT ON ITS PREDECESSORS
C            IS REPLACED BY A (RE-ORTHOGONALIZED) UNIT VECTOR, SEE
C            LABEL 100.
C
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      INCLUDE 'SIZES'
      DIMENSION U(NDIM,NDIM)
C
C  SMALL (0.01) IS THE SQUARED-NORM THRESHOLD BELOW WHICH A
C  PROJECTED COLUMN IS CONSIDERED TOO SMALL AND IS EITHER
C  RE-PROJECTED OR, AFTER MORE THAN TWO PASSES, DECLARED
C  LINEARLY DEPENDENT.
C
      DATA ZERO,SMALL,ONE/0.0,0.01,1.0/
C
C  II COUNTS THE UNIT VECTORS SUBSTITUTED FOR DEPENDENT COLUMNS;
C  IT ALSO SELECTS WHICH COMPONENT IS SET TO ONE AT LABEL 100.
C
      II=0
      DO 110 K=1,N
      K1=K-1
C
C NORMALIZE KTH COLUMN VECTOR
C
      DOT = ZERO
      DO 10 I=1,N
   10 DOT=DOT+U(I,K)*U(I,K)
      IF(DOT.EQ.ZERO) GO TO 100
      SCALE=ONE/SQRT(DOT)
      DO 20 I=1,N
   20 U(I,K)=SCALE*U(I,K)
C
C  THE FIRST COLUMN (K1 = 0) NEEDS NO PROJECTION STEP.
C
   30 IF(K1.EQ.0) GO TO 110
      NPASS=0
C
C PROJECT OUT K-1 PREVIOUS ORTHONORMAL VECTORS FROM KTH VECTOR
C
   40 NPASS=NPASS+1
      DO 70 J=1,K1
      DOT=ZERO
      DO 50 I=1,N
   50 DOT=DOT+U(I,J)*U(I,K)
      DO 60 I=1,N
   60 U(I,K)=U(I,K)-DOT*U(I,J)
   70 CONTINUE
C
C SECOND NORMALIZATION (AFTER PROJECTION)
C IF KTH VECTOR IS SMALL BUT NOT ZERO THEN NORMALIZE
C AND PROJECT AGAIN TO CONTROL ROUND-OFF ERRORS.
C
      DOT=ZERO
      DO 80 I=1,N
   80 DOT=DOT+U(I,K)*U(I,K)
      IF(DOT.EQ.ZERO) GO TO 100
C
C  A RESIDUAL STILL SMALL AFTER MORE THAN TWO PASSES IS TREATED
C  AS LINEAR DEPENDENCE.
C
      IF(DOT.LT.SMALL.AND.NPASS.GT.2) GO TO 100
      SCALE=ONE/SQRT(DOT)
      DO 90 I=1,N
   90 U(I,K)=SCALE*U(I,K)
      IF(DOT.LT.SMALL) GO TO 40
      GO TO 110
C
C REPLACE LINEARLY DEPENDENT KTH VECTOR BY A UNIT VECTOR.
C
C  ONLY COMPONENT II IS SET TO ONE; THE JUMP BACK TO LABEL 30 THEN
C  RE-ORTHOGONALIZES THE REPLACEMENT COLUMN AGAINST ITS
C  PREDECESSORS.  NOTE(REVIEW): THE GUARD AGAINST II EXCEEDING N
C  IS COMMENTED OUT BELOW -- CONFIRM THAT CALLERS CANNOT PASS MORE
C  DEPENDENT COLUMNS THAN N.
C
  100 II=II+1
C IF(II.GT.N) STOP
      U(II,K)=ONE
      GO TO 30
  110 CONTINUE
      RETURN
      END
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf import os from decoder import tf_decoder, op_decoder, init_op_cache, init_tf_cache from common import create_initializer, _get_shape_invariants from utils.position import SinusoidalPositionEncoder def initialize_decoding_variables(decoding_args): start_ids = tf.fill([decoding_args.decoder_args.batch_size * decoding_args.decoder_args.beam_width], decoding_args.start_id) # [batch_size * beam_width] step = tf.constant(0, dtype=tf.int32) # save the output ids for each step outputs = tf.TensorArray(tf.int32, size=0, dynamic_size=True) cache = init_tf_cache(decoding_args.decoder_args.batch_size * decoding_args.decoder_args.beam_width, decoding_args.decoder_args.head_num, decoding_args.decoder_args.size_per_head, decoding_args.decoder_args.num_layer, dtype=decoding_args.decoder_args.dtype, num_sources=1) finished = tf.zeros([decoding_args.decoder_args.batch_size * decoding_args.decoder_args.beam_width], dtype=tf.bool) # [batch_size * beam_width], record that a sentence is finished or not initial_log_probs = tf.cast(tf.tile([0.] 
+ [-float("inf")] * (decoding_args.decoder_args.beam_width - 1), [decoding_args.decoder_args.batch_size]), dtype=tf.float32) # [batch_size * beam_width] # [batch_size * beam_width], record the lengths of all sentences sequence_lengths = tf.zeros( [decoding_args.decoder_args.batch_size * decoding_args.decoder_args.beam_width], dtype=tf.int32) # record the beam search indices, used for rebuild the whole sentence in the final parent_ids = tf.TensorArray(tf.int32, size=0, dynamic_size=True) extra_vars = tuple([parent_ids, sequence_lengths]) return start_ids, step, outputs, cache, finished, initial_log_probs, sequence_lengths, extra_vars def generate_encoder_result(batch_size, max_seq_len, memory_hidden_dim, dtype): memory_sequence_length = np.random.randint( 1, max_seq_len + 1, size=batch_size).astype(np.int32) outter_embbeding = np.random.randn(memory_hidden_dim) * 0.01 memory = [] for i in range(batch_size): data = np.random.randn(max_seq_len, memory_hidden_dim) * 0.01 for j in range(memory_sequence_length[i], max_seq_len): data[j] = outter_embbeding memory.append(data) memory = np.asarray(memory) memory = tf.convert_to_tensor(memory, dtype=dtype) return memory, memory_sequence_length def beam_search(beam_width, vocab_size, step, log_probs, cum_log_probs, finished, cache, extra_vars, op_self_cache=None): parent_ids = extra_vars[0] sequence_lengths = extra_vars[1] # [batch_size * beam_width, vocab_size] + [batch_size * beam_width], has to broadcast total_probs = log_probs + tf.expand_dims(cum_log_probs, 1) # [batch_size, beam_width * vocab_size], can skip in cuda total_probs = tf.reshape(total_probs, [-1, beam_width * vocab_size]) # both shapes are: [batch_size, beam_width] _, sample_ids = tf.nn.top_k(total_probs, beam_width) # [batch_size * beam_width], can skip in cuda sample_ids = tf.reshape(sample_ids, [-1]) word_ids = sample_ids % vocab_size # [batch_size * beam_width] beam_ids = sample_ids // vocab_size # [batch_size * beam_width] # [batch_size * beam_width] 
beam_indices = ( tf.range(sample_ids.shape[0]) // beam_width) * beam_width + beam_ids sequence_lengths = tf.where( finished, x=sequence_lengths, y=sequence_lengths + 1) # [batch_size * beam_width] batch_pos = tf.range(sample_ids.shape[0]) // beam_width cum_log_probs = tf.gather_nd(total_probs, tf.stack( [batch_pos, sample_ids], axis=-1)) # [batch_size * beam_width] finished = tf.gather(finished, beam_indices) sequence_lengths = tf.gather(sequence_lengths, beam_indices) cache = tf.contrib.framework.nest.map_structure( lambda s: tf.gather(s, beam_indices), cache) if op_self_cache != None: op_self_cache = tf.contrib.framework.nest.map_structure( lambda s: tf.gather(s, beam_indices, axis=3), op_self_cache) parent_ids = parent_ids.write(step, beam_ids) extra_vars = [parent_ids, sequence_lengths] return word_ids, cum_log_probs, finished, cache, tuple(extra_vars), op_self_cache def finalize(beam_width, parent_ids, sequence_lengths, outputs, end_id, max_seq_len=None): maximum_lengths = tf.reduce_max(tf.reshape( sequence_lengths, [-1, beam_width]), axis=-1) if max_seq_len != None: array_shape = [max_seq_len, -1, beam_width] else: array_shape = [maximum_lengths[0], -1, beam_width] step_ids = tf.reshape(outputs, array_shape) parent_ids = tf.reshape(parent_ids, array_shape) ids = tf.contrib.seq2seq.gather_tree( step_ids, parent_ids, maximum_lengths, end_id) ids = tf.transpose(ids, perm=[1, 2, 0]) lengths = tf.not_equal(ids, end_id) lengths = tf.cast(lengths, tf.int32) lengths = tf.reduce_sum(lengths, axis=-1) return ids, lengths def op_decoding(memory_tensor, memory_sequence_length, embedding_table, decoding_vars, decoding_args): decoding_op_module = tf.load_op_library( os.path.join('./lib/libtf_decoding.so')) val_off = 26 decoding_vars_in_differ_layers = [] for i in range(val_off): par = [] for j in range(decoding_args.decoder_args.num_layer): par.append(decoding_vars[i + j * val_off]) decoding_vars_in_differ_layers.append(par) extended_memory = tf.contrib.seq2seq.tile_batch( 
memory_tensor, multiplier=decoding_args.decoder_args.beam_width) extended_memory_sequence_length = tf.contrib.seq2seq.tile_batch( memory_sequence_length, multiplier=decoding_args.decoder_args.beam_width) output_ids, parent_ids, sequence_lengths = decoding_op_module.decoding( extended_memory, extended_memory_sequence_length, decoding_vars_in_differ_layers[0], decoding_vars_in_differ_layers[1], decoding_vars_in_differ_layers[2], decoding_vars_in_differ_layers[3], decoding_vars_in_differ_layers[4], decoding_vars_in_differ_layers[5], decoding_vars_in_differ_layers[6], decoding_vars_in_differ_layers[7], decoding_vars_in_differ_layers[8], decoding_vars_in_differ_layers[9], decoding_vars_in_differ_layers[10], decoding_vars_in_differ_layers[11], decoding_vars_in_differ_layers[12], decoding_vars_in_differ_layers[13], decoding_vars_in_differ_layers[14], decoding_vars_in_differ_layers[15], decoding_vars_in_differ_layers[16], decoding_vars_in_differ_layers[17], decoding_vars_in_differ_layers[18], decoding_vars_in_differ_layers[19], decoding_vars_in_differ_layers[20], decoding_vars_in_differ_layers[21], decoding_vars_in_differ_layers[22], decoding_vars_in_differ_layers[23], decoding_vars_in_differ_layers[24], decoding_vars_in_differ_layers[25], decoding_vars[-4], decoding_vars[-3], embedding_table, decoding_vars[-2], tf.cast(decoding_vars[-1], dtype=tf.float32), batch_size=decoding_args.decoder_args.batch_size, beam_width=decoding_args.decoder_args.beam_width, max_seq_len=decoding_args.decoder_args.max_seq_len, head_num=decoding_args.decoder_args.head_num, size_per_head=decoding_args.decoder_args.size_per_head, num_layer=decoding_args.decoder_args.num_layer, memory_hidden_dim=decoding_args.encoder_hidden_dim, vocab_size=decoding_args.vocab_size, start_id=decoding_args.start_id, end_id=decoding_args.end_id ) parent_ids = parent_ids % decoding_args.decoder_args.beam_width finalized_output_ids, finalized_sequence_lengths = finalize(decoding_args.decoder_args.beam_width, 
parent_ids, sequence_lengths, output_ids, decoding_args.end_id, decoding_args.decoder_args.max_seq_len) finalized_sequence_lengths = tf.minimum( finalized_sequence_lengths + 1, tf.shape(finalized_output_ids)[2]) return finalized_output_ids, finalized_sequence_lengths, output_ids, parent_ids, sequence_lengths def tf_decoding(memory_tensor, memory_sequence_length, embedding_table, decoding_args, decoder_type, kernel_initializer_range, bias_initializer_range, atol_threshold=1e-6): with tf.variable_scope("transformer/decoding", reuse=tf.AUTO_REUSE): # copy memory and memory_sequence_length by beam_width times # if memory is [a, b, c], beam_width = 3, then the result is: [a a a b b b c c c ] extended_memory = tf.contrib.seq2seq.tile_batch( memory_tensor, multiplier=decoding_args.decoder_args.beam_width) extended_memory_sequence_length = tf.contrib.seq2seq.tile_batch( memory_sequence_length, multiplier=decoding_args.decoder_args.beam_width) def _cond(word_ids, cum_log_probs, finished, step, outputs, my_cache, extra_vars, op_self_cache, op_mem_cache): return tf.reduce_any(tf.logical_not(finished)) def _body(word_ids, cum_log_probs, finished, step, outputs, my_cache, extra_vars, op_self_cache, op_mem_cache): # [batch_size * beam_width, hidden_dim] inputs = tf.nn.embedding_lookup(embedding_table, word_ids) # [batch_size * beam_width, 1, hidden_dim] inputs = tf.expand_dims(inputs, 1) inputs *= decoding_args.decoder_args.hidden_dim**0.5 position_encoder = SinusoidalPositionEncoder() if position_encoder is not None: inputs = position_encoder( inputs, position=step + 1 if step is not None else None) with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE): tf_result = tf_decoder(decoder_args=decoding_args.decoder_args, inputs=inputs, memory=extended_memory, memory_sequence_length=extended_memory_sequence_length, step=step, cache=my_cache, kernel_initializer_range=kernel_initializer_range, bias_initializer_range=bias_initializer_range) if decoder_type != 0: decoder_vars = 
tf.global_variables() decoder_vars_start_id = 0 while decoder_vars_start_id < len(decoder_vars): if decoder_vars[decoder_vars_start_id].name.find("transformer/decoding/decoder") != -1: break decoder_vars_start_id += 1 decoder_vars = decoder_vars[decoder_vars_start_id:] psuedo_input = [] if decoder_type == 2: psuedo_input = tf_result op_result, op_self_cache, op_mem_cache = op_decoder(inputs, step, extended_memory, extended_memory_sequence_length, op_self_cache, op_mem_cache, psuedo_input, decoder_vars, decoding_args.decoder_args, decoding_args.encoder_hidden_dim) result = None if decoder_type == 0: result = tf_result elif decoder_type == 1: result = op_result elif decoder_type == 2: result = tf_result result_2 = op_result flatten_result = tf.reshape(result, [-1]) flatten_result_2 = tf.reshape(result_2, [-1]) abs_diff = tf.math.abs(flatten_result - flatten_result_2) argmax = tf.math.argmax(abs_diff) result = tf.Print(result, ["[INFO][PYTHON] step:", step, "max diff: ", abs_diff[argmax], " op val: ", flatten_result_2[argmax], " tf val: ", flatten_result[argmax], tf.cond(abs_diff[argmax] < atol_threshold, lambda: "True", lambda: "False")]) else: print("[TF][ERROR] decoder type is only 0 or 1 or 2.") exit(-1) result = tf.contrib.layers.layer_norm(result, begin_norm_axis=-1) # [batch_size * beam_width, hidden_dim] result = tf.squeeze(result, axis=1) logits = tf.layers.dense(result, decoding_args.vocab_size, use_bias=True, bias_initializer=create_initializer( bias_initializer_range, decoding_args.decoder_args.dtype), kernel_initializer=create_initializer( kernel_initializer_range, decoding_args.decoder_args.dtype), activation=None) end_ids = tf.fill([decoding_args.decoder_args.batch_size * decoding_args.decoder_args.beam_width], decoding_args.end_id) # [batch_size * beam_width] eos_max_prob = tf.one_hot(end_ids, decoding_args.vocab_size, on_value=decoding_args.decoder_args.dtype.max, off_value=decoding_args.decoder_args.dtype.min) # [batch_size * beam_width, vocab_size] 
# [batch_size * beam_width, vocab_size] logits = tf.where(finished, x=eos_max_prob, y=logits) logits = tf.cast(logits, tf.float32) # [batch_size * beam_width, vocab_size] log_probs = tf.nn.log_softmax(logits) output_id, next_cum_log_probs, finished, my_cache, \ extra_vars, op_self_cache = beam_search(decoding_args.decoder_args.beam_width, decoding_args.vocab_size, step, log_probs, cum_log_probs, finished, my_cache, extra_vars, op_self_cache) outputs = outputs.write(step, output_id) cum_log_probs = tf.where( finished, x=cum_log_probs, y=next_cum_log_probs) finished = tf.logical_or(finished, tf.equal( output_id, decoding_args.end_id)) return output_id, cum_log_probs, finished, step + 1, outputs, my_cache, extra_vars, op_self_cache, op_mem_cache # initialization start_ids, step, outputs, tf_decoder_cache, finished, initial_log_probs, \ tf_sequence_lengths, extra_vars = initialize_decoding_variables( decoding_args) word_ids = tf.identity(start_ids, name="word_ids") cum_log_probs = tf.identity(initial_log_probs, name="cum_log_probs") # if use_op == False, these two caches are useless op_self_cache, op_mem_cache = init_op_cache(decoding_args.decoder_args) _, _, _, _, outputs, _, extra_vars, _, _ = tf.while_loop( _cond, _body, loop_vars=( word_ids, cum_log_probs, finished, step, outputs, tf_decoder_cache, extra_vars, op_self_cache, op_mem_cache ), back_prop=False, maximum_iterations=decoding_args.decoder_args.max_seq_len, shape_invariants=( start_ids.shape, initial_log_probs.shape, finished.shape, step.shape, tf.TensorShape(None), tf.contrib.framework.nest.map_structure( _get_shape_invariants, tf_decoder_cache), tf.contrib.framework.nest.map_structure( _get_shape_invariants, extra_vars), tf.contrib.framework.nest.map_structure( _get_shape_invariants, op_self_cache), tf.contrib.framework.nest.map_structure(_get_shape_invariants, op_mem_cache)) ) tf_parent_ids = extra_vars[0].stack() tf_sequence_lengths = extra_vars[1] tf_output_ids = outputs.stack() 
finalized_tf_output_ids, finalized_tf_sequence_lengths = finalize(decoding_args.decoder_args.beam_width, tf_parent_ids, tf_sequence_lengths, tf_output_ids, decoding_args.end_id) finalized_tf_output_ids = tf.cast( finalized_tf_output_ids, start_ids.dtype) finalized_tf_sequence_lengths = tf.minimum( finalized_tf_sequence_lengths + 1, tf.shape(finalized_tf_output_ids)[2]) return finalized_tf_output_ids, finalized_tf_sequence_lengths, tf_output_ids, tf_parent_ids, tf_sequence_lengths
State Before: R : Type u S : Type v inst✝¹ : CommRing R inst✝ : CommRing S x y : PrimeSpectrum R ⊢ x ≤ y ↔ y ∈ closure {x} State After: no goals Tactic: rw [← asIdeal_le_asIdeal, ← zeroLocus_vanishingIdeal_eq_closure, mem_zeroLocus, vanishingIdeal_singleton, SetLike.coe_subset_coe]
[STATEMENT] lemma AL_emp1: "emp\<down>a = None" [PROOF STATE] proof (prove) goal (1 subgoal): 1. emp\<down>a = None [PROOF STEP] by (simp add: emp_def)
Formal statement is: lemma mpoly_sub_conv: fixes x :: "'a::comm_ring_1" shows "poly p x - poly q x = poly p x + -1 * poly q x" Informal statement is: For any polynomial $p$ and $q$ and any ring element $x$, $p(x) - q(x) = p(x) + (-1)q(x)$.
module LastSeen public export record LastSeen where constructor CreateLastSeen time : Integer platform : Integer
So here are the rules to enter! Make sure you are a fan of my page, then go check out Summit Spice and Tea. Show them some love! Then come back here and leave a comment on this post to enter. Please spread the love and support local by sharing this post on your page! Just come back and leave a second comment that you shared it on your FB page (extra entry for sharing 😀 ). If you don’t have FB just leave a comment here 😀 Easy Peasy! Comments close at 9 PM and I will I will randomly draw the winners from the comments. Unfortunately comments after 9 can’t be counted. If you don’t see your comment right away, don’t worry, there is a spam filter and all comments must be approved 😀 Good Luck and have fun!
From Coq Require Import String List ZArith. From compcert Require Import Coqlib Integers Floats AST Ctypes Cop Clight Clightdefs. Local Open Scope Z_scope. Module Info. Definition version := "3.3"%string. Definition build_number := ""%string. Definition build_tag := ""%string. Definition arch := "x86"%string. Definition model := "32sse2"%string. Definition abi := "standard"%string. Definition bitsize := 32. Definition big_endian := false. Definition source_file := "progs/queue2.c"%string. Definition normalized := true. End Info. Definition _Q : ident := 65%positive. Definition ___builtin_ais_annot : ident := 7%positive. Definition ___builtin_annot : ident := 14%positive. Definition ___builtin_annot_intval : ident := 15%positive. Definition ___builtin_bswap : ident := 8%positive. Definition ___builtin_bswap16 : ident := 10%positive. Definition ___builtin_bswap32 : ident := 9%positive. Definition ___builtin_bswap64 : ident := 40%positive. Definition ___builtin_clz : ident := 41%positive. Definition ___builtin_clzl : ident := 42%positive. Definition ___builtin_clzll : ident := 43%positive. Definition ___builtin_ctz : ident := 44%positive. Definition ___builtin_ctzl : ident := 45%positive. Definition ___builtin_ctzll : ident := 46%positive. Definition ___builtin_debug : ident := 58%positive. Definition ___builtin_fabs : ident := 11%positive. Definition ___builtin_fmadd : ident := 49%positive. Definition ___builtin_fmax : ident := 47%positive. Definition ___builtin_fmin : ident := 48%positive. Definition ___builtin_fmsub : ident := 50%positive. Definition ___builtin_fnmadd : ident := 51%positive. Definition ___builtin_fnmsub : ident := 52%positive. Definition ___builtin_fsqrt : ident := 12%positive. Definition ___builtin_membar : ident := 16%positive. Definition ___builtin_memcpy_aligned : ident := 13%positive. Definition ___builtin_nop : ident := 57%positive. Definition ___builtin_read16_reversed : ident := 53%positive. 
Definition ___builtin_read32_reversed : ident := 54%positive. Definition ___builtin_va_arg : ident := 18%positive. Definition ___builtin_va_copy : ident := 19%positive. Definition ___builtin_va_end : ident := 20%positive. Definition ___builtin_va_start : ident := 17%positive. Definition ___builtin_write16_reversed : ident := 55%positive. Definition ___builtin_write32_reversed : ident := 56%positive. Definition ___compcert_i64_dtos : ident := 25%positive. Definition ___compcert_i64_dtou : ident := 26%positive. Definition ___compcert_i64_sar : ident := 37%positive. Definition ___compcert_i64_sdiv : ident := 31%positive. Definition ___compcert_i64_shl : ident := 35%positive. Definition ___compcert_i64_shr : ident := 36%positive. Definition ___compcert_i64_smod : ident := 33%positive. Definition ___compcert_i64_smulh : ident := 38%positive. Definition ___compcert_i64_stod : ident := 27%positive. Definition ___compcert_i64_stof : ident := 29%positive. Definition ___compcert_i64_udiv : ident := 32%positive. Definition ___compcert_i64_umod : ident := 34%positive. Definition ___compcert_i64_umulh : ident := 39%positive. Definition ___compcert_i64_utod : ident := 28%positive. Definition ___compcert_i64_utof : ident := 30%positive. Definition ___compcert_va_composite : ident := 24%positive. Definition ___compcert_va_float64 : ident := 23%positive. Definition ___compcert_va_int32 : ident := 21%positive. Definition ___compcert_va_int64 : ident := 22%positive. Definition _data : ident := 1%positive. Definition _elem : ident := 2%positive. Definition _exit : ident := 61%positive. Definition _fifo : ident := 6%positive. Definition _fifo_empty : ident := 70%positive. Definition _fifo_get : ident := 71%positive. Definition _fifo_new : ident := 66%positive. Definition _fifo_put : ident := 69%positive. Definition _free : ident := 60%positive. Definition _h : ident := 67%positive. Definition _head : ident := 4%positive. Definition _i : ident := 73%positive. 
Definition _main : ident := 74%positive. Definition _make_elem : ident := 72%positive. Definition _malloc : ident := 59%positive. Definition _n : ident := 62%positive. Definition _next : ident := 3%positive. Definition _p : ident := 63%positive. Definition _surely_malloc : ident := 64%positive. Definition _t : ident := 68%positive. Definition _tail : ident := 5%positive. Definition _t'1 : ident := 75%positive. Definition _t'2 : ident := 76%positive. Definition _t'3 : ident := 77%positive. Definition _t'4 : ident := 78%positive. Definition f_surely_malloc := {| fn_return := (tptr tvoid); fn_callconv := cc_default; fn_params := ((_n, tuint) :: nil); fn_vars := nil; fn_temps := ((_p, (tptr tvoid)) :: (_t'1, (tptr tvoid)) :: nil); fn_body := (Ssequence (Ssequence (Scall (Some _t'1) (Evar _malloc (Tfunction (Tcons tuint Tnil) (tptr tvoid) cc_default)) ((Etempvar _n tuint) :: nil)) (Sset _p (Etempvar _t'1 (tptr tvoid)))) (Ssequence (Sifthenelse (Eunop Onotbool (Etempvar _p (tptr tvoid)) tint) (Scall None (Evar _exit (Tfunction (Tcons tint Tnil) tvoid cc_default)) ((Econst_int (Int.repr 1) tint) :: nil)) Sskip) (Sreturn (Some (Etempvar _p (tptr tvoid)))))) |}. 
Definition f_fifo_new := {| fn_return := (tptr (Tstruct _fifo noattr)); fn_callconv := cc_default; fn_params := nil; fn_vars := nil; fn_temps := ((_Q, (tptr (Tstruct _fifo noattr))) :: (_t'1, (tptr tvoid)) :: nil); fn_body := (Ssequence (Ssequence (Scall (Some _t'1) (Evar _surely_malloc (Tfunction (Tcons tuint Tnil) (tptr tvoid) cc_default)) ((Esizeof (Tstruct _fifo noattr) tuint) :: nil)) (Sset _Q (Ecast (Etempvar _t'1 (tptr tvoid)) (tptr (Tstruct _fifo noattr))))) (Ssequence (Sassign (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _head (tptr (Tstruct _elem noattr))) (Ecast (Econst_int (Int.repr 0) tint) (tptr tvoid))) (Ssequence (Sassign (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _tail (tptr (Tstruct _elem noattr))) (Ecast (Econst_int (Int.repr 0) tint) (tptr tvoid))) (Sreturn (Some (Etempvar _Q (tptr (Tstruct _fifo noattr)))))))) |}. Definition f_fifo_put := {| fn_return := tvoid; fn_callconv := cc_default; fn_params := ((_Q, (tptr (Tstruct _fifo noattr))) :: (_p, (tptr (Tstruct _elem noattr))) :: nil); fn_vars := nil; fn_temps := ((_h, (tptr (Tstruct _elem noattr))) :: (_t, (tptr (Tstruct _elem noattr))) :: nil); fn_body := (Ssequence (Sassign (Efield (Ederef (Etempvar _p (tptr (Tstruct _elem noattr))) (Tstruct _elem noattr)) _next (tptr (Tstruct _elem noattr))) (Ecast (Econst_int (Int.repr 0) tint) (tptr tvoid))) (Ssequence (Sset _h (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _head (tptr (Tstruct _elem noattr)))) (Sifthenelse (Ebinop Oeq (Etempvar _h (tptr (Tstruct _elem noattr))) (Ecast (Econst_int (Int.repr 0) tint) (tptr tvoid)) tint) (Ssequence (Sassign (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _head (tptr (Tstruct _elem noattr))) (Etempvar _p (tptr (Tstruct _elem noattr)))) (Sassign (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _tail (tptr (Tstruct _elem 
noattr))) (Etempvar _p (tptr (Tstruct _elem noattr))))) (Ssequence (Sset _t (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _tail (tptr (Tstruct _elem noattr)))) (Ssequence (Sassign (Efield (Ederef (Etempvar _t (tptr (Tstruct _elem noattr))) (Tstruct _elem noattr)) _next (tptr (Tstruct _elem noattr))) (Etempvar _p (tptr (Tstruct _elem noattr)))) (Sassign (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _tail (tptr (Tstruct _elem noattr))) (Etempvar _p (tptr (Tstruct _elem noattr))))))))) |}. Definition f_fifo_empty := {| fn_return := tint; fn_callconv := cc_default; fn_params := ((_Q, (tptr (Tstruct _fifo noattr))) :: nil); fn_vars := nil; fn_temps := ((_h, (tptr (Tstruct _elem noattr))) :: nil); fn_body := (Ssequence (Sset _h (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _head (tptr (Tstruct _elem noattr)))) (Sreturn (Some (Ebinop Oeq (Etempvar _h (tptr (Tstruct _elem noattr))) (Ecast (Econst_int (Int.repr 0) tint) (tptr tvoid)) tint)))) |}. Definition f_fifo_get := {| fn_return := (tptr (Tstruct _elem noattr)); fn_callconv := cc_default; fn_params := ((_Q, (tptr (Tstruct _fifo noattr))) :: nil); fn_vars := nil; fn_temps := ((_h, (tptr (Tstruct _elem noattr))) :: (_n, (tptr (Tstruct _elem noattr))) :: nil); fn_body := (Ssequence (Sset _h (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _head (tptr (Tstruct _elem noattr)))) (Ssequence (Sset _n (Efield (Ederef (Etempvar _h (tptr (Tstruct _elem noattr))) (Tstruct _elem noattr)) _next (tptr (Tstruct _elem noattr)))) (Ssequence (Sassign (Efield (Ederef (Etempvar _Q (tptr (Tstruct _fifo noattr))) (Tstruct _fifo noattr)) _head (tptr (Tstruct _elem noattr))) (Etempvar _n (tptr (Tstruct _elem noattr)))) (Sreturn (Some (Etempvar _h (tptr (Tstruct _elem noattr)))))))) |}. 
Definition f_make_elem := {| fn_return := (tptr (Tstruct _elem noattr)); fn_callconv := cc_default; fn_params := ((_data, tint) :: nil); fn_vars := nil; fn_temps := ((_p, (tptr (Tstruct _elem noattr))) :: (_t'1, (tptr tvoid)) :: nil); fn_body := (Ssequence (Ssequence (Scall (Some _t'1) (Evar _surely_malloc (Tfunction (Tcons tuint Tnil) (tptr tvoid) cc_default)) ((Esizeof (Tstruct _elem noattr) tuint) :: nil)) (Sset _p (Etempvar _t'1 (tptr tvoid)))) (Ssequence (Sassign (Efield (Ederef (Etempvar _p (tptr (Tstruct _elem noattr))) (Tstruct _elem noattr)) _data tint) (Etempvar _data tint)) (Sreturn (Some (Etempvar _p (tptr (Tstruct _elem noattr))))))) |}. Definition f_main := {| fn_return := tint; fn_callconv := cc_default; fn_params := nil; fn_vars := nil; fn_temps := ((_i, tint) :: (_Q, (tptr (Tstruct _fifo noattr))) :: (_p, (tptr (Tstruct _elem noattr))) :: (_t'4, (tptr (Tstruct _elem noattr))) :: (_t'3, (tptr (Tstruct _elem noattr))) :: (_t'2, (tptr (Tstruct _elem noattr))) :: (_t'1, (tptr (Tstruct _fifo noattr))) :: nil); fn_body := (Ssequence (Ssequence (Ssequence (Scall (Some _t'1) (Evar _fifo_new (Tfunction Tnil (tptr (Tstruct _fifo noattr)) cc_default)) nil) (Sset _Q (Etempvar _t'1 (tptr (Tstruct _fifo noattr))))) (Ssequence (Ssequence (Scall (Some _t'2) (Evar _make_elem (Tfunction (Tcons tint Tnil) (tptr (Tstruct _elem noattr)) cc_default)) ((Econst_int (Int.repr 1) tint) :: nil)) (Sset _p (Etempvar _t'2 (tptr (Tstruct _elem noattr))))) (Ssequence (Scall None (Evar _fifo_put (Tfunction (Tcons (tptr (Tstruct _fifo noattr)) (Tcons (tptr (Tstruct _elem noattr)) Tnil)) tvoid cc_default)) ((Etempvar _Q (tptr (Tstruct _fifo noattr))) :: (Etempvar _p (tptr (Tstruct _elem noattr))) :: nil)) (Ssequence (Ssequence (Scall (Some _t'3) (Evar _make_elem (Tfunction (Tcons tint Tnil) (tptr (Tstruct _elem noattr)) cc_default)) ((Econst_int (Int.repr 2) tint) :: nil)) (Sset _p (Etempvar _t'3 (tptr (Tstruct _elem noattr))))) (Ssequence (Scall None (Evar _fifo_put (Tfunction 
(Tcons (tptr (Tstruct _fifo noattr)) (Tcons (tptr (Tstruct _elem noattr)) Tnil)) tvoid cc_default)) ((Etempvar _Q (tptr (Tstruct _fifo noattr))) :: (Etempvar _p (tptr (Tstruct _elem noattr))) :: nil)) (Ssequence (Ssequence (Scall (Some _t'4) (Evar _fifo_get (Tfunction (Tcons (tptr (Tstruct _fifo noattr)) Tnil) (tptr (Tstruct _elem noattr)) cc_default)) ((Etempvar _Q (tptr (Tstruct _fifo noattr))) :: nil)) (Sset _p (Etempvar _t'4 (tptr (Tstruct _elem noattr))))) (Ssequence (Sset _i (Efield (Ederef (Etempvar _p (tptr (Tstruct _elem noattr))) (Tstruct _elem noattr)) _data tint)) (Ssequence (Scall None (Evar _free (Tfunction (Tcons (tptr tvoid) Tnil) tvoid cc_default)) ((Etempvar _p (tptr (Tstruct _elem noattr))) :: nil)) (Sreturn (Some (Etempvar _i tint))))))))))) (Sreturn (Some (Econst_int (Int.repr 0) tint)))) |}. Definition composites : list composite_definition := (Composite _elem Struct ((_data, tint) :: (_next, (tptr (Tstruct _elem noattr))) :: nil) noattr :: Composite _fifo Struct ((_head, (tptr (Tstruct _elem noattr))) :: (_tail, (tptr (Tstruct _elem noattr))) :: nil) noattr :: nil). 
Definition global_definitions : list (ident * globdef fundef type) := ((___builtin_ais_annot, Gfun(External (EF_builtin "__builtin_ais_annot" (mksignature (AST.Tint :: nil) None {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) (Tcons (tptr tschar) Tnil) tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) :: (___builtin_bswap, Gfun(External (EF_builtin "__builtin_bswap" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons tuint Tnil) tuint cc_default)) :: (___builtin_bswap32, Gfun(External (EF_builtin "__builtin_bswap32" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons tuint Tnil) tuint cc_default)) :: (___builtin_bswap16, Gfun(External (EF_builtin "__builtin_bswap16" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons tushort Tnil) tushort cc_default)) :: (___builtin_fabs, Gfun(External (EF_builtin "__builtin_fabs" (mksignature (AST.Tfloat :: nil) (Some AST.Tfloat) cc_default)) (Tcons tdouble Tnil) tdouble cc_default)) :: (___builtin_fsqrt, Gfun(External (EF_builtin "__builtin_fsqrt" (mksignature (AST.Tfloat :: nil) (Some AST.Tfloat) cc_default)) (Tcons tdouble Tnil) tdouble cc_default)) :: (___builtin_memcpy_aligned, Gfun(External (EF_builtin "__builtin_memcpy_aligned" (mksignature (AST.Tint :: AST.Tint :: AST.Tint :: AST.Tint :: nil) None cc_default)) (Tcons (tptr tvoid) (Tcons (tptr tvoid) (Tcons tuint (Tcons tuint Tnil)))) tvoid cc_default)) :: (___builtin_annot, Gfun(External (EF_builtin "__builtin_annot" (mksignature (AST.Tint :: nil) None {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) (Tcons (tptr tschar) Tnil) tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) :: (___builtin_annot_intval, Gfun(External (EF_builtin "__builtin_annot_intval" (mksignature (AST.Tint :: AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons (tptr tschar) (Tcons tint Tnil)) tint cc_default)) :: (___builtin_membar, Gfun(External (EF_builtin "__builtin_membar" (mksignature nil 
None cc_default)) Tnil tvoid cc_default)) :: (___builtin_va_start, Gfun(External (EF_builtin "__builtin_va_start" (mksignature (AST.Tint :: nil) None cc_default)) (Tcons (tptr tvoid) Tnil) tvoid cc_default)) :: (___builtin_va_arg, Gfun(External (EF_builtin "__builtin_va_arg" (mksignature (AST.Tint :: AST.Tint :: nil) None cc_default)) (Tcons (tptr tvoid) (Tcons tuint Tnil)) tvoid cc_default)) :: (___builtin_va_copy, Gfun(External (EF_builtin "__builtin_va_copy" (mksignature (AST.Tint :: AST.Tint :: nil) None cc_default)) (Tcons (tptr tvoid) (Tcons (tptr tvoid) Tnil)) tvoid cc_default)) :: (___builtin_va_end, Gfun(External (EF_builtin "__builtin_va_end" (mksignature (AST.Tint :: nil) None cc_default)) (Tcons (tptr tvoid) Tnil) tvoid cc_default)) :: (___compcert_va_int32, Gfun(External (EF_external "__compcert_va_int32" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons (tptr tvoid) Tnil) tuint cc_default)) :: (___compcert_va_int64, Gfun(External (EF_external "__compcert_va_int64" (mksignature (AST.Tint :: nil) (Some AST.Tlong) cc_default)) (Tcons (tptr tvoid) Tnil) tulong cc_default)) :: (___compcert_va_float64, Gfun(External (EF_external "__compcert_va_float64" (mksignature (AST.Tint :: nil) (Some AST.Tfloat) cc_default)) (Tcons (tptr tvoid) Tnil) tdouble cc_default)) :: (___compcert_va_composite, Gfun(External (EF_external "__compcert_va_composite" (mksignature (AST.Tint :: AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons (tptr tvoid) (Tcons tuint Tnil)) (tptr tvoid) cc_default)) :: (___compcert_i64_dtos, Gfun(External (EF_runtime "__compcert_i64_dtos" (mksignature (AST.Tfloat :: nil) (Some AST.Tlong) cc_default)) (Tcons tdouble Tnil) tlong cc_default)) :: (___compcert_i64_dtou, Gfun(External (EF_runtime "__compcert_i64_dtou" (mksignature (AST.Tfloat :: nil) (Some AST.Tlong) cc_default)) (Tcons tdouble Tnil) tulong cc_default)) :: (___compcert_i64_stod, Gfun(External (EF_runtime "__compcert_i64_stod" (mksignature (AST.Tlong :: nil) (Some 
AST.Tfloat) cc_default)) (Tcons tlong Tnil) tdouble cc_default)) :: (___compcert_i64_utod, Gfun(External (EF_runtime "__compcert_i64_utod" (mksignature (AST.Tlong :: nil) (Some AST.Tfloat) cc_default)) (Tcons tulong Tnil) tdouble cc_default)) :: (___compcert_i64_stof, Gfun(External (EF_runtime "__compcert_i64_stof" (mksignature (AST.Tlong :: nil) (Some AST.Tsingle) cc_default)) (Tcons tlong Tnil) tfloat cc_default)) :: (___compcert_i64_utof, Gfun(External (EF_runtime "__compcert_i64_utof" (mksignature (AST.Tlong :: nil) (Some AST.Tsingle) cc_default)) (Tcons tulong Tnil) tfloat cc_default)) :: (___compcert_i64_sdiv, Gfun(External (EF_runtime "__compcert_i64_sdiv" (mksignature (AST.Tlong :: AST.Tlong :: nil) (Some AST.Tlong) cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong cc_default)) :: (___compcert_i64_udiv, Gfun(External (EF_runtime "__compcert_i64_udiv" (mksignature (AST.Tlong :: AST.Tlong :: nil) (Some AST.Tlong) cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong cc_default)) :: (___compcert_i64_smod, Gfun(External (EF_runtime "__compcert_i64_smod" (mksignature (AST.Tlong :: AST.Tlong :: nil) (Some AST.Tlong) cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong cc_default)) :: (___compcert_i64_umod, Gfun(External (EF_runtime "__compcert_i64_umod" (mksignature (AST.Tlong :: AST.Tlong :: nil) (Some AST.Tlong) cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong cc_default)) :: (___compcert_i64_shl, Gfun(External (EF_runtime "__compcert_i64_shl" (mksignature (AST.Tlong :: AST.Tint :: nil) (Some AST.Tlong) cc_default)) (Tcons tlong (Tcons tint Tnil)) tlong cc_default)) :: (___compcert_i64_shr, Gfun(External (EF_runtime "__compcert_i64_shr" (mksignature (AST.Tlong :: AST.Tint :: nil) (Some AST.Tlong) cc_default)) (Tcons tulong (Tcons tint Tnil)) tulong cc_default)) :: (___compcert_i64_sar, Gfun(External (EF_runtime "__compcert_i64_sar" (mksignature (AST.Tlong :: AST.Tint :: nil) (Some AST.Tlong) cc_default)) (Tcons tlong (Tcons tint Tnil)) tlong 
cc_default)) :: (___compcert_i64_smulh, Gfun(External (EF_runtime "__compcert_i64_smulh" (mksignature (AST.Tlong :: AST.Tlong :: nil) (Some AST.Tlong) cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong cc_default)) :: (___compcert_i64_umulh, Gfun(External (EF_runtime "__compcert_i64_umulh" (mksignature (AST.Tlong :: AST.Tlong :: nil) (Some AST.Tlong) cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong cc_default)) :: (___builtin_bswap64, Gfun(External (EF_builtin "__builtin_bswap64" (mksignature (AST.Tlong :: nil) (Some AST.Tlong) cc_default)) (Tcons tulong Tnil) tulong cc_default)) :: (___builtin_clz, Gfun(External (EF_builtin "__builtin_clz" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons tuint Tnil) tint cc_default)) :: (___builtin_clzl, Gfun(External (EF_builtin "__builtin_clzl" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons tuint Tnil) tint cc_default)) :: (___builtin_clzll, Gfun(External (EF_builtin "__builtin_clzll" (mksignature (AST.Tlong :: nil) (Some AST.Tint) cc_default)) (Tcons tulong Tnil) tint cc_default)) :: (___builtin_ctz, Gfun(External (EF_builtin "__builtin_ctz" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons tuint Tnil) tint cc_default)) :: (___builtin_ctzl, Gfun(External (EF_builtin "__builtin_ctzl" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons tuint Tnil) tint cc_default)) :: (___builtin_ctzll, Gfun(External (EF_builtin "__builtin_ctzll" (mksignature (AST.Tlong :: nil) (Some AST.Tint) cc_default)) (Tcons tulong Tnil) tint cc_default)) :: (___builtin_fmax, Gfun(External (EF_builtin "__builtin_fmax" (mksignature (AST.Tfloat :: AST.Tfloat :: nil) (Some AST.Tfloat) cc_default)) (Tcons tdouble (Tcons tdouble Tnil)) tdouble cc_default)) :: (___builtin_fmin, Gfun(External (EF_builtin "__builtin_fmin" (mksignature (AST.Tfloat :: AST.Tfloat :: nil) (Some AST.Tfloat) cc_default)) (Tcons tdouble (Tcons tdouble Tnil)) tdouble cc_default)) :: (___builtin_fmadd, Gfun(External 
(EF_builtin "__builtin_fmadd" (mksignature (AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil) (Some AST.Tfloat) cc_default)) (Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble cc_default)) :: (___builtin_fmsub, Gfun(External (EF_builtin "__builtin_fmsub" (mksignature (AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil) (Some AST.Tfloat) cc_default)) (Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble cc_default)) :: (___builtin_fnmadd, Gfun(External (EF_builtin "__builtin_fnmadd" (mksignature (AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil) (Some AST.Tfloat) cc_default)) (Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble cc_default)) :: (___builtin_fnmsub, Gfun(External (EF_builtin "__builtin_fnmsub" (mksignature (AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil) (Some AST.Tfloat) cc_default)) (Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble cc_default)) :: (___builtin_read16_reversed, Gfun(External (EF_builtin "__builtin_read16_reversed" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons (tptr tushort) Tnil) tushort cc_default)) :: (___builtin_read32_reversed, Gfun(External (EF_builtin "__builtin_read32_reversed" (mksignature (AST.Tint :: nil) (Some AST.Tint) cc_default)) (Tcons (tptr tuint) Tnil) tuint cc_default)) :: (___builtin_write16_reversed, Gfun(External (EF_builtin "__builtin_write16_reversed" (mksignature (AST.Tint :: AST.Tint :: nil) None cc_default)) (Tcons (tptr tushort) (Tcons tushort Tnil)) tvoid cc_default)) :: (___builtin_write32_reversed, Gfun(External (EF_builtin "__builtin_write32_reversed" (mksignature (AST.Tint :: AST.Tint :: nil) None cc_default)) (Tcons (tptr tuint) (Tcons tuint Tnil)) tvoid cc_default)) :: (___builtin_nop, Gfun(External (EF_builtin "__builtin_nop" (mksignature nil None cc_default)) Tnil tvoid cc_default)) :: (___builtin_debug, Gfun(External (EF_external "__builtin_debug" (mksignature (AST.Tint :: nil) None {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) (Tcons tint 
Tnil) tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) :: (_malloc, Gfun(External EF_malloc (Tcons tuint Tnil) (tptr tvoid) cc_default)) :: (_free, Gfun(External EF_free (Tcons (tptr tvoid) Tnil) tvoid cc_default)) :: (_exit, Gfun(External (EF_external "exit" (mksignature (AST.Tint :: nil) None cc_default)) (Tcons tint Tnil) tvoid cc_default)) :: (_surely_malloc, Gfun(Internal f_surely_malloc)) :: (_fifo_new, Gfun(Internal f_fifo_new)) :: (_fifo_put, Gfun(Internal f_fifo_put)) :: (_fifo_empty, Gfun(Internal f_fifo_empty)) :: (_fifo_get, Gfun(Internal f_fifo_get)) :: (_make_elem, Gfun(Internal f_make_elem)) :: (_main, Gfun(Internal f_main)) :: nil). Definition public_idents : list ident := (_main :: _make_elem :: _fifo_get :: _fifo_empty :: _fifo_put :: _fifo_new :: _surely_malloc :: _exit :: _free :: _malloc :: ___builtin_debug :: ___builtin_nop :: ___builtin_write32_reversed :: ___builtin_write16_reversed :: ___builtin_read32_reversed :: ___builtin_read16_reversed :: ___builtin_fnmsub :: ___builtin_fnmadd :: ___builtin_fmsub :: ___builtin_fmadd :: ___builtin_fmin :: ___builtin_fmax :: ___builtin_ctzll :: ___builtin_ctzl :: ___builtin_ctz :: ___builtin_clzll :: ___builtin_clzl :: ___builtin_clz :: ___builtin_bswap64 :: ___compcert_i64_umulh :: ___compcert_i64_smulh :: ___compcert_i64_sar :: ___compcert_i64_shr :: ___compcert_i64_shl :: ___compcert_i64_umod :: ___compcert_i64_smod :: ___compcert_i64_udiv :: ___compcert_i64_sdiv :: ___compcert_i64_utof :: ___compcert_i64_stof :: ___compcert_i64_utod :: ___compcert_i64_stod :: ___compcert_i64_dtou :: ___compcert_i64_dtos :: ___compcert_va_composite :: ___compcert_va_float64 :: ___compcert_va_int64 :: ___compcert_va_int32 :: ___builtin_va_end :: ___builtin_va_copy :: ___builtin_va_arg :: ___builtin_va_start :: ___builtin_membar :: ___builtin_annot_intval :: ___builtin_annot :: ___builtin_memcpy_aligned :: ___builtin_fsqrt :: ___builtin_fabs :: ___builtin_bswap16 :: ___builtin_bswap32 :: 
___builtin_bswap :: ___builtin_ais_annot :: nil). Definition prog : Clight.program := mkprogram composites global_definitions public_idents _main Logic.I.
{-# LANGUAGE OverloadedStrings #-} module Lisp.EvalSpec (spec) where import Test.Hspec import Test.QuickCheck import Data.Complex import Data.Ratio import Data.Text import Lisp import Lisp.LispVal (h2l) spec :: Spec spec = do describe "Basic primitives self-eval" $ do it "Ints self-eval" $ property $ \i -> let l = Int i in eval l == l it "Reals self-eval" $ property $ \r -> let l = Real r in eval l == l it "Rationals self-eval" $ property $ \q -> let l = Rational q in eval l == l it "Complex numbers self-eval" $ property $ \c -> let l = Complex c in eval l == l it "Strings self-eval" $ property $ \s -> let l = String (pack s) in eval l == l it "Bools self-eval" $ property $ \b -> let l = Bool b in eval l == l describe "quoted forms return their contents" $ do let quoted v = Pair (Symbol "quote") (Pair v Nil) let foo = Symbol "foo" let bar = Symbol "bar" let foobar = Pair foo bar it "'foo evaluates to foo" $ eval (quoted foo) `shouldBe` foo it "'(foo . bar) evaluates to (foo . bar)" $ eval (quoted foobar) `shouldBe` foobar describe "basic primitives" $ do it "Can add two Ints" $ eval (h2l [Symbol "+", Int 5, Int 5]) `shouldBe` Int 10 it "Can add two Reals" $ eval (h2l [Symbol "+", Real 5.0, Real 5.0]) `shouldBe` Real 10.0 it "Can add Ints and Reals" $ eval (h2l [Symbol "+", Int 5, Real 5.0]) `shouldBe` Real 10.0 it "Can add Rationals and Complex" $ eval (h2l [Symbol "+", Rational (5%2), Complex (2.4:+3.1)]) `shouldBe` Complex (4.9:+3.1)
[STATEMENT] lemma canonical_tARR[dest]: assumes a: "[] \<turnstile> t : T1 \<rightarrow> T2" and b: "val t" shows "\<exists>x t'. t = LAM [x].t'" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>x t'. t = LAM [x].t' [PROOF STEP] using b a [PROOF STATE] proof (prove) using this: val t [] \<turnstile> t : T1 \<rightarrow> T2 goal (1 subgoal): 1. \<exists>x t'. t = LAM [x].t' [PROOF STEP] by (induct) (auto)
{-# OPTIONS --prop --without-K --rewriting #-} module Calf.Prelude where open import Agda.Builtin.Equality open import Agda.Builtin.Equality.Rewrite public Ω = Prop □ = Set postulate funext : ∀ {a b} {A : Set a} {B : A → Set b} {f g : (a : A) → B a} → (∀ x → f x ≡ g x) → f ≡ g funext/Ω : {A : Prop} {B : □} {f g : A → B} → (∀ x → f x ≡ g x) → f ≡ g
= BRIX11 help command == Collection common == Usage brix11 help [command] == Description Displays the main documentation for BRIX11 or for the command specified. == Example $ brix11 help Displays BRIX11 main documentation. $ brix11 help make Displays documentation for the BRIX11 *make* command.
module DivMod where

-- From examples/simple-lib
open import Lib.Vec
open import Lib.Nat
open import Lib.Id
open import Lib.Logic
open import Lib.Fin

-- Certified implementation of division and modulo
module Direct where

  -- DivMod a b packages a quotient q and a remainder r < b; the
  -- equation a = toNat r + q * b is baked into the first index.
  data DivMod : Nat -> Nat -> Set where
    dm : forall {b} q (r : Fin b) -> DivMod (toNat r + q * b) b

  -- Project the quotient out of a division witness.
  getQ : forall {a b} -> DivMod a b -> Nat
  getQ (dm q _) = q

  -- Project the remainder (as a Nat) out of a division witness.
  getR : forall {a b} -> DivMod a b -> Nat
  getR (dm _ r) = toNat r

  -- Divide a by suc b, recursing on a.  When the remainder of the
  -- recursive call is maximal (b), stepping a bumps the quotient and
  -- resets the remainder to zero; otherwise only the remainder grows.
  divModˢ : (a b : Nat) -> DivMod a (suc b)
  divModˢ zero b = dm 0 zero
  divModˢ (suc a) b with divModˢ a b
  divModˢ (suc ._) b | dm q r with maxView r
  divModˢ (suc ._) b | dm q .(fromNat b) | theMax with toNat (fromNat b) | lem-toNat-fromNat b
  ... | .b | refl = dm {suc b} (suc q) zero
  divModˢ (suc ._) b | dm q .(weaken i) | notMax i with toNat (weaken i) | lem-toNat-weaken i
  ... | .(toNat i) | refl = dm q (suc i)

  -- Total division; the NonZero proof rules out a zero divisor.
  divMod : (a b : Nat){nz : NonZero b} -> DivMod a b
  divMod a zero {}
  divMod a (suc b) = divModˢ a b

-- Let's try the inductive version. Less obvious that this one is correct.
module Inductive where

  -- Inductive characterisation: dmZ covers a < b (quotient 0), and
  -- dmS adds one more multiple of b (quotient successor).
  data DivMod : Nat -> Nat -> Set where
    dmZ : forall {b} (i : Fin b) -> DivMod (toNat i) b
    dmS : forall {a b} -> DivMod a b -> DivMod (b + a) b

  -- Quotient = number of dmS constructors.
  getQ : forall {a b} -> DivMod a b -> Nat
  getQ (dmZ _) = 0
  getQ (dmS d) = suc (getQ d)

  -- Remainder = the Fin stored at the dmZ leaf.
  getR : forall {a b} -> DivMod a b -> Nat
  getR (dmZ r) = toNat r
  getR (dmS d) = getR d

  -- Decide whether b < n (below, with the witness as a Fin n) or
  -- b = n + a for some a (above).
  data BoundView (n : Nat) : Nat -> Set where
    below : (i : Fin n) -> BoundView n (toNat i)
    above : forall a -> BoundView n (n + a)

  boundView : (a b : Nat) -> BoundView a b
  boundView zero b = above b
  boundView (suc a) zero = below zero
  boundView (suc a) (suc b) with boundView a b
  boundView (suc a) (suc .(toNat i)) | below i = below (suc i)
  boundView (suc a) (suc .(a + k)) | above k = above k

  -- A minimal ordering on Nat, with the few lemmas needed below.
  data _≤_ : Nat -> Nat -> Set where
    leqZ : forall {n} -> zero ≤ n
    leqS : forall {n m} -> n ≤ m -> suc n ≤ suc m

  ≤-suc : forall {a b} -> a ≤ b -> a ≤ suc b
  ≤-suc leqZ = leqZ
  ≤-suc (leqS p) = leqS (≤-suc p)

  plus-≤ : forall a {b c} -> a + b ≤ c -> b ≤ c
  plus-≤ zero p = p
  plus-≤ (suc a) (leqS p) = ≤-suc (plus-≤ a p)

  ≤-refl : forall {a} -> a ≤ a
  ≤-refl {zero} = leqZ
  ≤-refl {suc n} = leqS ≤-refl

  -- Recursion over a bound on a (needed for termination).
  divModˢ : forall {size} a b -> a ≤ size -> DivMod a (suc b)
  divModˢ a b prf with boundView (suc b) a
  divModˢ .(toNat r) b _ | below r = dmZ r
  divModˢ .(suc b + k) b (leqS prf) | above k = dmS (divModˢ k b (plus-≤ b prf))

  -- Total division; a itself serves as the termination bound.
  divMod : forall a b {nz : NonZero b} -> DivMod a b
  divMod a zero {}
  divMod a (suc b) = divModˢ a b ≤-refl

-- We ought to prove that the inductive version behaves the same as the
-- direct version... but that's more work than we're willing to spend.
open Inductive

-- Public quotient and remainder, extracted from the inductive witness.
_div_ : (a b : Nat){nz : NonZero b} -> Nat
_div_ a b {nz} = getQ (divMod a b {nz})

_mod_ : (a b : Nat){nz : NonZero b} -> Nat
_mod_ a b {nz} = getR (divMod a b {nz})
! Driver: find the root of f(x) = x - cos(x) on [0, 3.14] by bisection.
program main_bisection
  implicit none
  real x1,x2,x,eps,xcosx
  external xcosx
  integer ind
  x1=0
  x2=3.14
  eps=1e-7            ! absolute tolerance on the bracket width
  call bisection(xcosx,x1,x2,x,eps,ind)
  if (ind>=0) then
    print *,'X= ',x,xcosx(x),ind
  else if (ind==-1) then
    print *,'f(x1)*f(x2) must be less than 0'
  else
    print *,'No Convergence'
  endif
end program main_bisection

! f(x) = x - cos(x); its root is the fixed point of cos.
function xcosx(x)
  real xcosx,x
  xcosx = x-cos(x)
end function xcosx

! Bisection root finder.
!   func     : real function of one real argument
!   x10, x20 : bracket endpoints; func must change sign between them
!   x        : (out) approximate root
!   eps      : stop once the bracket is narrower than eps
!   ind      : (out) iteration count on success; 0 when an endpoint is
!              an exact root; -1 when the bracket has no sign change;
!              -itmax when the iteration limit is exhausted
subroutine bisection(func,x10,x20,x,eps,ind)
  implicit none
  real func,x10,x20,x,eps,x1,x2,y1,y2,xm,ym
  external func
  integer ind,it
  integer, parameter :: itmax=100000000
  x1=x10
  x2=x20
  y1=func(x1)
  y2=func(x2)
  ind=0
  if(y1==0) then
    x=x1
    return
  else if (y2==0) then
    x=x2
    return
  else if(y1*y2>0) then
    ind=-1              ! root not bracketed
    return
  endif
  do it = 1, itmax
    xm=(x1+x2)/2
    ym=func(xm)
    if(ym==0) then
      x=xm
      exit
    endif
    if(ym*y1<0) then    ! sign change in [x1, xm]: shrink from the right
      x2=xm
      y2=ym
    else                ! sign change in [xm, x2]: shrink from the left
      x1=xm
      y1=ym
    endif
    if(abs(x2-x1)<eps) then
      x=xm
      exit
    endif
  enddo
  ! If the loop ran to completion (no exit), it equals itmax+1 here.
  if(it>itmax) then
    ind=-itmax
  else
    ind=it
  endif
end subroutine bisection
% Comb-filter phase measurement GUI: the subject clicks in an image to
% pick amplitude (y) and phase (x) of a delayed copy of a pure tone,
% which is sent as an FIR injector coefficient vector to an MHA server
% over netcat (port 33337).  Results are saved to <user>.csv.
close all
clear
clc

graphics_toolkit qt

h.user = 'username';
h.ii = 1;                                % index of the current measurement (1..9)
h.measurements = {};
h.measurements(1, :) = {'Measurement', 'Frequency', 'Amplitude', 'Phase'};
h.freqs = [177, 297, 500, 841, 1414, 2378, 4000, 6727, 11314];

h.gui = figure('Position', [10 10 2000 1200], 'NumberTitle', 'off', 'Name', 'Comb-filter phase measurement GUI', 'toolbar', 'none', 'menubar', 'none');

% Selection of the pure tone frequency and adaption of the figure axes
function set_freq(obj)
  h = guidata(obj);
  string = get(obj, 'string');
  h.f = str2num(string);                 % radio-button label is the frequency in Hz
  set(h.dot,'visible','off')
  h.dot = plot(0, 0, 'o', 'color', 'r', 'markersize', 1);
  guidata(obj, h);
end

% Callback for click in the figure
function clickcallback(obj)
  h = guidata(obj);
  axesHandle = get(obj, 'Parent');
  coordinates = get(gca, 'CurrentPoint');
  coordinates = coordinates(1, 1:2);
  [x_max, y_max] = size(get(obj, 'cdata'));
  y = coordinates(2);
  % Image y axis runs downward; mirror around 128 and normalise to [0,1].
  coordinates(2) = abs((coordinates(2) - 128));
  h.x = coordinates(1);
  h.y = coordinates(2)./128;
  guidata(obj, h);
  coord2param(obj);
  h = guidata(obj);
  h.fs = 48000;                          % sample rate used for the delay-line length
  % Convert phase to a fractional sample delay and place the gain there.
  delay = 1+h.phi/(2*pi).*h.fs./h.f;
  b = zeros(1, 100);
  b(round(delay)) = h.a;
  system(['echo mha.transducers.mhachain.injector.B = [' sprintf('%.15f ',b) '] | nc -w 1 127.0.0.1 33337']);
  hold all
  delete(h.dot);
  h.dot = plot(h.x, y, 'o', 'markerfacecolor', 'r', 'markeredgecolor', 'r', 'markersize', 8);
  drawnow;
  guidata(obj, h);
end

% Convert input values to amplitude (a) and phase (phi)
function coord2param(obj)
  h = guidata(obj);
  h.a = h.y;
  h.phi = (5/2*pi)/160.*h.x;             % x axis spans 0..5pi/2 over 160 pixels
  guidata(obj, h);
  amplification_rule(obj);
  h = guidata(obj);
  guidata(obj, h);
end

% Scale amplitude
function amplification_rule(obj)
  h = guidata(obj);
  % Map the normalised click height to a per-frequency dB range, then to
  % a linear gain.
  switch h.f
    case 177
      h.a = 10.^((h.a*30-30)/20); % 0 dB to -30 dB
    case 297
      h.a = 10.^((h.a*30-30)/20); % 0 dB to -30 dB
    case 500
      h.a = 10.^((h.a*30-30)/20); % 0 dB to -30 dB
    case 841
      h.a = 10.^((h.a*30-35)/20); % -5 dB to -35 dB
    case 1414
      h.a = 10.^((h.a*35-45)/20); % -10 dB to -40 dB
    case 2378
      h.a = 10.^((h.a*35-50)/20); % -15 dB to -45 dB
    case 4000
      h.a = 10.^((h.a*30-30)/20); % -5 dB to -35 dB (0 dB to -30 dB)
    case 6727
      h.a = 10.^((h.a*30-35)/20); % -5 dB to -35 dB
    case 11314
      h.a = 10.^((h.a*30-35)/20); % -5 dB to -35 dB
  endswitch
  guidata(obj, h);
end

% Saving of the data
function finish(obj)
  h = guidata(obj);
  h.measurements(end+1, :) = {h.ii, h.f, h.a, h.phi};
  progress = [num2str(h.ii), ' out of 9 complete'];
  set(h.measurement_complete, 'string', progress);
  if h.ii == 9 % # of measurements
    set(h.measurement_complete, 'string', 'Measurement complete!');
    % NOTE(review): num2str on a char array is a no-op here — presumably
    % just [h.user '.csv'] was intended; confirm.
    filename = num2str([h.user, '.csv']);
    cell2csv(filename, h.measurements)
  end
  h.ii = h.ii + 1;
  guidata(obj, h);
end

% Toggle between amplification and no amplification
function play_reference(obj)
  % NOTE(review): h.fs and h.phi are only assigned inside clickcallback,
  % so selecting 'Amp on' before any click would fail — confirm the
  % intended flow.
  h = guidata(obj);
  b = zeros(1, 100);
  string = get(obj, 'string');
  switch string
    case 'Amp off'
      system(['echo mha.transducers.mhachain.injector.B = [' sprintf('%.15f ',b) '] | nc -w 1 127.0.0.1 33337']);
    case 'Amp on'
      delay = 1+h.phi/(2*pi).*h.fs./h.f;
      b(round(delay)) = h.a;
      system(['echo mha.transducers.mhachain.injector.B = [' sprintf('%.15f ',b) '] | nc -w 1 127.0.0.1 33337']);
  endswitch
  guidata(obj, h);
end

% Keyboard event detection
function keyPress(obj, e)
  h = guidata(obj);
  % NOTE(review): play_reference is declared with ONE parameter and
  % h.reference / h.adjusted are never created in this script — these
  % calls look stale; h.amp_off / h.amp_on appear to be the intended
  % handles.  Confirm before relying on the keyboard shortcuts.
  switch e.Key
    case '1'
      play_reference(h.reference, [], []);
    case '2'
      play_reference(h.adjusted, [], []);
  endswitch
  guidata(obj);
end

h.finish = uicontrol('style', 'pushbutton', 'units', 'normalized', 'string', 'Save', 'fontsize', 18, 'fontweight', 'bold', 'callback', @finish, 'position', [0.75 0.1 0.2 0.1]);

h.amplification_buttons = uibuttongroup('position', [0.75 0.35 0.2 0.1]);
h.amp_off = uicontrol(h.amplification_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', 'Amp off', 'fontsize', 10, 'callback', @play_reference, 'position', [0.1 0.4 0.3 0.2]);
h.amp_on = uicontrol(h.amplification_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', 'Amp on', 'fontsize', 10, 'callback', @play_reference, 'position', [0.6 0.4 0.3 0.2]);

% One radio button per test frequency; the label doubles as the value.
h.freq_buttons = uibuttongroup('position', [0.75 0.475 0.09 0.3]);
h.freq_177 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'selected', 'on', 'string', '177', 'fontsize',10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.85 0.9 0.05]);
h.freq_297 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', '297', 'fontsize',10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.75 0.9 0.05]);
h.freq_500 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', '500', 'fontsize',10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.65 0.9 0.05]);
h.freq_841 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', '841', 'fontsize', 10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.55 0.9 0.05]);
h.freq_1414 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', '1414', 'fontsize',10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.45 0.9 0.05]);
h.freq_2378 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', '2378', 'fontsize',10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.35 0.9 0.05]);
h.freq_4000 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', '4000', 'fontsize',10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.25 0.9 0.05]);
h.freq_6727 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', '6727', 'fontsize',10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.15 0.9 0.05]);
h.freq_11314 = uicontrol(h.freq_buttons, 'style', 'radiobutton', 'units', 'normalized', 'string', '11314', 'fontsize',10, 'fontweight','bold', 'callback', @set_freq, 'position', [0.05 0.05 0.9 0.05]);

h.measurement_complete = uicontrol('style', 'text', 'units', 'normalized', 'string', '', 'fontsize', 11, 'fontweight', 'bold', 'position', [0.75 0.225 0.2 0.1]);

% Background grid image: x = phase (0..5pi/2), y = relative level in dB.
img = imread('grid_extended.png');
min_x = 0;
max_x = 160;
min_y = 0;
max_y = 128;
fig_pos_click = {[0.05 0.1 0.65 0.8]};
% NOTE(review): empty brace index — presumably fig_pos_click{1} was
% meant; verify this runs under the target Octave version.
h.ax_click = axes('position', fig_pos_click{});
box('on')
h_image = imagesc([min_x max_x], [min_y, max_y], img);
set(gca, 'xtick', [0 32 64 96 128 160]);
set(gca, 'xticklabel', {'0', '^{\pi}/_{2}', '\pi', '^{3\pi}/_{2}', '2\pi', '^{5\pi}/_{2}'});
set(gca, 'ytick', [0 64/3 128/3 64 256/3 320/3 128]);
set(gca, 'yticklabel', {'+15', '+10', '+5', '0', '-5', '-10', '-15'});
set(gca, 'linewidth', 3, 'fontsize', 20);
hold all
h.dot = plot(0, 0, 'o', 'color', 'r', 'markersize', 1);
set(h_image,'ButtonDownFcn',@clickcallback);
set(gcf, 'KeyPressFcn', @keyPress);
guidata(gcf, h);
module Data.Trifunctor

%default total
%access public export

||| Trifunctor is abstraction over type with 3 type parameters
||| Progression of abstractions: Functor => Bifunctor => Trifunctor
interface Trifunctor (t : Type -> Type -> Type -> Type) where
  ||| Map over all three type parameters at once.
  timap : (r -> rr) -> (e -> ee) -> (a -> aa) -> t r e a -> t rr ee aa

  ||| Map over the last two parameters, keeping the first fixed.
  bimap : (e -> ee) -> (a -> aa) -> t r e a -> t r ee aa
  bimap = timap id

  ||| Map over the last parameter only.
  map : (a -> aa) -> t r e a -> t r e aa
  map = timap id id

  ||| Map over the middle parameter only.
  mapLeft : (e -> ee) -> t r e a -> t r ee a
  mapLeft e = timap id e id
module QIO.Circuit.Compile where

import Control.Monad (replicateM)

import qualified QIO.Circuit.Circuit as C

import Data.Complex

import QIO.QioSyn
import QIO.Qio

-- | Compile a circuit description into a runnable QIO computation:
-- allocate one qubit (initialised to False) per circuit qubit, apply
-- every gate in order, then measure all qubits.
compileCircuit :: C.Circuit -> QIO [Bool]
compileCircuit circuit = do
  qs <- mkQbits (length $ C.qubits circuit) False
  applyU (gatesToU (C.gates circuit) qs)
  measQbits qs

-- | Fold a gate list into a single unitary, preserving gate order.
gatesToU :: [C.Gate] -> [Qbit] -> U
gatesToU [] _ = mempty
gatesToU (g:gs) qs = (gateToU g qs) <> (gatesToU gs qs)

-- | Translate one gate into a unitary; the gate's integer indices
-- select wires from @qs@.  Unrecognised gates compile to the identity.
gateToU :: C.Gate -> [Qbit] -> U
gateToU (C.Had _ qis) qs = condU uhad $ map (\i -> qs !! i) qis
gateToU (C.PX _ qis) qs = condU unot $ map (\i -> qs !! i) qis
gateToU (C.PY _ qis) qs = condU (\q -> rot q yRot) $ map (\i -> qs !! i) qis
  where yRot (False, True) = 0 :+ 1
        yRot (True, False) = 0 :+ (-1)
        yRot _ = 0
gateToU (C.PZ _ qis) qs = condU (\q -> uphase q pi) $ map (\i -> qs !! i) qis
gateToU (C.Swap _ (q1:q2:[])) qs = swap (qs !! q1) (qs !! q2)
gateToU _ _ = mempty

-- | Allocate @n@ fresh qubits initialised to @b@.
-- Fix: the original accumulated with repeated @qs ++ [q]@ (quadratic);
-- 'replicateM' keeps the same allocation order in linear time.
mkQbits :: Int -> Bool -> QIO [Qbit]
mkQbits n b = replicateM n (mkQbit b)

-- | Measure every qubit, preserving order.
measQbits :: [Qbit] -> QIO [Bool]
measQbits = mapM measQbit

-- | Apply @u@ to the final qubit, conditioned on every preceding qubit
-- in the list being True.
-- Fix: the original was non-exhaustive on @[]@ and crashed for gates
-- with an empty index list; that case is now the identity unitary.
condU :: (Qbit -> U) -> [Qbit] -> U
condU _ [] = mempty
condU u (ql:[]) = u ql
condU u (q:qs) = cond q (\x -> if x then (condU u qs) else mempty)
-- Ad-hoc tests for type-level lifting: builds a lifted type, applies
-- it, and renders both the raw and the normalised form as strings.
module lift-tests where

open import lib

open import cedille-types
-- NOTE(review): "convervsion" looks like a typo for "conversion" —
-- confirm against the actual filename of the imported module before
-- renaming anything.
open import convervsion
open import ctxt
open import lift
open import syntax-util
open import to-string

-- Lifting of the untyped term (\ x. \ y. y) at lift-kind * -> * -> *.
t0 = Lft posinfo-gen "X" (Parens posinfo-gen (mlam "x" (mlam "y" (mvar "y"))) posinfo-gen)
       (LiftArrow (LiftStar posinfo-gen) (LiftArrow (LiftStar posinfo-gen) (LiftStar posinfo-gen)))

-- Apply the lifted operator to (∀ X : *. X → X) and then to False.
t = TpApp (TpApp t0 (mall "X" (Tkk star) (TpArrow (mtpvar "X") (mtpvar "X")))) (mtpvar "False")

-- Raw rendering.
s = type-to-string t

-- Rendering after head-normalisation in the empty context.
s1 = type-to-string (hnf new-ctxt tt t)
// Copyright (c) 2014-2018, The Monero Project // // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other // materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be // used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL // THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <fstream>
#include <iostream>
#include <memory>

#include <boost/filesystem.hpp>
#include "cryptonote_basic/cryptonote_basic.h"
#include "cryptonote_basic/tx_extra.h"
#include "cryptonote_core/blockchain.h"
#include "common/command_line.h"
#include "version.h"

#undef MONERO_DEFAULT_LOG_CATEGORY
#define MONERO_DEFAULT_LOG_CATEGORY "debugtools.deserialize"

namespace po = boost::program_options;
using namespace epee;
using namespace cryptonote;

// Deserializes a hex blob (--input) as a cryptonote block or transaction
// and prints its JSON representation to stdout or --output-file.
// Returns 0 on success, 1 on any error.
int main(int argc, char* argv[])
{
  uint32_t log_level = 0;
  std::string input;

  tools::on_startup();

  boost::filesystem::path output_file_path;

  po::options_description desc_cmd_only("Command line options");
  po::options_description desc_cmd_sett("Command line options and settings options");
  const command_line::arg_descriptor<std::string> arg_output_file = {"output-file", "Specify output file", "", true};
  const command_line::arg_descriptor<uint32_t> arg_log_level = {"log-level", "", log_level};
  const command_line::arg_descriptor<std::string> arg_input = {"input", "Specify input has a hexadecimal string", ""};

  command_line::add_arg(desc_cmd_sett, arg_output_file);
  command_line::add_arg(desc_cmd_sett, arg_log_level);
  command_line::add_arg(desc_cmd_sett, arg_input);
  command_line::add_arg(desc_cmd_only, command_line::arg_help);

  po::options_description desc_options("Allowed options");
  desc_options.add(desc_cmd_only).add(desc_cmd_sett);

  po::variables_map vm;
  bool r = command_line::handle_error_helper(desc_options, [&]()
  {
    po::store(po::parse_command_line(argc, argv, desc_options), vm);
    po::notify(vm);
    return true;
  });
  if (!r)
    return 1;

  if (command_line::get_arg(vm, command_line::arg_help))
  {
    std::cout << "Monero '" << MONERO_RELEASE_NAME << "' (v" << MONERO_VERSION_FULL << ")" << ENDL << ENDL;
    std::cout << desc_options << std::endl;
    return 1;
  }

  log_level = command_line::get_arg(vm, arg_log_level);
  input = command_line::get_arg(vm, arg_input);
  if (input.empty())
  {
    std::cerr << "--input is mandatory" << std::endl;
    return 1;
  }

  mlog_configure("", true);

  // Fix: hold the optional file stream in a unique_ptr so it is released
  // on every return path (the original leaked it on early error returns).
  std::ostream *output;
  std::unique_ptr<std::ofstream> raw_data_file;
  if (command_line::has_arg(vm, arg_output_file))
  {
    output_file_path = boost::filesystem::path(command_line::get_arg(vm, arg_output_file));

    const boost::filesystem::path dir_path = output_file_path.parent_path();
    if (!dir_path.empty())
    {
      if (boost::filesystem::exists(dir_path))
      {
        if (!boost::filesystem::is_directory(dir_path))
        {
          std::cerr << "output directory path is a file: " << dir_path << std::endl;
          return 1;
        }
      }
      else
      {
        if (!boost::filesystem::create_directory(dir_path))
        {
          std::cerr << "Failed to create directory " << dir_path << std::endl;
          return 1;
        }
      }
    }
    raw_data_file.reset(new std::ofstream());
    raw_data_file->open(output_file_path.string(), std::ios_base::out | std::ios::trunc);
    if (raw_data_file->fail())
      return 1;
    output = raw_data_file.get();
  }
  else
  {
    output_file_path = "";
    output = &std::cout;
  }

  cryptonote::blobdata blob;
  if (!epee::string_tools::parse_hexstr_to_binbuff(input, blob))
  {
    // Fix: the original emitted two near-identical error lines here;
    // keep the one that includes the offending input.
    std::cerr << "Invalid hex input: " << input << std::endl;
    return 1;
  }

  cryptonote::block block;
  cryptonote::transaction tx;
  std::vector<cryptonote::tx_extra_field> fields;
  if (cryptonote::parse_and_validate_block_from_blob(blob, block))
  {
    std::cout << "Parsed block:" << std::endl;
    std::cout << cryptonote::obj_to_json_str(block) << std::endl;
  }
  else if (cryptonote::parse_and_validate_tx_from_blob(blob, tx))
  {
    std::cout << "Parsed transaction:" << std::endl;
    std::cout << cryptonote::obj_to_json_str(tx) << std::endl;

    bool parsed = cryptonote::parse_tx_extra(tx.extra, fields);
    if (!parsed)
      std::cout << "Failed to parse tx_extra" << std::endl;
    if (!fields.empty())
    {
      std::cout << "tx_extra has " << fields.size() << " field(s)" << std::endl;
      for (size_t n = 0; n < fields.size(); ++n)
      {
        std::cout << "field " << n << ": ";
        if (typeid(cryptonote::tx_extra_padding) == fields[n].type())
          std::cout << "extra padding: " << boost::get<cryptonote::tx_extra_padding>(fields[n]).size << " bytes";
        else if (typeid(cryptonote::tx_extra_pub_key) == fields[n].type())
          std::cout << "extra pub key: " << boost::get<cryptonote::tx_extra_pub_key>(fields[n]).pub_key;
        else if (typeid(cryptonote::tx_extra_nonce) == fields[n].type())
          std::cout << "extra nonce: " << epee::string_tools::buff_to_hex_nodelimer(boost::get<cryptonote::tx_extra_nonce>(fields[n]).nonce);
        else if (typeid(cryptonote::tx_extra_merge_mining_tag) == fields[n].type())
          std::cout << "extra merge mining tag: depth " << boost::get<cryptonote::tx_extra_merge_mining_tag>(fields[n]).depth << ", merkle root " << boost::get<cryptonote::tx_extra_merge_mining_tag>(fields[n]).merkle_root;
        else if (typeid(cryptonote::tx_extra_mysterious_minergate) == fields[n].type())
          std::cout << "extra minergate custom: " << epee::string_tools::buff_to_hex_nodelimer(boost::get<cryptonote::tx_extra_mysterious_minergate>(fields[n]).data);
        else
          std::cout << "unknown";
        std::cout << std::endl;
      }
    }
    else
    {
      std::cout << "No fields were found in tx_extra" << std::endl;
    }
  }
  else
  {
    std::cerr << "Not a recognized CN type" << std::endl;
    return 1;
  }

  // Fix: flush before checking the stream state so buffered write
  // failures are detected (the original checked fail() first).
  output->flush();
  if (output->fail())
    return 1;
  return 0;
}
[STATEMENT] lemma pt_eqvt_fun1b: fixes f :: "'a\<Rightarrow>'b" assumes a: "\<forall>(pi::'x prm). pi\<bullet>f = f" shows "((supp f)::'x set)={}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. supp f = {} [PROOF STEP] using a [PROOF STATE] proof (prove) using this: \<forall>pi. pi \<bullet> f = f goal (1 subgoal): 1. supp f = {} [PROOF STEP] by (simp add: supp_def)
If $A$ and $B$ are measurable sets with $B \subseteq A$, then the measure of the difference $A - B$ is the measure of $A$ minus the measure of $B$.