{-# OPTIONS --without-K --safe #-}
open import Categories.Category.Core
open import Categories.Category.Monoidal.Core
module Categories.Object.Monoid {o ℓ e} {𝒞 : Category o ℓ e} (C : Monoidal 𝒞) where
open import Level
-- a monoid object generalizes the notion of a monoid from algebra
-- to an arbitrary monoidal category
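-- (for instance, a monoid object in (Set, ×, 1) is an ordinary monoid,
-- and a monoid object in (Ab, ⊗, ℤ) is a ring)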
open Category 𝒞
open Monoidal C
record IsMonoid (M : Obj) : Set (ℓ ⊔ e) where
field
μ : M ⊗₀ M ⇒ M
η : unit ⇒ M
field
assoc : μ ∘ μ ⊗₁ id ≈ μ ∘ id ⊗₁ μ ∘ associator.from
identityˡ : unitorˡ.from ≈ μ ∘ η ⊗₁ id
identityʳ : unitorʳ.from ≈ μ ∘ id ⊗₁ η
record Monoid : Set (o ⊔ ℓ ⊔ e) where
field
Carrier : Obj
isMonoid : IsMonoid Carrier
open IsMonoid isMonoid public
open Monoid
record Monoid⇒ (M M′ : Monoid) : Set (ℓ ⊔ e) where
field
arr : Carrier M ⇒ Carrier M′
preserves-μ : arr ∘ μ M ≈ μ M′ ∘ arr ⊗₁ arr
preserves-η : arr ∘ η M ≈ η M′
|
Formal statement is: lemma setdist_sym: "setdist S T = setdist T S" Informal statement is: The distance between two sets is symmetric. |
lemma islimpt_approachable: fixes x :: "'a::metric_space" shows "x islimpt S \<longleftrightarrow> (\<forall>e>0. \<exists>x'\<in>S. x' \<noteq> x \<and> dist x' x < e)" |
Y := function(f)
local u;
u := x -> x(x);
return u(y -> f(a -> y(y)(a)));
end;
fib := function(f)
local u;
u := function(n)
if n < 2 then
return n;
else
return f(n-1) + f(n-2);
fi;
end;
return u;
end;
Y(fib)(10);
# 55
fac := function(f)
local u;
u := function(n)
if n < 2 then
return 1;
else
return n*f(n-1);
fi;
end;
return u;
end;
Y(fac)(8);
# 40320
|
Formal statement is: lemma closed_diagonal: "closed {y. \<exists> x::('a::t2_space). y = (x,x)}" Informal statement is: The diagonal of a Hausdorff ($T_2$) space is closed. |
-- BSTree.idr
--
-- Demonstrates generic data types
module BSTree
||| A binary tree
public export
data BSTree : Type -> Type where
Empty : Ord elem => BSTree elem
Node : Ord elem => (left : BSTree elem) -> (val : elem) ->
(right : BSTree elem) -> BSTree elem
%name BSTree tree, tree1
||| Inserts a new element into a binary search tree
export
insert : elem -> BSTree elem -> BSTree elem
insert x Empty = Node Empty x Empty
insert x (Node left val right) = case compare x val of
LT => Node (insert x left) val right
EQ => Node left val right
GT => Node left val (insert x right)
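-- For illustration (not part of the original module):
-- insert 1 (insert 3 (insert 2 Empty))
-- evaluates to Node (Node Empty 1 Empty) 2 (Node Empty 3 Empty)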
|
$a(b - b') = ab - ab'$. |
lemma complete_subspace: "subspace s \<Longrightarrow> complete s" for s :: "'a::euclidean_space set" |
{-# OPTIONS --cubical-compatible #-}
open import Agda.Builtin.Bool
data D : Bool → Set where
true : D true
false : D false
F : @0 D false → Set₁
F false = Set
|
theory CS_Ch3_Ex4
imports Main
begin
type_synonym vname = string
datatype aexp = N int | V vname | Plus aexp aexp | Times aexp aexp
type_synonym val = int
type_synonym state = "vname \<Rightarrow> val"
fun aval :: "aexp \<Rightarrow> state \<Rightarrow> val" where
"aval (N n) s = n" |
"aval (V x) s = s x" |
"aval (Plus a1 a2) s = aval a1 s + aval a2 s" |
"aval (Times a1 a2) s = aval a1 s * aval a2 s"
fun plus :: "aexp \<Rightarrow> aexp \<Rightarrow> aexp" where
"plus (N i1) (N i2) = N (i1 + i2)" |
"plus (N i) a = (if i = 0 then a else Plus (N i) a)" |
"plus a (N i) = (if i = 0 then a else Plus a (N i))" |
"plus a1 a2 = Plus a1 a2"
lemma aval_plus: "aval (plus a1 a2) s = aval a1 s + aval a2 s"
apply(induction a1 a2 rule: plus.induct)
apply(auto)
done
fun times :: "aexp \<Rightarrow> aexp \<Rightarrow> aexp" where
"times (N i1) (N i2) = N (i1 * i2)" |
"times (N i) a = (if i = 0 then (N 0) else if i = 1 then a else Times (N i) a)" |
"times a (N i) = (if i = 0 then (N 0) else if i = 1 then a else Times (N i) a)" |
"times a b = Times a b"
lemma aval_times: "aval (times a1 a2) s = aval a1 s * aval a2 s"
apply(induction a1 a2 rule: times.induct)
apply(auto)
done
fun asimp :: "aexp \<Rightarrow> aexp" where
"asimp (N n) = N n" |
"asimp (V x) = V x" |
"asimp (Plus a1 a2) = plus (asimp a1) (asimp a2)" |
"asimp (Times a1 a2) = times (asimp a1) (asimp a2)"
lemma "aval (asimp a) s = aval a s"
apply(induction a)
apply(auto simp add: aval_plus aval_times)
done
end |
Below, you'll find forms that allow you to manage your tenant account with ease. Simply click on any form title below to access the full form and submit it to our team.
Use this form to give notice to move out. All fields are required. If you do not receive your move out instructions email within two days, please contact the office.
When do I have to give notice?
You must submit your notice on or before the FIRST DAY of the month. If we receive your move-out notice after the first of the month, there is a late move-out notice fee equal to one times the rent.
Rent and utilities must be paid through the end of your move out month.
Turn off utilities on the last day of the month. If utilities are turned off before the end of the month, then you may be charged a fee.
If you leave before your lease expires, then you are required to pay an early termination fee.
Please submit this form if you would like to change the name on your lease due to marriage or divorce, etc. There is a $75.00 lease change fee to make this change.
Each new tenant must fill out an application and be approved before they can move in. If approved, there is a $150 lease change fee.
Removing a tenant from the lease requires the remaining tenant(s) to submit a new rental application(s). It also requires the tenant that is leaving to sign a lease amendment. If approved, there is a $150 lease change fee.
We will review a charge ONE TIME. Provide all the information we need to make our decision. This form is for CURRENT TENANTS disputing a charge or PAST TENANTS disputing a security deposit disbursement. Requests for review must be made using this form. We will respond within 10 business days. Our decision is final.
Gather documents, receipts, photos, etc. to support your request. These must be attached to this form. |
Formal statement is: lemma contour_integral_circlepath_eq: assumes "open s" and f_holo:"f holomorphic_on (s-{z})" and "0<e1" "e1\<le>e2" and e2_cball:"cball z e2 \<subseteq> s" shows "f contour_integrable_on circlepath z e1" "f contour_integrable_on circlepath z e2" "contour_integral (circlepath z e2) f = contour_integral (circlepath z e1) f" Informal statement is: If $f$ is holomorphic on an open set $s$ except possibly at $z$, and the closed ball of radius $e_2$ about $z$ is contained in $s$, then $f$ is contour-integrable on the circles of radius $e_1$ and $e_2$ about $z$ (where $0 < e_1 \le e_2$), and the two contour integrals are equal. |
-- Andreas, 2017-08-24, issue #2253
--
-- Better error message for matching on abstract constructor.
-- {-# OPTIONS -v tc.lhs.split:30 #-}
-- {-# OPTIONS -v tc.lhs:30 #-}
-- {-# OPTIONS -v tc.lhs.flex:60 #-}
abstract
data B : Set where
x : B
data C : Set where
c : B → C
f : C → C
f (c x) = c x
-- WAS:
--
-- Not in scope:
-- AbstractPatternShadowsConstructor.B.x
-- (did you mean 'x'?)
-- when checking that the pattern c x has type C
-- Expected:
--
-- Cannot split on abstract data type B
-- when checking that the pattern x has type B
|
-- A simple word counter
open import Coinduction using ( ♯_ )
open import Data.Char.Classifier using ( isSpace )
open import Data.Bool using ( Bool ; true ; false )
open import Data.Natural using ( Natural ; show )
open import System.IO using ( Command )
open import System.IO.Transducers.Lazy using ( _⇒_ ; inp ; out ; done ; _⟫_ ; _⟨&⟩_ )
open import System.IO.Transducers.List using ( length )
open import System.IO.Transducers.Bytes using ( bytes )
open import System.IO.Transducers.IO using ( run )
open import System.IO.Transducers.UTF8 using ( split ; encode )
open import System.IO.Transducers.Session using ( ⟨_⟩ ; _&_ ; Bytes ; Strings )
module System.IO.Examples.WC where
words : Bytes ⇒ ⟨ Natural ⟩
words = split isSpace ⟫ inp (♯ length { Bytes })
-- TODO: this isn't exactly lovely user syntax.
report : ⟨ Natural ⟩ & ⟨ Natural ⟩ ⇒ Strings
report =
(inp (♯ λ #bytes →
(out true
(out (show #bytes)
(out true
(out " "
(inp (♯ λ #words →
(out true
(out (show #words)
(out true
(out "\n"
(out false done)))))))))))))
wc : Bytes ⇒ Bytes
wc = bytes ⟨&⟩ words ⟫ report ⟫ inp (♯ encode)
main : Command
main = run wc
|
Welcome to Thanjavur.net, the website for the Chola capital city and the Granary of South India. Thanjavur was the royal city of the Cholas, Nayaks and the Mahrattas. Thanjavur derives its name from Tanjan, an asura (giant) who, according to local legend, devastated the neighborhood and was killed by Sri Anandavalli Amman and Vishnu, Sri Neelamegapperumal. Tanjan's last request, that the city be named after him, was granted.
Thanjavur was at the height of its glory during the reign of Rajaraja Chola. Let us take Thanjavur back to its past glory in the information age. Thanjavur is still the center of the classical arts and music. It has produced many classical musicians and Bharathanatyam dancers, and is also well known for its unique painting style, called Tanjore painting, and for the thavil, a percussion instrument.
Here we have the ultimate site for all the information about Thanjavur and its neighborhood. It is the capital of the Thanjavur district and has been a center for learning Tamil since the Chola period, a role renewed with the establishment of the Tamil University by our late Chief Minister Dr. M.G. Ramachandran. There are many beautiful temples in the Thanjavur region. The Big Temple stands tall with its beaming tower; it is one of the architectural wonders of the world. |
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
! This file was ported from Lean 3 source module group_theory.perm.option
! leanprover-community/mathlib commit c3019c79074b0619edb4b27553a91b2e82242395
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Data.Fintype.Perm
import Mathlib.GroupTheory.Perm.Sign
import Mathlib.Logic.Equiv.Option
/-!
# Permutations of `option α`
-/
open Equiv
@[simp]
theorem Equiv.optionCongr_one {α : Type _} : (1 : Perm α).optionCongr = 1 :=
Equiv.optionCongr_refl
#align equiv.option_congr_one Equiv.optionCongr_one
@[simp]
theorem Equiv.optionCongr_swap {α : Type _} [DecidableEq α] (x y : α) :
optionCongr (swap x y) = swap (some x) (some y) := by
ext (_ | i)
· simp [swap_apply_of_ne_of_ne]
· by_cases hx : i = x
simp [hx, swap_apply_of_ne_of_ne]
by_cases hy : i = y <;> simp [hx, hy, swap_apply_of_ne_of_ne]
#align equiv.option_congr_swap Equiv.optionCongr_swap
@[simp]
theorem Equiv.optionCongr_sign {α : Type _} [DecidableEq α] [Fintype α] (e : Perm α) :
Perm.sign e.optionCongr = Perm.sign e := by
refine Perm.swap_induction_on e ?_ ?_
· simp [Perm.one_def]
· intro f x y hne h
simp [h, hne, Perm.mul_def, ← Equiv.optionCongr_trans]
#align equiv.option_congr_sign Equiv.optionCongr_sign
@[simp]
theorem map_equiv_removeNone {α : Type _} [DecidableEq α] (σ : Perm (Option α)) :
(removeNone σ).optionCongr = swap none (σ none) * σ := by
ext1 x
have : Option.map (⇑(removeNone σ)) x = (swap none (σ none)) (σ x) :=
by
cases' x with x
· simp
· cases h : σ (some _)
· simp [removeNone_none _ h]
· have hn : σ (some x) ≠ none := by simp [h]
have hσn : σ (some x) ≠ σ none := σ.injective.ne (by simp)
simp [removeNone_some _ ⟨_, h⟩, ← h, swap_apply_of_ne_of_ne hn hσn]
simpa using this
#align map_equiv_remove_none map_equiv_removeNone
/-- Permutations of `Option α` are equivalent to fixing an
`Option α` and permuting the remaining with a `Perm α`.
The fixed `Option α` is swapped with `none`. -/
@[simps]
def Equiv.Perm.decomposeOption {α : Type _} [DecidableEq α] : Perm (Option α) ≃ Option α × Perm α
where
toFun σ := (σ none, removeNone σ)
invFun i := swap none i.1 * i.2.optionCongr
left_inv σ := by simp
right_inv := fun ⟨x, σ⟩ =>
by
have : removeNone (swap none x * σ.optionCongr) = σ :=
Equiv.optionCongr_injective (by simp [← mul_assoc])
simp [← Perm.eq_inv_iff_eq, this]
#align equiv.perm.decompose_option Equiv.Perm.decomposeOption
theorem Equiv.Perm.decomposeOption_symm_of_none_apply {α : Type _} [DecidableEq α] (e : Perm α)
(i : Option α) : Equiv.Perm.decomposeOption.symm (none, e) i = i.map e := by simp
#align equiv.perm.decompose_option_symm_of_none_apply Equiv.Perm.decomposeOption_symm_of_none_apply
theorem Equiv.Perm.decomposeOption_symm_sign {α : Type _} [DecidableEq α] [Fintype α] (e : Perm α) :
Perm.sign (Equiv.Perm.decomposeOption.symm (none, e)) = Perm.sign e := by simp
#align equiv.perm.decompose_option_symm_sign Equiv.Perm.decomposeOption_symm_sign
/-- The set of all permutations of `Option α` can be constructed by augmenting the set of
permutations of `α` by each element of `Option α` in turn. -/
theorem Finset.univ_perm_option {α : Type _} [DecidableEq α] [Fintype α] :
@Finset.univ (Perm <| Option α) _ =
(Finset.univ : Finset <| Option α × Perm α).map Equiv.Perm.decomposeOption.symm.toEmbedding :=
(Finset.univ_map_equiv_to_embedding _).symm
#align finset.univ_perm_option Finset.univ_perm_option
|
[STATEMENT]
lemma lasso_more_cong[cong]:"state.more s = state.more s' \<Longrightarrow> lasso s = lasso s'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. state.more s = state.more s' \<Longrightarrow> lasso s = lasso s'
[PROOF STEP]
by (cases s, cases s') simp |
Formal statement is: lemma (in t2_space) LIMSEQ_unique: "X \<longlonglongrightarrow> a \<Longrightarrow> X \<longlonglongrightarrow> b \<Longrightarrow> a = b" Informal statement is: In a Hausdorff ($T_2$) space, limits of sequences are unique: if a sequence converges to both $a$ and $b$, then $a = b$. |
lemma dist_minus: fixes x y :: "'a::real_normed_vector" shows "dist (- x) (- y) = dist x y" |
-- Copyright 2017, the blau.io contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
module API.Web.DOM.Event
import IdrisScript
%access public export
%default total
||| An Event is an object used for signaling that something has occurred, e.g.,
||| that an image has completed downloading.
|||
||| The original interface specification can be found at
||| https://dom.spec.whatwg.org/#interface-event
record Event where
constructor New
||| The type of *Event*, e.g. "`click`", "`hashchange`", or "`submit`"
type : String
||| Non standard field for easier JS integration
self : JSRef
|
module _ where
open import Data.Nat using (ℕ; _+_)
open import Relation.Binary.PropositionalEquality using (_≡_; refl)
_ : 2 + 1 ≡ 3
_ = refl |
module JSON.Option
import Generics.Derive
%language ElabReflection
||| Specifies how to encode constructors of a sum datatype.
public export
data SumEncoding : Type where
||| Constructor names won't be encoded. Instead only the contents of the
||| constructor will be encoded as if the type had a single constructor. JSON
||| encodings have to be disjoint for decoding to work properly.
|||
||| When decoding, constructors are tried in the order of definition. If some
||| encodings overlap, the first one defined will succeed.
|||
||| Note: Nullary constructors are encoded as strings (using
||| constructorTagModifier). Having a nullary constructor
||| alongside a single field constructor that encodes to a
||| string leads to ambiguity.
|||
||| Note: Only the last error is kept when decoding, so in the case of
||| malformed JSON, only an error for the last constructor will be reported.
UntaggedValue : SumEncoding
||| A constructor will be encoded to an object with a single field named
||| after the constructor tag (modified by the constructorTagModifier) which
||| maps to the encoded contents of the constructor.
ObjectWithSingleField : SumEncoding
||| A constructor will be encoded to a 2-element array where the first
||| element is the tag of the constructor (modified by the constructorTagModifier)
||| and the second element the encoded contents of the constructor.
TwoElemArray : SumEncoding
||| A constructor will be encoded to an object with a field `tagFieldName`
||| which specifies the constructor tag (modified by the
||| constructorTagModifier). If the constructor is a record the
||| encoded record fields will be unpacked into this object. So
||| make sure that your record doesn't have a field with the
||| same label as the tagFieldName. Otherwise the tag gets
||| overwritten by the encoded value of that field! If the constructor
||| is not a record the encoded constructor contents will be
||| stored under the contentsFieldName field.
TaggedObject : (tagFieldName : String)
-> (contentsFieldName : String)
-> SumEncoding
%runElab derive "SumEncoding" [Generic,Meta,Show,Eq]
||| Corresponds to `TaggedObject "tag" "contents"`
public export
defaultTaggedObject : SumEncoding
defaultTaggedObject = TaggedObject "tag" "contents"
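-- As a rough illustration (the datatype below is hypothetical and not part of this module):
-- given `data Shape = Circle Double | Rect Double Double`, the value `Circle 1.0`
-- would be encoded roughly as follows under each strategy:
--   UntaggedValue                  => 1.0
--   ObjectWithSingleField          => {"Circle": 1.0}
--   TwoElemArray                   => ["Circle", 1.0]
--   TaggedObject "tag" "contents"  => {"tag": "Circle", "contents": 1.0}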
public export
adjustConnames : (String -> String) -> TypeInfo' k kss -> TypeInfo' k kss
adjustConnames f = { constructors $= mapNP adjCon }
where adjCon : ConInfo_ k ks -> ConInfo_ k ks
adjCon (MkConInfo ns n fs) = MkConInfo ns (f n) fs
public export
adjustInfo : (adjFields : String -> String)
-> (adjCons : String -> String)
-> TypeInfo' k kss
-> TypeInfo' k kss
adjustInfo af ac = { constructors $= mapNP adjCon }
where adjArg : ArgName -> ArgName
adjArg (NamedArg ix n) = NamedArg ix $ af n
adjArg arg = arg
adjCon : ConInfo_ k ks -> ConInfo_ k ks
adjCon (MkConInfo ns n fs) = MkConInfo ns (ac n) (mapNP adjArg fs)
public export
adjustFieldNames : (String -> String) -> TypeInfo' k kss -> TypeInfo' k kss
adjustFieldNames f = adjustInfo f id
public export
nullaryInjections : NP_ (List k) (ConInfo_ k) kss
-> (0 et : EnumType kss)
-> NP_ (List k) (K (NS_ (List k) (NP f) kss)) kss
nullaryInjections [] _ = []
nullaryInjections (MkConInfo _ _ [] :: vs) es =
Z [] :: mapNP (\ns => S ns) (nullaryInjections vs (enumTail es))
|
theory Chap2
imports Main
begin
datatype nat = Zero | Suc nat
fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"add Zero n = n" |
"add (Suc m) n = Suc(add m n)"
lemma add_02: "add m Zero = m"
apply(induction m)
apply(auto)
done
end
|
Formal statement is: lemma upd_space: "i < n \<Longrightarrow> upd i < n" Informal statement is: If $i < n$, then $upd\ i < n$; that is, $upd$ maps $\{0, \ldots, n-1\}$ into itself. |
!!
!! ASSIGNMENT TO ALLOCATABLE ARRAY YIELDS BAD ARRAY
!!
!! The following example makes an assignment to an allocatable array.
!! The array appears good, but when passed to a subroutine it is bad.
!! A key ingredient seems to be that the rhs expression of the assignment
!! is a reference to an elemental type bound function.
!!
!! % ifort --version
!! ifort (IFORT) 15.0.2 20150121
!!
!! % ifort -assume realloc_lhs intel-bug-20150601.f90
!! % ./a.out
!! 1 2 3 4 5 <== BEFORE CALL (GOOD)
!! 1 5 0 0 0 <== AFTER CALL (BAD)
!!
module set_type
type :: set
type(set), pointer :: foo
integer :: n
contains
procedure :: size => set_size
end type
contains
elemental integer function set_size (this)
class(set), intent(in) :: this
set_size = this%n
end function
end module
program main
use set_type
integer, allocatable :: sizes(:)
type(set), allocatable :: sets(:)
allocate(sets(5))
sets%n = [1, 2, 3, 4, 5] ! YIELDS BAD ARRAY WHEN PASSED
sizes = sets%size() ! AUTOMATIC ALLOCATION YIELDS BAD ARRAY WHEN PASSED
! BUT THIS WORKS
!allocate(sizes(5))
!sizes(:) = sets%size()
!print *, sizes ! PRINTS EXPECTED VALUES
call sub (sizes) ! PASS TO SUBROUTINE TO PRINT
contains
subroutine sub (array)
integer :: array(:)
if (any(array /= [1, 2, 3, 4, 5])) then
print '(a,5(1x,i0))', 'fail:', array
else
print '(a,5(1x,i0))', 'pass:', array
end if
end subroutine
end program
|
using Test
@testset "JuliaExpr.emit" begin
include("julia/node.jl")
include("julia/arg.jl")
include("julia/dash.jl")
include("julia/optional.jl")
include("julia/options.jl")
include("julia/vararg.jl")
end
@testset "ZSHCompletions.emit" begin
include("zsh/zsh.jl")
end
|
-- @@stderr --
dtrace: failed to compile script test/unittest/struct/err.D_DECL_INCOMPLETE.recursive.d: [D_DECL_INCOMPLETE] line 19: incomplete struct/union/enum struct record: rec
|
/-
Copyright (c) 2021 Nicolò Cavalleri. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Nicolò Cavalleri
-/
import topology.homeomorph
/-!
# Topological space structure on the opposite monoid and on the units group
In this file we define `topological_space` structure on `Mᵐᵒᵖ`, `Mᵃᵒᵖ`, `Mˣ`, and `add_units M`.
This file does not import definitions of a topological monoid and/or a continuous multiplicative
action, so we postpone the proofs of `has_continuous_mul Mᵐᵒᵖ` etc till we have these definitions.
## Tags
topological space, opposite monoid, units
-/
variables {M X : Type*}
open filter
open_locale topological_space
namespace mul_opposite
/-- Put the same topological space structure on the opposite monoid as on the original space. -/
@[to_additive] instance [topological_space M] : topological_space Mᵐᵒᵖ :=
topological_space.induced (unop : Mᵐᵒᵖ → M) ‹_›
variables [topological_space M]
@[continuity, to_additive] lemma continuous_unop : continuous (unop : Mᵐᵒᵖ → M) :=
continuous_induced_dom
@[continuity, to_additive] lemma continuous_op : continuous (op : M → Mᵐᵒᵖ) :=
continuous_induced_rng continuous_id
/-- `mul_opposite.op` as a homeomorphism. -/
@[to_additive "`add_opposite.op` as a homeomorphism."]
def op_homeomorph : M ≃ₜ Mᵐᵒᵖ :=
{ to_equiv := op_equiv,
continuous_to_fun := continuous_op,
continuous_inv_fun := continuous_unop }
@[simp, to_additive] lemma map_op_nhds (x : M) : map (op : M → Mᵐᵒᵖ) (𝓝 x) = 𝓝 (op x) :=
op_homeomorph.map_nhds_eq x
@[simp, to_additive] lemma map_unop_nhds (x : Mᵐᵒᵖ) : map (unop : Mᵐᵒᵖ → M) (𝓝 x) = 𝓝 (unop x) :=
op_homeomorph.symm.map_nhds_eq x
@[simp, to_additive] lemma comap_op_nhds (x : Mᵐᵒᵖ) : comap (op : M → Mᵐᵒᵖ) (𝓝 x) = 𝓝 (unop x) :=
op_homeomorph.comap_nhds_eq x
@[simp, to_additive]
end mul_opposite
namespace units
open mul_opposite
variables [topological_space M] [monoid M]
/-- The units of a monoid are equipped with a topology, via the embedding into `M × M`. -/
@[to_additive] instance : topological_space Mˣ :=
topological_space.induced (embed_product M) prod.topological_space
@[to_additive] lemma continuous_embed_product : continuous (embed_product M) :=
continuous_induced_dom
@[to_additive] lemma continuous_coe : continuous (coe : Mˣ → M) :=
(@continuous_embed_product M _ _).fst
end units
|
(** * Iteration: Bounded Loops *)
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the GNU General Public License as published by *)
(* the Free Software Foundation, either version 2 of the License, or *)
(* (at your option) any later version. This file is also distributed *)
(* under the terms of the INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(* -------------------------------------------------------------------------- *
* Vellvm - the Verified LLVM project *
* *
* Copyright (c) 2017 Steve Zdancewic <[email protected]> *
* *
* This file is distributed under the terms of the GNU General Public *
* License as published by the Free Software Foundation, either version *
* 3 of the License, or (at your option) any later version. *
---------------------------------------------------------------------------- *)
(* ################################################################# *)
(** * Bounded iterators *)
Require Import NArith FunctionalExtensionality.
Set Implicit Arguments.
Module Iter.
Section ITERATION.
Variables A B: Type.
Variable step: A -> B + A.
Definition num_iterations := 1000000000000%N.
Open Scope N_scope.
Definition iter_step (x: N)
(next: forall y, y < x -> A -> option B)
(s: A) : option B :=
match N.eq_dec x N.zero with
| left EQ => None
| right NOTEQ =>
match step s with
| inl res => Some res
| inr s' => next (N.pred x) (N.lt_pred_l x NOTEQ) s'
end
end.
Definition iter: N -> A -> option B := Fix N.lt_wf_0 _ iter_step.
Definition iterate := iter num_iterations.
Variable P: A -> Prop.
Variable Q: B -> Prop.
Hypothesis step_prop:
forall a : A, P a ->
match step a with inl b => Q b | inr a' => P a' end.
Lemma iter_prop:
forall n b a, P a -> iter n a = Some b -> Q b.
Proof.
intros n b. pattern n. apply (well_founded_ind N.lt_wf_0).
intros until 2. rewrite (Fix_eq N.lt_wf_0 _ iter_step).
unfold iter_step at 1. destruct (N.eq_dec _ _).
discriminate 1. specialize (step_prop H0).
destruct (step a).
inversion 1; subst b0; exact step_prop.
apply H; auto. apply N.lt_pred_l; auto.
intros. f_equal.
apply functional_extensionality_dep. intro.
apply functional_extensionality_dep. auto.
Qed.
Lemma iterate_prop:
forall a b, iterate a = Some b -> P a -> Q b.
Proof.
intros. apply iter_prop with num_iterations a; assumption.
Qed.
End ITERATION.
End Iter.
|
using TypeVars
using Test
struct Zm{M, T<:Integer}
val::T
end
import Base: +
+(a::Z, b::Z) where Z<:Zm = Z(mod(a.val + b.val, typevar(Z)))
@testset "TypeVars" begin
m = big"2"^512 -1
M = Symbol(hash(m))
Z = Zm{M,BigInt}
settypevar(Z, m)
a = Z(m - 5)
b = Z(m - 10)
c = a + b
@test c.val == m - 15
end
|
#ifndef EV3PLOTTER_MESSAGEQUEUE_H
#define EV3PLOTTER_MESSAGEQUEUE_H
#include <gsl/span>
#include <memory>
#include <string_view>
#include <type_safe/flag_set.hpp>
namespace ev3plotter {
class message_queue {
public:
enum class option { read, write, non_blocking, remove_on_destruction, _flag_set_size };
enum class send_result { success, failure_queue_full, failure };
enum class receive_result { success, failure_no_messages, failure };
message_queue(std::string_view name, std::size_t max_message_size, type_safe::flag_set<option> options);
~message_queue();
send_result send(std::string_view msg);
receive_result receive(gsl::span<char>& buffer);
std::size_t message_size() const noexcept;
private:
class impl;
std::unique_ptr<impl> impl_;
};
} // namespace ev3plotter
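// Rough usage sketch (illustrative only; the queue name and the flag-combining syntax
// are assumptions, not part of this header):
//   using ev3plotter::message_queue;
//   message_queue q{"/ev3plotter", 256,
//                   message_queue::option::read | message_queue::option::write};
//   if (q.send("hello") != message_queue::send_result::success) { /* handle the error */ }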
#endif // EV3PLOTTER_MESSAGEQUEUE_H |
Formal statement is: lemma LIMSEQ_Suc: "f \<longlonglongrightarrow> l \<Longrightarrow> (\<lambda>n. f (Suc n)) \<longlonglongrightarrow> l" Informal statement is: If $f$ converges to $l$, then $f(n+1)$ converges to $l$. |
theory Datatypes
imports inc.Prelude
begin
default_sort type
datatype channel = cin1 | cin2 | cout | cbot
section \<open>Message Definition\<close>
text\<open>The same is true for the "Message" Datatype. Every kind of message has to be described here:\<close>
datatype M_pure = \<N> nat | \<B> bool
instance M_pure::countable
apply(countable_datatype)
done
text \<open>Then one describes the message types of each channel. Only messages included here are
allowed to be transmitted.\<close>
fun cMsg :: "channel \<Rightarrow> M_pure set" where
"cMsg cin1 = range \<N>" |
"cMsg cin2 = range \<N>" |
"cMsg cout = range \<N>" |
"cMsg _ = {}"
lemma cmsgempty_ex:"\<exists>c. cMsg c = {}"
using cMsg.simps by blast
fun cTime :: "channel \<Rightarrow> timeType" where
"cTime cin1 = TTsyn" |
"cTime cin2 = TTsyn" |
"cTime cout = TTsyn" |
"cTime _ = undefined"
end
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Algebra.Magma.Morphism where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Functions.Embedding
open import Cubical.Algebra
private
variable
m n : Level
IsMagmaHom : (M : Magma m) (N : Magma n) → (⟨ M ⟩ → ⟨ N ⟩) → Type (ℓ-max m n)
IsMagmaHom M N fun = Homomorphic₂ fun (Magma._•_ M) (Magma._•_ N)
record MagmaHom (M : Magma m) (N : Magma n) : Type (ℓ-max m n) where
constructor magmahom
field
fun : ⟨ M ⟩ → ⟨ N ⟩
isHom : IsMagmaHom M N fun
record MagmaEquiv (M : Magma m) (N : Magma n) : Type (ℓ-max m n) where
constructor magmaequiv
field
eq : ⟨ M ⟩ ≃ ⟨ N ⟩
isHom : IsMagmaHom M N (equivFun eq)
hom : MagmaHom M N
hom = record { isHom = isHom }
instance
MagmaHomOperators : HomOperators (Magma m) (Magma n) (ℓ-max m n)
MagmaHomOperators = record { _⟶ᴴ_ = MagmaHom; _≃ᴴ_ = MagmaEquiv } |
module Sesam
using ProgressMeter
using CSV
using DataFrames
using Dates
using JuMP
using Gurobi
println("Using Sesam module.")
# utility functions
include("util.jl")
# power plants
include("powerplants.jl")
# renewables
include("renewables.jl")
# storages
include("storages.jl")
# nodes
include("nodes.jl")
# lines
include("lines.jl")
# Powerflow helpers
include("powerflow.jl")
### models
# economic dispatch
include("economic_dispatch.jl")
# congestion management
include("congestion_management.jl")
# congestion management with PtG
include("congestion_management_ptg.jl")
# helper functions
include("helpers.jl")
export
CSV,
DataFrames,
JuMP,
Gurobi,
ProgressMeter,
ElectronDisplay,
Nodes,
Lines,
linespmax,
bprime,
PowerPlants,
Renewables,
Storages,
EconomicDispatch,
EconomicDispatch80,
CongestionManagement,
CongestionManagementPHS80,
CongestionManagementPtG,
hoursInWeek,
hoursInDay,
hoursInHalfDay,
has,
getKeyVector,
dayslicer
end
|
!> Implementation of the meta data for libraries.
!
! A library table can currently have the following fields
!
! ```toml
! [library]
! source-dir = "path"
! build-script = "file"
! ```
module fpm_manifest_library
use fpm_error, only : error_t, syntax_error
use fpm_toml, only : toml_table, toml_key, toml_stat, get_value
implicit none
private
public :: library_t, new_library
!> Configuration meta data for a library
type :: library_t
!> Source path prefix
character(len=:), allocatable :: source_dir
!> Alternative build script to be invoked
character(len=:), allocatable :: build_script
contains
!> Print information on this instance
procedure :: info
end type library_t
contains
!> Construct a new library configuration from a TOML data structure
subroutine new_library(self, table, error)
!> Instance of the library configuration
type(library_t), intent(out) :: self
!> Instance of the TOML data structure
type(toml_table), intent(inout) :: table
!> Error handling
type(error_t), allocatable, intent(out) :: error
call check(table, error)
if (allocated(error)) return
call get_value(table, "source-dir", self%source_dir, "src")
call get_value(table, "build-script", self%build_script)
end subroutine new_library
!> Check local schema for allowed entries
subroutine check(table, error)
!> Instance of the TOML data structure
type(toml_table), intent(inout) :: table
!> Error handling
type(error_t), allocatable, intent(out) :: error
type(toml_key), allocatable :: list(:)
integer :: ikey
call table%get_keys(list)
! table can be empty
if (size(list) < 1) return
do ikey = 1, size(list)
select case(list(ikey)%key)
case default
call syntax_error(error, "Key "//list(ikey)%key//" is not allowed in library")
exit
case("source-dir", "build-script")
continue
end select
end do
end subroutine check
!> Write information on instance
subroutine info(self, unit, verbosity)
!> Instance of the library configuration
class(library_t), intent(in) :: self
!> Unit for IO
integer, intent(in) :: unit
!> Verbosity of the printout
integer, intent(in), optional :: verbosity
integer :: pr
character(len=*), parameter :: fmt = '("#", 1x, a, t30, a)'
if (present(verbosity)) then
pr = verbosity
else
pr = 1
end if
if (pr < 1) return
write(unit, fmt) "Library target"
if (allocated(self%source_dir)) then
write(unit, fmt) "- source directory", self%source_dir
end if
if (allocated(self%build_script)) then
write(unit, fmt) "- custom build", self%build_script
end if
end subroutine info
end module fpm_manifest_library
|
{-# OPTIONS --safe #-}
module Cubical.Algebra.Module.Base where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Equiv.HalfAdjoint
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.SIP
open import Cubical.Data.Sigma
open import Cubical.Displayed.Base
open import Cubical.Displayed.Auto
open import Cubical.Displayed.Record
open import Cubical.Displayed.Universe
open import Cubical.Reflection.RecordEquiv
open import Cubical.Algebra.Ring
open import Cubical.Algebra.AbGroup
open import Cubical.Algebra.Group
open Iso
private
variable
ℓ ℓ' : Level
record IsLeftModule (R : Ring ℓ) {M : Type ℓ'}
(0m : M)
(_+_ : M → M → M)
(-_ : M → M)
(_⋆_ : ⟨ R ⟩ → M → M) : Type (ℓ-max ℓ ℓ') where
constructor ismodule
open RingStr (snd R) using (_·_; 1r) renaming (_+_ to _+r_)
field
+-isAbGroup : IsAbGroup 0m _+_ -_
⋆-assoc : (r s : ⟨ R ⟩) (x : M) → (r · s) ⋆ x ≡ r ⋆ (s ⋆ x)
⋆-ldist : (r s : ⟨ R ⟩) (x : M) → (r +r s) ⋆ x ≡ (r ⋆ x) + (s ⋆ x)
⋆-rdist : (r : ⟨ R ⟩) (x y : M) → r ⋆ (x + y) ≡ (r ⋆ x) + (r ⋆ y)
⋆-lid : (x : M) → 1r ⋆ x ≡ x
open IsAbGroup +-isAbGroup public
renaming
( assoc to +-assoc
; identity to +-identity
; lid to +-lid
; rid to +-rid
; inverse to +-inv
; invl to +-linv
; invr to +-rinv
; comm to +-comm
; isSemigroup to +-isSemigroup
; isMonoid to +-isMonoid
; isGroup to +-isGroup
)
unquoteDecl IsLeftModuleIsoΣ = declareRecordIsoΣ IsLeftModuleIsoΣ (quote IsLeftModule)
record LeftModuleStr (R : Ring ℓ) (A : Type ℓ') : Type (ℓ-max ℓ ℓ') where
constructor leftmodulestr
field
0m : A
_+_ : A → A → A
-_ : A → A
_⋆_ : ⟨ R ⟩ → A → A
isLeftModule : IsLeftModule R 0m _+_ -_ _⋆_
open IsLeftModule isLeftModule public
LeftModule : (R : Ring ℓ) → ∀ ℓ' → Type (ℓ-max ℓ (ℓ-suc ℓ'))
LeftModule R ℓ' = Σ[ A ∈ Type ℓ' ] LeftModuleStr R A
module _ {R : Ring ℓ} where
LeftModule→AbGroup : (M : LeftModule R ℓ') → AbGroup ℓ'
LeftModule→AbGroup (_ , leftmodulestr _ _ _ _ isLeftModule) =
_ , abgroupstr _ _ _ (IsLeftModule.+-isAbGroup isLeftModule)
isSetLeftModule : (M : LeftModule R ℓ') → isSet ⟨ M ⟩
isSetLeftModule M = isSetAbGroup (LeftModule→AbGroup M)
open RingStr (snd R) using (1r) renaming (_+_ to _+r_; _·_ to _·s_)
makeIsLeftModule : {M : Type ℓ'} {0m : M}
{_+_ : M → M → M} { -_ : M → M} {_⋆_ : ⟨ R ⟩ → M → M}
(isSet-M : isSet M)
(+-assoc : (x y z : M) → x + (y + z) ≡ (x + y) + z)
(+-rid : (x : M) → x + 0m ≡ x)
(+-rinv : (x : M) → x + (- x) ≡ 0m)
(+-comm : (x y : M) → x + y ≡ y + x)
(⋆-assoc : (r s : ⟨ R ⟩) (x : M) → (r ·s s) ⋆ x ≡ r ⋆ (s ⋆ x))
(⋆-ldist : (r s : ⟨ R ⟩) (x : M) → (r +r s) ⋆ x ≡ (r ⋆ x) + (s ⋆ x))
(⋆-rdist : (r : ⟨ R ⟩) (x y : M) → r ⋆ (x + y) ≡ (r ⋆ x) + (r ⋆ y))
(⋆-lid : (x : M) → 1r ⋆ x ≡ x)
→ IsLeftModule R 0m _+_ -_ _⋆_
makeIsLeftModule isSet-M +-assoc +-rid +-rinv +-comm ⋆-assoc ⋆-ldist ⋆-rdist ⋆-lid =
ismodule (makeIsAbGroup isSet-M +-assoc +-rid +-rinv +-comm) ⋆-assoc ⋆-ldist ⋆-rdist ⋆-lid
record IsLeftModuleHom {R : Ring ℓ} {A B : Type ℓ'}
(M : LeftModuleStr R A) (f : A → B) (N : LeftModuleStr R B)
: Type (ℓ-max ℓ ℓ')
where
-- Shorter qualified names
private
module M = LeftModuleStr M
module N = LeftModuleStr N
field
pres0 : f M.0m ≡ N.0m
pres+ : (x y : A) → f (x M.+ y) ≡ f x N.+ f y
pres- : (x : A) → f (M.- x) ≡ N.- (f x)
pres⋆ : (r : ⟨ R ⟩) (y : A) → f (r M.⋆ y) ≡ r N.⋆ f y
LeftModuleHom : {R : Ring ℓ} (M N : LeftModule R ℓ') → Type (ℓ-max ℓ ℓ')
LeftModuleHom M N = Σ[ f ∈ (⟨ M ⟩ → ⟨ N ⟩) ] IsLeftModuleHom (M .snd) f (N .snd)
IsLeftModuleEquiv : {R : Ring ℓ} {A B : Type ℓ'}
(M : LeftModuleStr R A) (e : A ≃ B) (N : LeftModuleStr R B)
→ Type (ℓ-max ℓ ℓ')
IsLeftModuleEquiv M e N = IsLeftModuleHom M (e .fst) N
LeftModuleEquiv : {R : Ring ℓ} (M N : LeftModule R ℓ') → Type (ℓ-max ℓ ℓ')
LeftModuleEquiv M N = Σ[ e ∈ ⟨ M ⟩ ≃ ⟨ N ⟩ ] IsLeftModuleEquiv (M .snd) e (N .snd)
isPropIsLeftModule : (R : Ring ℓ) {M : Type ℓ'}
(0m : M)
(_+_ : M → M → M)
(-_ : M → M)
(_⋆_ : ⟨ R ⟩ → M → M)
→ isProp (IsLeftModule R 0m _+_ -_ _⋆_)
isPropIsLeftModule R _ _ _ _ =
isOfHLevelRetractFromIso 1 IsLeftModuleIsoΣ
(isPropΣ (isPropIsAbGroup _ _ _)
(λ ab →
isProp× (isPropΠ3 λ _ _ _ → ab .is-set _ _)
(isProp× (isPropΠ3 λ _ _ _ → ab .is-set _ _)
(isProp× (isPropΠ3 λ _ _ _ → ab .is-set _ _)
(isPropΠ λ _ → ab .is-set _ _)))))
where
open IsAbGroup
𝒮ᴰ-LeftModule : (R : Ring ℓ) → DUARel (𝒮-Univ ℓ') (LeftModuleStr R) (ℓ-max ℓ ℓ')
𝒮ᴰ-LeftModule R =
𝒮ᴰ-Record (𝒮-Univ _) (IsLeftModuleEquiv {R = R})
(fields:
data[ 0m ∣ autoDUARel _ _ ∣ pres0 ]
data[ _+_ ∣ autoDUARel _ _ ∣ pres+ ]
data[ -_ ∣ autoDUARel _ _ ∣ pres- ]
data[ _⋆_ ∣ autoDUARel _ _ ∣ pres⋆ ]
prop[ isLeftModule ∣ (λ _ _ → isPropIsLeftModule _ _ _ _ _) ])
where
open LeftModuleStr
open IsLeftModuleHom
LeftModulePath : {R : Ring ℓ} (M N : LeftModule R ℓ') → (LeftModuleEquiv M N) ≃ (M ≡ N)
LeftModulePath {R = R} = ∫ (𝒮ᴰ-LeftModule R) .UARel.ua
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
$include "gga_x_pw86.mpl"
params_a_aa := 2.208:
params_a_bb := 9.27:
params_a_cc := 0.2:
f := (rs, z, xt, xs0, xs1) -> gga_kinetic(pw86_f, rs, z, xs0, xs1):
|
Formal statement is: lemma linear_continuous_on_compose: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" and g :: "'b \<Rightarrow> 'c::real_normed_vector" assumes "continuous_on S f" "linear g" shows "continuous_on S (\<lambda>x. g(f x))" Informal statement is: If $f$ is a continuous function from a set $S$ to a Euclidean space and $g$ is a linear function from a Euclidean space to a normed vector space, then the composition $g \circ f$ is continuous. |
State Before: R : Type u_1
inst✝ : Semiring R
f : R[X]
h : f ≠ 0
⊢ card (support (eraseLead f)) < card (support f) State After: R : Type u_1
inst✝ : Semiring R
f : R[X]
h : f ≠ 0
⊢ card (Finset.erase (support f) (natDegree f)) < card (support f) Tactic: rw [eraseLead_support] State Before: R : Type u_1
inst✝ : Semiring R
f : R[X]
h : f ≠ 0
⊢ card (Finset.erase (support f) (natDegree f)) < card (support f) State After: no goals Tactic: exact card_lt_card (erase_ssubset <| natDegree_mem_support_of_nonzero h) |
(* Author: Tobias Nipkow *)
theory Abs_Int1_const_ITP
imports Abs_Int1_ITP "../Abs_Int_Tests"
begin
subsection "Constant Propagation"
datatype const = Const val | Any
fun \<gamma>_const where
"\<gamma>_const (Const n) = {n}" |
"\<gamma>_const (Any) = UNIV"
fun plus_const where
"plus_const (Const m) (Const n) = Const(m+n)" |
"plus_const _ _ = Any"
lemma plus_const_cases: "plus_const a1 a2 =
(case (a1,a2) of (Const m, Const n) \<Rightarrow> Const(m+n) | _ \<Rightarrow> Any)"
by(auto split: prod.split const.split)
instantiation const :: SL_top
begin
fun le_const where
"_ \<sqsubseteq> Any = True" |
"Const n \<sqsubseteq> Const m = (n=m)" |
"Any \<sqsubseteq> Const _ = False"
fun join_const where
"Const m \<squnion> Const n = (if n=m then Const m else Any)" |
"_ \<squnion> _ = Any"
definition "\<top> = Any"
instance
proof
case goal1 thus ?case by (cases x) simp_all
next
case goal2 thus ?case by(cases z, cases y, cases x, simp_all)
next
case goal3 thus ?case by(cases x, cases y, simp_all)
next
case goal4 thus ?case by(cases y, cases x, simp_all)
next
case goal5 thus ?case by(cases z, cases y, cases x, simp_all)
next
case goal6 thus ?case by(simp add: Top_const_def)
qed
end
global_interpretation Val_abs
where \<gamma> = \<gamma>_const and num' = Const and plus' = plus_const
proof
case goal1 thus ?case
by(cases a, cases b, simp, simp, cases b, simp, simp)
next
case goal2 show ?case by(simp add: Top_const_def)
next
case goal3 show ?case by simp
next
case goal4 thus ?case
by(auto simp: plus_const_cases split: const.split)
qed
global_interpretation Abs_Int
where \<gamma> = \<gamma>_const and num' = Const and plus' = plus_const
defines AI_const = AI and step_const = step' and aval'_const = aval'
..
subsubsection "Tests"
value "show_acom (((step_const \<top>)^^0) (\<bottom>\<^sub>c test1_const))"
value "show_acom (((step_const \<top>)^^1) (\<bottom>\<^sub>c test1_const))"
value "show_acom (((step_const \<top>)^^2) (\<bottom>\<^sub>c test1_const))"
value "show_acom (((step_const \<top>)^^3) (\<bottom>\<^sub>c test1_const))"
value "show_acom_opt (AI_const test1_const)"
value "show_acom_opt (AI_const test2_const)"
value "show_acom_opt (AI_const test3_const)"
value "show_acom (((step_const \<top>)^^0) (\<bottom>\<^sub>c test4_const))"
value "show_acom (((step_const \<top>)^^1) (\<bottom>\<^sub>c test4_const))"
value "show_acom (((step_const \<top>)^^2) (\<bottom>\<^sub>c test4_const))"
value "show_acom (((step_const \<top>)^^3) (\<bottom>\<^sub>c test4_const))"
value "show_acom_opt (AI_const test4_const)"
value "show_acom (((step_const \<top>)^^0) (\<bottom>\<^sub>c test5_const))"
value "show_acom (((step_const \<top>)^^1) (\<bottom>\<^sub>c test5_const))"
value "show_acom (((step_const \<top>)^^2) (\<bottom>\<^sub>c test5_const))"
value "show_acom (((step_const \<top>)^^3) (\<bottom>\<^sub>c test5_const))"
value "show_acom (((step_const \<top>)^^4) (\<bottom>\<^sub>c test5_const))"
value "show_acom (((step_const \<top>)^^5) (\<bottom>\<^sub>c test5_const))"
value "show_acom_opt (AI_const test5_const)"
value "show_acom (((step_const \<top>)^^0) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^1) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^2) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^3) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^4) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^5) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^6) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^7) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^8) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^9) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^10) (\<bottom>\<^sub>c test6_const))"
value "show_acom (((step_const \<top>)^^11) (\<bottom>\<^sub>c test6_const))"
value "show_acom_opt (AI_const test6_const)"
text{* Monotonicity: *}
global_interpretation Abs_Int_mono
where \<gamma> = \<gamma>_const and num' = Const and plus' = plus_const
proof
case goal1 thus ?case
by(auto simp: plus_const_cases split: const.split)
qed
text{* Termination: *}
definition "m_const x = (case x of Const _ \<Rightarrow> 1 | Any \<Rightarrow> 0)"
lemma measure_const:
"(strict{(x::const,y). x \<sqsubseteq> y})^-1 \<subseteq> measure m_const"
by(auto simp: m_const_def split: const.splits)
lemma measure_const_eq:
"\<forall> x y::const. x \<sqsubseteq> y \<and> y \<sqsubseteq> x \<longrightarrow> m_const x = m_const y"
by(auto simp: m_const_def split: const.splits)
lemma "EX c'. AI_const c = Some c'"
by(rule AI_Some_measure[OF measure_const measure_const_eq])
end
|
-- Copyright 2017, the blau.io contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
module API.Web.DOM.Element
import API.Web.HTML.HTMLElement
import API.Web.Infra.Namespaces
import IdrisScript
%access public export
%default total
||| An Element represents an object of a Document.
|||
||| The original specification can be found at
||| https://dom.spec.whatwg.org/#interface-element
data Element : Type where
FromHTMLElement : HTMLElement -> Element
New : (localName : String) -> (self : JSRef) -> Element
||| elementFromPointer is a helper function for easily creating Elements from
||| JavaScript references.
|||
||| @ ref A pointer to an element
elementFromPointer : (ref : JSRef) -> JS_IO $ Maybe Element
elementFromPointer ref = case !maybeNamespace of
Nothing => pure Nothing
(Just ns) => case !maybeLocalName of
Nothing => pure Nothing
(Just localName) => case ns of
API.Web.Infra.Namespaces.html => case !(htmlElementFromPointer ref) of
Nothing => pure Nothing
(Just htmlElement) => pure $ Just $ FromHTMLElement htmlElement
_ => pure $ Just $ New localName ref
where
maybeNamespace : JS_IO $ Maybe String
maybeNamespace = let
getNameSpace = jscall "%0.namespaceURI" (JSRef -> JS_IO JSRef) ref
in
case !(IdrisScript.pack !getNameSpace) of
(JSString ** str) => pure $ Just $ fromJS str
_ => pure Nothing
maybeLocalName : JS_IO $ Maybe String
maybeLocalName = let
getLocalName = jscall "%0.localName" (JSRef -> JS_IO JSRef) ref
in
case !(IdrisScript.pack !getLocalName) of
(JSString ** str) => pure $ Just $ fromJS str
_ => pure Nothing
|
module Ch04.SubTerm
import Ch03.Arith
||| Propositional type describing that one term is an direct subterm of another one.
data DirectSubTerm : Term -> Term -> Type where
IsIfTerm : (x : Term) -> DirectSubTerm x (IfThenElse x y z)
IsThenTerm : (y : Term) -> DirectSubTerm y (IfThenElse x y z)
IsElseTerm : (z : Term) -> DirectSubTerm z (IfThenElse x y z)
IsSuccSubTerm : (x : Term) -> DirectSubTerm x (Succ x)
IsPredSubTerm : (x : Term) -> DirectSubTerm x (Pred x)
||| Propositional type describing that one term is a subterm of another one.
data SubTerm : Term -> Term -> Type where
IsSubTermOfDirectSubTerm : SubTerm x y -> DirectSubTerm y z -> SubTerm x z
IsEqual : SubTerm x x
|
corollary no_retraction_cball: fixes a :: "'a::euclidean_space" assumes "e > 0" shows "\<not> (frontier (cball a e) retract_of (cball a e))" |
import for_mathlib.derived.example
import breen_deligne.eval
noncomputable theory
open category_theory category_theory.preadditive
namespace breen_deligne
namespace package
variables (BD : package)
variables {𝒜 : Type*} [category 𝒜] [abelian 𝒜]
variables (F : 𝒜 ⥤ 𝒜)
def eval' : 𝒜 ⥤ cochain_complex 𝒜 ℤ :=
(data.eval_functor F).obj BD.data ⋙ homological_complex.embed complex_shape.embedding.nat_down_int_up
def eval : 𝒜 ⥤ bounded_homotopy_category 𝒜 :=
(data.eval_functor F).obj BD.data ⋙ chain_complex.to_bounded_homotopy_category
instance eval_additive : (BD.eval F).additive :=
functor.additive_of_map_fst_add_snd _ $ λ A,
begin
refine homotopy_category.eq_of_homotopy _ _ _,
rw [← functor.map_add],
exact homological_complex.embed_homotopy _ _ (eval_functor_homotopy F BD A) _,
end
lemma eval_functor_obj_X (X : 𝒜) (n : ℕ) :
(((data.eval_functor F).obj BD.data).obj X).X n = F.obj ((Pow (BD.data.X n)).obj X) := rfl
lemma eval_functor_obj_d (X : 𝒜) (m n : ℕ) :
(((data.eval_functor F).obj BD.data).obj X).d m n =
(universal_map.eval_Pow F (BD.data.d m n)).app X := rfl
lemma eval'_obj_X (X : 𝒜) (n : ℕ) :
((BD.eval' F).obj X).X (-n:ℤ) = F.obj ((Pow (BD.data.X n)).obj X) :=
by { cases n; apply eval_functor_obj_X }
lemma eval'_obj_X_0 (X : 𝒜) :
((BD.eval' F).obj X).X 0 = F.obj ((Pow (BD.data.X 0)).obj X) := rfl
lemma eval'_obj_X_succ (X : 𝒜) (n : ℕ) :
((BD.eval' F).obj X).X -[1+ n] = F.obj ((Pow (BD.data.X (n+1))).obj X) := rfl
lemma eval'_obj_d (X : 𝒜) (m n : ℕ) :
((BD.eval' F).obj X).d (-(m+1:ℕ):ℤ) (-(n+1:ℕ):ℤ) =
(universal_map.eval_Pow F (BD.data.d (m+1) (n+1))).app X := rfl
lemma eval'_obj_d_0 (X : 𝒜) (n : ℕ) :
((BD.eval' F).obj X).d (-(n+1:ℕ):ℤ) (-(1:ℕ)+1:ℤ) =
(universal_map.eval_Pow F (BD.data.d (n+1) 0)).app X := rfl
end package
end breen_deligne
|
Formal statement is: lemma residue_neg: assumes "open s" "z \<in> s" and f_holo: "f holomorphic_on s - {z}" shows "residue (\<lambda>z. - (f z)) z= - residue f z" Informal statement is: If $f$ is holomorphic on a punctured neighborhood of $z$, then the residue of $-f$ at $z$ is the negative of the residue of $f$ at $z$. |
Formal statement is: lemma adjoint_linear: fixes f :: "'n::euclidean_space \<Rightarrow> 'm::euclidean_space" assumes lf: "linear f" shows "linear (adjoint f)" Informal statement is: If $f$ is a linear map from $\mathbb{R}^n$ to $\mathbb{R}^m$, then its adjoint is also a linear map. |
Formal statement is: lemma adjoint_adjoint: fixes f :: "'n::euclidean_space \<Rightarrow> 'm::euclidean_space" assumes lf: "linear f" shows "adjoint (adjoint f) = f" Informal statement is: The adjoint of the adjoint of a linear map is the original linear map. |
[STATEMENT]
lemma quasi_transD: "\<lbrakk> x \<^bsub>r\<^esub>\<prec> y; y \<^bsub>r\<^esub>\<prec> z; quasi_trans r \<rbrakk> \<Longrightarrow> x \<^bsub>r\<^esub>\<prec> z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<^bsub>r\<^esub>\<prec> y; y \<^bsub>r\<^esub>\<prec> z; quasi_trans r\<rbrakk> \<Longrightarrow> x \<^bsub>r\<^esub>\<prec> z
[PROOF STEP]
unfolding quasi_trans_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<^bsub>r\<^esub>\<prec> y; y \<^bsub>r\<^esub>\<prec> z; \<forall>x y z. x \<^bsub>r\<^esub>\<prec> y \<and> y \<^bsub>r\<^esub>\<prec> z \<longrightarrow> x \<^bsub>r\<^esub>\<prec> z\<rbrakk> \<Longrightarrow> x \<^bsub>r\<^esub>\<prec> z
[PROOF STEP]
by blast |
theorem Edelstein_fix: fixes S :: "'a::metric_space set" assumes S: "compact S" "S \<noteq> {}" and gs: "(g ` S) \<subseteq> S" and dist: "\<forall>x\<in>S. \<forall>y\<in>S. x \<noteq> y \<longrightarrow> dist (g x) (g y) < dist x y" shows "\<exists>!x\<in>S. g x = x" |
[STATEMENT]
lemma div_nat_eqvt:
fixes x::"nat"
shows "pi\<bullet>(x div y) = (pi\<bullet>x) div (pi\<bullet>y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pi \<bullet> (x div y) = pi \<bullet> x div pi \<bullet> y
[PROOF STEP]
by (simp add:perm_nat_def) |
-- @@stderr --
dtrace: failed to compile script test/unittest/printf/err.D_PROTO_ARG.d: [D_PROTO_ARG] line 18: printf( ) argument #1 is incompatible with prototype:
prototype: string
argument: int
|
-- Copyright 2017, the blau.io contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
module API.Web.XHR.XMLHttpRequestEventTarget
import API.Web.XHR.XMLHttpRequest
%access public export
%default total
data XMLHttpRequestEventTarget : Type where
FromXMLHttpRequest : XMLHttpRequest -> XMLHttpRequestEventTarget
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
! This file was ported from Lean 3 source module logic.nonempty
! leanprover-community/mathlib commit d2d8742b0c21426362a9dacebc6005db895ca963
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Init.ZeroOne
import Mathlib.Logic.Basic
/-!
# Nonempty types
This file proves a few extra facts about `Nonempty`, which is defined in core Lean.
## Main declarations
* `Nonempty.some`: Extracts a witness of nonemptiness using choice. Takes `Nonempty α` explicitly.
* `Classical.arbitrary`: Extracts a witness of nonemptiness using choice. Takes `Nonempty α` as an
instance.
-/
variable {γ : α → Type _}
instance (priority := 20) Zero.nonempty [Zero α] : Nonempty α :=
⟨0⟩
instance (priority := 20) One.nonempty [One α] : Nonempty α :=
⟨1⟩
theorem exists_true_iff_nonempty {α : Sort _} : (∃ _ : α, True) ↔ Nonempty α :=
Iff.intro (fun ⟨a, _⟩ ↦ ⟨a⟩) fun ⟨a⟩ ↦ ⟨a, trivial⟩
#align exists_true_iff_nonempty exists_true_iff_nonempty
@[simp]
theorem nonempty_Prop {p : Prop} : Nonempty p ↔ p :=
Iff.intro (fun ⟨h⟩ ↦ h) fun h ↦ ⟨h⟩
#align nonempty_Prop nonempty_Prop
theorem not_nonempty_iff_imp_false {α : Sort _} : ¬Nonempty α ↔ α → False :=
⟨fun h a ↦ h ⟨a⟩, fun h ⟨a⟩ ↦ h a⟩
#align not_nonempty_iff_imp_false not_nonempty_iff_imp_false
@[simp]
theorem nonempty_sigma : Nonempty (Σa : α, γ a) ↔ ∃ a : α, Nonempty (γ a) :=
Iff.intro (fun ⟨⟨a, c⟩⟩ ↦ ⟨a, ⟨c⟩⟩) fun ⟨a, ⟨c⟩⟩ ↦ ⟨⟨a, c⟩⟩
#align nonempty_sigma nonempty_sigma
@[simp]
theorem nonempty_psigma {α} {β : α → Sort _} : Nonempty (PSigma β) ↔ ∃ a : α, Nonempty (β a) :=
Iff.intro (fun ⟨⟨a, c⟩⟩ ↦ ⟨a, ⟨c⟩⟩) fun ⟨a, ⟨c⟩⟩ ↦ ⟨⟨a, c⟩⟩
#align nonempty_psigma nonempty_psigma
@[simp]
theorem nonempty_subtype {α} {p : α → Prop} : Nonempty (Subtype p) ↔ ∃ a : α, p a :=
Iff.intro (fun ⟨⟨a, h⟩⟩ ↦ ⟨a, h⟩) fun ⟨a, h⟩ ↦ ⟨⟨a, h⟩⟩
#align nonempty_subtype nonempty_subtype
@[simp]
theorem nonempty_prod : Nonempty (α × β) ↔ Nonempty α ∧ Nonempty β :=
Iff.intro (fun ⟨⟨a, b⟩⟩ ↦ ⟨⟨a⟩, ⟨b⟩⟩) fun ⟨⟨a⟩, ⟨b⟩⟩ ↦ ⟨⟨a, b⟩⟩
#align nonempty_prod nonempty_prod
@[simp]
theorem nonempty_pprod {α β} : Nonempty (PProd α β) ↔ Nonempty α ∧ Nonempty β :=
Iff.intro (fun ⟨⟨a, b⟩⟩ ↦ ⟨⟨a⟩, ⟨b⟩⟩) fun ⟨⟨a⟩, ⟨b⟩⟩ ↦ ⟨⟨a, b⟩⟩
#align nonempty_pprod nonempty_pprod
@[simp]
theorem nonempty_sum : Nonempty (Sum α β) ↔ Nonempty α ∨ Nonempty β :=
Iff.intro
(fun ⟨h⟩ ↦
match h with
| Sum.inl a => Or.inl ⟨a⟩
| Sum.inr b => Or.inr ⟨b⟩)
fun h ↦
match h with
| Or.inl ⟨a⟩ => ⟨Sum.inl a⟩
| Or.inr ⟨b⟩ => ⟨Sum.inr b⟩
#align nonempty_sum nonempty_sum
@[simp]
theorem nonempty_psum {α β} : Nonempty (PSum α β) ↔ Nonempty α ∨ Nonempty β :=
Iff.intro
(fun ⟨h⟩ ↦
match h with
| PSum.inl a => Or.inl ⟨a⟩
| PSum.inr b => Or.inr ⟨b⟩)
fun h ↦
match h with
| Or.inl ⟨a⟩ => ⟨PSum.inl a⟩
| Or.inr ⟨b⟩ => ⟨PSum.inr b⟩
#align nonempty_psum nonempty_psum
@[simp]
theorem nonempty_ulift : Nonempty (ULift α) ↔ Nonempty α :=
Iff.intro (fun ⟨⟨a⟩⟩ ↦ ⟨a⟩) fun ⟨a⟩ ↦ ⟨⟨a⟩⟩
#align nonempty_ulift nonempty_ulift
@[simp]
theorem nonempty_plift {α} : Nonempty (PLift α) ↔ Nonempty α :=
Iff.intro (fun ⟨⟨a⟩⟩ ↦ ⟨a⟩) fun ⟨a⟩ ↦ ⟨⟨a⟩⟩
#align nonempty_plift nonempty_plift
@[simp]
theorem Nonempty.forall {α} {p : Nonempty α → Prop} : (∀ h : Nonempty α, p h) ↔ ∀ a, p ⟨a⟩ :=
Iff.intro (fun h _ ↦ h _) fun h ⟨a⟩ ↦ h a
#align nonempty.forall Nonempty.forall
@[simp]
theorem Nonempty.exists {α} {p : Nonempty α → Prop} : (∃ h : Nonempty α, p h) ↔ ∃ a, p ⟨a⟩ :=
Iff.intro (fun ⟨⟨a⟩, h⟩ ↦ ⟨a, h⟩) fun ⟨a, h⟩ ↦ ⟨⟨a⟩, h⟩
#align nonempty.exists Nonempty.exists
/-- Using `Classical.choice`, lifts a (`Prop`-valued) `Nonempty` instance to a (`Type`-valued)
`Inhabited` instance. `Classical.inhabited_of_nonempty` already exists, in
`Init/Classical.lean`, but the assumption is not a type class argument,
which makes it unsuitable for some applications. -/
noncomputable def Classical.inhabited_of_nonempty' {α} [h : Nonempty α] : Inhabited α :=
⟨Classical.choice h⟩
#align classical.inhabited_of_nonempty' Classical.inhabited_of_nonempty'
/-- Using `Classical.choice`, extracts a term from a `Nonempty` type. -/
@[reducible]
protected noncomputable def Nonempty.some {α} (h : Nonempty α) : α :=
Classical.choice h
#align nonempty.some Nonempty.some
/-- Using `Classical.choice`, extracts a term from a `Nonempty` type. -/
@[reducible]
protected noncomputable def Classical.arbitrary (α) [h : Nonempty α] : α :=
Classical.choice h
#align classical.arbitrary Classical.arbitrary
/-- Given `f : α → β`, if `α` is nonempty then `β` is also nonempty.
`Nonempty` cannot be a `functor`, because `Functor` is restricted to `Type`. -/
theorem Nonempty.map {α β} (f : α → β) : Nonempty α → Nonempty β
| ⟨h⟩ => ⟨f h⟩
#align nonempty.map Nonempty.map
protected theorem Nonempty.map2 {α β γ : Sort _} (f : α → β → γ) :
Nonempty α → Nonempty β → Nonempty γ
| ⟨x⟩, ⟨y⟩ => ⟨f x y⟩
#align nonempty.map2 Nonempty.map2
protected theorem Nonempty.congr {α β} (f : α → β) (g : β → α) : Nonempty α ↔ Nonempty β :=
⟨Nonempty.map f, Nonempty.map g⟩
#align nonempty.congr Nonempty.congr
theorem Nonempty.elim_to_inhabited {α : Sort _} [h : Nonempty α] {p : Prop} (f : Inhabited α → p) :
p :=
h.elim <| f ∘ Inhabited.mk
#align nonempty.elim_to_inhabited Nonempty.elim_to_inhabited
protected instance Prod.Nonempty {α β} [h : Nonempty α] [h2 : Nonempty β] : Nonempty (α × β) :=
h.elim fun g ↦ h2.elim fun g2 ↦ ⟨⟨g, g2⟩⟩
protected instance Pi.Nonempty {ι : Sort _} {α : ι → Sort _} [∀ i, Nonempty (α i)] :
Nonempty (∀ i, α i) :=
⟨fun _ ↦ Classical.arbitrary _⟩
theorem Classical.nonempty_pi {ι} {α : ι → Sort _} : Nonempty (∀ i, α i) ↔ ∀ i, Nonempty (α i) :=
⟨fun ⟨f⟩ a ↦ ⟨f a⟩, @Pi.Nonempty _ _⟩
#align classical.nonempty_pi Classical.nonempty_pi
theorem subsingleton_of_not_nonempty {α : Sort _} (h : ¬Nonempty α) : Subsingleton α :=
⟨fun x ↦ False.elim <| not_nonempty_iff_imp_false.mp h x⟩
#align subsingleton_of_not_nonempty subsingleton_of_not_nonempty
theorem Function.Surjective.nonempty [h : Nonempty β] {f : α → β} (hf : Function.Surjective f) :
Nonempty α :=
let ⟨y⟩ := h
let ⟨x, _⟩ := hf y
⟨x⟩
#align function.surjective.nonempty Function.Surjective.nonempty
|
module NIfTI_AFNI
using NIfTI,LightXML
export isafni,AFNIExtension,NIfTI1Extension
"""isafni(e::NIfTI.NIfTI1Extension)
Is this an AFNI Extension?"""
isafni(e::NIfTI.NIfTI1Extension) = e.ecode==4
"""parse_quotedstrings(a::AbstractString)
A simple function to parse apart the strings in AFNI NIfTI Extensions"""
function parse_quotedstrings(a::AbstractString)
strings = SubString[]
stringstart = 0
stringend = 0
i = 1
while i<=length(a)
a[i] == '\\' && (i += 2; continue)
if a[i]=='"'
if stringstart==0
stringstart = i+1
stringend = 0
elseif stringend==0
push!(strings,SubString(a,stringstart,i-1))
stringstart = 0
end
end
i += 1
end
strings
end
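# Illustration (not part of the original module, hand-checked against the loop above):
# backslash-escaped characters are skipped and double quotes delimit substrings, e.g.
#   parse_quotedstrings("\"foo\" \"bar baz\"") == ["foo", "bar baz"]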
mutable struct AFNIExtension
ecode::Int32
edata::Vector{UInt8}
raw_xml::String
header::Dict{String,Any}
end
function AFNIExtension(e::NIfTI.NIfTI1Extension)
isafni(e) || error("Trying to convert an unknown NIfTIExtension to AFNIExtension")
edata = copy(e.edata)
raw_xml = String(edata)
xdoc = parse_string(raw_xml)
xroot = root(xdoc)
header_dict = Dict{String,Any}()
for atr in xroot["AFNI_atr"]
t = attribute(atr,"ni_type")
n = attribute(atr,"atr_name")
if t=="String"
header_dict[n] = parse_quotedstrings(content(atr))
elseif t=="int"
header_dict[n] = parse.(Int,split(content(atr)))
elseif t=="float"
header_dict[n] = parse.(Float64,split(content(atr)))
end
isa(header_dict[n],AbstractArray) && length(header_dict[n])==1 && (header_dict[n] = header_dict[n][1])
if n == "BRICK_LABS"
isa(header_dict[n],AbstractArray) && (header_dict[n] = join(header_dict[n]))
header_dict[n] = split(header_dict[n],"~")
end
n == "BRICK_STATSYM" && (header_dict[n] = split(header_dict[n],";"))
end
free(xdoc)
AFNIExtension(e.ecode,e.edata,raw_xml,header_dict)
end
NIfTI1Extension(a::AFNIExtension) = NIfTI.NIfTI1Extension(a.ecode,a.edata)
end # module
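# Hypothetical usage sketch (not from the original source; the `extensions` field name on
# NIfTI.jl volumes is an assumption and may vary between package versions):
#   vol = NIfTI.niread("epi.nii")
#   for e in filter(isafni, vol.extensions)
#       afni = AFNIExtension(e)
#       println(get(afni.header, "BRICK_LABS", String[]))
#   end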
|
-- Andreas, 2012-04-18
module Issue611 where
data Nat : Set where
zero : Nat
suc : Nat -> Nat
data Bool : Set where
true false : Bool
T : Bool -> Set
T true = {x : Bool} -> Nat
T false = {x : Bool} -> Bool
data D (b : Bool) : Set where
c : T b -> D b
d : D false
d = c {_} true
|
[STATEMENT]
lemma split_list_last_sep: "\<lbrakk>y \<in> set xs; y \<noteq> last xs\<rbrakk> \<Longrightarrow> \<exists>as bs. as @ y # bs @ [last xs] = xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>y \<in> set xs; y \<noteq> last xs\<rbrakk> \<Longrightarrow> \<exists>as bs. as @ y # bs @ [last xs] = xs
[PROOF STEP]
using split_list_not_last[of y xs] split_last_eq append_butlast_last_id
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>y \<in> set xs; y \<noteq> last xs\<rbrakk> \<Longrightarrow> \<exists>as bs. as @ y # bs = xs \<and> bs \<noteq> []
\<lbrakk>?as @ ?y # ?bs = ?xs; ?bs \<noteq> []\<rbrakk> \<Longrightarrow> last ?bs = last ?xs
?xs \<noteq> [] \<Longrightarrow> butlast ?xs @ [last ?xs] = ?xs
goal (1 subgoal):
1. \<lbrakk>y \<in> set xs; y \<noteq> last xs\<rbrakk> \<Longrightarrow> \<exists>as bs. as @ y # bs @ [last xs] = xs
[PROOF STEP]
by metis |
-- -------------------------------------------------------------- [ Lens.idr ]
-- Description : Idris port of Control.Lens
-- Copyright : (c) Huw Campbell
-- --------------------------------------------------------------------- [ EOH ]
module Control.Lens.Getter
import Control.Lens.Types
import Control.Lens.Const
import Data.Contravariant
import Control.Lens.First
import Data.Profunctor
%default total
public export
view : Getting a s a -> s -> a
view l = getConst . applyMor (l (Mor MkConst))
public export
views : Getting a s a -> (a -> r) -> s -> r
views l f = f . view l
public export
foldMapOf : Getting r s a -> (a -> r) -> s -> r
foldMapOf l f = getConst . applyMor (l (Mor (MkConst . f)))
-- Creates a lens where the functor must be both covariant
-- (Functor) and contravariant. Practically this means we can only use
-- Const, so this is a valid Getter and nothing else.
||| Create a Getter from arbitrary functions `s -> a`.
public export
to : Contravariant f => (s -> a) -> LensLike' f s a
to k = dimap k (contramap k)
infixl 8 ^.
public export
(^.) : s -> Getting a s a -> a
a ^. l = view l a
infixl 8 ^?
public export
(^?) : s -> Getting (First a) s a -> Maybe a
s ^? l = getFirst (foldMapOf l (MkFirst . Just) s)
-- --------------------------------------------------------------------- [ EOF ]
|
using Test
@testset "JuliaExpr.emit" begin
include("julia/node.jl")
include("julia/arg.jl")
include("julia/dash.jl")
include("julia/optional.jl")
include("julia/options.jl")
include("julia/vararg.jl")
include("julia/exception.jl")
include("julia/plugin.jl")
end
@testset "ZSHCompletions.emit" begin
include("zsh/zsh.jl")
end
|
[STATEMENT]
lemma Exit_Use_empty:"Use wfp (Main, Exit) = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Use wfp (Main, Exit) = {}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Use wfp (Main, Exit) = {}
[PROOF STEP]
obtain prog procs where [simp]:"Rep_wf_prog wfp = (prog,procs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>prog procs. Rep_wf_prog wfp = (prog, procs) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(cases "Rep_wf_prog wfp") auto
[PROOF STATE]
proof (state)
this:
Rep_wf_prog wfp = (prog, procs)
goal (1 subgoal):
1. Use wfp (Main, Exit) = {}
[PROOF STEP]
hence "well_formed procs"
[PROOF STATE]
proof (prove)
using this:
Rep_wf_prog wfp = (prog, procs)
goal (1 subgoal):
1. well_formed procs
[PROOF STEP]
by(fastforce intro:wf_wf_prog)
[PROOF STATE]
proof (state)
this:
well_formed procs
goal (1 subgoal):
1. Use wfp (Main, Exit) = {}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
well_formed procs
goal (1 subgoal):
1. Use wfp (Main, Exit) = {}
[PROOF STEP]
by(auto dest:Proc_CFG_Call_Labels
simp:Use_def ParamUses_def ParamUses_proc_def ParamDefs_def ParamDefs_proc_def)
[PROOF STATE]
proof (state)
this:
Use wfp (Main, Exit) = {}
goal:
No subgoals!
[PROOF STEP]
qed |
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import order.basic logic.embedding data.nat.basic
open function
universes u v w
variables {α : Type*} {β : Type*} {γ : Type*}
{r : α → α → Prop} {s : β → β → Prop} {t : γ → γ → Prop}
structure order_embedding {α β : Type*} (r : α → α → Prop) (s : β → β → Prop) extends α ↪ β :=
(ord : ∀ {a b}, r a b ↔ s (to_embedding a) (to_embedding b))
infix ` ≼o `:50 := order_embedding
/-- the induced order on a subtype is an embedding under the natural inclusion. -/
definition subtype.order_embedding {X : Type*} (r : X → X → Prop) (p : X → Prop) :
((subtype.val : subtype p → X) ⁻¹'o r) ≼o r :=
⟨⟨subtype.val,subtype.val_injective⟩,by intros;refl⟩
theorem preimage_equivalence {α β} (f : α → β) {s : β → β → Prop}
(hs : equivalence s) : equivalence (f ⁻¹'o s) :=
⟨λ a, hs.1 _, λ a b h, hs.2.1 h, λ a b c h₁ h₂, hs.2.2 h₁ h₂⟩
namespace order_embedding
instance : has_coe_to_fun (r ≼o s) := ⟨λ _, α → β, λ o, o.to_embedding⟩
theorem ord' : ∀ (f : r ≼o s) {a b}, r a b ↔ s (f a) (f b)
| ⟨f, o⟩ := @o
@[simp] theorem coe_fn_mk (f : α ↪ β) (o) :
(@order_embedding.mk _ _ r s f o : α → β) = f := rfl
@[simp] theorem coe_fn_to_embedding (f : r ≼o s) : (f.to_embedding : α → β) = f := rfl
theorem eq_of_to_fun_eq : ∀ {e₁ e₂ : r ≼o s}, (e₁ : α → β) = e₂ → e₁ = e₂
| ⟨⟨f₁, h₁⟩, o₁⟩ ⟨⟨f₂, h₂⟩, o₂⟩ h := by congr; exact h
@[refl] protected def refl (r : α → α → Prop) : r ≼o r :=
⟨embedding.refl _, λ a b, iff.rfl⟩
@[trans] protected def trans (f : r ≼o s) (g : s ≼o t) : r ≼o t :=
⟨f.1.trans g.1, λ a b, by rw [f.2, g.2]; simp⟩
@[simp] theorem refl_apply (x : α) : order_embedding.refl r x = x := rfl
@[simp] theorem trans_apply (f : r ≼o s) (g : s ≼o t) (a : α) : (f.trans g) a = g (f a) := rfl
/-- An order embedding is also an order embedding between dual orders. -/
def rsymm (f : r ≼o s) : swap r ≼o swap s :=
⟨f.to_embedding, λ a b, f.ord'⟩
/-- If `f` is injective, then it is an order embedding from the
preimage order of `s` to `s`. -/
def preimage (f : α ↪ β) (s : β → β → Prop) : f ⁻¹'o s ≼o s := ⟨f, λ a b, iff.rfl⟩
theorem eq_preimage (f : r ≼o s) : r = f ⁻¹'o s :=
by funext a b; exact propext f.ord'
protected theorem is_irrefl : ∀ (f : r ≼o s) [is_irrefl β s], is_irrefl α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a h, H _ (o.1 h)⟩
protected theorem is_refl : ∀ (f : r ≼o s) [is_refl β s], is_refl α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a, o.2 (H _)⟩
protected theorem is_symm : ∀ (f : r ≼o s) [is_symm β s], is_symm α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b h, o.2 (H _ _ (o.1 h))⟩
protected theorem is_asymm : ∀ (f : r ≼o s) [is_asymm β s], is_asymm α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b h₁ h₂, H _ _ (o.1 h₁) (o.1 h₂)⟩
protected theorem is_antisymm : ∀ (f : r ≼o s) [is_antisymm β s], is_antisymm α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b h₁ h₂, f.inj' (H _ _ (o.1 h₁) (o.1 h₂))⟩
protected theorem is_trans : ∀ (f : r ≼o s) [is_trans β s], is_trans α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b c h₁ h₂, o.2 (H _ _ _ (o.1 h₁) (o.1 h₂))⟩
protected theorem is_total : ∀ (f : r ≼o s) [is_total β s], is_total α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b, (or_congr o o).2 (H _ _)⟩
protected theorem is_preorder : ∀ (f : r ≼o s) [is_preorder β s], is_preorder α r
| f H := by exactI {..f.is_refl, ..f.is_trans}
protected theorem is_partial_order : ∀ (f : r ≼o s) [is_partial_order β s], is_partial_order α r
| f H := by exactI {..f.is_preorder, ..f.is_antisymm}
protected theorem is_linear_order : ∀ (f : r ≼o s) [is_linear_order β s], is_linear_order α r
| f H := by exactI {..f.is_partial_order, ..f.is_total}
protected theorem is_strict_order : ∀ (f : r ≼o s) [is_strict_order β s], is_strict_order α r
| f H := by exactI {..f.is_irrefl, ..f.is_trans}
protected theorem is_trichotomous : ∀ (f : r ≼o s) [is_trichotomous β s], is_trichotomous α r
| ⟨f, o⟩ ⟨H⟩ := ⟨λ a b, (or_congr o (or_congr f.inj'.eq_iff.symm o)).2 (H _ _)⟩
protected theorem is_strict_total_order' : ∀ (f : r ≼o s) [is_strict_total_order' β s], is_strict_total_order' α r
| f H := by exactI {..f.is_trichotomous, ..f.is_strict_order}
protected theorem acc (f : r ≼o s) (a : α) : acc s (f a) → acc r a :=
begin
generalize h : f a = b, intro ac,
induction ac with _ H IH generalizing a, subst h,
exact ⟨_, λ a' h, IH (f a') (f.ord'.1 h) _ rfl⟩
end
protected theorem well_founded : ∀ (f : r ≼o s) (h : well_founded s), well_founded r
| f ⟨H⟩ := ⟨λ a, f.acc _ (H _)⟩
protected theorem is_well_order : ∀ (f : r ≼o s) [is_well_order β s], is_well_order α r
| f H := by exactI {wf := f.well_founded H.wf, ..f.is_strict_total_order'}
/-- It suffices to prove `f` is monotone between strict orders
to show it is an order embedding. -/
def of_monotone [is_trichotomous α r] [is_asymm β s] (f : α → β) (H : ∀ a b, r a b → s (f a) (f b)) : r ≼o s :=
begin
haveI := @is_irrefl_of_is_asymm β s _,
refine ⟨⟨f, λ a b e, _⟩, λ a b, ⟨H _ _, λ h, _⟩⟩,
{ refine ((@trichotomous _ r _ a b).resolve_left _).resolve_right _;
exact λ h, @irrefl _ s _ _ (by simpa [e] using H _ _ h) },
{ refine (@trichotomous _ r _ a b).resolve_right (or.rec (λ e, _) (λ h', _)),
{ subst e, exact irrefl _ h },
{ exact asymm (H _ _ h') h } }
end
@[simp] theorem of_monotone_coe [is_trichotomous α r] [is_asymm β s] (f : α → β) (H) :
(@of_monotone _ _ r s _ _ f H : α → β) = f := rfl
-- If le is preserved by an order embedding of preorders, then lt is too
def lt_embedding_of_le_embedding [preorder α] [preorder β]
(f : (has_le.le : α → α → Prop) ≼o (has_le.le : β → β → Prop)) :
(has_lt.lt : α → α → Prop) ≼o (has_lt.lt : β → β → Prop) :=
{ to_fun := f,
inj := f.inj,
ord := by intros; simp [lt_iff_le_not_le,f.ord] }
theorem nat_lt [is_strict_order α r] (f : ℕ → α) (H : ∀ n:ℕ, r (f n) (f (n+1))) :
((<) : ℕ → ℕ → Prop) ≼o r :=
of_monotone f $ λ a b h, begin
induction b with b IH, {exact (nat.not_lt_zero _ h).elim},
cases nat.lt_succ_iff_lt_or_eq.1 h with h e,
{ exact trans (IH h) (H _) },
{ subst b, apply H }
end
theorem nat_gt [is_strict_order α r] (f : ℕ → α) (H : ∀ n:ℕ, r (f (n+1)) (f n)) :
((>) : ℕ → ℕ → Prop) ≼o r :=
by haveI := is_strict_order.swap r; exact rsymm (nat_lt f H)
theorem well_founded_iff_no_descending_seq [is_strict_order α r] : well_founded r ↔ ¬ nonempty (((>) : ℕ → ℕ → Prop) ≼o r) :=
⟨λ ⟨h⟩ ⟨⟨f, o⟩⟩,
suffices ∀ a, acc r a → ∀ n, a ≠ f n, from this (f 0) (h _) 0 rfl,
λ a ac, begin
induction ac with a _ IH, intros n h, subst a,
exact IH (f (n+1)) (o.1 (nat.lt_succ_self _)) _ rfl
end,
λ N, ⟨λ a, classical.by_contradiction $ λ na,
let ⟨f, h⟩ := classical.axiom_of_choice $
show ∀ x : {a // ¬ acc r a}, ∃ y : {a // ¬ acc r a}, r y.1 x.1,
from λ ⟨x, h⟩, classical.by_contradiction $ λ hn, h $
⟨_, λ y h, classical.by_contradiction $ λ na, hn ⟨⟨y, na⟩, h⟩⟩ in
N ⟨nat_gt (λ n, (f^[n] ⟨a, na⟩).1) $ λ n,
by rw nat.iterate_succ'; apply h⟩⟩⟩
end order_embedding
/-- The inclusion map `fin n → ℕ` is an order embedding. -/
def fin.val.order_embedding (n) : @order_embedding (fin n) ℕ (<) (<) :=
⟨⟨fin.val, @fin.eq_of_veq _⟩, λ a b, iff.rfl⟩
/-- The inclusion map `fin m → fin n` is an order embedding. -/
def fin_fin.order_embedding {m n} (h : m ≤ n) : @order_embedding (fin m) (fin n) (<) (<) :=
⟨⟨λ ⟨x, h'⟩, ⟨x, lt_of_lt_of_le h' h⟩,
λ ⟨a, _⟩ ⟨b, _⟩ h, by congr; injection h⟩,
by intros; cases a; cases b; refl⟩
instance fin.lt.is_well_order (n) : is_well_order (fin n) (<) :=
(fin.val.order_embedding _).is_well_order
/-- An order isomorphism is an equivalence that is also an order embedding. -/
structure order_iso {α β : Type*} (r : α → α → Prop) (s : β → β → Prop) extends α ≃ β :=
(ord : ∀ {a b}, r a b ↔ s (to_equiv a) (to_equiv b))
infix ` ≃o `:50 := order_iso
namespace order_iso
def to_order_embedding (f : r ≃o s) : r ≼o s :=
⟨f.to_equiv.to_embedding, f.ord⟩
instance : has_coe (r ≃o s) (r ≼o s) := ⟨to_order_embedding⟩
theorem coe_coe_fn (f : r ≃o s) : ((f : r ≼o s) : α → β) = f := rfl
theorem ord' : ∀ (f : r ≃o s) {a b}, r a b ↔ s (f a) (f b)
| ⟨f, o⟩ := @o
@[simp] theorem coe_fn_mk (f : α ≃ β) (o) :
(@order_iso.mk _ _ r s f o : α → β) = f := rfl
@[simp] theorem coe_fn_to_equiv (f : r ≃o s) : (f.to_equiv : α → β) = f := rfl
theorem eq_of_to_fun_eq : ∀ {e₁ e₂ : r ≃o s}, (e₁ : α → β) = e₂ → e₁ = e₂
| ⟨e₁, o₁⟩ ⟨e₂, o₂⟩ h := by congr; exact equiv.eq_of_to_fun_eq h
@[refl] protected def refl (r : α → α → Prop) : r ≃o r :=
⟨equiv.refl _, λ a b, iff.rfl⟩
@[symm] protected def symm (f : r ≃o s) : s ≃o r :=
⟨f.to_equiv.symm, λ a b, by cases f with f o; rw o; simp⟩
@[trans] protected def trans (f₁ : r ≃o s) (f₂ : s ≃o t) : r ≃o t :=
⟨f₁.to_equiv.trans f₂.to_equiv, λ a b,
by cases f₁ with f₁ o₁; cases f₂ with f₂ o₂; rw [o₁, o₂]; simp⟩
@[simp] theorem coe_fn_symm_mk (f o) : ((@order_iso.mk _ _ r s f o).symm : β → α) = f.symm :=
rfl
@[simp] theorem refl_apply (x : α) : order_iso.refl r x = x := rfl
@[simp] theorem trans_apply : ∀ (f : r ≃o s) (g : s ≃o t) (a : α), (f.trans g) a = g (f a)
| ⟨f₁, o₁⟩ ⟨f₂, o₂⟩ a := equiv.trans_apply _ _ _
@[simp] theorem apply_symm_apply : ∀ (e : r ≃o s) (x : β), e (e.symm x) = x
| ⟨f₁, o₁⟩ x := by simp
@[simp] theorem symm_apply_apply : ∀ (e : r ≃o s) (x : α), e.symm (e x) = x
| ⟨f₁, o₁⟩ x := by simp
/-- Any equivalence lifts to an order isomorphism between `s` and its preimage. -/
def preimage (f : α ≃ β) (s : β → β → Prop) : f ⁻¹'o s ≃o s := ⟨f, λ a b, iff.rfl⟩
noncomputable def of_surjective (f : r ≼o s) (H : surjective f) : r ≃o s :=
⟨equiv.of_bijective ⟨f.inj, H⟩, by simp [f.ord']⟩
@[simp] theorem of_surjective_coe (f : r ≼o s) (H) : (of_surjective f H : α → β) = f :=
by delta of_surjective; simp
theorem sum_lex_congr {α₁ α₂ β₁ β₂ r₁ r₂ s₁ s₂}
(e₁ : @order_iso α₁ α₂ r₁ r₂) (e₂ : @order_iso β₁ β₂ s₁ s₂) :
sum.lex r₁ s₁ ≃o sum.lex r₂ s₂ :=
⟨equiv.sum_congr e₁.to_equiv e₂.to_equiv, λ a b,
by cases e₁ with f hf; cases e₂ with g hg;
cases a; cases b; simp [hf, hg]⟩
theorem prod_lex_congr {α₁ α₂ β₁ β₂ r₁ r₂ s₁ s₂}
(e₁ : @order_iso α₁ α₂ r₁ r₂) (e₂ : @order_iso β₁ β₂ s₁ s₂) :
prod.lex r₁ s₁ ≃o prod.lex r₂ s₂ :=
⟨equiv.prod_congr e₁.to_equiv e₂.to_equiv, λ a b, begin
cases e₁ with f hf; cases e₂ with g hg,
cases a with a₁ a₂; cases b with b₁ b₂,
suffices : prod.lex r₁ s₁ (a₁, a₂) (b₁, b₂) ↔
prod.lex r₂ s₂ (f a₁, g a₂) (f b₁, g b₂), {simpa [hf, hg]},
split,
{ intro h, cases h with _ _ _ _ h _ _ _ h,
{ left, exact hf.1 h },
{ right, exact hg.1 h } },
{ generalize e : f b₁ = fb₁,
intro h, cases h with _ _ _ _ h _ _ _ h,
{ subst e, left, exact hf.2 h },
{ have := f.injective e, subst b₁,
right, exact hg.2 h } }
end⟩
end order_iso
/-- A subset `p : set α` embeds into `α` -/
def set_coe_embedding {α : Type*} (p : set α) : p ↪ α := ⟨subtype.val, @subtype.eq _ _⟩
/-- `subrel r p` is the inherited relation on a subset. -/
def subrel (r : α → α → Prop) (p : set α) : p → p → Prop :=
@subtype.val _ p ⁻¹'o r
@[simp] theorem subrel_val (r : α → α → Prop) (p : set α)
{a b} : subrel r p a b ↔ r a.1 b.1 := iff.rfl
namespace subrel
protected def order_embedding (r : α → α → Prop) (p : set α) :
subrel r p ≼o r := ⟨set_coe_embedding _, λ a b, iff.rfl⟩
@[simp] theorem order_embedding_apply (r : α → α → Prop) (p a) :
subrel.order_embedding r p a = a.1 := rfl
instance (r : α → α → Prop) [is_well_order α r]
(p : set α) : is_well_order p (subrel r p) :=
order_embedding.is_well_order (subrel.order_embedding r p)
end subrel
/-- Restrict the codomain of an order embedding -/
def order_embedding.cod_restrict (p : set β) (f : r ≼o s) (H : ∀ a, f a ∈ p) : r ≼o subrel s p :=
⟨f.to_embedding.cod_restrict p H, f.ord⟩
@[simp] theorem order_embedding.cod_restrict_apply (p) (f : r ≼o s) (H a) :
order_embedding.cod_restrict p f H a = ⟨f a, H a⟩ := rfl
|
module DepSec.Declassification
import DepSec.Labeled
%access export
||| Predicate hatch builder
||| TCB
||| @D data structure type
||| @E element type
||| @d data structure from which declassification is allowed
||| @P predicate
predicateHatch : Poset labelType
=> {l, l' : labelType}
-> {D, E : Type}
-> (d : D)
-> (P : D -> E -> Type)
-> (d : D ** Labeled l (e : E ** P d e) -> Labeled l' E)
predicateHatch {labelType} {l} {l'} {E} d P = (d ** hatch)
where
hatch : Labeled l (e : E ** P d e) -> Labeled l' E
hatch (MkLabeled (x ** _)) = label x
||| Token hatch builder
||| TCB
||| @E value type
||| @S token type
||| @Q token predicate
tokenHatch : Poset labelType
=> {l, l' : labelType}
-> {E, S : Type}
-> (Q : S -> Type) -- 'when'/'who' predicate
-> (s : S ** Q s) -> Labeled l E -> Labeled l' E
tokenHatch {labelType} {l} {l'} {E} {S} Q = hatch
where
hatch : (s : S ** Q s) -> Labeled l E -> Labeled l' E
hatch _ (MkLabeled x) = label x
||| Generic combined hatch builder
||| TCB
||| @d data structure
||| @Q token predicate
||| @P predicate
hatchBuilder : Poset labelType
=> {l, l' : labelType}
-> {D, E, S : Type}
-> (d : D) -- datastructre
-> (Q : S -> Type) -- 'when'/'who' predicate
-> (P : D -> E -> Type) -- 'what' predicate
-> (d : D ** Labeled l (e : E ** P d e) -> (s : S ** Q s) -> Labeled l' E)
hatchBuilder {labelType} {l} {l'} {E} {S} d Q P = (d ** hatch)
where
hatch : Labeled l (e : E ** P d e) -> (s : S ** Q s) -> Labeled l' E
hatch (MkLabeled (x ** _)) _ = label x
|
(* Author: Tobias Nipkow *)
section \<open>Queue Implementation via 2 Lists\<close>
theory Queue_2Lists
imports
Queue_Spec
Reverse
begin
text \<open>Definitions:\<close>
type_synonym 'a queue = "'a list \<times> 'a list"
fun norm :: "'a queue \<Rightarrow> 'a queue" where
"norm (fs,rs) = (if fs = [] then (itrev rs [], []) else (fs,rs))"
fun enq :: "'a \<Rightarrow> 'a queue \<Rightarrow> 'a queue" where
"enq a (fs,rs) = norm(fs, a # rs)"
fun deq :: "'a queue \<Rightarrow> 'a queue" where
"deq (fs,rs) = (if fs = [] then (fs,rs) else norm(tl fs,rs))"
fun first :: "'a queue \<Rightarrow> 'a" where
"first (a # fs,rs) = a"
fun is_empty :: "'a queue \<Rightarrow> bool" where
"is_empty (fs,rs) = (fs = [])"
fun list :: "'a queue \<Rightarrow> 'a list" where
"list (fs,rs) = fs @ rev rs"
fun invar :: "'a queue \<Rightarrow> bool" where
"invar (fs,rs) = (fs = [] \<longrightarrow> rs = [])"
text \<open>Implementation correctness:\<close>
interpretation Queue
where empty = "([],[])" and enq = enq and deq = deq and first = first
and is_empty = is_empty and list = list and invar = invar
proof (standard, goal_cases)
case 1 show ?case by (simp)
next
case (2 q) thus ?case by(cases q) (simp)
next
case (3 q) thus ?case by(cases q) (simp add: itrev_Nil)
next
case (4 q) thus ?case by(cases q) (auto simp: neq_Nil_conv)
next
case (5 q) thus ?case by(cases q) (auto)
next
case 6 show ?case by(simp)
next
case (7 q) thus ?case by(cases q) (simp)
next
case (8 q) thus ?case by(cases q) (simp)
qed
text \<open>Running times:\<close>
fun T_norm :: "'a queue \<Rightarrow> nat" where
"T_norm (fs,rs) = (if fs = [] then T_itrev rs [] else 0) + 1"
fun T_enq :: "'a \<Rightarrow> 'a queue \<Rightarrow> nat" where
"T_enq a (fs,rs) = T_norm(fs, a # rs) + 1"
fun T_deq :: "'a queue \<Rightarrow> nat" where
"T_deq (fs,rs) = (if fs = [] then 0 else T_norm(tl fs,rs)) + 1"
fun T_first :: "'a queue \<Rightarrow> nat" where
"T_first (a # fs,rs) = 1"
fun T_is_empty :: "'a queue \<Rightarrow> nat" where
"T_is_empty (fs,rs) = 1"
text \<open>Amortized running times:\<close>
fun \<Phi> :: "'a queue \<Rightarrow> nat" where
"\<Phi>(fs,rs) = length rs"
lemma a_enq: "T_enq a (fs,rs) + \<Phi>(enq a (fs,rs)) - \<Phi>(fs,rs) \<le> 4"
by(auto simp: T_itrev)
lemma a_deq: "T_deq (fs,rs) + \<Phi>(deq (fs,rs)) - \<Phi>(fs,rs) \<le> 3"
by(auto simp: T_itrev)
end
|
[STATEMENT]
lemma sig_red_tail_rtrancl_lc_rep_list:
assumes "(sig_red sing_reg (\<prec>) F)\<^sup>*\<^sup>* p q"
shows "punit.lc (rep_list q) = punit.lc (rep_list p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. punit.lc (rep_list q) = punit.lc (rep_list p)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
(sig_red sing_reg (\<prec>) F)\<^sup>*\<^sup>* p q
goal (1 subgoal):
1. punit.lc (rep_list q) = punit.lc (rep_list p)
[PROOF STEP]
by (induct, auto dest: sig_red_tail_lc_rep_list) |
theory Datatypes
imports inc.Prelude
begin
default_sort type
datatype channel = cab | cin | cout
section \<open>Message Definition\<close>
text\<open>The same is true for the "Message" Datatype. Every kind of message has to be described here:\<close>
datatype M_pure = \<N> nat | \<B> bool
text \<open>Instantiate @{type M_pure} as countable. This is necessary for using @{type M_pure} streams.\<close>
instance M_pure::countable
apply(countable_datatype)
done
lemma inj_B[simp]:"inj \<B>"
by (simp add: inj_def)
lemma inj_Bopt[simp]:"inj (map_option \<B>)"
by (simp add: option.inj_map)
text \<open>Then one describes the types of each channel. Only the messages included here are allowed
to be transmitted.\<close>
fun cMsg :: "channel \<Rightarrow> M_pure set" where
"cMsg cin = range \<N>" |
"cMsg cout = range \<B>" |
"cMsg _ = {}"
text\<open>Timing properties of each channel\<close>
fun cTime :: "channel \<Rightarrow> timeType" where
"cTime cin = TUntimed" |
"cTime cout = TUntimed" |
"cTime _ = undefined"
lemma cmsgempty_ex:"\<exists>c. cMsg c = {}"
using cMsg.simps by blast
end
|
/-
Copyright (c) 2021 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.abelian.basic
import category_theory.preadditive.opposite
import category_theory.limits.opposites
import category_theory.limits.constructions.limits_of_products_and_equalizers
/-!
# The opposite of an abelian category is abelian.
-/
noncomputable theory
namespace category_theory
open category_theory.limits
variables (C : Type*) [category C] [abelian C]
local attribute [instance]
finite_limits_from_equalizers_and_finite_products
finite_colimits_from_coequalizers_and_finite_coproducts
has_finite_limits_opposite has_finite_colimits_opposite has_finite_products_opposite
instance : abelian Cᵒᵖ :=
{ normal_mono_of_mono := λ X Y f m, by exactI
normal_mono_of_normal_epi_unop _ (normal_epi_of_epi f.unop),
normal_epi_of_epi := λ X Y f m, by exactI
normal_epi_of_normal_mono_unop _ (normal_mono_of_mono f.unop), }
section
variables {C} {X Y : C} (f : X ⟶ Y) {A B : Cᵒᵖ} (g : A ⟶ B)
-- TODO: Generalize (this will work whenever f has a cokernel)
-- (The abelian case is probably sufficient for most applications.)
/-- The kernel of `f.op` is the opposite of `cokernel f`. -/
@[simps]
def kernel_op_unop : (kernel f.op).unop ≅ cokernel f :=
{ hom := (kernel.lift f.op (cokernel.π f).op $ by simp [← op_comp]).unop,
inv := cokernel.desc f (kernel.ι f.op).unop $
by { rw [← f.unop_op, ← unop_comp, f.unop_op], simp },
hom_inv_id' := begin
rw [← unop_id, ← (cokernel.desc f _ _).unop_op, ← unop_comp],
congr' 1,
dsimp,
ext,
simp [← op_comp],
end,
inv_hom_id' := begin
dsimp,
ext,
simp [← unop_comp],
end }
-- TODO: Generalize (this will work whenever f has a kernel)
-- (The abelian case is probably sufficient for most applications.)
/-- The cokernel of `f.op` is the opposite of `kernel f`. -/
@[simps]
def cokernel_op_unop : (cokernel f.op).unop ≅ kernel f :=
{ hom := kernel.lift f (cokernel.π f.op).unop $
by { rw [← f.unop_op, ← unop_comp, f.unop_op], simp },
inv := (cokernel.desc f.op (kernel.ι f).op $ by simp [← op_comp]).unop,
hom_inv_id' := begin
rw [← unop_id, ← (kernel.lift f _ _).unop_op, ← unop_comp],
congr' 1,
dsimp,
ext,
simp [← op_comp],
end,
inv_hom_id' := begin
dsimp,
ext,
simp [← unop_comp],
end }
/-- The kernel of `g.unop` is the opposite of `cokernel g`. -/
@[simps]
def kernel_unop_op : opposite.op (kernel g.unop) ≅ cokernel g :=
(cokernel_op_unop g.unop).op
/-- The cokernel of `g.unop` is the opposite of `kernel g`. -/
@[simps]
def cokernel_unop_op : opposite.op (cokernel g.unop) ≅ kernel g :=
(kernel_op_unop g.unop).op
lemma cokernel.π_op : (cokernel.π f.op).unop =
(cokernel_op_unop f).hom ≫ kernel.ι f ≫ eq_to_hom (opposite.unop_op _).symm :=
by simp [cokernel_op_unop]
lemma kernel.ι_op : (kernel.ι f.op).unop =
eq_to_hom (opposite.unop_op _) ≫ cokernel.π f ≫ (kernel_op_unop f).inv :=
by simp [kernel_op_unop]
/-- The kernel of `f.op` is the opposite of `cokernel f`. -/
@[simps]
def kernel_op_op : kernel f.op ≅ opposite.op (cokernel f) :=
(kernel_op_unop f).op.symm
/-- The cokernel of `f.op` is the opposite of `kernel f`. -/
@[simps]
def cokernel_op_op : cokernel f.op ≅ opposite.op (kernel f) :=
(cokernel_op_unop f).op.symm
/-- The kernel of `g.unop` is the opposite of `cokernel g`. -/
@[simps]
def kernel_unop_unop : kernel g.unop ≅ (cokernel g).unop :=
(kernel_unop_op g).unop.symm
lemma kernel.ι_unop : (kernel.ι g.unop).op =
eq_to_hom (opposite.op_unop _) ≫ cokernel.π g ≫ (kernel_unop_op g).inv :=
by simp
lemma cokernel.π_unop : (cokernel.π g.unop).op =
(cokernel_unop_op g).hom ≫ kernel.ι g ≫ eq_to_hom (opposite.op_unop _).symm :=
by simp
/-- The cokernel of `g.unop` is the opposite of `kernel g`. -/
@[simps]
def cokernel_unop_unop : cokernel g.unop ≅ (kernel g).unop :=
(cokernel_unop_op g).unop.symm
end
end category_theory
|
[STATEMENT]
lemma payload_EphKI: "EphK X \<in> payload"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. EphK X \<in> payload
[PROOF STEP]
by (auto simp add: msg_defs abs_payload) |
-- The sum of two even functions is an even function
-- ==================================================
import data.real.basic
variables (x y : ℝ)
variables (f g : ℝ → ℝ)
-- ----------------------------------------------------
-- Exercise 1. Define the function
--    par : (ℝ → ℝ) → Prop
-- such that (par f) states that f is even.
-- ----------------------------------------------------
def par (f : ℝ → ℝ) : Prop :=
∀ x, f (-x) = f x
-- ----------------------------------------------------
-- Exercise 2. Define the function
--    suma : (ℝ → ℝ) → (ℝ → ℝ) → (ℝ → ℝ)
-- such that (suma f g) is the sum of the functions f and g.
-- ----------------------------------------------------
@[simp]
def suma (f g: ℝ → ℝ) : ℝ → ℝ :=
λ x, f x + g x
-- ----------------------------------------------------
-- Exercise 3. Prove that the sum of even
-- functions is even.
-- ----------------------------------------------------
-- 1st proof
example :
par f → par g → par (suma f g) :=
begin
intro hf,
unfold par at hf,
intro hg,
unfold par at hg,
unfold par,
intro x,
unfold suma,
rw hf,
rw hg,
end
-- 2nd proof
example :
par f → par g → par (suma f g) :=
begin
intros hf hg x,
simp [suma],
rw [hf, hg],
end
-- 3rd proof
example :
par f → par g → par (suma f g) :=
begin
intros hf hg x,
unfold suma,
rw [hf, hg],
end
-- 4th proof
example :
par f → par g → par (suma f g) :=
begin
intros hf hg x,
calc (f + g) (-x)
= f (-x) + g (-x) : rfl
... = f x + g (-x) : by rw hf
... = f x + g x : by rw hg
... = (f + g) x : rfl
end
-- 5th proof
example :
par f → par g → par (suma f g) :=
begin
intros hf hg x,
calc (f + g) (-x)
= f (-x) + g (-x) : rfl
... = f x + g x : by rw [hf, hg]
end
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Johan Commelin
-/
import category_theory.limits.shapes.terminal
/-!
# Zero objects
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
A category "has a zero object" if it has an object which is both initial and terminal. Having a
zero object provides zero morphisms, as the unique morphisms factoring through the zero object;
see `category_theory.limits.shapes.zero_morphisms`.
## References
* [F. Borceux, *Handbook of Categorical Algebra 2*][borceux-vol2]
-/
noncomputable theory
universes v u v' u'
open category_theory
open category_theory.category
variables {C : Type u} [category.{v} C]
variables {D : Type u'} [category.{v'} D]
namespace category_theory
namespace limits
/-- An object `X` in a category is a *zero object* if for every object `Y`
there is a unique morphism `to : X → Y` and a unique morphism `from : Y → X`.
This is a characteristic predicate for `has_zero_object`. -/
structure is_zero (X : C) : Prop :=
(unique_to : ∀ Y, nonempty (unique (X ⟶ Y)))
(unique_from : ∀ Y, nonempty (unique (Y ⟶ X)))
namespace is_zero
variables {X Y : C}
/-- If `h : is_zero X`, then `h.to Y` is a choice of unique morphism `X → Y`. -/
protected def «to» (h : is_zero X) (Y : C) : X ⟶ Y :=
@default (X ⟶ Y) $ @unique.inhabited _ $ (h.unique_to Y).some
lemma eq_to (h : is_zero X) (f : X ⟶ Y) : f = h.to Y :=
@unique.eq_default _ (id _) _
lemma to_eq (h : is_zero X) (f : X ⟶ Y) : h.to Y = f :=
(h.eq_to f).symm
/-- If `h : is_zero X`, then `h.from Y` is a choice of unique morphism `Y → X`. -/
protected def «from» (h : is_zero X) (Y : C) : Y ⟶ X :=
@default (Y ⟶ X) $ @unique.inhabited _ $ (h.unique_from Y).some
lemma eq_from (h : is_zero X) (f : Y ⟶ X) : f = h.from Y :=
@unique.eq_default _ (id _) _
lemma from_eq (h : is_zero X) (f : Y ⟶ X) : h.from Y = f :=
(h.eq_from f).symm
lemma eq_of_src (hX : is_zero X) (f g : X ⟶ Y) : f = g :=
(hX.eq_to f).trans (hX.eq_to g).symm
lemma eq_of_tgt (hX : is_zero X) (f g : Y ⟶ X) : f = g :=
(hX.eq_from f).trans (hX.eq_from g).symm
/-- Any two zero objects are isomorphic. -/
def iso (hX : is_zero X) (hY : is_zero Y) : X ≅ Y :=
{ hom := hX.to Y,
inv := hX.from Y,
hom_inv_id' := hX.eq_of_src _ _,
inv_hom_id' := hY.eq_of_src _ _, }
/-- A zero object is in particular initial. -/
protected def is_initial (hX : is_zero X) : is_initial X :=
@is_initial.of_unique _ _ X $ λ Y, (hX.unique_to Y).some
/-- A zero object is in particular terminal. -/
protected def is_terminal (hX : is_zero X) : is_terminal X :=
@is_terminal.of_unique _ _ X $ λ Y, (hX.unique_from Y).some
/-- The (unique) isomorphism between any initial object and the zero object. -/
def iso_is_initial (hX : is_zero X) (hY : is_initial Y) : X ≅ Y :=
hX.is_initial.unique_up_to_iso hY
/-- The (unique) isomorphism between any terminal object and the zero object. -/
def iso_is_terminal (hX : is_zero X) (hY : is_terminal Y) : X ≅ Y :=
hX.is_terminal.unique_up_to_iso hY
lemma of_iso (hY : is_zero Y) (e : X ≅ Y) : is_zero X :=
begin
refine ⟨λ Z, ⟨⟨⟨e.hom ≫ hY.to Z⟩, λ f, _⟩⟩, λ Z, ⟨⟨⟨hY.from Z ≫ e.inv⟩, λ f, _⟩⟩⟩,
{ rw ← cancel_epi e.inv, apply hY.eq_of_src, },
{ rw ← cancel_mono e.hom, apply hY.eq_of_tgt, },
end
lemma op (h : is_zero X) : is_zero (opposite.op X) :=
⟨λ Y, ⟨⟨⟨(h.from (opposite.unop Y)).op⟩, λ f, quiver.hom.unop_inj (h.eq_of_tgt _ _)⟩⟩,
λ Y, ⟨⟨⟨(h.to (opposite.unop Y)).op⟩, λ f, quiver.hom.unop_inj (h.eq_of_src _ _)⟩⟩⟩
lemma unop {X : Cᵒᵖ} (h : is_zero X) : is_zero (opposite.unop X) :=
⟨λ Y, ⟨⟨⟨(h.from (opposite.op Y)).unop⟩, λ f, quiver.hom.op_inj (h.eq_of_tgt _ _)⟩⟩,
λ Y, ⟨⟨⟨(h.to (opposite.op Y)).unop⟩, λ f, quiver.hom.op_inj (h.eq_of_src _ _)⟩⟩⟩
end is_zero
end limits
open category_theory.limits
lemma iso.is_zero_iff {X Y : C} (e : X ≅ Y) :
is_zero X ↔ is_zero Y :=
⟨λ h, h.of_iso e.symm, λ h, h.of_iso e⟩
lemma functor.is_zero (F : C ⥤ D) (hF : ∀ X, is_zero (F.obj X)) :
is_zero F :=
begin
split; intros G; refine ⟨⟨⟨_⟩, _⟩⟩,
{ refine { app := λ X, (hF _).to _, naturality' := _ },
intros, exact (hF _).eq_of_src _ _ },
{ intro f, ext, apply (hF _).eq_of_src _ _ },
{ refine { app := λ X, (hF _).from _, naturality' := _ },
intros, exact (hF _).eq_of_tgt _ _ },
{ intro f, ext, apply (hF _).eq_of_tgt _ _ },
end
namespace limits
variables (C)
/-- A category "has a zero object" if it has an object which is both initial and terminal. -/
class has_zero_object : Prop :=
(zero : ∃ X : C, is_zero X)
instance has_zero_object_punit : has_zero_object (discrete punit) :=
{ zero := ⟨⟨⟨⟩⟩, by tidy, by tidy⟩, }
section
variables [has_zero_object C]
/--
Construct a `has_zero C` for a category with a zero object.
This can not be a global instance as it will trigger for every `has_zero C` typeclass search.
-/
protected def has_zero_object.has_zero : has_zero C :=
{ zero := has_zero_object.zero.some }
localized "attribute [instance] category_theory.limits.has_zero_object.has_zero" in zero_object
lemma is_zero_zero : is_zero (0 : C) :=
has_zero_object.zero.some_spec
instance has_zero_object_op : has_zero_object Cᵒᵖ := ⟨⟨opposite.op 0, is_zero.op (is_zero_zero C)⟩⟩
end
open_locale zero_object
lemma has_zero_object_unop [has_zero_object Cᵒᵖ] : has_zero_object C :=
⟨⟨opposite.unop 0, is_zero.unop (is_zero_zero Cᵒᵖ)⟩⟩
variables {C}
lemma is_zero.has_zero_object {X : C} (hX : is_zero X) : has_zero_object C := ⟨⟨X, hX⟩⟩
/-- Every zero object is isomorphic to *the* zero object. -/
def is_zero.iso_zero [has_zero_object C] {X : C} (hX : is_zero X) : X ≅ 0 :=
hX.iso (is_zero_zero C)
lemma is_zero.obj [has_zero_object D] {F : C ⥤ D} (hF : is_zero F) (X : C) :
is_zero (F.obj X) :=
begin
let G : C ⥤ D := (category_theory.functor.const C).obj 0,
have hG : is_zero G := functor.is_zero _ (λ X, is_zero_zero _),
let e : F ≅ G := hF.iso hG,
exact (is_zero_zero _).of_iso (e.app X),
end
namespace has_zero_object
variables [has_zero_object C]
/-- There is a unique morphism from the zero object to any object `X`. -/
protected def unique_to (X : C) : unique (0 ⟶ X) :=
((is_zero_zero C).unique_to X).some
/-- There is a unique morphism from any object `X` to the zero object. -/
protected def unique_from (X : C) : unique (X ⟶ 0) :=
((is_zero_zero C).unique_from X).some
localized "attribute [instance] category_theory.limits.has_zero_object.unique_to" in zero_object
localized "attribute [instance] category_theory.limits.has_zero_object.unique_from" in zero_object
@[ext]
lemma to_zero_ext {X : C} (f g : X ⟶ 0) : f = g :=
(is_zero_zero C).eq_of_tgt _ _
@[ext]
lemma from_zero_ext {X : C} (f g : 0 ⟶ X) : f = g :=
(is_zero_zero C).eq_of_src _ _
instance (X : C) : subsingleton (X ≅ 0) := by tidy
instance {X : C} (f : 0 ⟶ X) : mono f :=
{ right_cancellation := λ Z g h w, by ext, }
instance {X : C} (f : X ⟶ 0) : epi f :=
{ left_cancellation := λ Z g h w, by ext, }
instance zero_to_zero_is_iso (f : (0 : C) ⟶ 0) :
is_iso f :=
by convert (show is_iso (𝟙 (0 : C)), by apply_instance)
/-- A zero object is in particular initial. -/
def zero_is_initial : is_initial (0 : C) :=
(is_zero_zero C).is_initial
/-- A zero object is in particular terminal. -/
def zero_is_terminal : is_terminal (0 : C) :=
(is_zero_zero C).is_terminal
/-- A zero object is in particular initial. -/
@[priority 10]
instance has_initial : has_initial C :=
has_initial_of_unique 0
/-- A zero object is in particular terminal. -/
@[priority 10]
instance has_terminal : has_terminal C :=
has_terminal_of_unique 0
/-- The (unique) isomorphism between any initial object and the zero object. -/
def zero_iso_is_initial {X : C} (t : is_initial X) : 0 ≅ X :=
zero_is_initial.unique_up_to_iso t
/-- The (unique) isomorphism between any terminal object and the zero object. -/
def zero_iso_is_terminal {X : C} (t : is_terminal X) : 0 ≅ X :=
zero_is_terminal.unique_up_to_iso t
/-- The (unique) isomorphism between the chosen initial object and the chosen zero object. -/
def zero_iso_initial [has_initial C] : 0 ≅ ⊥_ C :=
zero_is_initial.unique_up_to_iso initial_is_initial
/-- The (unique) isomorphism between the chosen terminal object and the chosen zero object. -/
def zero_iso_terminal [has_terminal C] : 0 ≅ ⊤_ C :=
zero_is_terminal.unique_up_to_iso terminal_is_terminal
@[priority 100]
instance has_strict_initial : initial_mono_class C :=
initial_mono_class.of_is_initial zero_is_initial (λ X, category_theory.mono _)
end has_zero_object
end limits
open category_theory.limits
open_locale zero_object
lemma functor.is_zero_iff [has_zero_object D] (F : C ⥤ D) :
is_zero F ↔ ∀ X, is_zero (F.obj X) :=
⟨λ hF X, hF.obj X, functor.is_zero _⟩
end category_theory
|
[STATEMENT]
lemma module_iff_vector_space: "module s \<longleftrightarrow> vector_space s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. module s = vector_space s
[PROOF STEP]
unfolding module_def vector_space_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (((\<forall>a x y. s a (x + y) = s a x + s a y) \<and> (\<forall>a b x. s (a + b) x = s a x + s b x)) \<and> (\<forall>a b x. s a (s b x) = s (a * b) x) \<and> (\<forall>x. s (1::'a) x = x)) = (((\<forall>a x y. s a (x + y) = s a x + s a y) \<and> (\<forall>a b x. s (a + b) x = s a x + s b x)) \<and> (\<forall>a b x. s a (s b x) = s (a * b) x) \<and> (\<forall>x. s (1::'a) x = x))
[PROOF STEP]
.. |
(*
- Identity pseudofunctor
From 'UniMath/Bicategories/PseudoFunctors/Examples/Identity.v'
*)
Require Import UniMath.Foundations.All.
Require Import UniMath.MoreFoundations.All.
Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.CategoryTheory.Core.Functors.
Require Import UniMath.Bicategories.Core.Bicat. Import Bicat.Notations.
Require Import Integers.Prebicategories.Invertible_2cells.
Require Import Integers.Prebicategories.PseudoFunctor.
Import PseudoFunctor.Notations.
Local Open Scope cat.
(*Local Open Scope mor_disp_scope.*)
Local Open Scope bicategory_scope.
Section IdentityPseudofunctor.
Variable (C : prebicat).
Definition id_pseudofunctor_data : pseudofunctor_data C C.
Proof.
use make_pseudofunctor_data.
- exact (λ a, a).
- exact (λ a b f, f).
- exact (λ a b f g θ, θ).
- exact (λ a, id2 (identity a)).
- exact (λ a b c f g, id2 (f · g)).
Defined.
Definition id_pseudofunctor_laws : pseudofunctor_laws id_pseudofunctor_data.
Proof.
repeat split.
- intros a b c f g h θ.
exact (id2_left _ @ !id2_right _).
- intros a b c f g h θ.
exact (id2_left _ @ !id2_right _).
- intros a b f. cbn.
rewrite id2_rwhisker.
rewrite !id2_left.
reflexivity.
- intros a b f ; cbn in *.
rewrite lwhisker_id2.
rewrite !id2_left.
reflexivity.
- intros a b c d f g h ; cbn in *.
rewrite lwhisker_id2, id2_rwhisker.
rewrite !id2_left, !id2_right.
reflexivity.
Qed.
Definition id_pseudofunctor : pseudofunctor C C.
Proof.
use make_pseudofunctor.
- exact id_pseudofunctor_data.
- exact id_pseudofunctor_laws.
- split ; cbn ; intros ; is_iso.
Defined.
End IdentityPseudofunctor.
|
[STATEMENT]
lemma order_mult_of: "finite (carrier R) \<Longrightarrow> order (mult_of R) = order R - 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (carrier R) \<Longrightarrow> order (mult_of R) = order R - 1
[PROOF STEP]
unfolding order_def carrier_mult_of
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (carrier R) \<Longrightarrow> card (carrier R - {\<zero>}) = card (carrier R) - 1
[PROOF STEP]
by (simp add: card.remove) |
Formal statement is: lemma islimpt_approachable: fixes x :: "'a::metric_space" shows "x islimpt S \<longleftrightarrow> (\<forall>e>0. \<exists>x'\<in>S. x' \<noteq> x \<and> dist x' x < e)" Informal statement is: A point $x$ is a limit point of a set $S$ if and only if for every $\epsilon > 0$, there exists a point $x' \in S$ such that $x' \neq x$ and $d(x', x) < \epsilon$. |
-- @@stderr --
dtrace: failed to compile script test/unittest/providers/err.D_PDESC_ZERO.wrongdec3.d: [D_PDESC_ZERO] line 25: probe description :::profiletick-1sec does not match any probes
|
-- -------------------------------------------------------------- [ Lens.idr ]
-- Description : Idris port of Control.Lens
-- Copyright : (c) Huw Campbell
-- --------------------------------------------------------------------- [ EOH ]
module Control.Lens.Const
import Data.Contravariant
||| Const Functor.
public export
data Const : (0 a : Type) -> (b : Type) -> Type where
MkConst: a -> Const a b
public export
getConst : Const a b -> a
getConst (MkConst x) = x
public export
Functor (Const a) where
map _ (MkConst x) = (MkConst x)
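-- Quick sanity check of the instance above (illustrative, not from the original file):
-- `map` never touches the stored value, so
--   getConst (map (+ 1) (MkConst 3)) = 3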
public export
Contravariant (Const a) where
contramap _ (MkConst x) = (MkConst x)
public export
implementation Monoid m => Applicative (Const m) where
pure _ = MkConst neutral
(MkConst f) <*> (MkConst v) = MkConst (f <+> v)
-- --------------------------------------------------------------------- [ EOF ]
|
[STATEMENT]
lemma index_one_alt_bl_not_exist:
assumes "\<Lambda> = 1" and " blv \<in># \<B>" and "p \<subseteq> blv" and "card p = 2"
shows" \<And> bl. bl \<in># remove1_mset blv \<B> \<Longrightarrow> \<not> (p \<subseteq> bl) "
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>bl. bl \<in># remove1_mset blv \<B> \<Longrightarrow> \<not> p \<subseteq> bl
[PROOF STEP]
using index_one_empty_rm_blv
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<Lambda> = 1; ?blv \<in># \<B>; ?p \<subseteq> ?blv; card ?p = 2\<rbrakk> \<Longrightarrow> filter_mset ((\<subseteq>) ?p) (remove1_mset ?blv \<B>) = {#}
goal (1 subgoal):
1. \<And>bl. bl \<in># remove1_mset blv \<B> \<Longrightarrow> \<not> p \<subseteq> bl
[PROOF STEP]
by (metis assms(1) assms(2) assms(3) assms(4) filter_mset_empty_conv) |
[STATEMENT]
lemma rpd_fv_regex: "s \<in> rpd test i r \<Longrightarrow> fv_regex fv s \<subseteq> fv_regex fv r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s \<in> rpd test i r \<Longrightarrow> fv_regex fv s \<subseteq> fv_regex fv r
[PROOF STEP]
by (induct r arbitrary: s) (auto simp: TimesR_def TimesL_def split: if_splits nat.splits)+ |
module Main
import Text.PrettyPrint.WL
%default total
myDoc : Doc
myDoc = fold (|//|) $ map text $ words "this is a long sentence."
myString : String
myString = toString 0 15 $ myDoc
main : IO ()
main = putStrLn myString
|
-- Andreas, 2015-07-20, record patterns
open import Common.Prelude
postulate A : Set
record R : Set where
field f : A
T : Bool → Set
T true = R
T false = A
test : ∀{b} → T b → A
test record{f = a} = a
-- Could succeed by some magic.
|
Formal statement is: lemma continuous_on_finite: fixes S :: "'a::t1_space set" shows "finite S \<Longrightarrow> continuous_on S f" Informal statement is: If $S$ is a finite set, then any function $f$ defined on $S$ is continuous. |
[STATEMENT]
lemma cis_inj:
assumes "-pi < \<alpha>" and "\<alpha> \<le> pi" and "-pi < \<alpha>'" and "\<alpha>' \<le> pi"
assumes "cis \<alpha> = cis \<alpha>'"
shows "\<alpha> = \<alpha>'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<alpha> = \<alpha>'
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
- pi < \<alpha>
\<alpha> \<le> pi
- pi < \<alpha>'
\<alpha>' \<le> pi
cis \<alpha> = cis \<alpha>'
goal (1 subgoal):
1. \<alpha> = \<alpha>'
[PROOF STEP]
by (metis cis_Arg_unique sgn_cis) |
lemma adjoint_linear: fixes f :: "'n::euclidean_space \<Rightarrow> 'm::euclidean_space" assumes lf: "linear f" shows "linear (adjoint f)" |
module Scheme.Core
import Control.Isomorphism
import Data.Vect
%default total
%access public export
-- TODO Integer <= Rational <= Real <= Complex
-- TODO Exact | Inexact
||| [S]cheme [Num]ber
data SNum : Type where
SNExactInt : Integer -> SNum
||| [S]ymbolic [Exp]ression
data SExp : Type where
||| Symbols are just identifiers or pointers
||| They evaluate to something if defined in current scope
||| Some are special forms that are evaluated under different rules
SESymbol : String -> SExp
||| Unit is an empty list
SEUnit : SExp
||| Cons is the basic building block and the only way to apply a function
SECons : SExp -> SExp -> SExp
||| Vector should be just an O(1) access List
||| Vectors are terminal
SEVect : Vect n SExp -> SExp
||| Booleans are terminal
SEBool : Bool -> SExp
||| Chars are terminal
SEChar : Char -> SExp
||| Strings are terminal
SEStr : String -> SExp
||| Numbers are terminal
SENum : SNum -> SExp
listify : SExp -> (List SExp, Maybe SExp)
listify SEUnit = ([], Nothing)
listify (SECons x y) = let (ls, md) = listify y
in (x :: ls, md)
listify x = ([], Just x)
unListify : (List SExp, Maybe SExp) -> SExp
unListify (xs, ms) = foldr SECons (maybe SEUnit id ms) xs
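-- Illustrative examples (not in the original file), following the two definitions above:
--   listify (SECons x SEUnit)      = ([x], Nothing)           -- a proper single-element list
--   listify (SECons x (SEStr "s")) = ([x], Just (SEStr "s"))  -- a dotted pair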
-- unListify : (List SExp, Maybe SExp) -> SExp
-- unListify ([] , Nothing) = SEUnit
-- unListify ([] , Just s ) = s
-- unListify (x :: xs, ms ) = SECons x $ assert_total $ unListify (xs, ms)
-- ||| A theorem about lists and dotted lists
-- lispIso : Iso SExp (List SExp, Maybe SExp)
-- lispIso = MkIso to from toFrom fromTo
-- where
-- to : SExp -> (List SExp, Maybe SExp)
-- to = listify
-- from : (List SExp, Maybe SExp) -> SExp
-- from = unListify
-- toFrom : (p : (List SExp, Maybe SExp)) -> to (from p) = p
-- toFrom ([] , Nothing) = Refl
-- toFrom ([] , Just s ) = ?toFrom_rhs_5
-- toFrom (x :: xs, Nothing) = ?toFrom_rhs_4
-- toFrom (x :: xs, Just s ) = ?toFrom_rhs_1
-- fromTo : (s : SExp) -> from (to s) = s
-- fromTo SEUnit = Refl
-- fromTo (SESymbol x) = Refl
-- fromTo (SEVect xs ) = Refl
-- fromTo (SEBool x ) = Refl
-- fromTo (SEChar x ) = Refl
-- fromTo (SEStr x ) = Refl
-- fromTo (SENum x ) = Refl
-- fromTo (SECons x y) = ?whasdasd
|
/-
Copyright (c) 2021 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn, Yury Kudryashov
-/
import order.symm_diff
import tactic.monotonicity.basic
/-!
# Implication and equivalence as operations on a boolean algebra
In this file we define `lattice.imp` (notation: `a ⇒ₒ b`) and `lattice.biimp` (notation: `a ⇔ₒ b`)
to be the implication and equivalence as operations on a boolean algebra. More precisely, we put
`a ⇒ₒ b = aᶜ ⊔ b` and `a ⇔ₒ b = (a ⇒ₒ b) ⊓ (b ⇒ₒ a)`. Equivalently, `a ⇒ₒ b = (a \ b)ᶜ` and
`a ⇔ₒ b = (a ∆ b)ᶜ`. For propositions these operations are equal to the usual implication and `iff`.
-/
variables {α β : Type*}
namespace lattice
/-- Implication as a binary operation on a boolean algebra. -/
def imp [has_compl α] [has_sup α] (a b : α) : α := aᶜ ⊔ b
infix ` ⇒ₒ `:65 := lattice.imp
/-- Equivalence as a binary operation on a boolean algebra. -/
def biimp [has_compl α] [has_sup α] [has_inf α] (a b : α) : α := (a ⇒ₒ b) ⊓ (b ⇒ₒ a)
infix ` ⇔ₒ `:60 := lattice.biimp
@[simp] lemma imp_eq_arrow (p q : Prop) : p ⇒ₒ q = (p → q) := propext imp_iff_not_or.symm
@[simp] lemma biimp_eq_iff (p q : Prop) : p ⇔ₒ q = (p ↔ q) := by simp [biimp, ← iff_def]
variables [boolean_algebra α] {a b c d : α}
@[simp] lemma compl_imp (a b : α) : (a ⇒ₒ b)ᶜ = a \ b := by simp [imp, sdiff_eq]
lemma compl_sdiff (a b : α) : (a \ b)ᶜ = a ⇒ₒ b := by rw [← compl_imp, compl_compl]
@[mono] lemma imp_mono (h₁ : a ≤ b) (h₂ : c ≤ d) : b ⇒ₒ c ≤ a ⇒ₒ d :=
sup_le_sup (compl_le_compl h₁) h₂
lemma inf_imp_eq (a b c : α) : a ⊓ (b ⇒ₒ c) = (a ⇒ₒ b) ⇒ₒ (a ⊓ c) :=
by unfold imp; simp [inf_sup_left]
@[simp] lemma imp_eq_top_iff : (a ⇒ₒ b = ⊤) ↔ a ≤ b :=
by rw [← compl_sdiff, compl_eq_top, sdiff_eq_bot_iff]
@[simp] lemma imp_eq_bot_iff : (a ⇒ₒ b = ⊥) ↔ (a = ⊤ ∧ b = ⊥) := by simp [imp]
@[simp] lemma imp_bot (a : α) : a ⇒ₒ ⊥ = aᶜ := sup_bot_eq
@[simp] lemma top_imp (a : α) : ⊤ ⇒ₒ a = a := by simp [imp]
@[simp] lemma bot_imp (a : α) : ⊥ ⇒ₒ a = ⊤ := imp_eq_top_iff.2 bot_le
@[simp] lemma imp_top (a : α) : a ⇒ₒ ⊤ = ⊤ := imp_eq_top_iff.2 le_top
@[simp] lemma imp_self (a : α) : a ⇒ₒ a = ⊤ := compl_sup_eq_top
@[simp] lemma compl_imp_compl (a b : α) : aᶜ ⇒ₒ bᶜ = b ⇒ₒ a := by simp [imp, sup_comm]
lemma imp_inf_le {α : Type*} [boolean_algebra α] (a b : α) : (a ⇒ₒ b) ⊓ a ≤ b :=
by { unfold imp, rw [inf_sup_right], simp }
lemma inf_imp_eq_imp_imp (a b c : α) : ((a ⊓ b) ⇒ₒ c) = (a ⇒ₒ (b ⇒ₒ c)) := by simp [imp, sup_assoc]
lemma le_imp_iff : a ≤ (b ⇒ₒ c) ↔ a ⊓ b ≤ c :=
by rw [imp, sup_comm, is_compl_compl.le_sup_right_iff_inf_left_le]
lemma biimp_mp (a b : α) : (a ⇔ₒ b) ≤ (a ⇒ₒ b) := inf_le_left
lemma biimp_mpr (a b : α) : (a ⇔ₒ b) ≤ (b ⇒ₒ a) := inf_le_right
lemma biimp_comm (a b : α) : (a ⇔ₒ b) = (b ⇔ₒ a) :=
by {unfold lattice.biimp, rw inf_comm}
@[simp] lemma biimp_eq_top_iff : a ⇔ₒ b = ⊤ ↔ a = b :=
by simp [biimp, ← le_antisymm_iff]
@[simp] lemma biimp_self (a : α) : a ⇔ₒ a = ⊤ := biimp_eq_top_iff.2 rfl
lemma biimp_symm : a ≤ (b ⇔ₒ c) ↔ a ≤ (c ⇔ₒ b) := by rw biimp_comm
lemma compl_symm_diff (a b : α) : (a ∆ b)ᶜ = a ⇔ₒ b :=
by simp only [biimp, imp, symm_diff, sdiff_eq, compl_sup, compl_inf, compl_compl]
lemma compl_biimp (a b : α) : (a ⇔ₒ b)ᶜ = a ∆ b := by rw [← compl_symm_diff, compl_compl]
@[simp] lemma compl_biimp_compl : aᶜ ⇔ₒ bᶜ = a ⇔ₒ b := by simp [biimp, inf_comm]
end lattice
|
import algebra.group
open array
variables {n : ℕ} {α : Type*} [group α] (a b c : array n α)
instance : has_mul (array n α) := ⟨array.map₂ (*)⟩
instance : has_one (array n α) := ⟨mk_array n 1⟩
instance : has_inv (array n α) := ⟨flip array.map has_inv.inv⟩
private lemma mul_assoc : a * b * c = a * (b * c) :=
begin
apply array.ext,
intros i,
apply mul_assoc,
end
private lemma one_mul : 1 * a = a :=
begin
apply array.ext,
intros i,
apply one_mul,
end
private lemma mul_one : a * 1 = a :=
begin
apply array.ext,
intros i,
apply mul_one,
end
private lemma mul_left_inv : a⁻¹ * a = 1 :=
begin
apply array.ext,
intros i,
apply mul_left_inv,
end
instance array_group : group (array n α) :=
{
mul := (*),
mul_assoc := mul_assoc,
one := 1,
one_mul := one_mul,
mul_one := mul_one,
inv := has_inv.inv,
mul_left_inv := mul_left_inv,
}
@[simp] lemma array.read_mul {a₁ a₂ : array n α} {i : fin n} : (a₁ * a₂).read i = a₁.read i * a₂.read i := rfl |
Formal statement is: lemma infnorm_eq_0: fixes x :: "'a::euclidean_space" shows "infnorm x = 0 \<longleftrightarrow> x = 0" Informal statement is: The infnorm of a vector is zero if and only if the vector is zero. |
-- @@stderr --
dtrace: failed to compile script test/unittest/printf/err.D_SYNTAX.badconv2.d: [D_SYNTAX] line 18: format conversion #1 name expected before end of format string
|
Formal statement is: lemma residue_lmul: assumes "open s" "z \<in> s" and f_holo: "f holomorphic_on s - {z}" shows "residue (\<lambda>z. c * (f z)) z= c * residue f z" Informal statement is: If $f$ is holomorphic on a punctured neighborhood of $z$, then the residue of $f$ at $z$ is equal to the residue of $cf$ at $z$. |
M-122 was initially assumed into the state highway system in 1929 as a connector between US 31 and Straits State Park. In 1936, US 2 was routed into St. Ignace and US 31 was scaled back to end in the Lower Peninsula in Mackinaw City. M-122 now provided a connection between US 2 and the new docks on the southeast side of the city. It existed in this capacity until 1957 when the Mackinac Bridge opened to traffic.
|
lemma translation_galois: fixes a :: "'a::ab_group_add" shows "T = ((\<lambda>x. a + x) ` S) \<longleftrightarrow> S = ((\<lambda>x. (- a) + x) ` T)" |
lemma isometry_subset_subspace: fixes S :: "'a::euclidean_space set" and T :: "'b::euclidean_space set" assumes S: "subspace S" and T: "subspace T" and d: "dim S \<le> dim T" obtains f where "linear f" "f ` S \<subseteq> T" "\<And>x. x \<in> S \<Longrightarrow> norm(f x) = norm x" |
`mu/BA` := proc(a,b)
 local xx,ca,cb,pa,pb;
 # one-line alternative (cf. `mu/EA` below):
 # apply_bilinear_assoc(`mu_aux0/BA`,`bar/BA`())(args);
if nargs > 2 then
return `mu/BA`(`mu/BA`(a,b),args[3..-1]);
fi;
if type(a,numeric) or type(b,numeric) then
return expand(a*b);
elif type(a,`+`) then
return map(`mu/BA`,a,b);
elif type(b,`+`) then
return map2(`mu/BA`,a,b);
fi;
if type(a,`*`) then
xx := selectremove(type,a,numeric);
ca := xx[1];
pa := xx[2];
else
ca := 1;
pa := a;
fi;
if type(b,`*`) then
xx := selectremove(type,b,numeric);
cb := xx[1];
pb := xx[2];
else
cb := 1;
pb := b;
fi;
if type(pa,specfunc(anything,`bar/BA`)) and
type(pb,specfunc(anything,`bar/BA`)) then
return expand(ca*cb*`mu_aux0/BA`(pa,pb));
else
return('`mu/BA`'(args))
fi;
end:
`mu/EA` := proc()
apply_linear_assoc(`mu0/EA`,`bar/EA`())(args);
end:
`mu/EC` := proc(a,b)
local xx,ca,cb,pa,pb,na,nb,ka,kb,la,lb,ia,ib;
if nargs > 2 then
return `mu/EC`(`mu/EC`(a,b),args[3..-1]);
fi;
if type(a,numeric) or type(b,numeric) then
return expand(a*b);
elif type(a,`+`) then
return map(`mu/EC`,a,b);
elif type(b,`+`) then
return map2(`mu/EC`,a,b);
fi;
if type(a,`*`) then
xx := selectremove(type,a,numeric);
ca := xx[1];
pa := xx[2];
else
ca := 1;
pa := a;
fi;
if type(b,`*`) then
xx := selectremove(type,b,numeric);
cb := xx[1];
pb := xx[2];
else
cb := 1;
pb := b;
fi;
if type(pa,specfunc(anything,`e/EC`)) and
type(pb,specfunc(anything,`e/EC`)) then
na,ka,ia := op(pa);
nb,kb,ib := op(pb);
if nb <> na then return FAIL; fi;
if modp(ka,2) = 1 and modp(kb,2) = 1 then
return 0;
fi;
la := floor(ka/2);
lb := floor(kb/2);
return ca * cb * binomial(la+lb,la) * `e/EC`(na,ka+kb,modp(ia+ib,na));
else
return('`mu/EC`'(args))
fi;
end:
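# Sanity check with concrete arguments (illustrative only, not from the original source):
#   `mu/EC`(`e/EC`(5,2,1),`e/EC`(5,4,2));
# Both exponents are even, so la = 1, lb = 2, and the product is
# binomial(3,1)*`e/EC`(5,6,3) = 3*`e/EC`(5,6,3).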
|
Formal statement is: lemma prime_elem_not_unit: "prime_elem p \<Longrightarrow> \<not>p dvd 1" Informal statement is: A prime element is not a unit. |
Formal statement is: lemma interior_subset: "interior S \<subseteq> S" Informal statement is: The interior of a set is a subset of the set. |
module Vprecal8
use Vmeshes
real(rprec), dimension(:), allocatable :: coh1, sih1
end module Vprecal8
|
SUBROUTINE obtain_field_line(zetacn, thetacn, iotac,
1 nsurf, alpha)
USE stel_kinds
USE normalize_data, ONLY: lasym_v ! 110909 RS: logical for asymmetric input (if True)
USE ballooning_data
USE general_dimensions
USE fmesh_quantities
IMPLICIT NONE
!-----------------------------------------------
! D u m m y A r g u m e n t s
!-----------------------------------------------
INTEGER, INTENT(IN) :: nsurf
REAL(rprec), INTENT(IN) :: zetacn, thetacn, iotac
REAL(rprec), INTENT(OUT) :: alpha
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER :: j, lj
REAL(rprec):: lambda0, ssine, arg, ccosi
!-----------------------------------------------
lambda0 = 0
fourier: DO j = 1, mnmax_v ! Fourier invert lambda at initial point (thetacn, zetacn)
arg = xm_v(j)*thetacn-xn_v(j)*zetacn
ssine = SIN(arg); ccosi = COS(arg)
lj = mnmax_v*(nsurf-1)+j
lambda0 = lambda0 + lmnsf(lj)*ssine
IF (lasym_v) lambda0 = lambda0 + lmncf(lj)*ccosi ! 110909 RS: Asymmetric input
ENDDO fourier
alpha = thetacn + lambda0 - iotac*zetacn ! obtain field line label value
END SUBROUTINE obtain_field_line
|
count : Nat -> Stream Nat
count n = n :: count (S n)
badCount : Nat -> Stream Nat
badCount n = n :: map S (badCount n)
data SP : Type -> Type -> Type where
Get : (a -> SP a b) -> SP a b
Put : b -> Inf (SP a b) -> SP a b
copy : SP a a
copy = Get (\x => Put x copy)
process : SP a b -> Stream a -> Stream b
process (Get f) (x :: xs) = process (f x) xs
process (Put b sp) xs = b :: process sp xs
badProcess : SP a b -> Stream a -> Stream b
badProcess (Get f) (x :: xs) = badProcess (f x) xs
badProcess (Put b sp) xs = badProcess sp xs
doubleInt : SP Nat Integer
doubleInt = Get (\x => Put (the Integer (cast x))
(Put (the Integer (cast x) * 2) doubleInt))
countStream : Nat -> Stream Nat
countStream x = x :: countStream (x + 1)
main : IO ()
main = printLn (take 10 (process doubleInt (countStream 1)))
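-- Hand-evaluation note (not in the original file): each consumed n makes doubleInt emit
-- n and then 2*n, so `main` should print [1, 2, 2, 4, 3, 6, 4, 8, 5, 10].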
|
module Toolkit.Data.List.View.PairWise
import Data.List
%default total
public export
data PairWise : List a -> Type where
Empty : PairWise Nil
One : (x:a) -> PairWise [x]
Two : (x,y : a) -> PairWise [x,y]
N : (x,y : a)
-> PairWise (y::xs)
-> PairWise (x::y::xs)
export
pairwise : (xs : List a) -> PairWise xs
pairwise [] = Empty
pairwise (x :: []) = One x
pairwise (x :: (y :: xs)) with (pairwise (y::xs))
pairwise (x :: (y :: [])) | (One y) = Two x y
pairwise (x :: (y :: [w])) | (Two y w) = N x y (Two y w)
pairwise (x :: (y :: (w :: xs))) | (N y w v) = N x y (N y w v)
unSafeToList : {xs : List a} -> PairWise xs -> Maybe (List (a,a))
unSafeToList Empty = Just Nil
unSafeToList (One x) = Nothing
unSafeToList (Two x y) = Just [(x,y)]
unSafeToList (N x y z)
= do rest <- unSafeToList z
pure (MkPair x y :: rest)
||| Returns a list of pairs if `xs` has even number of elements, Nothing if odd.
export
unSafePairUp : (xs : List a) -> Maybe (List (a,a))
unSafePairUp xs = (unSafeToList (pairwise xs))
-- [ EOF ]
|
#' gdeltr
#'
#' @name gdeltr
#' @docType package
NULL
|
[STATEMENT]
lemma count_list_append: "count_list (x\<cdot>y) a = count_list x a + count_list y a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. count_list (x \<cdot> y) a = count_list x a + count_list y a
[PROOF STEP]
by (induct x, auto) |